From e025e225d3489ed9bf91c159f166d07ae0af4d4b Mon Sep 17 00:00:00 2001 From: Karol Pasternak Date: Wed, 20 Mar 2019 16:25:13 -0700 Subject: [PATCH 01/22] implement spinnaker_pipeline_document data source --- spinnaker/api/pipeline.go | 126 +++++ spinnaker/datasource_pipeline.go | 2 - spinnaker/datasource_pipeline_document.go | 501 ++++++++++++++++++ .../datasource_pipeline_document_test.go | 157 ++++++ spinnaker/provider.go | 3 +- .../resource_pipeline_template_config.go | 42 +- 6 files changed, 798 insertions(+), 33 deletions(-) create mode 100644 spinnaker/datasource_pipeline_document.go create mode 100644 spinnaker/datasource_pipeline_document_test.go diff --git a/spinnaker/api/pipeline.go b/spinnaker/api/pipeline.go index 7e549fb..488a543 100644 --- a/spinnaker/api/pipeline.go +++ b/spinnaker/api/pipeline.go @@ -8,6 +8,104 @@ import ( gate "github.com/spinnaker/spin/cmd/gateclient" ) +type PipelineConfig struct { + Pipeline + ID string `json:"id,omitempty"` + Type string `json:"type,omitempty"` + Name string `json:"name"` + Application string `json:"application"` + Triggers []map[string]interface{} `json:"triggers,omitempty"` + ExpectedArtifacts []map[string]interface{} `json:"expectedArtifacts,omitempty"` + Notifications []map[string]interface{} `json:"notifications,omitempty"` + LastModifiedBy string `json:"lastModifiedBy"` + Config interface{} `json:"config,omitempty"` + UpdateTs string `json:"updateTs,omitempty"` +} + +type PipelineDocument struct { + Pipeline + AppConfig map[string]string `json:"appConfig,omitempty" mapstructure:"config"` +} + +type PipelineParameter struct { + Description string `json:"description,omitempty"` + Default string `json:"default,omitempty"` + Name string `json:"name"` + Required bool `json:"required"` + HasOptions bool `json:"hasOptions"` + Label string `json:"label,omitempty"` + Options []string `json:"options,omitempty"` +} + +type Pipeline struct { + Description string `json:"description,omitempty"` + ExecutionEngine string `json:"executionEngine,omitempty" mapstructure:"engine"` + Parallel *bool `json:"parallel,omitempty"` + LimitConcurrent *bool `json:"limitConcurrent,omitempty" mapstructure:"limit_concurrent"` + KeepWaitingPipelines *bool `json:"keepWaitingPipelines,omitempty" mapstructure:"wait"` + Stages []*Stage `json:"stages,omitempty" mapstructure:"stage"` + Parameters []*PipelineParameter `json:"parameterConfig,omitempty" mapstructure:"parameter"` +} + +type Stage struct { + Account string `json:"account,omitempty"` + Application string `json:"application,omitempty"` + CloudProvider string `json:"cloudProvider,omitempty" mapstructure:"cloud_provider"` + CloudProviderType string `json:"cloudProviderType,omitempty" mapstructure:"cloud_provider_type"` + Annotations map[string]string `json:"annotations,omitempty"` + Clusters string `json:"clusters,omitempty"` + CompleteOtherBranchesThenFail bool `json:"completeOtherBranchesThenFail,omitempty" mapstructure:"complete_other_branches_then_fail"` + ContinuePipeline bool `json:"continuePipeline,omitempty" mapstructure:"continue_pipeline"` + FailPipeline bool `json:"failPipeline,omitempty" mapstructure:"fail_pipeline"` + FailOnFailedExpressions bool `json:"failOnFailedExpressions,omitempty" mapstructure:"fail_on_failed_expression"` + Instructions string `json:"instructions,omitempty"` + JudgmentInputs []struct { + Value string `json:"value"` + } `json:"judgmentInputs,omitempty"` + StageEnabled struct { + Expression string `json:"expression,omitempty"` + Type string `json:"type,omitempty"` + } 
`json:"stageEnabled,omitempty"` + Pipeline string `json:"pipeline,omitempty"` + PipelineParameters map[string]string `json:"pipelineParameters,omitempty" mapstructure:"pipeline_parameters"` + Variables []struct { + Key string `json:"key"` + Value string `json:"value"` + } `json:"variables,omitempty" mapstructure:"-"` + Containers []struct { + Args []string `json:"args,omitempty"` + Command []string `json:"command,omitempty"` + EnvVars []struct { + Name string `json:"name"` + Value string `json:"value"` + } `json:"envVars,omitempty"` + ImageDescription struct { + Account string `json:"account,omitempty"` + ImageID string `json:"imageId,omitempty" mapstructure:"id"` + Registry string `json:"registry,omitempty"` + Repository string `json:"repository,omitempty"` + Tag string `json:"tag,omitempty"` + } `json:"imageDescription,omitempty" mapstructure:"image"` + ImagePullPolicy string `json:"imagePullPolicy,omitempty" mapstructure:"image_pull_policy"` + Name string `json:"name,omitempty"` + Ports []struct { + ContainerPort int `json:"containerPort,omitempty" mapstructure:"container"` + Name string `json:"name,omitempty"` + Protocol string `json:"protocol,omitempty"` + } `json:"ports,omitempty"` + } `json:"containers,omitempty" mapstructure:"container"` + DeferredInitialization *bool `json:"deferredInitialization,omitempty" mapstructure:"deferred_initialization"` + DNSPolicy string `json:"dnsPolicy,omitempty" mapstructure:"dns_policy"` + Name string `json:"name,omitempty"` + Namespace string `json:"namespace,omitempty"` + RefID string `json:"refId,omitempty" mapstructure:"ref_id"` + RequisiteStageRefIds []interface{} `json:"requisiteStageRefIds,omitempty" mapstructure:"requisite_stage_refids"` + Type string `json:"type,omitempty"` + StatusUrlResolution string `json:"statusUrlResolution,omitempty" mapstructure:"status_url_resolution"` + WaitTime int `json:"waitTime,omitempty" mapstructure:"wait_time"` + WaitForCompletion bool `json:"waitForCompletion,omitempty" mapstructure:"wait_for_completion"` +} + func CreatePipeline(client *gate.GatewayClient, pipeline interface{}) error { resp, err := client.PipelineControllerApi.SavePipelineUsingPOST(client.Context, pipeline) @@ -81,3 +179,31 @@ func DeletePipeline(client *gate.GatewayClient, applicationName, pipelineName st return nil } + +func (oldDoc *PipelineDocument) Merge(newDoc *PipelineDocument) { + // Update some fields of oldDoc to match newDoc now + if newDoc.AppConfig != nil { + oldDoc.AppConfig = newDoc.AppConfig + } + if newDoc.Description != "" { + oldDoc.Description = newDoc.Description + } + if newDoc.ExecutionEngine != "" { + oldDoc.ExecutionEngine = newDoc.ExecutionEngine + } + if newDoc.Parallel != nil { + oldDoc.Parallel = newDoc.Parallel + } + if newDoc.LimitConcurrent != nil { + oldDoc.LimitConcurrent = newDoc.LimitConcurrent + } + if newDoc.KeepWaitingPipelines != nil { + oldDoc.KeepWaitingPipelines = newDoc.KeepWaitingPipelines + } + if newDoc.Parameters != nil { + oldDoc.Parameters = append(oldDoc.Parameters, newDoc.Parameters...) + } + if newDoc.Stages != nil { + oldDoc.Stages = append(oldDoc.Stages, newDoc.Stages...) 
+ } +} diff --git a/spinnaker/datasource_pipeline.go b/spinnaker/datasource_pipeline.go index 2448737..84d164c 100644 --- a/spinnaker/datasource_pipeline.go +++ b/spinnaker/datasource_pipeline.go @@ -9,12 +9,10 @@ func datasourcePipeline() *schema.Resource { Schema: map[string]*schema.Schema{ "application": { Type: schema.TypeString, - ForceNew: true, Required: true, }, "name": { Type: schema.TypeString, - ForceNew: true, Required: true, }, "pipeline": { diff --git a/spinnaker/datasource_pipeline_document.go b/spinnaker/datasource_pipeline_document.go new file mode 100644 index 0000000..f3ffcf0 --- /dev/null +++ b/spinnaker/datasource_pipeline_document.go @@ -0,0 +1,501 @@ +package spinnaker + +import ( + "encoding/json" + "sort" + "strconv" + + "github.com/armory-io/terraform-provider-spinnaker/spinnaker/api" + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/schema" + "github.com/mitchellh/mapstructure" +) + +func datasourcePipelineDocument() *schema.Resource { + return &schema.Resource{ + Read: datasourcePipelineDocumentRead, + Schema: map[string]*schema.Schema{ + "json": { + Type: schema.TypeString, + Computed: true, + }, + "source_json": { + Type: schema.TypeString, + Optional: true, + }, + "override_json": { + Type: schema.TypeString, + Optional: true, + }, + "config": { + Type: schema.TypeMap, + Optional: true, + }, + "description": { + Type: schema.TypeString, + Optional: true, + }, + "engine": { + Type: schema.TypeString, + Optional: true, + }, + "parallel": { + Type: schema.TypeBool, + Optional: true, + }, + "limit_concurrent": { + Type: schema.TypeBool, + Optional: true, + }, + "wait": { + Type: schema.TypeBool, + Optional: true, + }, + "parameter": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "description": { + Type: schema.TypeString, + Optional: true, + }, + "default": { + Type: schema.TypeString, + Optional: true, + }, + "name": { + Type: schema.TypeString, + Required: true, + }, + "required": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "label": { + Type: schema.TypeString, + Optional: true, + }, + "options": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "stage": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "account": { + Type: schema.TypeString, + Optional: true, + }, + "cloud_provider": { + Type: schema.TypeString, + Optional: true, + }, + "cloud_provider_type": { + Type: schema.TypeString, + Optional: true, + }, + "annotations": { + Type: schema.TypeMap, + Optional: true, + }, + "clusters": { + Type: schema.TypeString, + Optional: true, + }, + "complete_other_branches_then_fail": { + Type: schema.TypeBool, + Optional: true, + }, + "continue_pipeline": { + Type: schema.TypeBool, + Optional: true, + }, + "fail_pipeline": { + Type: schema.TypeBool, + Optional: true, + }, + "fail_on_failed_expression": { + Type: schema.TypeBool, + Optional: true, + }, + "pipeline": { + Type: schema.TypeString, + Optional: true, + }, + "pipeline_parameters": { + Type: schema.TypeMap, + Optional: true, + }, + "application": { + Type: schema.TypeString, + Optional: true, + }, + "variables": { + Type: schema.TypeMap, + Optional: true, + }, + "container": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "args": { + Type: schema.TypeList, + Optional: true, + Elem: 
&schema.Schema{ + Type: schema.TypeString, + }, + }, + "command": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "env": { + Type: schema.TypeMap, + Optional: true, + }, + "image": { + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "account": { + Type: schema.TypeString, + Optional: true, + }, + "id": { + Type: schema.TypeString, + Optional: true, + }, + "registry": { + Type: schema.TypeString, + Optional: true, + }, + "repository": { + Type: schema.TypeString, + Optional: true, + }, + "tag": { + Type: schema.TypeString, + Optional: true, + Default: "latest", + }, + }, + }, + }, + "image_pull_policy": { + Type: schema.TypeString, + Optional: true, + }, + "name": { + Type: schema.TypeString, + Optional: true, + }, + "ports": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "container": { + Type: schema.TypeInt, + Optional: true, + }, + "name": { + Type: schema.TypeString, + Optional: true, + }, + "protocol": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + }, + }, + }, + "deferred_initialization": { + Type: schema.TypeBool, + Optional: true, + }, + "dns_policy": { + Type: schema.TypeString, + Optional: true, + }, + "name": { + Type: schema.TypeString, + Optional: true, + }, + "namespace": { + Type: schema.TypeString, + Optional: true, + }, + "ref_id": { + Type: schema.TypeString, + Optional: true, + }, + "type": { + Type: schema.TypeString, + Optional: true, + }, + "wait_for_completion": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + "instructions": { + Type: schema.TypeString, + Optional: true, + }, + "judgment_inputs": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "requisite_stage_refids": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "wait_time": { + Type: schema.TypeInt, + Optional: true, + }, + "status_url_resolution": { + Type: schema.TypeString, + Optional: true, + }, + "stage_enabled": { + Type: schema.TypeMap, + Optional: true, + }, + }, + }, + }, + }, + } +} + +func datasourcePipelineDocumentRead(d *schema.ResourceData, meta interface{}) error { + spDoc := &api.PipelineDocument{} + + mergeDoc := &api.PipelineDocument{} + if sourceJSON, hasSourceJSON := d.GetOk("source_json"); hasSourceJSON { + if err := json.Unmarshal([]byte(sourceJSON.(string)), mergeDoc); err != nil { + return err + } + } + + if appConfig, ok := d.GetOk("config"); ok { + spDoc.AppConfig = appConfig.(map[string]string) + } + + if description, ok := d.GetOk("description"); ok { + spDoc.Description = description.(string) + } + + if executionEngine, ok := d.GetOk("engine"); ok { + spDoc.ExecutionEngine = executionEngine.(string) + } + + if parallel, ok := d.GetOkExists("parallel"); ok { + spDoc.Parallel = Bool(parallel.(bool)) + } + if limitConcurrent, ok := d.GetOkExists("limit_concurrent"); ok { + spDoc.LimitConcurrent = Bool(limitConcurrent.(bool)) + } + if keepWaiting, ok := d.GetOkExists("wait"); ok { + spDoc.KeepWaitingPipelines = Bool(keepWaiting.(bool)) + } + + // decouple parameters + if parameters, ok := d.GetOk("parameter"); ok { + spDoc.Parameters = parametersDecodeDocument(parameters) + } + + // decouple stages + if stages, ok := d.GetOk("stage"); ok { + stgs, err := stageDecodeDocument(stages) + if err != nil { + return err + } + spDoc.Stages = stgs + } + + if 
overrideJSON, hasOverrideJSON := d.GetOk("override_json"); hasOverrideJSON {
+		overrideDoc := &api.PipelineDocument{}
+		if err := json.Unmarshal([]byte(overrideJSON.(string)), overrideDoc); err != nil {
+			return err
+		}
+		mergeDoc.Merge(overrideDoc)
+	}
+
+	// Perform the overrides. If no override/source JSON was provided, this is a no-op.
+	mergeDoc.Merge(spDoc)
+	render, err := json.Marshal(&mergeDoc)
+	if err != nil {
+		return err
+	}
+
+	jsonDocument := string(render)
+	d.SetId(strconv.Itoa(hashcode.String(jsonDocument)))
+	d.Set("json", jsonDocument)
+
+	return nil
+}
+
+// parametersDecodeDocument iterates over each parameter. The schema for the parameters
+// is set to TypeSet, which means that order does not matter.
+// The "hasOptions" field is inferred from whether "options" is populated.
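+// For example, a parameter named "environment" that sets two entries in
+// "options" is rendered under "parameterConfig" with "hasOptions" set to true.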
+func parametersDecodeDocument(parameters interface{}) []*api.PipelineParameter {
+	var selParams = parameters.(*schema.Set).List()
+	params := make([]*api.PipelineParameter, len(selParams))
+
+	for i, param := range selParams {
+		fparam := param.(map[string]interface{})
+		pm := &api.PipelineParameter{
+			Description: fparam["description"].(string),
+			Default:     fparam["default"].(string),
+			Name:        fparam["name"].(string),
+			Required:    fparam["required"].(bool),
+			Label:       fparam["label"].(string),
+		}
+
+		if opts := fparam["options"].([]interface{}); len(opts) > 0 {
+			pm.HasOptions = true
+			for _, opt := range opts {
+				pm.Options = append(pm.Options, opt.(string))
+			}
+		}
+		params[i] = pm
+	}
+	return params
+}
+
+// stageDecodeDocument iterates over the stages in the defined order and maps
+// all the fields into the expected pipeline JSON format.
+func stageDecodeDocument(field interface{}) ([]*api.Stage, error) {
+	var stages = field.([]interface{})
+	stgs := make([]*api.Stage, len(stages))
+
+	for i, stg := range stages {
+		stageField := stg.(map[string]interface{})
+		sg := &api.Stage{}
+
+		err := mapstructure.Decode(stageField, &sg)
+		if err != nil {
+			return nil, err
+		}
+
+		// extract env variables, if any
+		extractEnvs(stageField["container"].([]interface{}), sg)
+
+		// extract variables
+		if vars, ok := stageField["variables"]; ok {
+			for key, value := range vars.(map[string]interface{}) {
+				sg.Variables = append(sg.Variables, struct {
+					Key   string `json:"key"`
+					Value string `json:"value"`
+				}{
+					Key:   key,
+					Value: value.(string),
+				})
+			}
+		}
+
+		// if manual judgment inputs are set, map the plain string values into the expected structs
+		if judgmentInputs, ok := stageField["judgment_inputs"]; ok {
+			for _, inpt := range judgmentInputs.([]interface{}) {
+				sg.JudgmentInputs = append(sg.JudgmentInputs, struct {
+					Value string `json:"value"`
+				}{
+					Value: inpt.(string),
+				})
+			}
+		}
+
+		// stage_enabled is populated in the pipeline's json only if
+		// the Execution Optional within the job definition is set to "Conditional on Expression".
+		// The only type accepted by spinnaker at this moment is "expression".
+		// Instead of accepting:
+		// {
+		//    expression: "",
+		//    type: "expression"
+		// }
+		// accept:
+		// expression: "VALUE"
+		// and assume the type
+		if stageEnabled, ok := stageField["stage_enabled"]; ok {
+			// most likely this will be iterated only once
+			for key, value := range stageEnabled.(map[string]interface{}) {
+				sg.StageEnabled.Expression = value.(string)
+				sg.StageEnabled.Type = key
+			}
+		}
+
+		stgs[i] = sg
+	}
+	return stgs, nil
+}
+
+// extractEnvs transforms document env variables into the pipeline's json.
+// Since spinnaker internally sorts all the slices,
+// we need to do the same before passing to pipelineDiffSuppressFunc,
+// otherwise the two JSON documents will always differ.
+//
+// Spinnaker expects envVars to be in the format:
+// {
+//    Name: "name",
+//    Value: "value"
+// }
+// Although, for simplicity, we accept it in the form
+// name=value, which makes the data source document shorter and more readable.
+func extractEnvs(fields []interface{}, sg *api.Stage) {
+	for i, elem := range fields {
+		if env, ok := elem.(map[string]interface{})["env"]; ok {
+			var envKeys []string
+			envs := env.(map[string]interface{})
+
+			for k := range envs {
+				envKeys = append(envKeys, k)
+			}
+			sort.Strings(envKeys)
+
+			for _, k := range envKeys {
+				sg.Containers[i].EnvVars = append(sg.Containers[i].EnvVars, struct {
+					Name  string `json:"name"`
+					Value string `json:"value"`
+				}{
+					Name:  k,
+					Value: envs[k].(string),
+				})
+			}
+		}
+	}
+}
+
+// Bool (helper func) returns a pointer to the given bool value.
+// The primary reason for using it is to set some json fields
+// only if the bool has been explicitly set;
+// the `omitempty` tag is only applied when *bool == nil.
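+// For example, spDoc.Parallel = Bool(true) renders "parallel": true, while
+// leaving the field nil omits the key from the document entirely.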
+func Bool(v bool) *bool {
+	return &v
+}
diff --git a/spinnaker/datasource_pipeline_document_test.go b/spinnaker/datasource_pipeline_document_test.go
new file mode 100644
index 0000000..889d161
--- /dev/null
+++ b/spinnaker/datasource_pipeline_document_test.go
@@ -0,0 +1,157 @@
+package spinnaker
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+	"testing"
+
+	"github.com/armory-io/terraform-provider-spinnaker/spinnaker/api"
+	"github.com/hashicorp/terraform/helper/schema"
+)
+
+var tfToGo = map[string]string{
+	"TypeBool":   "bool",
+	"TypeInt":    "int",
+	"TypeFloat":  "float64",
+	"TypeString": "string",
+	"TypeList":   "slice",
+	"TypeMap":    "map",
+	"TypeSet":    "slice",
+}
+
+func TestDocumentSchemaMatchesStruct(t *testing.T) {
+	schemas := make(map[string]string)
+	GetFieldsFromSchema(".pipeline", datasourcePipelineDocument(), schemas)
+
+	tags := make(map[string]string)
+	GetFieldTagsFromStruct("", api.PipelineDocument{}, "mapstructure", tags)
+
+	// some of the fields have a different format. The reason for that is to simplify
+	// the terraform definition of the resource. Because of that, schema.Resource does not match
+	// the json format 1:1. There is a transformation performed in
+	// parametersDecodeDocument() and stageDecodeDocument(). Other fields are just computed
+	// and not present in either the struct or the Schema.
+	skipFields := []string{
+		".pipeline", ".pipeline.json", ".pipeline.parameter.hasoptions", ".pipeline.stage.container.env",
+		".pipeline.stage.judgment_inputs", ".pipeline.stage.container.image", ".pipeline.stage.stage_enabled",
+		".pipeline.stage.variables", ".pipeline.stage.container.envvars", "pipeline.stage.container.envvars.name",
+		".pipeline.stage.container.envvars.value", ".pipeline.stage.container.image", ".pipeline.stage.judgmentinputs",
+		".pipeline.stage.judgmentinputs.value", ".pipeline.stage.stageenabled", ".pipeline.stage.stageenabled.expression",
+		".pipeline.stage.stageenabled.type", ".pipeline.stage.variables", ".pipeline.stage.variables.key",
+		".pipeline.stage.variables.value", ".pipeline.source_json", ".pipeline.override_json", ".pipeline.stage.container.envvars.name",
+		".pipeline.limit_concurrent", ".pipeline.parallel", ".pipeline.stage.deferred_initialization", ".pipeline.wait",
+	}
+
+	// transform some of the fields to make the comparison more accurate.
+	schemas[".config"] = schemas[".pipeline.config"]
+	delete(schemas, ".pipeline.config")
+
+	// some of the fields have different types. In the struct representation
+	// they will most likely be of `struct` type, in the schema either a map or a slice
+	assertEqual(t, schemas[".pipeline.stage.container.env"], "map")
+	assertEqual(t, schemas[".pipeline.stage.judgment_inputs"], "slice")
+	assertEqual(t, schemas[".pipeline.stage.container.image"], "map")
+	assertEqual(t, schemas[".pipeline.stage.stage_enabled"], "map")
+	assertEqual(t, schemas[".pipeline.stage.variables"], "map")
+
+	assertEqual(t, tags[".pipeline.stage.container.envvars"], "slice")
+	assertEqual(t, tags[".pipeline.stage.container.envvars.name"], "string")
+	assertEqual(t, tags[".pipeline.stage.container.envvars.value"], "string")
+	assertEqual(t, tags[".pipeline.stage.container.image"], "struct")
+	assertEqual(t, tags[".pipeline.stage.judgmentinputs"], "slice")
+	assertEqual(t, tags[".pipeline.stage.judgmentinputs.value"], "string")
+	assertEqual(t, tags[".pipeline.stage.stageenabled"], "struct")
+	assertEqual(t, tags[".pipeline.stage.stageenabled.expression"], "string")
+	assertEqual(t, tags[".pipeline.stage.stageenabled.type"], "string")
+	assertEqual(t, tags[".pipeline.stage.variables"], "slice")
+	assertEqual(t, tags[".pipeline.stage.variables.key"], "string")
+	assertEqual(t, tags[".pipeline.stage.variables.value"], "string")
+	assertEqual(t, tags[".pipeline.limit_concurrent"], "ptr")
+	assertEqual(t, tags[".pipeline.parallel"], "ptr")
+	assertEqual(t, tags[".pipeline.stage.deferred_initialization"], "ptr")
+	assertEqual(t, tags[".pipeline.wait"], "ptr")
+	// cleanup different values and make assertion
+	for _, skip := range skipFields {
+		delete(schemas, skip)
+		delete(tags, skip)
+	}
+
+	// Final assertion
+	if !reflect.DeepEqual(tags, schemas) {
+		t.Fatal("PipelineDocument struct and data_source_spinnaker_pipeline_document do not match!")
+	}
+}
+
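+// GetFieldsFromSchema walks a schema.Resource recursively and records every
+// attribute as a dotted path, e.g. the stage attribute "name" is stored as
+// ".pipeline.stage.name" with the value "string" (per the tfToGo mapping).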
+func GetFieldsFromSchema(prefix string, resource *schema.Resource, schemas map[string]string) {
+	for name, value := range resource.Schema {
+		key := fmt.Sprintf("%s.%s", prefix, name)
+
+		switch value.Type {
+		case schema.TypeList, schema.TypeSet, schema.TypeMap:
+			if elem, ok := value.Elem.(*schema.Resource); ok {
+				schemas[key] = tfToGo[value.Type.String()]
+				GetFieldsFromSchema(key, elem, schemas)
+			} else {
+				schemas[key] = tfToGo[value.Type.String()]
+			}
+		default:
+			schemas[key] = tfToGo[value.Type.String()]
+		}
+	}
+}
+
+// GetFieldTagsFromStruct extracts the given tag from the given source struct
+// and builds a map
+func GetFieldTagsFromStruct(prefix string, source interface{}, tagName string, tags map[string]string) {
+	sourceType := reflect.TypeOf(source)
+	sourceValue := reflect.ValueOf(source)
+
+	if sourceValue.Type().Kind() == reflect.Struct {
+		for i := 0; i < sourceType.NumField(); i++ {
+			field := sourceType.Field(i)
+
+			var key string
+			if tag := field.Tag.Get(tagName); tag != "" {
+				// Some values are skipped by mapstructure since they have a different type in schema.Resource
+				// and in the struct. Most likely those are embedded structs with Key/Value fields.
+				// The reason for keeping it that way is to simplify the terraform definition and at the same time
+				// keep compatibility with the json output generated for spinnaker.
+				// The affected fields are tagged with `mapstructure:"-"`.
+				if tag == "-" {
+					tag = strings.ToLower(field.Name)
+				}
+
+				key = fmt.Sprintf("%s.%s", prefix, tag)
+				tags[key] = field.Type.Kind().String()
+			} else {
+				key = fmt.Sprintf("%s.%s", prefix, strings.ToLower(field.Name))
+				tags[key] = field.Type.Kind().String()
+			}
+
+			switch field.Type.Kind() {
+			case reflect.Struct:
+				GetFieldTagsFromStruct(key, sourceValue.Field(i).Interface(), tagName, tags)
+			case reflect.Slice:
+				var elem reflect.Type
+
+				kind := sourceType.Field(i).Type.Elem().Kind()
+				if kind == reflect.Ptr {
+					elem = sourceType.Field(i).Type.Elem().Elem()
+				} else if kind == reflect.Interface {
+					continue
+				} else {
+					elem = sourceType.Field(i).Type.Elem()
+				}
+				indirect := reflect.Indirect(reflect.New(elem))
+				GetFieldTagsFromStruct(key, indirect.Interface(), tagName, tags)
+			}
+		}
+	}
+}
+
+func assertEqual(t *testing.T, a interface{}, b interface{}) {
+	if a != b {
+		t.Fatalf("%s != %s", a, b)
+	}
+}
diff --git a/spinnaker/provider.go b/spinnaker/provider.go
index 7821a8a..9705f3b 100644
--- a/spinnaker/provider.go
+++ b/spinnaker/provider.go
@@ -41,7 +41,8 @@ func Provider() *schema.Provider {
 			"spinnaker_pipeline_template_config": resourcePipelineTemplateConfig(),
 		},
 		DataSourcesMap: map[string]*schema.Resource{
-			"spinnaker_pipeline": datasourcePipeline(),
+			"spinnaker_pipeline":          datasourcePipeline(),
+			"spinnaker_pipeline_document": datasourcePipelineDocument(),
 		},
 		ConfigureFunc: providerConfigureFunc,
 	}
diff --git a/spinnaker/resource_pipeline_template_config.go b/spinnaker/resource_pipeline_template_config.go
index 8eda962..b00419b 100644
--- a/spinnaker/resource_pipeline_template_config.go
+++ b/spinnaker/resource_pipeline_template_config.go
@@ -11,26 +11,6 @@ import (
 	"github.com/hashicorp/terraform/helper/schema"
 )
 
-type PipelineConfig struct {
-	ID                   string                   `json:"id,omitempty"`
-	Type                 string                   `json:"type,omitempty"`
-	Name                 string                   `json:"name"`
-	Application          string                   `json:"application"`
-	Description          string                   `json:"description,omitempty"`
-	ExecutionEngine      string                   `json:"executionEngine,omitempty"`
-	Parallel             bool                     `json:"parallel"`
-	LimitConcurrent      bool                     `json:"limitConcurrent"`
-	KeepWaitingPipelines bool                     `json:"keepWaitingPipelines"`
-	Stages               []map[string]interface{} `json:"stages,omitempty"`
-	Triggers             []map[string]interface{} `json:"triggers,omitempty"`
-	ExpectedArtifacts    []map[string]interface{} `json:"expectedArtifacts,omitempty"`
-	Parameters           []map[string]interface{} `json:"parameterConfig,omitempty"`
-	Notifications        []map[string]interface{} `json:"notifications,omitempty"`
-	LastModifiedBy       string                   `json:"lastModifiedBy"`
-	Config               interface{}              `json:"config,omitempty"`
-	UpdateTs             string                   `json:"updateTs"`
-}
-
 func resourcePipelineTemplateConfig() *schema.Resource {
 	return &schema.Resource{
 		Schema: map[string]*schema.Schema{
@@ -98,7 +78,7 @@ func resourcePipelineTemplateConfigRead(data *schema.ResourceData, meta interfac
 	application := data.Get("application").(string)
 	name := data.Get("name").(string)
 
-	p := PipelineConfig{}
+	p := api.PipelineConfig{}
 	if _, err := api.GetPipeline(client, application, name, &p); err != nil {
 		if err.Error() == api.ErrCodeNoSuchEntityException {
 			data.SetId("")
@@ -174,7 +154,7 @@ func resourcePipelineTemplateConfigExists(data *schema.ResourceData, meta interf
 	return false, nil
 }
 
-func buildConfig(data 
*schema.ResourceData) (*PipelineConfig, error) { +func buildConfig(data *schema.ResourceData) (*api.PipelineConfig, error) { config := data.Get("pipeline_config").(string) d, err := yaml.YAMLToJSON([]byte(config)) @@ -203,14 +183,16 @@ func buildConfig(data *schema.ResourceData) (*PipelineConfig, error) { return nil, fmt.Errorf("application not set in pipeline configuration") } - pConfig := &PipelineConfig{ - Name: name, - Application: application, - Type: "templatedPipeline", - Parallel: data.Get("parallel").(bool), - LimitConcurrent: data.Get("limit_concurrent").(bool), - KeepWaitingPipelines: data.Get("keep_waiting").(bool), - Config: jsonContent, + pConfig := &api.PipelineConfig{ + Name: name, + Application: application, + Type: "templatedPipeline", + Pipeline: api.Pipeline{ + Parallel: Bool(data.Get("parallel").(bool)), + LimitConcurrent: Bool(data.Get("limit_concurrent").(bool)), + KeepWaitingPipelines: Bool(data.Get("keep_waiting").(bool)), + }, + Config: jsonContent, } if c, ok := jsonContent["configuration"].(map[string]interface{}); ok { From 87dee8fe43047689e79d3ee08631b1f39116c078 Mon Sep 17 00:00:00 2001 From: Karol Pasternak Date: Mon, 1 Apr 2019 15:14:17 -0700 Subject: [PATCH 02/22] add preconditions to stages --- spinnaker/api/pipeline.go | 15 ++++- spinnaker/datasource_pipeline_document.go | 55 +++++++++++++++++++ .../datasource_pipeline_document_test.go | 10 +++- 3 files changed, 78 insertions(+), 2 deletions(-) diff --git a/spinnaker/api/pipeline.go b/spinnaker/api/pipeline.go index 488a543..9a8d4f9 100644 --- a/spinnaker/api/pipeline.go +++ b/spinnaker/api/pipeline.go @@ -94,6 +94,19 @@ type Stage struct { Protocol string `json:"protocol,omitempty"` } `json:"ports,omitempty"` } `json:"containers,omitempty" mapstructure:"container"` + Preconditions []struct { + CloudProvider string `json:"cloudProvider,omitempty" mapstructure:"cloud_provider"` + Context struct { + Cluster string `json:"cluster,omitempty"` + Comparison string `json:"comparison,omitempty"` + Credentials string `json:"credentials,omitempty"` + Expected int `json:"expected,omitempty"` + Regions []string `json:"regions,omitempty"` + Expression string `json:"expression,omitempty"` + } `json:"context,omitempty"` + FailPipeline bool `json:"failPipeline" mapstructure:"fail_pipeline"` + Type string `json:"type"` + } `json:"preconditions,omitempty" mapstructure:"precondition"` DeferredInitialization *bool `json:"deferredInitialization,omitempty" mapstructure:"deferred_initialization"` DNSPolicy string `json:"dnsPolicy,omitempty" mapstructure:"dns_policy"` Name string `json:"name,omitempty"` @@ -101,7 +114,7 @@ type Stage struct { RefID string `json:"refId,omitempty" mapstructure:"ref_id"` RequisiteStageRefIds []interface{} `json:"requisiteStageRefIds,omitempty" mapstructure:"requisite_stage_refids"` Type string `json:"type,omitempty"` - StatusUrlResolution string `json:"statusUrlResolution,omitempty" mapstructure:"status_url_resolution"` + StatusURLResolution string `json:"statusUrlResolution,omitempty" mapstructure:"status_url_resolution"` WaitTime int `json:"waitTime,omitempty" mapstructure:"wait_time"` WaitForCompletion bool `json:"waitForCompletion,omitempty" mapstructure:"wait_for_completion"` } diff --git a/spinnaker/datasource_pipeline_document.go b/spinnaker/datasource_pipeline_document.go index f3ffcf0..3ec24ad 100644 --- a/spinnaker/datasource_pipeline_document.go +++ b/spinnaker/datasource_pipeline_document.go @@ -144,6 +144,61 @@ func datasourcePipelineDocument() *schema.Resource { Type: schema.TypeMap, 
 							Optional: true,
 						},
+						"precondition": {
+							Type:     schema.TypeList,
+							Optional: true,
+							Elem: &schema.Resource{
+								Schema: map[string]*schema.Schema{
+									"cloud_provider": {
+										Type:     schema.TypeString,
+										Optional: true,
+									},
+									"fail_pipeline": {
+										Type:     schema.TypeBool,
+										Optional: true,
+									},
+									"type": {
+										Type:     schema.TypeString,
+										Optional: true,
+									},
+									"context": {
+										Type:     schema.TypeMap,
+										Optional: true,
+										Elem: &schema.Resource{
+											Schema: map[string]*schema.Schema{
+												"cluster": {
+													Type:     schema.TypeString,
+													Optional: true,
+												},
+												"comparison": {
+													Type:     schema.TypeString,
+													Optional: true,
+												},
+												"credentials": {
+													Type:     schema.TypeString,
+													Optional: true,
+												},
+												"expression": {
+													Type:     schema.TypeString,
+													Optional: true,
+												},
+												"expected": {
+													Type:     schema.TypeInt,
+													Optional: true,
+												},
+												"regions": {
+													Type:     schema.TypeList,
+													Optional: true,
+													Elem: &schema.Schema{
+														Type: schema.TypeString,
+													},
+												},
+											},
+										},
+									},
+								},
+							},
+						},
 						"container": {
 							Type:     schema.TypeList,
 							Optional: true,
diff --git a/spinnaker/datasource_pipeline_document_test.go b/spinnaker/datasource_pipeline_document_test.go
index 889d161..9b95526 100644
--- a/spinnaker/datasource_pipeline_document_test.go
+++ b/spinnaker/datasource_pipeline_document_test.go
@@ -41,6 +41,7 @@ func TestDocumentSchemaMatchesStruct(t *testing.T) {
 		".pipeline.stage.stageenabled.type", ".pipeline.stage.variables", ".pipeline.stage.variables.key",
 		".pipeline.stage.variables.value", ".pipeline.source_json", ".pipeline.override_json", ".pipeline.stage.container.envvars.name",
 		".pipeline.limit_concurrent", ".pipeline.parallel", ".pipeline.stage.deferred_initialization", ".pipeline.wait",
+		".pipeline.stage.precondition.context",
 	}
 
 	// transform some of the fields to make the comparison more accurate.
@@ -48,12 +49,18 @@ func TestDocumentSchemaMatchesStruct(t *testing.T) {
 	delete(schemas, ".pipeline.config")
 
 	// some of the fields have different types. In the struct representation
-	// they will most likely be of `struct` type, in the schema either a map or a slice
+	// they will most likely be of `struct` type, in the schema either a map or a slice,
+	// more rarely a bool ==> ptr mapping
 	assertEqual(t, schemas[".pipeline.stage.container.env"], "map")
 	assertEqual(t, schemas[".pipeline.stage.judgment_inputs"], "slice")
 	assertEqual(t, schemas[".pipeline.stage.container.image"], "map")
 	assertEqual(t, schemas[".pipeline.stage.stage_enabled"], "map")
 	assertEqual(t, schemas[".pipeline.stage.variables"], "map")
+	assertEqual(t, schemas[".pipeline.limit_concurrent"], "bool")
+	assertEqual(t, schemas[".pipeline.parallel"], "bool")
+	assertEqual(t, schemas[".pipeline.stage.deferred_initialization"], "bool")
+	assertEqual(t, schemas[".pipeline.wait"], "bool")
+	assertEqual(t, schemas[".pipeline.stage.precondition.context"], "map")
 
 	assertEqual(t, tags[".pipeline.stage.container.envvars"], "slice")
 	assertEqual(t, tags[".pipeline.stage.container.envvars.name"], "string")
@@ -71,6 +78,7 @@ func TestDocumentSchemaMatchesStruct(t *testing.T) {
 	assertEqual(t, tags[".pipeline.parallel"], "ptr")
 	assertEqual(t, tags[".pipeline.stage.deferred_initialization"], "ptr")
 	assertEqual(t, tags[".pipeline.wait"], "ptr")
+	assertEqual(t, tags[".pipeline.stage.precondition.context"], "struct")
 	// cleanup different values and make assertion
 	for _, skip := range skipFields {
 		delete(schemas, skip)
From e037006e56820e4a55847882d067d5a2d23ad9fe Mon Sep 17 00:00:00 2001
From: Karol Pasternak
Date: Tue, 2 Apr 2019 10:58:26 -0700
Subject: [PATCH 03/22] a bit of code refactoring

---
 spinnaker/api/helper.go   | 54 +++++++++++++++++++++++++++++++
 spinnaker/api/pipeline.go | 28 --------------------------
 2 files changed, 54 insertions(+), 28 deletions(-)
 create mode 100644 spinnaker/api/helper.go

diff --git a/spinnaker/api/helper.go b/spinnaker/api/helper.go
new file mode 100644
index 0000000..477cf01
--- /dev/null
+++ b/spinnaker/api/helper.go
@@ -0,0 +1,54 @@
+package api
+
+// Merge merges newDoc into oldDoc, with newDoc's values taking precedence
+func (oldDoc *PipelineDocument) Merge(newDoc *PipelineDocument) {
+	// Update some fields of oldDoc to match newDoc now
+	if newDoc.AppConfig != nil {
+		oldDoc.AppConfig = newDoc.AppConfig
+	}
+	if newDoc.Description != "" {
+		oldDoc.Description = newDoc.Description
+	}
+	if newDoc.ExecutionEngine != "" {
+		oldDoc.ExecutionEngine = newDoc.ExecutionEngine
+	}
+	if newDoc.Parallel != nil {
+		oldDoc.Parallel = newDoc.Parallel
+	}
+	if newDoc.LimitConcurrent != nil {
+		oldDoc.LimitConcurrent = newDoc.LimitConcurrent
+	}
+	if newDoc.KeepWaitingPipelines != nil {
+		oldDoc.KeepWaitingPipelines = newDoc.KeepWaitingPipelines
+	}
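+	// parameters and stages are merged element-wise below: an entry from
+	// newDoc replaces the matching oldDoc entry in place, otherwise it is
+	// appended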
+	if newDoc.Parameters != nil {
+		for _, newParam := range newDoc.Parameters {
+			found := false
+			for idx, oldParam := range oldDoc.Parameters {
+				if oldParam.Name == newParam.Name {
+					oldDoc.Parameters[idx] = newParam
+					found = true
+					continue
+				}
+			}
+			if !found {
+				oldDoc.Parameters = append(oldDoc.Parameters, newParam)
+			}
+		}
+	}
+	if newDoc.Stages != nil {
+		for _, newStage := range newDoc.Stages {
+			found := false
+			for idx, oldStage := range oldDoc.Stages {
+				if oldStage.Name == newStage.Name {
+					oldDoc.Stages[idx] = newStage
+					found = true
+					continue
+				}
+			}
+			if !found {
+				oldDoc.Stages = append(oldDoc.Stages, newStage)
+			}
+		}
+	}
+}
diff --git a/spinnaker/api/pipeline.go b/spinnaker/api/pipeline.go
index 9a8d4f9..90d27ab 100644
--- a/spinnaker/api/pipeline.go
+++ b/spinnaker/api/pipeline.go
@@ -192,31 +192,3 @@ func DeletePipeline(client *gate.GatewayClient, applicationName, pipelineName st
 	return nil
 }
-
-func (oldDoc *PipelineDocument) Merge(newDoc *PipelineDocument) {
-	// Update some fields of oldDoc to match newDoc now
-	if newDoc.AppConfig != nil {
-		oldDoc.AppConfig = newDoc.AppConfig
-	}
-	if newDoc.Description != "" {
-		oldDoc.Description = newDoc.Description
-	}
-	if newDoc.ExecutionEngine != "" {
-		oldDoc.ExecutionEngine = newDoc.ExecutionEngine
-	}
-	if newDoc.Parallel != nil {
-		oldDoc.Parallel = newDoc.Parallel
-	}
-	if newDoc.LimitConcurrent != nil {
-		oldDoc.LimitConcurrent = newDoc.LimitConcurrent
-	}
-	if newDoc.KeepWaitingPipelines != nil {
-		oldDoc.KeepWaitingPipelines = newDoc.KeepWaitingPipelines
-	}
-	if newDoc.Parameters != nil {
-		oldDoc.Parameters = append(oldDoc.Parameters, newDoc.Parameters...)
-	}
-	if newDoc.Stages != nil {
-		oldDoc.Stages = append(oldDoc.Stages, newDoc.Stages...)
-	}
-}
From 0526bd170f1b857f68abefebb84ac8c6702a3395 Mon Sep 17 00:00:00 2001
From: Karol Pasternak
Date: Wed, 3 Apr 2019 13:55:26 -0700
Subject: [PATCH 04/22] add ForceNew to application within resource_application
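
With ForceNew set, changing the application name makes Terraform plan a
destroy-and-recreate of the resource instead of an in-place update.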
---
 spinnaker/resource_application.go | 1 +
 1 file changed, 1 insertion(+)

diff --git a/spinnaker/resource_application.go b/spinnaker/resource_application.go
index 5b58fe7..7488f1f 100644
--- a/spinnaker/resource_application.go
+++ b/spinnaker/resource_application.go
@@ -13,6 +13,7 @@ func resourceApplication() *schema.Resource {
 			"application": {
 				Type:     schema.TypeString,
 				Required: true,
+				ForceNew: true,
 			},
 			"email": {
 				Type:     schema.TypeString,
From 45033e18b8df447098bc63156e1c30aaafe2d49a Mon Sep 17 00:00:00 2001
From: Vincent Janelle
Date: Wed, 15 May 2019 17:19:24 -0700
Subject: [PATCH 05/22] Options is actually a list of maps

The list of options was not actually showing up; this pushes the correct
format of options: a list of maps, each with a key of "value".

Much sense.
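
For illustration, a parameter defined with options ["foo", "bar"] now
serializes as:

    "options": [
        {"value": "foo"},
        {"value": "bar"}
    ]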
---
 spinnaker/api/pipeline.go                      | 18 +++++++++++-------
 spinnaker/datasource_pipeline_document.go      |  2 +-
 spinnaker/datasource_pipeline_document_test.go |  3 ++-
 3 files changed, 14 insertions(+), 9 deletions(-)

diff --git a/spinnaker/api/pipeline.go b/spinnaker/api/pipeline.go
index 90d27ab..cca74d7 100644
--- a/spinnaker/api/pipeline.go
+++ b/spinnaker/api/pipeline.go
@@ -28,13 +28,17 @@ type PipelineDocument struct {
 }
 
 type PipelineParameter struct {
-	Description string   `json:"description,omitempty"`
-	Default     string   `json:"default,omitempty"`
-	Name        string   `json:"name"`
-	Required    bool     `json:"required"`
-	HasOptions  bool     `json:"hasOptions"`
-	Label       string   `json:"label,omitempty"`
-	Options     []string `json:"options,omitempty"`
+	Description string     `json:"description,omitempty"`
+	Default     string     `json:"default,omitempty"`
+	Name        string     `json:"name"`
+	Required    bool       `json:"required"`
+	HasOptions  bool       `json:"hasOptions"`
+	Options     []*Options `json:"options,omitempty"`
+	Label       string     `json:"label,omitempty"`
+}
+
+type Options struct {
+	Value string `json:"value"`
 }
 
 type Pipeline struct {
diff --git a/spinnaker/datasource_pipeline_document.go b/spinnaker/datasource_pipeline_document.go
index 3ec24ad..b35dc25 100644
--- a/spinnaker/datasource_pipeline_document.go
+++ b/spinnaker/datasource_pipeline_document.go
@@ -437,7 +437,7 @@ func parametersDecodeDocument(parameters interface{}) []*api.PipelineParameter {
 		if opts := fparam["options"].([]interface{}); len(opts) > 0 {
 			pm.HasOptions = true
 			for _, opt := range opts {
-				pm.Options = append(pm.Options, opt.(string))
+				pm.Options = append(pm.Options, &api.Options{Value: opt.(string)})
 			}
 		}
 		params[i] = pm
diff --git a/spinnaker/datasource_pipeline_document_test.go b/spinnaker/datasource_pipeline_document_test.go
index 9b95526..8852c9a 100644
--- a/spinnaker/datasource_pipeline_document_test.go
+++ b/spinnaker/datasource_pipeline_document_test.go
@@ -41,7 +41,7 @@ func TestDocumentSchemaMatchesStruct(t *testing.T) {
 		".pipeline.stage.stageenabled.type", ".pipeline.stage.variables", ".pipeline.stage.variables.key",
 		".pipeline.stage.variables.value", ".pipeline.source_json", ".pipeline.override_json", ".pipeline.stage.container.envvars.name",
 		".pipeline.limit_concurrent", ".pipeline.parallel", ".pipeline.stage.deferred_initialization", ".pipeline.wait",
-		".pipeline.stage.precondition.context",
+		".pipeline.stage.precondition.context", ".pipeline.parameter.options.value",
 	}
 
 	// transform some of the fields to make the comparison more accurate.
@@ -74,6 +74,7 @@ func TestDocumentSchemaMatchesStruct(t *testing.T) { assertEqual(t, tags[".pipeline.stage.variables"], "slice") assertEqual(t, tags[".pipeline.stage.variables.key"], "string") assertEqual(t, tags[".pipeline.stage.variables.value"], "string") + assertEqual(t, tags[".pipeline.parameter.options.value"], "string") assertEqual(t, tags[".pipeline.limit_concurrent"], "ptr") assertEqual(t, tags[".pipeline.parallel"], "ptr") assertEqual(t, tags[".pipeline.stage.deferred_initialization"], "ptr") From e6b3f8f987702aae9c57d907c6e90dc00fdf52dc Mon Sep 17 00:00:00 2001 From: Karol Pasternak Date: Tue, 23 Jul 2019 15:07:31 -0700 Subject: [PATCH 06/22] split pipeline stage in a set of separate structs --- spinnaker/api/pipeline.go | 74 +--------------- spinnaker/api/stage.go | 180 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 181 insertions(+), 73 deletions(-) create mode 100644 spinnaker/api/stage.go diff --git a/spinnaker/api/pipeline.go b/spinnaker/api/pipeline.go index cca74d7..636adb8 100644 --- a/spinnaker/api/pipeline.go +++ b/spinnaker/api/pipeline.go @@ -33,8 +33,8 @@ type PipelineParameter struct { Name string `json:"name"` Required bool `json:"required"` HasOptions bool `json:"hasOptions"` - Options []*Options `json:"options,omitempty"` Label string `json:"label,omitempty"` + Options []*Options `json:"options,omitempty"` } type Options struct { @@ -51,78 +51,6 @@ type Pipeline struct { Parameters []*PipelineParameter `json:"parameterConfig,omitempty" mapstructure:"parameter"` } -type Stage struct { - Account string `json:"account,omitempty"` - Application string `json:"application,omitempty"` - CloudProvider string `json:"cloudProvider,omitempty" mapstructure:"cloud_provider"` - CloudProviderType string `json:"cloudProviderType,omitempty" mapstructure:"cloud_provider_type"` - Annotations map[string]string `json:"annotations,omitempty"` - Clusters string `json:"clusters,omitempty"` - CompleteOtherBranchesThenFail bool `json:"completeOtherBranchesThenFail,omitempty" mapstructure:"complete_other_branches_then_fail"` - ContinuePipeline bool `json:"continuePipeline,omitempty" mapstructure:"continue_pipeline"` - FailPipeline bool `json:"failPipeline,omitempty" mapstructure:"fail_pipeline"` - FailOnFailedExpressions bool `json:"failOnFailedExpressions,omitempty" mapstructure:"fail_on_failed_expression"` - Instructions string `json:"instructions,omitempty"` - JudgmentInputs []struct { - Value string `json:"value"` - } `json:"judgmentInputs,omitempty"` - StageEnabled struct { - Expression string `json:"expression,omitempty"` - Type string `json:"type,omitempty"` - } `json:"stageEnabled,omitempty"` - Pipeline string `json:"pipeline,omitempty"` - PipelineParameters map[string]string `json:"pipelineParameters,omitempty" mapstructure:"pipeline_parameters"` - Variables []struct { - Key string `json:"key"` - Value string `json:"value"` - } `json:"variables,omitempty" mapstructure:"-"` - Containers []struct { - Args []string `json:"args,omitempty"` - Command []string `json:"command,omitempty"` - EnvVars []struct { - Name string `json:"name"` - Value string `json:"value"` - } `json:"envVars,omitempty"` - ImageDescription struct { - Account string `json:"account,omitempty"` - ImageID string `json:"imageId,omitempty" mapstructure:"id"` - Registry string `json:"registry,omitempty"` - Repository string `json:"repository,omitempty"` - Tag string `json:"tag,omitempty"` - } `json:"imageDescription,omitempty" mapstructure:"image"` - ImagePullPolicy string `json:"imagePullPolicy,omitempty" 
mapstructure:"image_pull_policy"` - Name string `json:"name,omitempty"` - Ports []struct { - ContainerPort int `json:"containerPort,omitempty" mapstructure:"container"` - Name string `json:"name,omitempty"` - Protocol string `json:"protocol,omitempty"` - } `json:"ports,omitempty"` - } `json:"containers,omitempty" mapstructure:"container"` - Preconditions []struct { - CloudProvider string `json:"cloudProvider,omitempty" mapstructure:"cloud_provider"` - Context struct { - Cluster string `json:"cluster,omitempty"` - Comparison string `json:"comparison,omitempty"` - Credentials string `json:"credentials,omitempty"` - Expected int `json:"expected,omitempty"` - Regions []string `json:"regions,omitempty"` - Expression string `json:"expression,omitempty"` - } `json:"context,omitempty"` - FailPipeline bool `json:"failPipeline" mapstructure:"fail_pipeline"` - Type string `json:"type"` - } `json:"preconditions,omitempty" mapstructure:"precondition"` - DeferredInitialization *bool `json:"deferredInitialization,omitempty" mapstructure:"deferred_initialization"` - DNSPolicy string `json:"dnsPolicy,omitempty" mapstructure:"dns_policy"` - Name string `json:"name,omitempty"` - Namespace string `json:"namespace,omitempty"` - RefID string `json:"refId,omitempty" mapstructure:"ref_id"` - RequisiteStageRefIds []interface{} `json:"requisiteStageRefIds,omitempty" mapstructure:"requisite_stage_refids"` - Type string `json:"type,omitempty"` - StatusURLResolution string `json:"statusUrlResolution,omitempty" mapstructure:"status_url_resolution"` - WaitTime int `json:"waitTime,omitempty" mapstructure:"wait_time"` - WaitForCompletion bool `json:"waitForCompletion,omitempty" mapstructure:"wait_for_completion"` -} - func CreatePipeline(client *gate.GatewayClient, pipeline interface{}) error { resp, err := client.PipelineControllerApi.SavePipelineUsingPOST(client.Context, pipeline) diff --git a/spinnaker/api/stage.go b/spinnaker/api/stage.go new file mode 100644 index 0000000..be912a9 --- /dev/null +++ b/spinnaker/api/stage.go @@ -0,0 +1,180 @@ +package api + +type ManualJudgment struct { + JudgmentInputs []struct { + Value string `json:"value"` + } `json:"judgmentInputs,omitempty"` + Instructions string `json:"instructions,omitempty"` +} + +type Notification struct { + Address string `json:"address,omitempty"` + Cc string `json:"cc,omitempty"` + Level string `json:"level,omitempty"` + Type string `json:"type,omitempty"` + Message struct { + StageCompleted struct { + Text string `json:",omitempty"` + } `json:"stage.completed,omitempty"` + StageFailed struct { + Text string `json:",omitempty"` + } `json:"stage.failed,omitempty"` + StageStarting struct { + Text string `json:",omitempty"` + } `json:"stage.starting,omitempty"` + } `json:"message,omitempty"` + When []string `json:"when,omitempty"` +} + +type RunJob struct { + Kubernetes `mapstructure:",squash"` +} + +type Kubernetes struct { + Annotations map[string]string `json:"annotations,omitempty"` + Namespace string `json:"namespace,omitempty"` + + Containers []struct { + Args []string `json:"args,omitempty"` + Command []string `json:"command,omitempty"` + EnvVars []struct { + Name string `json:"name"` + Value string `json:"value"` + } `json:"envVars,omitempty"` + ImageDescription struct { + Account string `json:"account,omitempty"` + ImageID string `json:"imageId,omitempty" mapstructure:"id"` + Registry string `json:"registry,omitempty"` + Repository string `json:"repository,omitempty"` + Tag string `json:"tag,omitempty"` + } `json:"imageDescription,omitempty" 
mapstructure:"image"` + ImagePullPolicy string `json:"imagePullPolicy,omitempty" mapstructure:"image_pull_policy"` + Name string `json:"name,omitempty"` + Ports []struct { + ContainerPort int `json:"containerPort,omitempty" mapstructure:"container"` + HostPort int `json:"hostPort,omitempty" mapstructure:"host"` + HostIP string `json:"hostIp,omitempty" mapstructure:"hostip"` + Name string `json:"name,omitempty"` + Protocol string `json:"protocol,omitempty"` + } `json:"ports,omitempty"` + Limits struct { + CPU string `json:",omitempty"` + Memory string `json:",omitempty"` + } `json:"limits,omitempty"` + Volumes []struct { + MountPath string `json:"mountPath,omitempty" mapstructure:"mount_path"` + Name string `json:"name,omitempty"` + ReadOnly bool `json:"readOnly,omitempty" mapstructure:"read_only"` + SubPath string `json:"subPath,omitempty" mapstructure:"sub_path"` + } `json:"volumeMounts,omitempty"` + } `json:"containers,omitempty" mapstructure:"container"` + + NodeSelector map[string]string `json:"nodeSelector,omitempty" mapstructure:"node_selector"` + ServiceAccountName string `json:"serviceAccountName,omitempty" mapstructure:"service_account_name"` + DNSPolicy string `json:"dnsPolicy,omitempty" mapstructure:"dns_policy"` + Labels map[string]string `json:"labels,omitempty"` +} + +type CheckPrecondition struct { + Preconditions []struct { + CloudProvider string `json:"cloudProvider,omitempty" mapstructure:"cloud_provider"` + Context struct { + Cluster string `json:"cluster,omitempty"` + Comparison string `json:"comparison,omitempty"` + Credentials string `json:"credentials,omitempty"` + Expected int `json:"expected,omitempty"` + Regions []string `json:"regions,omitempty"` + Expression string `json:"expression,omitempty"` + } `json:"context,omitempty"` + FailPipeline bool `json:"failPipeline" mapstructure:"fail_pipeline"` + Type string `json:"type"` + } `json:"preconditions,omitempty" mapstructure:"precondition"` +} + +type RunPipeline struct { + Application string `json:"application,omitempty"` + Pipeline string `json:"pipeline,omitempty"` + PipelineParameters map[string]string `json:"pipelineParameters,omitempty" mapstructure:"pipeline_parameters"` +} + +type EvaluateVariables struct { + Variables []struct { + Key string `json:"key"` + Value string `json:"value"` + } `json:"variables,omitempty" mapstructure:"-"` +} + +type Wait struct { + SkipText string `json:"skipWaitText,omitempty" mapstructure:"skip_text"` + WaitTime int `json:"waitTime,omitempty" mapstructure:"wait_time"` +} + +type DeployManifest struct { + Moniker map[string]string `json:"moniker,omitempty"` + SkipExpressionEvaluation bool `json:"skipExpressionEvaluation,omitempty" mapstructure:"skip_expression_evaluation"` + Manifests []map[string]interface{} `json:"manifests,omitempty" mapstructure:"-"` +} + +type RunJobManifest struct { + Alias string `json:"alias,omitempty" mapstructure:"-"` + ConsumeArtifactSource string `json:"consumeArtifactSource,omitempty" mapstructure:"consume_artifact_source"` + PropertyFile string `json:"propertyFile,omitempty" mapstructure:"property_file"` + Manifest map[string]interface{} `json:"manifest,omitempty" mapstructure:"-"` +} + +type PatchManifest struct { + App string `json:"app,omitempty" mapstructure:"-"` + Location string `json:"location,omitempty"` + ManifestName string `json:"manifestName,omitempty" mapstructure:"-"` + Mode string `json:"mode,omitempty"` + Options struct { + MergeStrategy string `json:"mergeStrategy,omitempty"` + Record bool `json:"record,omitempty"` + } 
`json:"options,omitempty"` + PatchBody map[string]interface{} `json:"patchBody,omitempty" mapstructure:"-"` +} +type KubernetesManifest struct { + Source string `json:"source,omitempty"` + ManifestArtifactAccount string `json:"manifestArtifactAccount,omitempty" mapstructure:"manifest_artifact_account"` + + RunJobManifest `mapstructure:",squash"` + DeployManifest `mapstructure:",squash"` + PatchManifest `mapstructure:",squash"` +} + +type Stage struct { + // shared properties among all the stage types + Account string `json:"account,omitempty"` + CloudProvider string `json:"cloudProvider,omitempty" mapstructure:"cloud_provider"` + CloudProviderType string `json:"cloudProviderType,omitempty" mapstructure:"cloud_provider_type"` + CompleteOtherBranchesThenFail *bool `json:"completeOtherBranchesThenFail,omitempty" mapstructure:"complete_other_branches_then_fail"` + ContinuePipeline *bool `json:"continuePipeline,omitempty" mapstructure:"continue_pipeline"` + FailPipeline *bool `json:"failPipeline,omitempty" mapstructure:"fail_pipeline"` + FailOnFailedExpressions *bool `json:"failOnFailedExpressions,omitempty" mapstructure:"fail_on_failed_expression"` + StageEnabled *StageEnabled `json:"stageEnabled,omitempty"` + DeferredInitialization bool `json:"deferredInitialization,omitempty" mapstructure:"deferred_initialization"` + Name string `json:"name,omitempty"` + RefID string `json:"refId,omitempty" mapstructure:"id"` + RequisiteStageRefIds []string `json:"requisiteStageRefIds,omitempty" mapstructure:"depends_on"` + Type string `json:"type,omitempty"` + WaitForCompletion bool `json:"waitForCompletion" mapstructure:"wait_for_completion"` + Timeout int `json:"stageTimeoutMs,omitempty" mapstructure:"timeout"` + + // Notifications and SendNotifcations is shared between: + // runPipeline and manualJudgment + Notifications []*Notification `json:"notification,omitempty" mapstructure:"notification"` + SendNotification bool `json:"sendNotifications"` + + // embedded structs/stage types + ManualJudgment `json:",omitempty" mapstructure:",squash"` + RunJob `json:",omitempty" mapstructure:",squash"` + CheckPrecondition `json:",omitempty" mapstructure:",squash"` + RunPipeline `json:",omitempty" mapstructure:",squash"` + EvaluateVariables `json:",omitempty" mapstructure:",squash"` + Wait `json:",omitempty" mapstructure:",squash"` + KubernetesManifest `json:",omitempty" mapstructure:",squash"` +} +type StageEnabled struct { + Expression string `json:"expression,omitempty"` + Type string `json:"type,omitempty"` +} From f52adcb7873c6ad2e6a30d6b1b397ea9335c6cfc Mon Sep 17 00:00:00 2001 From: Karol Pasternak Date: Tue, 23 Jul 2019 15:08:54 -0700 Subject: [PATCH 07/22] add new spinnaker_pipelines data source --- spinnaker/api/pipeline.go | 27 ++++++++++++++ spinnaker/datasource_pipelines.go | 59 +++++++++++++++++++++++++++++++ spinnaker/provider.go | 1 + 3 files changed, 87 insertions(+) create mode 100644 spinnaker/datasource_pipelines.go diff --git a/spinnaker/api/pipeline.go b/spinnaker/api/pipeline.go index 636adb8..d95f887 100644 --- a/spinnaker/api/pipeline.go +++ b/spinnaker/api/pipeline.go @@ -97,6 +97,33 @@ func GetPipeline(client *gate.GatewayClient, applicationName, pipelineName strin return jsonMap, nil } +func GetPipelines(client *gate.GatewayClient, applicationName string, dest interface{}) ([]interface{}, error) { + data, resp, err := client.ApplicationControllerApi.GetPipelineConfigsForApplicationUsingGET(client.Context, applicationName) + + if err != nil { + if resp != nil && resp.StatusCode == 
http.StatusNotFound { + return data, fmt.Errorf("%s", ErrCodeNoSuchEntityException) + } + return data, fmt.Errorf("Encountered an error getting pipelines for application %s, %s\n", applicationName, err.Error()) + } + + if resp.StatusCode != http.StatusOK { + return data, fmt.Errorf("Encountered an error getting pipelines in application %s, status code: %d\n", + applicationName, + resp.StatusCode) + } + + if data == nil { + return data, fmt.Errorf(ErrCodeNoSuchEntityException) + } + + if err := mapstructure.Decode(data, dest); err != nil { + return data, err + } + + return data, nil +} + func UpdatePipeline(client *gate.GatewayClient, pipelineID string, pipeline interface{}) error { _, resp, err := client.PipelineControllerApi.UpdatePipelineUsingPUT(client.Context, pipelineID, pipeline) diff --git a/spinnaker/datasource_pipelines.go b/spinnaker/datasource_pipelines.go new file mode 100644 index 0000000..c232bf7 --- /dev/null +++ b/spinnaker/datasource_pipelines.go @@ -0,0 +1,59 @@ +package spinnaker + +import ( + "github.com/armory-io/terraform-provider-spinnaker/spinnaker/api" + "github.com/hashicorp/terraform/helper/schema" +) + +func datasourcePipelines() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "application": { + Type: schema.TypeString, + Required: true, + }, + "pipelines": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Computed: true, + }, + "id": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + }, + Read: datasourcePipelinesRead, + } +} + +func datasourcePipelinesRead(d *schema.ResourceData, meta interface{}) error { + clientConfig := meta.(gateConfig) + client := clientConfig.client + applicationName := d.Get("application").(string) + + var pipelines []map[string]interface{} + + data, err := api.GetPipelines(client, applicationName, &[]pipelineRead{}) + if err != nil { + return err + } + + for _, pipeline := range data { + pipelines = append(pipelines, map[string]interface{}{ + "name": pipeline.(map[string]interface{})["name"].(string), + "id": pipeline.(map[string]interface{})["id"].(string), + }) + } + + d.SetId(applicationName) + d.Set("pipelines", pipelines) + + return nil +} diff --git a/spinnaker/provider.go b/spinnaker/provider.go index 9705f3b..334d9b8 100644 --- a/spinnaker/provider.go +++ b/spinnaker/provider.go @@ -42,6 +42,7 @@ func Provider() *schema.Provider { }, DataSourcesMap: map[string]*schema.Resource{ "spinnaker_pipeline": datasourcePipeline(), + "spinnaker_pipelines": datasourcePipelines(), "spinnaker_pipeline_document": datasourcePipelineDocument(), }, ConfigureFunc: providerConfigureFunc, From ce66173047b54823d36e90db212b55fee48683f3 Mon Sep 17 00:00:00 2001 From: Karol Pasternak Date: Tue, 23 Jul 2019 15:10:52 -0700 Subject: [PATCH 08/22] override stage by id, not name --- spinnaker/api/helper.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spinnaker/api/helper.go b/spinnaker/api/helper.go index 477cf01..51c8e6a 100644 --- a/spinnaker/api/helper.go +++ b/spinnaker/api/helper.go @@ -40,7 +40,7 @@ func (oldDoc *PipelineDocument) Merge(newDoc *PipelineDocument) { for _, newStage := range newDoc.Stages { found := false for idx, oldStage := range oldDoc.Stages { - if oldStage.Name == newStage.Name { + if oldStage.RefID == newStage.RefID { oldDoc.Stages[idx] = newStage found = true continue From d121baa4c8026e53750b3290dfeaeb0f35b93e45 Mon Sep 17 00:00:00 2001 From: Karol Pasternak 
Date: Tue, 23 Jul 2019 15:22:05 -0700
Subject: [PATCH 09/22] add (runJob,deploy,patch)Manifest stage types

The commit introduces 3 new stage types related to the kubernetes v2 provider.
It extends the already existing kubernetes v1 stages with the missing fields to align with the JSON fields.
Additionally, it organizes each of the new as well as the already existing stages into switch/case blocks.
---
 spinnaker/datasource_pipeline_document.go | 413 ++++++++++++++++++++--
 1 file changed, 374 insertions(+), 39 deletions(-)

diff --git a/spinnaker/datasource_pipeline_document.go b/spinnaker/datasource_pipeline_document.go
index b35dc25..b1f1b7c 100644
--- a/spinnaker/datasource_pipeline_document.go
+++ b/spinnaker/datasource_pipeline_document.go
@@ -2,10 +2,12 @@ package spinnaker
 import (
 "encoding/json"
+ "fmt"
 "sort"
 "strconv"
 "github.com/armory-io/terraform-provider-spinnaker/spinnaker/api"
+ "github.com/ghodss/yaml"
 "github.com/hashicorp/terraform/helper/hashcode"
 "github.com/hashicorp/terraform/helper/schema"
 "github.com/mitchellh/mapstructure"
@@ -52,7 +54,7 @@ func datasourcePipelineDocument() *schema.Resource {
 Optional: true,
 },
 "parameter": {
- Type: schema.TypeSet,
+ Type: schema.TypeList,
 Optional: true,
 Elem: &schema.Resource{
 Schema: map[string]*schema.Schema{
@@ -92,6 +94,10 @@
 Optional: true,
 Elem: &schema.Resource{
 Schema: map[string]*schema.Schema{
+ "timeout": {
+ Type: schema.TypeInt,
+ Optional: true,
+ },
 "account": {
 Type: schema.TypeString,
 Optional: true,
@@ -108,10 +114,6 @@
 Type: schema.TypeMap,
 Optional: true,
 },
- "clusters": {
- Type: schema.TypeString,
- Optional: true,
- },
 "complete_other_branches_then_fail": {
 Type: schema.TypeBool,
 Optional: true,
@@ -123,6 +125,7 @@
 "fail_pipeline": {
 Type: schema.TypeBool,
 Optional: true,
+ Default: true,
 },
 "fail_on_failed_expression": {
 Type: schema.TypeBool,
 Optional: true,
@@ -268,6 +271,14 @@
 Type: schema.TypeInt,
 Optional: true,
 },
+ "host": {
+ Type: schema.TypeInt,
+ Optional: true,
+ },
+ "hostip": {
+ Type: schema.TypeString,
+ Optional: true,
+ },
 "name": {
 Type: schema.TypeString,
 Optional: true,
@@ -279,9 +290,61 @@
 },
 },
 },
+ "limits": {
+ Type: schema.TypeMap,
+ Optional: true,
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "cpu": {
+ Type: schema.TypeString,
+ Optional: true,
+ },
+ "memory": {
+ Type: schema.TypeString,
+ Optional: true,
+ },
+ },
+ },
+ },
+ "volumes": {
+ Type: schema.TypeList,
+ Optional: true,
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "mount_path": {
+ Type: schema.TypeString,
+ Optional: true,
+ },
+ "name": {
+ Type: schema.TypeString,
+ Optional: true,
+ },
+ "read_only": {
+ Type: schema.TypeBool,
+ Optional: true,
+ },
+ "sub_path": {
+ Type: schema.TypeString,
+ Optional: true,
+ },
+ },
+ },
+ },
 },
 },
 },
+ "node_selector": {
+ Type: schema.TypeMap,
+ Optional: true,
+ },
+ "service_account_name": {
+ Type: schema.TypeString,
+ Optional: true,
+ },
+ "labels": {
+ Type: schema.TypeMap,
+ Optional: true,
+ },
 "deferred_initialization": {
 Type: schema.TypeBool,
 Optional: true,
@@ -297,14 +360,15 @@
 "namespace": {
 Type: schema.TypeString,
 Optional: true,
+ Default: "default",
 },
- "ref_id": {
+ "id": {
 Type: schema.TypeString,
 Optional: true,
 },
 "type":
{ Type: schema.TypeString, - Optional: true, + Required: true, }, "wait_for_completion": { Type: schema.TypeBool, @@ -322,7 +386,7 @@ func datasourcePipelineDocument() *schema.Resource { Type: schema.TypeString, }, }, - "requisite_stage_refids": { + "depends_on": { Type: schema.TypeList, Optional: true, Elem: &schema.Schema{ @@ -333,13 +397,135 @@ func datasourcePipelineDocument() *schema.Resource { Type: schema.TypeInt, Optional: true, }, - "status_url_resolution": { + "skip_text": { Type: schema.TypeString, Optional: true, }, "stage_enabled": { Type: schema.TypeMap, Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "expression": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + "notification": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "address": { + Type: schema.TypeString, + Optional: true, + }, + "cc": { + Type: schema.TypeString, + Optional: true, + }, + "level": { + Type: schema.TypeString, + Optional: true, + }, + "type": { + Type: schema.TypeString, + Optional: true, + }, + "when": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "message": { + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "stage_completed": { + Type: schema.TypeString, + Optional: true, + }, + "stage_failed": { + Type: schema.TypeString, + Optional: true, + }, + "stage_starting": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + }, + }, + }, + "manifest_artifact_account": { + Type: schema.TypeString, + Optional: true, + }, + "moniker": { + Type: schema.TypeMap, + Optional: true, + }, + "source": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: func(val interface{}, key string) (warns []string, errs []error) { + v := val.(string) + if val != "text" && val != "artifact" { + errs = append(errs, fmt.Errorf("%q must be set to either `text` or `artifact`, got: %s", key, v)) + } + return + }, + }, + "skip_expression_evaluation": { + Type: schema.TypeBool, + Optional: true, + }, + "consume_artifact_source": { + Type: schema.TypeString, + Optional: true, + }, + "property_file": { + Type: schema.TypeString, + Optional: true, + }, + "manifest": { + Type: schema.TypeString, + Optional: true, + }, + "location": { + Type: schema.TypeString, + Optional: true, + }, + "mode": { + Type: schema.TypeString, + Optional: true, + }, + "options": { + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "merge_strategy": { + Type: schema.TypeString, + Optional: true, + }, + "record": { + Type: schema.TypeBool, + Optional: true, + }, + }, + }, + }, + "patch_body": { + Type: schema.TypeString, + Optional: true, }, }, }, @@ -416,12 +602,11 @@ func datasourcePipelineDocumentRead(d *schema.ResourceData, meta interface{}) er return nil } -// parametersDecodeDocument iterates over each parameter. The schema for the parameters -// is being set to TypeSet, which means in that case order does not matter. +// parametersDecodeDocument iterates over each parameter. 
// The parameter "hasOptions" is assumed based on the fact if the "options" are being // populated or not func parametersDecodeDocument(parameters interface{}) []*api.PipelineParameter { - var selParams = parameters.(*schema.Set).List() + var selParams = parameters.([]interface{}) //(*schema.Set).List() params := make([]*api.PipelineParameter, len(selParams)) for i, param := range selParams { @@ -455,17 +640,28 @@ func stageDecodeDocument(field interface{}) ([]*api.Stage, error) { stageField := stg.(map[string]interface{}) sg := &api.Stage{} + // most of the schema should be decoded here err := mapstructure.Decode(stageField, &sg) if err != nil { return nil, err } - // extract env variables if any - extractEnvs(stageField["container"].([]interface{}), sg) - - // extract variables - if vars, ok := stageField["variables"]; ok { - for key, value := range vars.(map[string]interface{}) { + // Some of the stage fields are only related to the specific stage types + switch stageField["type"].(string) { + case "runJob": + if err := ValidateFields("runJob", []string{"cloud_provider_type", "account"}, stageField); err != nil { + return nil, err + } + if stageField["cloud_provider_type"].(string) == "kubernetes" { + extractEnvs(stageField["container"].([]interface{}), sg) + } else { + return nil, fmt.Errorf("runJob: cloudProviderType = %s not supported at this time", stageField["cloud_provider_type"].(string)) + } + case "evaluateVariables": + if err := ValidateFields("evaluateVariables", []string{"variables"}, stageField); err != nil { + return nil, err + } + for key, value := range stageField["variables"].(map[string]interface{}) { sg.Variables = append(sg.Variables, struct { Key string `json:"key"` Value string `json:"value"` @@ -473,39 +669,147 @@ func stageDecodeDocument(field interface{}) ([]*api.Stage, error) { Key: key, Value: value.(string), }) + // Prevent shifts of the maps within the slice. 
Suppress unnecessary diffs on the data source + sort.Slice(sg.Variables, + func(i, j int) bool { + return sg.Variables[i].Key < sg.Variables[j].Key + }) } - } - - // evaluate if judgment manual is set, map string values to map - if judgmentInputs, ok := stageField["judgment_inputs"]; ok { - for _, inpt := range judgmentInputs.([]interface{}) { + case "checkPreconditions": + if err := ValidateFields("checkPreconditions", []string{"precondition"}, stageField); err != nil { + return nil, err + } + case "manualJudgment": + if err := ValidateFields("manualJudgment", []string{"judgment_inputs"}, stageField); err != nil { + return nil, err + } + for _, inpt := range stageField["judgment_inputs"].([]interface{}) { sg.JudgmentInputs = append(sg.JudgmentInputs, struct { Value string `json:"value"` }{ Value: inpt.(string), }) } + case "runJobManifest": + // handle runJobManifest stage type + if err := ValidateFields("runJobManifest", []string{"manifest", "account", "source"}, stageField); err != nil { + return nil, err + } + manifestJSON, err := yaml.YAMLToJSON([]byte(stageField["manifest"].(string))) + if err != nil { + return nil, err + } + err = json.Unmarshal(manifestJSON, &sg.Manifest) + if err != nil { + return nil, err + } + + // default to CloudProvider if field is not defined + if _, defined := stageField["manifest_artifact_account"]; !defined { + if cloudProvider, ok := stageField["cloud_provider"]; ok { + sg.ManifestArtifactAccount = cloudProvider.(string) + } + } + + // the json `alias` is always being defaulted to runJob + // setting it here for the consistency + sg.Alias = "runJob" + case "deployManifest": + if err := ValidateFields("deployManifest", []string{"manifest", "account", "source"}, stageField); err != nil { + return nil, err + } + manifestJSON, err := yaml.YAMLToJSON([]byte(stageField["manifest"].(string))) + if err != nil { + return nil, err + } + manifestMap := make(map[string]interface{}) + err = json.Unmarshal(manifestJSON, &manifestMap) + if err != nil { + return nil, err + } + + if manifestMap != nil { + sg.Manifests = append(sg.Manifests, manifestMap) + } + + // default to CloudProvider if field is not defined + if _, defined := stageField["manifest_artifact_account"]; !defined { + if cloudProvider, ok := stageField["cloud_provider"]; ok { + sg.ManifestArtifactAccount = cloudProvider.(string) + } + } + case "patchManifest": + if err := ValidateFields("patchManifest", []string{"location", "mode", "patch_body"}, stageField); err != nil { + return nil, err + } + manifestJSON, err := yaml.YAMLToJSON([]byte(stageField["patch_body"].(string))) + if err != nil { + return nil, err + } + err = json.Unmarshal(manifestJSON, &sg.PatchBody) + if err != nil { + return nil, err + } + + // default to CloudProvider if field is not defined + if _, defined := stageField["manifest_artifact_account"]; !defined { + if cloudProvider, ok := stageField["cloud_provider"]; ok { + sg.ManifestArtifactAccount = cloudProvider.(string) + } + } + case "pipeline": + if err := ValidateFields("pipeline", []string{"pipeline"}, stageField); err != nil { + return nil, err + } + case "wait": + if err := ValidateFields("wait", []string{"wait_time"}, stageField); err != nil { + return nil, err + } + } + + // map notifications stages from simplified resource schema to struct + if notifications, ok := stageField["notification"]; ok { + for i, notification := range notifications.([]interface{}) { + message := notification.(map[string]interface{})["message"] + + if stageCompleted, ok := 
message.(map[string]interface{})["stage_completed"]; ok {
+ sg.Notifications[i].Message.StageCompleted.Text = stageCompleted.(string)
+ }
+ if stageFailed, ok := message.(map[string]interface{})["stage_failed"]; ok {
+ sg.Notifications[i].Message.StageFailed.Text = stageFailed.(string)
+ }
+ if stageStarting, ok := message.(map[string]interface{})["stage_starting"]; ok {
+ sg.Notifications[i].Message.StageStarting.Text = stageStarting.(string)
+ }
+ }
+ if len(notifications.([]interface{})) > 0 {
+ sg.SendNotification = true
+ }
 }

- // stage_enabled is being populated in pipeline's json only if
- // the Execution Optional within the job definition is set to "Conditional on Expression"
- // The only accepted type in spinnaker at this moment is "expression"
- // Instead of accepting:
- // {
- // expression: "",
- // type: "expression"
- // }
- // accept:
- // expression: "VALUE"
- // and assume the type
- if stageEnabled, ok := stageField["stage_enabled"]; ok {
- // most likely will be iterated only once
- for key, value := range stageEnabled.(map[string]interface{}) {
- sg.StageEnabled.Expression = value.(string)
- sg.StageEnabled.Type = key
+ // Execution Options - by default failPipeline is set to true.
+ // To simplify the HCL logic,
+ // set it to false if either continuePipeline or completeOtherBranchesThenFail is set to true
+ if stageField["continue_pipeline"].(bool) || stageField["complete_other_branches_then_fail"].(bool) {
+ *sg.FailPipeline = false
+ }
+
+ // Execution Options
+ // If "Conditional Execution" is set, set the Type to its default value, "expression",
+ // and extract the expression
+ if stageEnabled, ok := stageField["stage_enabled"].(map[string]interface{}); ok {
+ if expression, ok := stageEnabled["expression"].(string); ok {
+ sg.StageEnabled = &api.StageEnabled{
+ Expression: expression,
+ Type: "expression",
+ }
 }
 }
+ // Since id/RefID is an optional field, "calculate" the value if not provided
+ if sg.RefID == "" {
+ sg.RefID = strconv.Itoa(i + 1)
+ }
 stgs[i] = sg
 }
 return stgs, nil
@@ -554,3 +858,34 @@ func extractEnvs(fields []interface{}, sg *api.Stage) {
 func Bool(v bool) *bool {
 return &v
 }
+
+// ValidateFields checks if all the required fields for a specific stage type have been set
+func ValidateFields(stageType string, v []string, array map[string]interface{}) error {
+ var missing []string
+
+ for _, field := range v {
+ switch value := array[field].(type) {
+ case string:
+ if value == "" {
+ missing = append(missing, field)
+ }
+ case []string:
+ if len(value) == 0 {
+ missing = append(missing, field)
+ }
+ case int:
+ if value == 0 {
+ missing = append(missing, field)
+ }
+ default:
+ if value == nil {
+ missing = append(missing, field)
+ }
+ }
+ }
+
+ // report only after every field has been inspected, so the check is not cut short inside the loop
+ for _, field := range missing {
+ return fmt.Errorf("stage type '%s': required field `%s` is missing", stageType, field)
+ }
+ return nil
+}

From cf85d7c96be1474822865c7c7d7a3d5e91b5c715 Mon Sep 17 00:00:00 2001
From: Karol Pasternak 
Date: Tue, 23 Jul 2019 15:25:54 -0700
Subject: [PATCH 10/22] get spinnaker_pipeline_document tests aligned with the data source
---
 .../datasource_pipeline_document_test.go | 107 +++++++++++-------
 1 file changed, 69 insertions(+), 38 deletions(-)

diff --git a/spinnaker/datasource_pipeline_document_test.go b/spinnaker/datasource_pipeline_document_test.go
index 8852c9a..62aed92 100644
--- a/spinnaker/datasource_pipeline_document_test.go
+++ b/spinnaker/datasource_pipeline_document_test.go
@@ -2,11 +2,13 @@ package spinnaker
 import (
 "fmt"
+ "log"
 "reflect"
 "strings"
"testing" "github.com/armory-io/terraform-provider-spinnaker/spinnaker/api" + "github.com/google/go-cmp/cmp" "github.com/hashicorp/terraform/helper/schema" ) @@ -28,58 +30,84 @@ func TestDocumentSchemaMatchesStruct(t *testing.T) { GetFieldTagsFromStruct("", api.PipelineDocument{}, "mapstructure", tags) // some of the fields have different format. The reason for that is to simplify - // terraform definition of the reasurce. Because of that schema.Resource does not match + // terraform definition of the resource. Because of that schema.Resource does not match // 1:1 the json format. There is a transformation performed in: // parametersDecodeDocument() and stageDecodeDocument(). Other fields are just computed // and not present in the either struct or Schema skipFields := []string{ - ".pipeline", ".pipeline.json", ".pipeline.parameter.hasoptions", ".pipeline.stage.container.env", - ".pipeline.stage.judgment_inputs", ".pipeline.stage.container.image", ".pipeline.stage.stage_enabled", - ".pipeline.stage.variables", ".pipeline.stage.container.envvars", "pipeline.stage.container.envvars.name", - ".pipeline.stage.container.envvars.value", ".pipeline.stage.container.image", ".pipeline.stage.judgmentinputs", - ".pipeline.stage.judgmentinputs.value", ".pipeline.stage.stageenabled", ".pipeline.stage.stageenabled.expression", - ".pipeline.stage.stageenabled.type", ".pipeline.stage.variables", ".pipeline.stage.variables.key", - ".pipeline.stage.variables.value", ".pipeline.source_json", ".pipeline.override_json", ".pipeline.stage.container.envvars.name", - ".pipeline.limit_concurrent", ".pipeline.parallel", ".pipeline.stage.deferred_initialization", ".pipeline.wait", - ".pipeline.stage.precondition.context", ".pipeline.parameter.options.value", + ".pipeline", ".pipeline.config", ".pipeline.stage", ".pipeline.json", ".pipeline.override_json", ".pipeline.parameter.hasoptions", + ".pipeline.parameter.options.value", ".pipeline.source_json", ".pipeline.stage.alias", ".pipeline.stage.app", ".pipeline.stage.container.env", + ".pipeline.stage.container.envvars", ".pipeline.stage.container.envvars.name", ".pipeline.stage.container.envvars.value", ".pipeline.stage.judgment_inputs", + ".pipeline.stage.judgmentinputs", ".pipeline.stage.judgmentinputs.value", ".pipeline.stage.manifestname", ".pipeline.stage.manifests", + ".pipeline.stage.notification.message.stage_completed", ".pipeline.stage.notification.message.stage_failed", ".pipeline.stage.notification.message.stage_starting", + ".pipeline.stage.notification.message.stagecompleted", ".pipeline.stage.notification.message.stagecompleted.text", ".pipeline.stage.notification.message.stagefailed", + ".pipeline.stage.notification.message.stagefailed.text", ".pipeline.stage.notification.message.stagestarting", ".pipeline.stage.notification.message.stagestarting.text", + ".pipeline.stage.options.merge_strategy", ".pipeline.stage.options.mergestrategy", ".pipeline.stage.patch_body", ".pipeline.stage.patchbody", ".pipeline.stage.sendnotification", + ".pipeline.stage.stage_enabled", ".pipeline.stage.stage_enabled.expression", ".pipeline.stage.stageenabled", ".pipeline.stage.variables.key", ".pipeline.stage.variables.value", } - // transofrm some of the fields to make comparision more accurate. + // // transofrm some of the fields to make comparision more accurate. schemas[".config"] = schemas[".pipeline.config"] - delete(schemas, ".pipeline.config") // some of the fields have different type. 
In struct representation // they will have more likely `struct` type, in schema either map or slice, // more rare bool ==> ptr mapping - assertEqual(t, schemas[".pipeline.stage.container.env"], "map") - assertEqual(t, schemas[".pipeline.stage.judgment_inputs"], "slice") - assertEqual(t, schemas[".pipeline.stage.container.image"], "map") - assertEqual(t, schemas[".pipeline.stage.stage_enabled"], "map") - assertEqual(t, schemas[".pipeline.stage.variables"], "map") + assertEqual(t, tags[".pipeline.limit_concurrent"], "ptr") assertEqual(t, schemas[".pipeline.limit_concurrent"], "bool") + skipFields = append(skipFields, ".pipeline.limit_concurrent") + + assertEqual(t, tags[".pipeline.parallel"], "ptr") assertEqual(t, schemas[".pipeline.parallel"], "bool") - assertEqual(t, schemas[".pipeline.stage.deferred_initialization"], "bool") - assertEqual(t, schemas[".pipeline.wait"], "bool") - assertEqual(t, schemas[".pipeline.stage.precondition.context"], "map") + skipFields = append(skipFields, ".pipeline.parallel") + + assertEqual(t, tags[".pipeline.stage.complete_other_branches_then_fail"], "ptr") + assertEqual(t, schemas[".pipeline.stage.complete_other_branches_then_fail"], "bool") + skipFields = append(skipFields, ".pipeline.stage.complete_other_branches_then_fail") - assertEqual(t, tags[".pipeline.stage.container.envvars"], "slice") - assertEqual(t, tags[".pipeline.stage.container.envvars.name"], "string") - assertEqual(t, tags[".pipeline.stage.container.envvars.value"], "string") assertEqual(t, tags[".pipeline.stage.container.image"], "struct") - assertEqual(t, tags[".pipeline.stage.judgmentinputs"], "slice") - assertEqual(t, tags[".pipeline.stage.judgmentinputs.value"], "string") - assertEqual(t, tags[".pipeline.stage.stageenabled"], "struct") - assertEqual(t, tags[".pipeline.stage.stageenabled.expression"], "string") - assertEqual(t, tags[".pipeline.stage.stageenabled.type"], "string") + assertEqual(t, schemas[".pipeline.stage.container.image"], "map") + skipFields = append(skipFields, ".pipeline.stage.container.image") + + assertEqual(t, tags[".pipeline.stage.container.limits"], "struct") + assertEqual(t, schemas[".pipeline.stage.container.limits"], "map") + skipFields = append(skipFields, ".pipeline.stage.container.limits") + + assertEqual(t, tags[".pipeline.stage.continue_pipeline"], "ptr") + assertEqual(t, schemas[".pipeline.stage.continue_pipeline"], "bool") + skipFields = append(skipFields, ".pipeline.stage.continue_pipeline") + + assertEqual(t, tags[".pipeline.stage.fail_on_failed_expression"], "ptr") + assertEqual(t, schemas[".pipeline.stage.fail_on_failed_expression"], "bool") + skipFields = append(skipFields, ".pipeline.stage.fail_on_failed_expression") + + assertEqual(t, tags[".pipeline.stage.fail_pipeline"], "ptr") + assertEqual(t, schemas[".pipeline.stage.fail_pipeline"], "bool") + skipFields = append(skipFields, ".pipeline.stage.fail_pipeline") + + assertEqual(t, tags[".pipeline.stage.manifest"], "map") + assertEqual(t, schemas[".pipeline.stage.manifest"], "string") + skipFields = append(skipFields, ".pipeline.stage.manifest") + + assertEqual(t, tags[".pipeline.stage.notification.message"], "struct") + assertEqual(t, schemas[".pipeline.stage.notification.message"], "map") + skipFields = append(skipFields, ".pipeline.stage.notification.message") + + assertEqual(t, tags[".pipeline.stage.options"], "struct") + assertEqual(t, schemas[".pipeline.stage.options"], "map") + skipFields = append(skipFields, ".pipeline.stage.options") + + assertEqual(t, 
tags[".pipeline.stage.precondition.context"], "struct") + assertEqual(t, schemas[".pipeline.stage.precondition.context"], "map") + skipFields = append(skipFields, ".pipeline.stage.precondition.context") + assertEqual(t, tags[".pipeline.stage.variables"], "slice") - assertEqual(t, tags[".pipeline.stage.variables.key"], "string") - assertEqual(t, tags[".pipeline.stage.variables.value"], "string") - assertEqual(t, tags[".pipeline.parameter.options.value"], "string") - assertEqual(t, tags[".pipeline.limit_concurrent"], "ptr") - assertEqual(t, tags[".pipeline.parallel"], "ptr") - assertEqual(t, tags[".pipeline.stage.deferred_initialization"], "ptr") + assertEqual(t, schemas[".pipeline.stage.variables"], "map") + skipFields = append(skipFields, ".pipeline.stage.variables") + assertEqual(t, tags[".pipeline.wait"], "ptr") - assertEqual(t, tags[".pipeline.stage.precondition.context"], "struct") + assertEqual(t, schemas[".pipeline.wait"], "bool") + skipFields = append(skipFields, ".pipeline.wait") + // cleanup different values and make assertion for _, skip := range skipFields { delete(schemas, skip) @@ -88,6 +116,8 @@ func TestDocumentSchemaMatchesStruct(t *testing.T) { // Final assertion if !reflect.DeepEqual(tags, schemas) { + // For deeper investigation uncomment line below + log.Println(cmp.Diff(tags, schemas)) t.Fatal("PipelineDocument struct and data_source_spinnaker_pipeline_document do not match!") } } @@ -123,7 +153,7 @@ func GetFieldTagsFromStruct(prefix string, source interface{}, tagName string, t var key string if tag := field.Tag.Get(tagName); tag != "" { // Some values are skipped by mapstructure since they have different type in Schema.Resource - // and in the struct. Most likely those are embeded structs with Key/Value fields. + // and in the struct. Most likely those are embedded structs with Key/Value fields. 
// the reason for keeping that that way is to simplify the terraform definition and the same time // keep compatibility with the json output generated for spinnaker // affected fields are tagged with `mapstructure:"-"` @@ -132,12 +162,13 @@ func GetFieldTagsFromStruct(prefix string, source interface{}, tagName string, t } key = fmt.Sprintf("%s.%s", prefix, tag) - tags[key] = field.Type.Kind().String() } else { key = fmt.Sprintf("%s.%s", prefix, strings.ToLower(field.Name)) - tags[key] = field.Type.Kind().String() } + // get rid of `squash` tag + tags[strings.Replace(key, ".,squash", "", -1)] = field.Type.Kind().String() + switch field.Type.Kind() { case reflect.Struct: GetFieldTagsFromStruct(key, sourceValue.Field(i).Interface(), tagName, tags) From 58a6cbf8bf50d58df0e42107a5803da003af054f Mon Sep 17 00:00:00 2001 From: Karol Pasternak Date: Tue, 23 Jul 2019 15:26:19 -0700 Subject: [PATCH 11/22] Add initial documentation for the spinnaker_pipeline_document --- README.md | 209 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 209 insertions(+) diff --git a/README.md b/README.md index d4dbba8..2fb4b0c 100644 --- a/README.md +++ b/README.md @@ -97,6 +97,9 @@ resource "spinnaker_pipeline" "terraform_example" { * `name` - Pipeline name * `pipeline` - Pipeline JSON in string format, example `file(pipelines/example.json)` + +## Data source + ### `spinnaker_pipeline_template` #### Example Usage @@ -132,3 +135,209 @@ resource "spinnaker_pipeline_template_config" "terraform_example" { #### Argument Reference * `pipeline_config` - A yaml formated [DCD Spec pipeline configuration](https://github.com/spinnaker/dcd-spec/blob/master/PIPELINE_TEMPLATES.md#configurations) + + +### `spinnaker_pipeline_document` + +#### Example Usage + +``` +data "spinnaker_pipeline_document" "parameters" { + parameter { + description = "The description of the parameter" + default = "default value" + name = "PARAMETER1" + required = true + } + + parameter { + name = "ENVIRONMENT" + required = false + options = [ + "prod", + "preprod", + ] + } +} + +data "spinnaker_pipeline_document" "doc" { + description = "demonstrate pipeline document" + wait = true + limit_concurrent = false + + // source parameters + source_json = "${data.spinnaker_pipeline_document.parameters.json}" + + parameter { + name = "ANOTHER_PARAMETER" + } + + # runJob -> kubernetes + stage { + name = "stage name" + namespace = "namespace-name" + account = "k8s-account" + cloud_provider = "kubernetes" + cloud_provider_type = "kubernetes" + + container { + name = "container-name" + image_pull_policy = "ALWAYS" + + args = [ + "argument", + ] + + "command" = [ + "/opt/bin/app.sh", + ] + + env { + WORKSPACE = "$${parameters["ENVIRONMENT"]}" + HOST = "localhost" + } + + image { + account = "gcr" + id = "gcr.io/image:tag" + registry = "gcr.io" + repository = "image" + tag = "tag" + } + + ports { + container = 80 + name = "http" + protocol = "TCP" + } + } + + deferred_initialization = true + dns_policy = "ClusterFirst" + id = "1" + type = "runJob" + wait_for_completion = true + } + + # manualJudgment + stage { + name = "Manual Judgment" + fail_pipeline = true + instructions = "Apply?" 
+ judgment_inputs = ["yes", "no"] + id = "2" + depends_on = ["6"] + + stage_enabled { + expression = "Spring Expression Language (SpEL) here" + } + + type = "manualJudgment" + } + + # wait + stage { + name = "exit" + id = "4" + depends_on = ["2"] + + stage_enabled { + expression = "$${ #judgment("Manual Judgment").equals("no")}" + } + + type = "wait" + wait_time = 1 + } + + # evaluateVariables + stage { + name = "Evaluate variables" + fail_on_failed_expression = true + id = 5 + depends_on = ["3"] + type = "evaluateVariables" + + variables { + VARIABLE_NAME = "SpEL here" + } + } + + # checkPreconditions + stage { + name = "Changes to apply" + id = 6 + depends_on = ["1"] + type = "checkPreconditions" + + precondition { + context { + expression = "SpEL here" + } + + fail_pipeline = false + type = "expression" + } + } + + # runJobManifest + stage { + application = "${var.app_name}" + name = "stage name" + namespace = "$${parameters["ENVIRONMENT"]}" + account = "$${parameters["CLOUD"] == "gcp" ? "${var.k8s_account_gcp}" : "${var.k8s_account_aws}"}" + cloud_provider = "kubernetes" + + consume_artifact_source = "propertyFile" + property_file = "terraform-apply" + + id = "4" + depends_on = ["3"] + type = "runJobManifest" + skip_expression_evaluation = false + source = "text" + wait_for_completion = true + + manifest = "${data.template_file.apply.rendered}" + + stage_enabled { + expression = "$${ #judgment("Manual Judgment").equals("yes")}" + } + } + + # deployManifest + stage { + name = "configure namespaces" + namespace = "default" + account = "aws" + + id = "1" + type = "deployManifest" + skip_expression_evaluation = false + source = "text" + wait_for_completion = true + + moniker { + app = "${spinnaker_application.app.application}" + } + + manifest = < Date: Thu, 1 Aug 2019 13:43:04 -0700 Subject: [PATCH 12/22] add findArtifactsFromResource stage type --- spinnaker/api/stage.go | 10 +++--- spinnaker/datasource_pipeline_document.go | 34 +++++++++++++++++-- .../datasource_pipeline_document_test.go | 1 + 3 files changed, 38 insertions(+), 7 deletions(-) diff --git a/spinnaker/api/stage.go b/spinnaker/api/stage.go index be912a9..f4c80ff 100644 --- a/spinnaker/api/stage.go +++ b/spinnaker/api/stage.go @@ -122,9 +122,9 @@ type RunJobManifest struct { Manifest map[string]interface{} `json:"manifest,omitempty" mapstructure:"-"` } -type PatchManifest struct { +type PatchFindArtifactsFromResourceManifest struct { App string `json:"app,omitempty" mapstructure:"-"` - Location string `json:"location,omitempty"` + Location string `json:"location,omitempty" mapstructure:"-"` ManifestName string `json:"manifestName,omitempty" mapstructure:"-"` Mode string `json:"mode,omitempty"` Options struct { @@ -137,9 +137,9 @@ type KubernetesManifest struct { Source string `json:"source,omitempty"` ManifestArtifactAccount string `json:"manifestArtifactAccount,omitempty" mapstructure:"manifest_artifact_account"` - RunJobManifest `mapstructure:",squash"` - DeployManifest `mapstructure:",squash"` - PatchManifest `mapstructure:",squash"` + RunJobManifest `mapstructure:",squash"` + DeployManifest `mapstructure:",squash"` + PatchFindArtifactsFromResourceManifest `mapstructure:",squash"` } type Stage struct { diff --git a/spinnaker/datasource_pipeline_document.go b/spinnaker/datasource_pipeline_document.go index b1f1b7c..f4ad8c7 100644 --- a/spinnaker/datasource_pipeline_document.go +++ b/spinnaker/datasource_pipeline_document.go @@ -499,7 +499,7 @@ func datasourcePipelineDocument() *schema.Resource { Type: schema.TypeString, 
Optional: true, }, - "location": { + "kind": { Type: schema.TypeString, Optional: true, }, @@ -739,7 +739,7 @@ func stageDecodeDocument(field interface{}) ([]*api.Stage, error) { } } case "patchManifest": - if err := ValidateFields("patchManifest", []string{"location", "mode", "patch_body"}, stageField); err != nil { + if err := ValidateFields("patchManifest", []string{"patch_body"}, stageField); err != nil { return nil, err } manifestJSON, err := yaml.YAMLToJSON([]byte(stageField["patch_body"].(string))) @@ -757,10 +757,40 @@ func stageDecodeDocument(field interface{}) ([]*api.Stage, error) { sg.ManifestArtifactAccount = cloudProvider.(string) } } + + // default app: "" to the application name for the stage + sg.App = sg.Application + sg.Application = "" + // The only supported mode for the terraform invocation is static, so default to it. + // set source to "text" + sg.Mode = "static" + + // PatchManifest location in fact is a namespace, let's reuse + sg.Location = sg.Namespace + sg.Namespace = "" + + sg.ManifestName = fmt.Sprintf("%s %s", stageField["kind"].(string), stageField["manifest"]) + case "findArtifactsFromResource": + if err := ValidateFields("findArtifactsFromResource", []string{"manifest", "kind"}, stageField); err != nil { + return nil, err + } + // default app: "" to the application name for the stage + sg.App = sg.Application + sg.Application = "" + // The only supported mode for the terraform invocation is static, so default to it. + // set source to "text" + sg.Mode = "static" + + // PatchManifest location in fact is a namespace, let's reuse + sg.Location = sg.Namespace + sg.Namespace = "" + + sg.ManifestName = fmt.Sprintf("%s %s", stageField["kind"].(string), stageField["manifest"]) case "pipeline": if err := ValidateFields("pipeline", []string{"pipeline"}, stageField); err != nil { return nil, err } + case "wait": if err := ValidateFields("wait", []string{"wait_time"}, stageField); err != nil { return nil, err diff --git a/spinnaker/datasource_pipeline_document_test.go b/spinnaker/datasource_pipeline_document_test.go index 62aed92..a28498b 100644 --- a/spinnaker/datasource_pipeline_document_test.go +++ b/spinnaker/datasource_pipeline_document_test.go @@ -44,6 +44,7 @@ func TestDocumentSchemaMatchesStruct(t *testing.T) { ".pipeline.stage.notification.message.stagefailed.text", ".pipeline.stage.notification.message.stagestarting", ".pipeline.stage.notification.message.stagestarting.text", ".pipeline.stage.options.merge_strategy", ".pipeline.stage.options.mergestrategy", ".pipeline.stage.patch_body", ".pipeline.stage.patchbody", ".pipeline.stage.sendnotification", ".pipeline.stage.stage_enabled", ".pipeline.stage.stage_enabled.expression", ".pipeline.stage.stageenabled", ".pipeline.stage.variables.key", ".pipeline.stage.variables.value", + ".pipeline.stage.kind", ".pipeline.stage.location", } // // transofrm some of the fields to make comparision more accurate. 
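For illustration, a minimal sketch of how the `ValidateFields` helper introduced in PATCH 09 behaves once the patches above are applied; the test name and the stage map below are hypothetical, and the zero value of `wait_time` is caught by the helper's `int` case:

```
package spinnaker

import "testing"

// TestValidateFieldsSketch is illustrative only: a "wait" stage requires a
// non-zero wait_time, so the zero value below should yield an error.
func TestValidateFieldsSketch(t *testing.T) {
	stage := map[string]interface{}{
		"type":      "wait",
		"wait_time": 0, // the int case treats the zero value as missing
	}
	if err := ValidateFields("wait", []string{"wait_time"}, stage); err == nil {
		t.Fatal("expected a required-field error for wait_time")
	}
}
```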
From 8f9db170760d878e6f87c3fa7b21fc1f98fdf4b6 Mon Sep 17 00:00:00 2001 From: Karol Pasternak Date: Fri, 2 Aug 2019 11:10:51 -0700 Subject: [PATCH 13/22] default pipeline parameter to empty string --- spinnaker/api/pipeline.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spinnaker/api/pipeline.go b/spinnaker/api/pipeline.go index d95f887..980c4bb 100644 --- a/spinnaker/api/pipeline.go +++ b/spinnaker/api/pipeline.go @@ -29,7 +29,7 @@ type PipelineDocument struct { type PipelineParameter struct { Description string `json:"description,omitempty"` - Default string `json:"default,omitempty"` + Default string `json:"default"` Name string `json:"name"` Required bool `json:"required"` HasOptions bool `json:"hasOptions"` From 25d9349a2209b933c3afa05ff14921ff66fb22a4 Mon Sep 17 00:00:00 2001 From: Karol Pasternak Date: Mon, 19 Aug 2019 15:27:28 -0700 Subject: [PATCH 14/22] Add docs related to data.spinnaker_pipeline_document --- README.md | 205 +----------------- docs/data.spinnaker_pipeline_document.md | 97 +++++++++ .../checkPreconditions.md | 30 +++ .../deployManifest.md | 28 +++ .../evaluateVariables.md | 23 ++ .../findArtifactsFromResource.md | 26 +++ .../manualJudgment.md | 20 ++ .../patchManifest.md | 20 ++ docs/spinnaker_pipeline_document/pipeline.md | 22 ++ docs/spinnaker_pipeline_document/runJob.md | 78 +++++++ .../runJobManifest.md | 28 +++ docs/spinnaker_pipeline_document/wait.md | 15 ++ 12 files changed, 388 insertions(+), 204 deletions(-) create mode 100644 docs/data.spinnaker_pipeline_document.md create mode 100644 docs/spinnaker_pipeline_document/checkPreconditions.md create mode 100644 docs/spinnaker_pipeline_document/deployManifest.md create mode 100644 docs/spinnaker_pipeline_document/evaluateVariables.md create mode 100644 docs/spinnaker_pipeline_document/findArtifactsFromResource.md create mode 100644 docs/spinnaker_pipeline_document/manualJudgment.md create mode 100644 docs/spinnaker_pipeline_document/patchManifest.md create mode 100644 docs/spinnaker_pipeline_document/pipeline.md create mode 100644 docs/spinnaker_pipeline_document/runJob.md create mode 100644 docs/spinnaker_pipeline_document/runJobManifest.md create mode 100644 docs/spinnaker_pipeline_document/wait.md diff --git a/README.md b/README.md index 2fb4b0c..af5d86f 100644 --- a/README.md +++ b/README.md @@ -137,207 +137,4 @@ resource "spinnaker_pipeline_template_config" "terraform_example" { * `pipeline_config` - A yaml formated [DCD Spec pipeline configuration](https://github.com/spinnaker/dcd-spec/blob/master/PIPELINE_TEMPLATES.md#configurations) -### `spinnaker_pipeline_document` - -#### Example Usage - -``` -data "spinnaker_pipeline_document" "parameters" { - parameter { - description = "The description of the parameter" - default = "default value" - name = "PARAMETER1" - required = true - } - - parameter { - name = "ENVIRONMENT" - required = false - options = [ - "prod", - "preprod", - ] - } -} - -data "spinnaker_pipeline_document" "doc" { - description = "demonstrate pipeline document" - wait = true - limit_concurrent = false - - // source parameters - source_json = "${data.spinnaker_pipeline_document.parameters.json}" - - parameter { - name = "ANOTHER_PARAMETER" - } - - # runJob -> kubernetes - stage { - name = "stage name" - namespace = "namespace-name" - account = "k8s-account" - cloud_provider = "kubernetes" - cloud_provider_type = "kubernetes" - - container { - name = "container-name" - image_pull_policy = "ALWAYS" - - args = [ - "argument", - ] - - "command" = [ - "/opt/bin/app.sh", - 
] - - env { - WORKSPACE = "$${parameters["ENVIRONMENT"]}" - HOST = "localhost" - } - - image { - account = "gcr" - id = "gcr.io/image:tag" - registry = "gcr.io" - repository = "image" - tag = "tag" - } - - ports { - container = 80 - name = "http" - protocol = "TCP" - } - } - - deferred_initialization = true - dns_policy = "ClusterFirst" - id = "1" - type = "runJob" - wait_for_completion = true - } - - # manualJudgment - stage { - name = "Manual Judgment" - fail_pipeline = true - instructions = "Apply?" - judgment_inputs = ["yes", "no"] - id = "2" - depends_on = ["6"] - - stage_enabled { - expression = "Spring Expression Language (SpEL) here" - } - - type = "manualJudgment" - } - - # wait - stage { - name = "exit" - id = "4" - depends_on = ["2"] - - stage_enabled { - expression = "$${ #judgment("Manual Judgment").equals("no")}" - } - - type = "wait" - wait_time = 1 - } - - # evaluateVariables - stage { - name = "Evaluate variables" - fail_on_failed_expression = true - id = 5 - depends_on = ["3"] - type = "evaluateVariables" - - variables { - VARIABLE_NAME = "SpEL here" - } - } - - # checkPreconditions - stage { - name = "Changes to apply" - id = 6 - depends_on = ["1"] - type = "checkPreconditions" - - precondition { - context { - expression = "SpEL here" - } - - fail_pipeline = false - type = "expression" - } - } - - # runJobManifest - stage { - application = "${var.app_name}" - name = "stage name" - namespace = "$${parameters["ENVIRONMENT"]}" - account = "$${parameters["CLOUD"] == "gcp" ? "${var.k8s_account_gcp}" : "${var.k8s_account_aws}"}" - cloud_provider = "kubernetes" - - consume_artifact_source = "propertyFile" - property_file = "terraform-apply" - - id = "4" - depends_on = ["3"] - type = "runJobManifest" - skip_expression_evaluation = false - source = "text" - wait_for_completion = true - - manifest = "${data.template_file.apply.rendered}" - - stage_enabled { - expression = "$${ #judgment("Manual Judgment").equals("yes")}" - } - } - - # deployManifest - stage { - name = "configure namespaces" - namespace = "default" - account = "aws" - - id = "1" - type = "deployManifest" - skip_expression_evaluation = false - source = "text" - wait_for_completion = true - - moniker { - app = "${spinnaker_application.app.application}" - } - - manifest = < Date: Tue, 20 Aug 2019 11:04:31 -0700 Subject: [PATCH 15/22] for k8s v2 default cloud_provider to kubernetes --- .../deployManifest.md | 5 ++-- .../findArtifactsFromResource.md | 3 +-- .../runJobManifest.md | 3 +-- spinnaker/datasource_pipeline_document.go | 24 +++++++++++++++++++ 4 files changed, 28 insertions(+), 7 deletions(-) diff --git a/docs/spinnaker_pipeline_document/deployManifest.md b/docs/spinnaker_pipeline_document/deployManifest.md index 2a80ab2..22e7ad8 100644 --- a/docs/spinnaker_pipeline_document/deployManifest.md +++ b/docs/spinnaker_pipeline_document/deployManifest.md @@ -6,7 +6,6 @@ data "spinnaker_pipeline_document" "example" { name = "...." namespace = "default" account = "spinnaker-registered-kubernetes-account" - cloud_provider = "kubernetes" manifest = "${data.template_file.example.rendered}" @@ -23,6 +22,6 @@ The following arguments are supported: - `namespace` - The namespace the pod will be deployed into. - `account` - The name of the kubernetes spinnaker account name to deploy the pod to. -- `cloud_provider` - The clouddriver's driver name. +- `cloud_provider` (optional) - The clouddriver's driver name. Defaults to `kubernetes` - `source` - The field specifies the source of the manifest. 
At this stage only `text` is being supported. -- `moniker` - Configures custom moniker for the runJob. \ No newline at end of file +- `moniker` (optional) - Configures custom moniker for the runJob. Used for custom annotations, see [docs](https://www.spinnaker.io/reference/providers/kubernetes-v2/#moniker) diff --git a/docs/spinnaker_pipeline_document/findArtifactsFromResource.md b/docs/spinnaker_pipeline_document/findArtifactsFromResource.md index deecc21..127c211 100644 --- a/docs/spinnaker_pipeline_document/findArtifactsFromResource.md +++ b/docs/spinnaker_pipeline_document/findArtifactsFromResource.md @@ -4,7 +4,6 @@ data "spinnaker_pipeline_document" "example" { stage { account = "kubernetes-account-name-registered-with-spinnaker" - cloud_provider = "kubernetes" namespace = "kube-system" kind = "configmap" @@ -20,7 +19,7 @@ data "spinnaker_pipeline_document" "example" { The following arguments are supported: - `account` - The name of the kubernetes account registered with the spinnaker -- `cloud_provider` - The clouddriver's driver. +- `cloud_provider` (optional) - The clouddriver's driver. Defaults to `kubernetes` - `namespace` - The namespace to look into for the artifact - `kind` - The kind of the kubernetes resource. - `manifest` - The object to query for. diff --git a/docs/spinnaker_pipeline_document/runJobManifest.md b/docs/spinnaker_pipeline_document/runJobManifest.md index f981e07..93b8281 100644 --- a/docs/spinnaker_pipeline_document/runJobManifest.md +++ b/docs/spinnaker_pipeline_document/runJobManifest.md @@ -6,7 +6,6 @@ data "spinnaker_pipeline_document" "example" { name = "...." namespace = "default" account = "spinnaker-registered-kubernetes-account" - cloud_provider = "kubernetes" manifest = "${data.template_file.example.rendered}" @@ -23,6 +22,6 @@ The following arguments are supported: - `namespace` - The namespace the pod will be deployed into. - `account` - The name of the kubernetes spinnaker account name to deploy the pod to. -- `cloud_provider` - The clouddriver's driver name. +- `cloud_provider` (optional) - The clouddriver's driver name. Defaults to `kubernetes` - `source` - The field specifies the source of the manifest. At this stage only `text` is being supported. - `moniker` - Configures custom moniker for the runJob. 
\ No newline at end of file diff --git a/spinnaker/datasource_pipeline_document.go b/spinnaker/datasource_pipeline_document.go index f4ad8c7..adc582f 100644 --- a/spinnaker/datasource_pipeline_document.go +++ b/spinnaker/datasource_pipeline_document.go @@ -704,6 +704,12 @@ func stageDecodeDocument(field interface{}) ([]*api.Stage, error) { return nil, err } + // Since runJobManifest is a stage type supported only by the kubernetes v2 driver + // default it to `kubernetes` unless specified otherwise + if _, defined := stageField["cloud_provider"]; !defined { + sg.CloudProvider = "kubernetes" + } + // default to CloudProvider if field is not defined if _, defined := stageField["manifest_artifact_account"]; !defined { if cloudProvider, ok := stageField["cloud_provider"]; ok { @@ -732,6 +738,12 @@ func stageDecodeDocument(field interface{}) ([]*api.Stage, error) { sg.Manifests = append(sg.Manifests, manifestMap) } + // Since deployManifest is a stage type supported only by the kubernetes v2 driver + // default it to `kubernetes` unless specified otherwise + if _, defined := stageField["cloud_provider"]; !defined { + sg.CloudProvider = "kubernetes" + } + // default to CloudProvider if field is not defined if _, defined := stageField["manifest_artifact_account"]; !defined { if cloudProvider, ok := stageField["cloud_provider"]; ok { @@ -751,6 +763,12 @@ func stageDecodeDocument(field interface{}) ([]*api.Stage, error) { return nil, err } + // Since patchManifest is a stage type supported only by the kubernetes v2 driver + // default it to `kubernetes` unless specified otherwise + if _, defined := stageField["cloud_provider"]; !defined { + sg.CloudProvider = "kubernetes" + } + // default to CloudProvider if field is not defined if _, defined := stageField["manifest_artifact_account"]; !defined { if cloudProvider, ok := stageField["cloud_provider"]; ok { @@ -785,6 +803,12 @@ func stageDecodeDocument(field interface{}) ([]*api.Stage, error) { sg.Location = sg.Namespace sg.Namespace = "" + // Since findArtifactsFromResource is a stage type supported only by the kubernetes v2 driver + // default it to `kubernetes` unless specified otherwise + if _, defined := stageField["cloud_provider"]; !defined { + sg.CloudProvider = "kubernetes" + } + sg.ManifestName = fmt.Sprintf("%s %s", stageField["kind"].(string), stageField["manifest"]) case "pipeline": if err := ValidateFields("pipeline", []string{"pipeline"}, stageField); err != nil { From 7cae2691f60dcc8a14e818b487d1526e2201daf7 Mon Sep 17 00:00:00 2001 From: Graham Krizek Date: Wed, 28 Aug 2019 13:21:40 -0500 Subject: [PATCH 16/22] Add support for multiple documents in a single yaml manifest --- spinnaker/datasource_pipeline_document.go | 25 ++++++++++++++++------- 1 file changed, 18 insertions(+), 7 deletions(-) diff --git a/spinnaker/datasource_pipeline_document.go b/spinnaker/datasource_pipeline_document.go index adc582f..68e8551 100644 --- a/spinnaker/datasource_pipeline_document.go +++ b/spinnaker/datasource_pipeline_document.go @@ -5,6 +5,7 @@ import ( "fmt" "sort" "strconv" + "strings" "github.com/armory-io/terraform-provider-spinnaker/spinnaker/api" "github.com/ghodss/yaml" @@ -724,14 +725,24 @@ func stageDecodeDocument(field interface{}) ([]*api.Stage, error) { if err := ValidateFields("deployManifest", []string{"manifest", "account", "source"}, stageField); err != nil { return nil, err } - manifestJSON, err := yaml.YAMLToJSON([]byte(stageField["manifest"].(string))) - if err != nil { - return nil, err - } + + // The YAMLtoJSON function doesn't 
currently support yaml files with multiple documents (separated by '---').
+ // Therefore we need to split the yaml file ourselves and convert the documents individually.
+ // Then we append to manifestMap after each conversion
+
+ manifests := strings.Split(stageField["manifest"].(string), "---\n")
 manifestMap := make(map[string]interface{})
- err = json.Unmarshal(manifestJSON, &manifestMap)
- if err != nil {
- return nil, err
+
+ for i := range manifests {
+ manifestJSON, err := yaml.YAMLToJSON([]byte(manifests[i]))
+ if err != nil {
+ return nil, err
+ }
+
+ err = json.Unmarshal(manifestJSON, &manifestMap)
+ if err != nil {
+ return nil, err
+ }
 }

 if manifestMap != nil {

From cfd8f645ae2856f6fb352b52c0a92bd045ec5004 Mon Sep 17 00:00:00 2001
From: Graham Krizek 
Date: Wed, 28 Aug 2019 14:25:08 -0500
Subject: [PATCH 17/22] Create and append each manifest individually
---
 go.mod | 1 +
 spinnaker/datasource_pipeline_document.go | 13 ++++++-------
 2 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/go.mod b/go.mod
index 4e8190d..c82b3c4 100644
--- a/go.mod
+++ b/go.mod
@@ -5,6 +5,7 @@ go 1.12
 require (
 cloud.google.com/go v0.37.4 // indirect
 github.com/ghodss/yaml v1.0.0
+ github.com/google/go-cmp v0.2.0
 github.com/hashicorp/terraform v0.12.0
 github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d // indirect
 github.com/mitchellh/mapstructure v1.1.2
diff --git a/spinnaker/datasource_pipeline_document.go b/spinnaker/datasource_pipeline_document.go
index 68e8551..22dbbdd 100644
--- a/spinnaker/datasource_pipeline_document.go
+++ b/spinnaker/datasource_pipeline_document.go
@@ -728,12 +728,11 @@ func stageDecodeDocument(field interface{}) ([]*api.Stage, error) {
 // The YAMLtoJSON function doesn't currently support yaml files with multiple documents (separated by '---').
 // Therefore we need to split the yaml file ourselves and convert the documents individually.
- // Then we append to manifestMap after each conversion - + // Then we append to sg.Manifests after each conversion manifests := strings.Split(stageField["manifest"].(string), "---\n") - manifestMap := make(map[string]interface{}) - for i := range manifests { + manifestMap := make(map[string]interface{}) + manifestJSON, err := yaml.YAMLToJSON([]byte(manifests[i])) if err != nil { return nil, err @@ -743,10 +742,10 @@ func stageDecodeDocument(field interface{}) ([]*api.Stage, error) { if err != nil { return nil, err } - } - if manifestMap != nil { - sg.Manifests = append(sg.Manifests, manifestMap) + if manifestMap != nil { + sg.Manifests = append(sg.Manifests, manifestMap) + } } // Since deployManifest is a stage type supported only by the kubernetes v2 driver From 9bb139259172d2abf1b00390a8e1a1ec40870417 Mon Sep 17 00:00:00 2001 From: Jonathan Wood Date: Wed, 4 Sep 2019 13:13:32 -0700 Subject: [PATCH 18/22] Notificationsssssssssssssss --- spinnaker/api/stage.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spinnaker/api/stage.go b/spinnaker/api/stage.go index f4c80ff..5240115 100644 --- a/spinnaker/api/stage.go +++ b/spinnaker/api/stage.go @@ -162,7 +162,7 @@ type Stage struct { // Notifications and SendNotifcations is shared between: // runPipeline and manualJudgment - Notifications []*Notification `json:"notification,omitempty" mapstructure:"notification"` + Notifications []*Notification `json:"notifications,omitempty" mapstructure:"notification"` SendNotification bool `json:"sendNotifications"` // embedded structs/stage types From e5b80c75eb77aedf1fbdc3d9fb174257849ce2e9 Mon Sep 17 00:00:00 2001 From: Jonathan Wood Date: Wed, 4 Sep 2019 14:19:57 -0700 Subject: [PATCH 19/22] Added cron trigger support --- spinnaker/api/helper.go | 3 ++ spinnaker/api/pipeline.go | 8 ++++- spinnaker/datasource_pipeline_document.go | 44 +++++++++++++++++++++++ 3 files changed, 54 insertions(+), 1 deletion(-) diff --git a/spinnaker/api/helper.go b/spinnaker/api/helper.go index 51c8e6a..06d2574 100644 --- a/spinnaker/api/helper.go +++ b/spinnaker/api/helper.go @@ -21,6 +21,9 @@ func (oldDoc *PipelineDocument) Merge(newDoc *PipelineDocument) { if newDoc.KeepWaitingPipelines != nil { oldDoc.KeepWaitingPipelines = newDoc.KeepWaitingPipelines } + if newDoc.Triggers != nil { + oldDoc.Triggers = newDoc.Triggers + } if newDoc.Parameters != nil { for _, newParam := range newDoc.Parameters { found := false diff --git a/spinnaker/api/pipeline.go b/spinnaker/api/pipeline.go index 980c4bb..987f6d3 100644 --- a/spinnaker/api/pipeline.go +++ b/spinnaker/api/pipeline.go @@ -14,7 +14,6 @@ type PipelineConfig struct { Type string `json:"type,omitempty"` Name string `json:"name"` Application string `json:"application"` - Triggers []map[string]interface{} `json:"triggers,omitempty"` ExpectedArtifacts []map[string]interface{} `json:"expectedArtifacts,omitempty"` Notifications []map[string]interface{} `json:"notifications,omitempty"` LastModifiedBy string `json:"lastModifiedBy"` @@ -41,6 +40,12 @@ type Options struct { Value string `json:"value"` } +type Trigger struct { + Type string `json:"type,omitempty"` + Enabled bool `json:"enabled,omitempty"` + CronExpression string `json:"cronExpression,omitempty"` +} + type Pipeline struct { Description string `json:"description,omitempty"` ExecutionEngine string `json:"executionEngine,omitempty" mapstructure:"engine"` @@ -49,6 +54,7 @@ type Pipeline struct { KeepWaitingPipelines *bool `json:"keepWaitingPipelines,omitempty" mapstructure:"wait"` Stages []*Stage 
`json:"stages,omitempty" mapstructure:"stage"` Parameters []*PipelineParameter `json:"parameterConfig,omitempty" mapstructure:"parameter"` + Triggers []*Trigger `json:"triggers,omitempty" mapstructure:"trigger"` } func CreatePipeline(client *gate.GatewayClient, pipeline interface{}) error { diff --git a/spinnaker/datasource_pipeline_document.go b/spinnaker/datasource_pipeline_document.go index 22dbbdd..83aad45 100644 --- a/spinnaker/datasource_pipeline_document.go +++ b/spinnaker/datasource_pipeline_document.go @@ -54,6 +54,26 @@ func datasourcePipelineDocument() *schema.Resource { Type: schema.TypeBool, Optional: true, }, + "trigger": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Optional: true, + }, + "enabled": { + Type: schema.TypeBool, + Optional: true, + }, + "cron_expression": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, "parameter": { Type: schema.TypeList, Optional: true, @@ -567,6 +587,10 @@ func datasourcePipelineDocumentRead(d *schema.ResourceData, meta interface{}) er spDoc.KeepWaitingPipelines = Bool(keepWaiting.(bool)) } + if triggers, ok := d.GetOk("trigger"); ok { + spDoc.Triggers = triggerDecodeDocument(triggers) + } + // decouple parameters if parameters, ok := d.GetOk("parameter"); ok { spDoc.Parameters = parametersDecodeDocument(parameters) @@ -603,6 +627,26 @@ func datasourcePipelineDocumentRead(d *schema.ResourceData, meta interface{}) er return nil } +// triggerDecodeDocument iterates over each trigger. +func triggerDecodeDocument(triggers interface{}) []*api.Trigger { + var selTriggers = triggers.([]interface{}) + trigs := make([]*api.Trigger, len(selTriggers)) + + for i, trig := range selTriggers { + ftrig := trig.(map[string]interface{}) + tr := &api.Trigger{ + Type: ftrig["type"].(string), + Enabled: ftrig["enabled"].(bool), + } + + if cronExp := ftrig["cron_expression"].(string); len(cronExp) > 0 { + tr.CronExpression = cronExp + } + trigs[i] = tr + } + return trigs +} + // parametersDecodeDocument iterates over each parameter. // The parameter "hasOptions" is assumed based on the fact if the "options" are being // populated or not From 39932cb427faf3ca1521e173fe5e225af8fd3516 Mon Sep 17 00:00:00 2001 From: Jonathan Wood Date: Wed, 4 Sep 2019 14:32:17 -0700 Subject: [PATCH 20/22] Fix lint and tests. 
--- spinnaker/datasource_pipeline_document_test.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/spinnaker/datasource_pipeline_document_test.go b/spinnaker/datasource_pipeline_document_test.go
index a28498b..9587766 100644
--- a/spinnaker/datasource_pipeline_document_test.go
+++ b/spinnaker/datasource_pipeline_document_test.go
@@ -44,7 +44,7 @@ func TestDocumentSchemaMatchesStruct(t *testing.T) {
 ".pipeline.stage.notification.message.stagefailed.text", ".pipeline.stage.notification.message.stagestarting", ".pipeline.stage.notification.message.stagestarting.text",
 ".pipeline.stage.options.merge_strategy", ".pipeline.stage.options.mergestrategy", ".pipeline.stage.patch_body", ".pipeline.stage.patchbody", ".pipeline.stage.sendnotification",
 ".pipeline.stage.stage_enabled", ".pipeline.stage.stage_enabled.expression", ".pipeline.stage.stageenabled", ".pipeline.stage.variables.key", ".pipeline.stage.variables.value",
- ".pipeline.stage.kind", ".pipeline.stage.location",
+ ".pipeline.stage.kind", ".pipeline.stage.location", ".pipeline.trigger.cron_expression", ".pipeline.trigger.cronexpression",
 }

 // // transofrm some of the fields to make comparision more accurate.

From 139c3d9c7060c883ad96f4c4dfe4ee675dcb1634 Mon Sep 17 00:00:00 2001
From: Karol Pasternak 
Date: Thu, 3 Oct 2019 09:32:43 -0700
Subject: [PATCH 21/22] implement lock and disable pipeline
---
 README.md | 4 ++++
 spinnaker/resource_pipeline.go | 20 ++++++++++++++++++++
 2 files changed, 24 insertions(+)

diff --git a/README.md b/README.md
index af5d86f..1321ebd 100644
--- a/README.md
+++ b/README.md
@@ -88,6 +88,8 @@ resource "spinnaker_pipeline" "terraform_example" {
 application = "${spinnaker_application.my_app.application}"
 name = "Example Pipeline"
 pipeline = file("pipelines/example.json")
+ lock = true
+ disable = true
 }
 ```

@@ -96,6 +98,8 @@
 * `application` - Application name
 * `name` - Pipeline name
 * `pipeline` - Pipeline JSON in string format, example `file(pipelines/example.json)`
+* `lock` - Lock the pipeline to prevent edits from the UI
+* `disable` - Disable execution of the pipeline

 ## Data source

diff --git a/spinnaker/resource_pipeline.go b/spinnaker/resource_pipeline.go
index e6567f8..0c20f35 100644
--- a/spinnaker/resource_pipeline.go
+++ b/spinnaker/resource_pipeline.go
@@ -27,6 +27,16 @@ func resourcePipeline() *schema.Resource {
 Required: true,
 DiffSuppressFunc: pipelineDiffSuppressFunc,
 },
+ "lock": {
+ Type: schema.TypeBool,
+ Optional: true,
+ Default: false,
+ },
+ "disable": {
+ Type: schema.TypeBool,
+ Optional: true,
+ Default: false,
+ },
 "pipeline_id": {
 Type: schema.TypeString,
 Computed: true,
@@ -52,6 +62,7 @@ func resourcePipelineCreate(data *schema.ResourceData, meta interface{}) error {
 applicationName := data.Get("application").(string)
 pipelineName := data.Get("name").(string)
 pipeline := data.Get("pipeline").(string)
+ disable := data.Get("disable").(bool)
 var tmp map[string]interface{}
 if err := json.NewDecoder(strings.NewReader(pipeline)).Decode(&tmp); err != nil {
@@ -60,6 +71,15 @@ func resourcePipelineCreate(data *schema.ResourceData, meta interface{}) error {
 tmp["application"] = applicationName
 tmp["name"] = pipelineName
+ tmp["disabled"] = disable
+
+ if lock := data.Get("lock").(bool); lock {
+ tmp["locked"] = map[string]interface{}{
+ "allowUnlockUi": false,
+ "description": "Locked with terraform",
+ }
+ }
+
 delete(tmp, "id")
 if err := api.CreatePipeline(client, tmp); err != nil {

From 96c925429be389b4017b1f3e2c721a72bd5fda59
Mon Sep 17 00:00:00 2001 From: Graham Krizek Date: Wed, 9 Oct 2019 16:43:15 -0500 Subject: [PATCH 22/22] Add failure_message field to a precondition stage --- .../checkPreconditions.md | 2 ++ spinnaker/api/stage.go | 13 +++++++------ spinnaker/datasource_pipeline_document.go | 4 ++++ 3 files changed, 13 insertions(+), 6 deletions(-) diff --git a/docs/spinnaker_pipeline_document/checkPreconditions.md b/docs/spinnaker_pipeline_document/checkPreconditions.md index 179067c..72e6787 100644 --- a/docs/spinnaker_pipeline_document/checkPreconditions.md +++ b/docs/spinnaker_pipeline_document/checkPreconditions.md @@ -10,6 +10,7 @@ data "spinnaker_pipeline_document" "example" { precondition { context { expression = "$${#stage("plan")["outputs"]["jobStatus"]["logs"].toString().split("PLAN_EXITSTATUS")[1].equals("0")}" + failure_message = "This is a failure message" } fail_pipeline = false @@ -26,5 +27,6 @@ The following arguments are supported: - `precondition` - A nested configuration block (described below) configuring precondition for the stage. - `context` - A nested configuration block with the `expression` setting (only supported at this stage) - `expression` - A SpEL expression returing either `true` or `false` + - `failure_message` - This failure message will be shown to the user if the precondition evaluates to false. - `fail_pipeline` - If set to `true` the overall pipeline will fail whenever this precondition is false. - `type` - the type of the precondition. At this stage only `expression` is being supported. diff --git a/spinnaker/api/stage.go b/spinnaker/api/stage.go index 5240115..d87f7dd 100644 --- a/spinnaker/api/stage.go +++ b/spinnaker/api/stage.go @@ -79,12 +79,13 @@ type CheckPrecondition struct { Preconditions []struct { CloudProvider string `json:"cloudProvider,omitempty" mapstructure:"cloud_provider"` Context struct { - Cluster string `json:"cluster,omitempty"` - Comparison string `json:"comparison,omitempty"` - Credentials string `json:"credentials,omitempty"` - Expected int `json:"expected,omitempty"` - Regions []string `json:"regions,omitempty"` - Expression string `json:"expression,omitempty"` + Cluster string `json:"cluster,omitempty"` + Comparison string `json:"comparison,omitempty"` + Credentials string `json:"credentials,omitempty"` + Expected int `json:"expected,omitempty"` + Regions []string `json:"regions,omitempty"` + Expression string `json:"expression,omitempty"` + FailureMessage string `json:"failureMessage,omitempty" mapstructure:"failure_message"` } `json:"context,omitempty"` FailPipeline bool `json:"failPipeline" mapstructure:"fail_pipeline"` Type string `json:"type"` diff --git a/spinnaker/datasource_pipeline_document.go b/spinnaker/datasource_pipeline_document.go index 83aad45..40b4f91 100644 --- a/spinnaker/datasource_pipeline_document.go +++ b/spinnaker/datasource_pipeline_document.go @@ -210,6 +210,10 @@ func datasourcePipelineDocument() *schema.Resource { Type: schema.TypeInt, Optional: true, }, + "failure_message": { + Type: schema.TypeString, + Optional: true, + }, "regions": { Type: schema.TypeList, Optional: true,
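For reference, a standalone sketch of the JSON a precondition context serializes to with the `failureMessage` tag added above; the struct below mirrors only the two relevant `Context` fields from PATCH 22, and the values are illustrative:

```
package main

import (
	"encoding/json"
	"fmt"
)

// preconditionContext mirrors the expression/failureMessage fields of the
// CheckPrecondition context introduced in PATCH 22.
type preconditionContext struct {
	Expression     string `json:"expression,omitempty"`
	FailureMessage string `json:"failureMessage,omitempty"`
}

func main() {
	ctx := preconditionContext{
		Expression:     "1 == 1",
		FailureMessage: "This is a failure message",
	}
	out, err := json.Marshal(ctx)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
	// prints: {"expression":"1 == 1","failureMessage":"This is a failure message"}
}
```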