@@ -178,7 +178,7 @@ func ResourceTencentCloudElasticsearchInstance() *schema.Resource {
				Description: "License type. Valid values are `oss`, `basic` and `platinum`. The default value is `platinum`.",
			},
			"node_info_list": {
-				Type:        schema.TypeList,
+				Type:        schema.TypeSet,
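+				// With TypeSet, the ordering of node_info_list blocks is not significant and each node type may appear only once (enforced in CustomizeDiff below).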
				Required:    true,
				MinItems:    1,
				Description: "Node information list, which is used to describe the specification information of various types of nodes in the cluster, such as node type, node quantity, node specification, disk type, and disk size.",
@@ -346,6 +346,19 @@ func ResourceTencentCloudElasticsearchInstance() *schema.Resource {
				Description: "Instance creation time.",
			},
		},
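+		// CustomizeDiff rejects plans where the same node type appears more than once in node_info_list.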
+		CustomizeDiff: func(ctx context.Context, d *schema.ResourceDiff, meta interface{}) error {
+			nodeInfos := d.Get("node_info_list").(*schema.Set).List()
+			typeMap := map[string]bool{}
+			for _, v := range nodeInfos {
+				m := v.(map[string]interface{})
+				t := m["type"].(string)
+				if typeMap[t] {
+					return fmt.Errorf("duplicate node type '%s' is not allowed in node_info_list", t)
+				}
+				typeMap[t] = true
+			}
+			return nil
+		},
	}
}

@@ -428,7 +441,7 @@ func resourceTencentCloudElasticsearchInstanceCreate(d *schema.ResourceData, met
	}

	if v, ok := d.GetOk("node_info_list"); ok {
-		infos := v.([]interface{})
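+		// node_info_list is now a TypeSet; materialize it as a list before building the request.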
+		infos := v.(*schema.Set).List()
		request.NodeInfoList = make([]*es.NodeInfo, 0, len(infos))
		for _, item := range infos {
			value := item.(map[string]interface{})
@@ -995,39 +1008,155 @@ func resourceTencentCloudElasticsearchInstanceUpdate(d *schema.ResourceData, met
	}

	if d.HasChange("node_info_list") {
-		nodeInfos := d.Get("node_info_list").([]interface{})
-		nodeInfoList := make([]*es.NodeInfo, 0, len(nodeInfos))
-		for _, d := range nodeInfos {
-			value := d.(map[string]interface{})
-			nodeType := value["node_type"].(string)
-			diskSize := uint64(value["disk_size"].(int))
-			nodeNum := uint64(value["node_num"].(int))
-			types := value["type"].(string)
-			diskType := value["disk_type"].(string)
-			encrypt := value["encrypt"].(bool)
-			dataDisk := es.NodeInfo{
-				NodeType:    &nodeType,
-				DiskSize:    &diskSize,
-				NodeNum:     &nodeNum,
-				Type:        &types,
-				DiskType:    &diskType,
-				DiskEncrypt: helper.BoolToInt64Pointer(encrypt),
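+		// Index the old and new node configurations by node type so changes can be applied per type.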
+		o, n := d.GetChange("node_info_list")
+		oldNodeMap := make(map[string]map[string]interface{})
+		newNodesMap := make(map[string]map[string]interface{})
+		for _, node := range o.(*schema.Set).List() {
+			nodeMap := node.(map[string]interface{})
+			oldNodeMap[nodeMap["type"].(string)] = nodeMap
+		}
+		for _, node := range n.(*schema.Set).List() {
+			nodeMap := node.(map[string]interface{})
+			newNodesMap[nodeMap["type"].(string)] = nodeMap
+		}
+
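+		// Handle each supported node type in turn; baseNodeList carries the unchanged configuration of the other node types so each UpdateInstance call describes the whole node list.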
+		typeList := []string{"hotData", "warmData", "dedicatedMaster"}
+		for _, t := range typeList {
+			old := oldNodeMap[t]
+			new := newNodesMap[t]
+			baseNodeList := make([]interface{}, 0)
+			for k, v := range oldNodeMap {
+				if k == t {
+					continue
+				}
+				baseNodeList = append(baseNodeList, v)
			}
-			nodeInfoList = append(nodeInfoList, &dataDisk)
-		}
-		err := resource.Retry(tccommon.WriteRetryTimeout*2, func() *resource.RetryError {
-			errRet := elasticsearchService.UpdateInstance(ctx, instanceId, "", "", "", "", "", 0, nodeInfoList, nil, nil, nil, nil)
-			if errRet != nil {
-				return tccommon.RetryError(errRet)
+
+			if old == nil && new == nil {
+				// no node of this type is configured
+				continue
+			} else if old == nil {
+				// this node type was added
+				baseNodeList = append(baseNodeList, new)
+				err := resource.Retry(tccommon.WriteRetryTimeout*2, func() *resource.RetryError {
+					errRet := elasticsearchService.UpdateInstance(ctx, instanceId, "", "", "", "", "", 0, convertToNodeInfos(baseNodeList), nil, nil, nil, nil)
+					if errRet != nil {
+						return tccommon.RetryError(errRet)
+					}
+					return nil
+				})
+				if err != nil {
+					return err
+				}
+				err = tencentCloudElasticsearchInstanceUpgradeWaiting(ctx, &elasticsearchService, instanceId)
+				if err != nil {
+					return err
+				}
+			} else if new == nil {
+				// this node type was removed
+				err := resource.Retry(tccommon.WriteRetryTimeout*2, func() *resource.RetryError {
+					errRet := elasticsearchService.UpdateInstance(ctx, instanceId, "", "", "", "", "", 0, convertToNodeInfos(baseNodeList), nil, nil, nil, nil)
+					if errRet != nil {
+						return tccommon.RetryError(errRet)
+					}
+					return nil
+				})
+				if err != nil {
+					return err
+				}
+				err = tencentCloudElasticsearchInstanceUpgradeWaiting(ctx, &elasticsearchService, instanceId)
+				if err != nil {
+					return err
+				}
+			} else {
+				// disk_type, encrypt and type do not support in-place change
+				fields := []string{"disk_type", "encrypt", "type"}
+				for _, field := range fields {
+					if old[field] != new[field] {
+						return fmt.Errorf("%s does not support change", field)
+					}
+				}
+				// change the node count of this node type
+				var isUpdateNodeNum bool
+				if old["node_num"].(int) != new["node_num"].(int) {
+					changeESNodes := convertToNodeInfos(baseNodeList)
+					thisNode := convertToNodeInfo(old)
+					thisNode.NodeNum = helper.IntUint64(new["node_num"].(int))
+					changeESNodes = append(changeESNodes, thisNode)
+					err := resource.Retry(tccommon.WriteRetryTimeout*2, func() *resource.RetryError {
+						errRet := elasticsearchService.UpdateInstance(ctx, instanceId, "", "", "", "", "", 0, changeESNodes, nil, nil, nil, nil)
+						if errRet != nil {
+							return tccommon.RetryError(errRet)
+						}
+						return nil
+					})
+					if err != nil {
+						return err
+					}
+					err = tencentCloudElasticsearchInstanceUpgradeWaiting(ctx, &elasticsearchService, instanceId)
+					if err != nil {
+						return err
+					}
+					isUpdateNodeNum = true
+				}
+
+				var isUpdateNodeType bool
+				// change the node spec (node_type) of this node type
+				if old["node_type"].(string) != new["node_type"].(string) {
+					changeESNodes := convertToNodeInfos(baseNodeList)
+					thisNode := convertToNodeInfo(old)
+					thisNode.NodeType = helper.String(new["node_type"].(string))
+					if isUpdateNodeNum {
+						thisNode.NodeNum = helper.IntUint64(new["node_num"].(int))
+					}
+					changeESNodes = append(changeESNodes, thisNode)
+					err := resource.Retry(tccommon.WriteRetryTimeout*2, func() *resource.RetryError {
+						errRet := elasticsearchService.UpdateInstance(ctx, instanceId, "", "", "", "", "", 0, changeESNodes, nil, nil, nil, nil)
+						if errRet != nil {
+							return tccommon.RetryError(errRet)
+						}
+						return nil
+					})
+					if err != nil {
+						return err
+					}
+					err = tencentCloudElasticsearchInstanceUpgradeWaiting(ctx, &elasticsearchService, instanceId)
+					if err != nil {
+						return err
+					}
+					isUpdateNodeType = true
+				}
+				// change the disk size of this node type
+				if old["disk_size"].(int) != new["disk_size"].(int) {
+					changeESNodes := convertToNodeInfos(baseNodeList)
+					thisNode := convertToNodeInfo(old)
+					thisNode.NodeType = helper.String(new["node_type"].(string))
+					thisNode.DiskSize = helper.IntUint64(new["disk_size"].(int))
+					if isUpdateNodeNum {
+						thisNode.NodeNum = helper.IntUint64(new["node_num"].(int))
+					}
+					if isUpdateNodeType {
+						thisNode.NodeType = helper.String(new["node_type"].(string))
+					}
+					changeESNodes = append(changeESNodes, thisNode)
+					err := resource.Retry(tccommon.WriteRetryTimeout*2, func() *resource.RetryError {
+						errRet := elasticsearchService.UpdateInstance(ctx, instanceId, "", "", "", "", "", 0, changeESNodes, nil, nil, nil, nil)
+						if errRet != nil {
+							return tccommon.RetryError(errRet)
+						}
+						return nil
+					})
+					if err != nil {
+						return err
+					}
+					err = tencentCloudElasticsearchInstanceUpgradeWaiting(ctx, &elasticsearchService, instanceId)
+					if err != nil {
+						return err
+					}
+				}
+			}
			}
-			return nil
-		})
-		if err != nil {
-			return err
-		}
-		err = tencentCloudElasticsearchInstanceUpgradeWaiting(ctx, &elasticsearchService, instanceId)
-		if err != nil {
-			return err
+			// update the entry for this node type in oldNodeMap
+			oldNodeMap[t] = new
		}
	}

@@ -1251,3 +1380,31 @@ func tencentCloudElasticsearchInstanceUpgradeWaiting(ctx context.Context, servic
		return nil
	})
}
+
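+// convertToNodeInfo maps a single node_info_list element onto an es.NodeInfo request struct.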
+func convertToNodeInfo(n interface{}) *es.NodeInfo {
+	value := n.(map[string]interface{})
+	nodeType := value["node_type"].(string)
+	diskSize := uint64(value["disk_size"].(int))
+	nodeNum := uint64(value["node_num"].(int))
+	types := value["type"].(string)
+	diskType := value["disk_type"].(string)
+	encrypt := value["encrypt"].(bool)
+	nodeInfo := &es.NodeInfo{
+		NodeType:    &nodeType,
+		DiskSize:    &diskSize,
+		NodeNum:     &nodeNum,
+		Type:        &types,
+		DiskType:    &diskType,
+		DiskEncrypt: helper.BoolToInt64Pointer(encrypt),
+	}
+	return nodeInfo
+}
+
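+// convertToNodeInfos converts a list of node_info_list elements into es.NodeInfo request structs.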
+func convertToNodeInfos(nodeInfos []interface{}) []*es.NodeInfo {
+	nodeInfoList := make([]*es.NodeInfo, 0, len(nodeInfos))
+	for _, n := range nodeInfos {
+		nodeInfo := convertToNodeInfo(n)
+		nodeInfoList = append(nodeInfoList, nodeInfo)
+	}
+	return nodeInfoList
+}