Commit

Merge pull request #13190 from Security-Onion-Solutions/reyesj2/kafka
Initial Kafka support
reyesj2 authored Jun 13, 2024
2 parents 6340ebb + 80b1d51 commit 9ac7e05
Showing 55 changed files with 1,658 additions and 73 deletions.
2 changes: 1 addition & 1 deletion files/firewall/assigned_hostgroups.local.map.yaml
@@ -19,4 +19,4 @@ role:
  receiver:
  standalone:
  searchnode:
  sensor:
  sensor:
2 changes: 2 additions & 0 deletions pillar/kafka/nodes.sls
@@ -0,0 +1,2 @@
kafka:
  nodes:
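
This seeds an empty kafka:nodes pillar; entries are filled in per node as Kafka is rolled out across the grid. To preview what downstream tooling such as so-elastic-fleet-outputs-update (further down) will read, query the pillar directly; the hostname and role in the sample output are illustrative only:

    salt-call --out=json pillar.get kafka:nodes
    # e.g. {"local": {"kafkanode1": {"role": "broker"}}}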
11 changes: 11 additions & 0 deletions pillar/top.sls
@@ -61,6 +61,9 @@ base:
    - backup.adv_backup
    - minions.{{ grains.id }}
    - minions.adv_{{ grains.id }}
    - kafka.nodes
    - kafka.soc_kafka
    - kafka.adv_kafka
    - stig.soc_stig

  '*_sensor':
@@ -176,6 +179,9 @@ base:
    - minions.{{ grains.id }}
    - minions.adv_{{ grains.id }}
    - stig.soc_stig
    - kafka.nodes
    - kafka.soc_kafka
    - kafka.adv_kafka

  '*_heavynode':
    - elasticsearch.auth
@@ -220,6 +226,7 @@ base:
    - minions.adv_{{ grains.id }}
    - stig.soc_stig
    - soc.license
    - kafka.nodes

  '*_receiver':
    - logstash.nodes
@@ -232,6 +239,10 @@ base:
    - redis.adv_redis
    - minions.{{ grains.id }}
    - minions.adv_{{ grains.id }}
    - kafka.nodes
    - kafka.soc_kafka
    - kafka.adv_kafka
    - soc.license

  '*_import':
    - secrets
13 changes: 9 additions & 4 deletions salt/allowed_states.map.jinja
@@ -103,7 +103,8 @@
    'utility',
    'schedule',
    'docker_clean',
    'stig'
    'stig',
    'kafka'
  ],
  'so-managersearch': [
    'salt.master',
@@ -125,7 +126,8 @@
    'utility',
    'schedule',
    'docker_clean',
    'stig'
    'stig',
    'kafka'
  ],
  'so-searchnode': [
    'ssl',
@@ -159,7 +161,8 @@
    'schedule',
    'tcpreplay',
    'docker_clean',
    'stig'
    'stig',
    'kafka'
  ],
  'so-sensor': [
    'ssl',
@@ -190,7 +193,9 @@
    'telegraf',
    'firewall',
    'schedule',
    'docker_clean'
    'docker_clean',
    'kafka',
    'elasticsearch.ca'
  ],
  'so-desktop': [
    'ssl',
14 changes: 14 additions & 0 deletions salt/ca/files/signing_policies.conf
@@ -70,3 +70,17 @@ x509_signing_policies:
    - authorityKeyIdentifier: keyid,issuer:always
    - days_valid: 820
    - copypath: /etc/pki/issued_certs/
  kafka:
    - minions: '*'
    - signing_private_key: /etc/pki/ca.key
    - signing_cert: /etc/pki/ca.crt
    - C: US
    - ST: Utah
    - L: Salt Lake City
    - basicConstraints: "critical CA:false"
    - keyUsage: "digitalSignature, keyEncipherment"
    - subjectKeyIdentifier: hash
    - authorityKeyIdentifier: keyid,issuer:always
    - extendedKeyUsage: "serverAuth, clientAuth"
    - days_valid: 820
    - copypath: /etc/pki/issued_certs/
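
Because the kafka signing policy issues certificates with both serverAuth and clientAuth extended key usage, a single certificate can protect broker listeners and inter-broker client connections. As a quick sanity check (the certificate path below is illustrative, not from this commit), the extensions on an issued certificate can be inspected with openssl:

    openssl x509 -in /etc/pki/issued_certs/example-node.crt -noout -text \
      | grep -E -A1 'X509v3 (Extended )?Key Usage|X509v3 Basic Constraints'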
3 changes: 2 additions & 1 deletion salt/common/tools/sbin/so-image-common
@@ -50,6 +50,7 @@ container_list() {
      "so-idh"
      "so-idstools"
      "so-influxdb"
      "so-kafka"
      "so-kibana"
      "so-kratos"
      "so-logstash"
@@ -64,7 +65,7 @@ container_list() {
      "so-strelka-manager"
      "so-suricata"
      "so-telegraf"
      "so-zeek"
      "so-zeek"
    )
  else
    TRUSTED_CONTAINERS=(
9 changes: 9 additions & 0 deletions salt/docker/defaults.yaml
@@ -187,3 +187,12 @@ docker:
      custom_bind_mounts: []
      extra_hosts: []
      extra_env: []
    'so-kafka':
      final_octet: 88
      port_bindings:
        - 0.0.0.0:9092:9092
        - 0.0.0.0:9093:9093
        - 0.0.0.0:8778:8778
      custom_bind_mounts: []
      extra_hosts: []
      extra_env: []
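
These bindings publish 9092 (the conventional Kafka client listener), 9093 (commonly the controller or TLS listener), and 8778 (typically a Jolokia HTTP agent for JMX metrics); which service answers on each port depends on the Kafka container configuration, which this diff does not show. A minimal reachability sketch on the Kafka node itself:

    sudo ss -lntp | grep -E ':(9092|9093|8778)\s'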
1 change: 1 addition & 0 deletions salt/docker/soc_docker.yaml
@@ -101,3 +101,4 @@ docker:
      multiline: True
      forcedType: "[]string"
    so-zeek: *dockerOptions
    so-kafka: *dockerOptions
167 changes: 113 additions & 54 deletions salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-outputs-update
@@ -21,64 +21,104 @@ function update_logstash_outputs() {
    # Update Logstash Outputs
    curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_logstash" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" | jq
}
function update_kafka_outputs() {
    # Make sure SSL configuration is included in policy updates for Kafka output. SSL is configured in so-elastic-fleet-setup
    SSL_CONFIG=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs/so-manager_kafka" | jq -r '.item.ssl')

    JSON_STRING=$(jq -n \
        --argjson UPDATEDLIST "$NEW_LIST_JSON" \
        --argjson SSL_CONFIG "$SSL_CONFIG" \
        '{"name": "grid-kafka","type": "kafka","hosts": $UPDATEDLIST,"is_default": true,"is_default_monitoring": true,"config_yaml": "","ssl": $SSL_CONFIG}')
    # Update Kafka outputs
    curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_kafka" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" | jq
}

# Get current list of Logstash Outputs
RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/outputs/so-manager_logstash')

# Check to make sure that the server responded with good data - else, bail from script
CHECKSUM=$(jq -r '.item.id' <<< "$RAW_JSON")
if [ "$CHECKSUM" != "so-manager_logstash" ]; then
    printf "Failed to query for current Logstash Outputs..."
    exit 1
fi

# Get the current list of Logstash outputs & hash them
CURRENT_LIST=$(jq -c -r '.item.hosts' <<< "$RAW_JSON")
CURRENT_HASH=$(sha1sum <<< "$CURRENT_LIST" | awk '{print $1}')

declare -a NEW_LIST=()
{% if GLOBALS.pipeline == "KAFKA" %}
# Get current list of Kafka Outputs
RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/outputs/so-manager_kafka')

# Check to make sure that the server responded with good data - else, bail from script
CHECKSUM=$(jq -r '.item.id' <<< "$RAW_JSON")
if [ "$CHECKSUM" != "so-manager_kafka" ]; then
    printf "Failed to query for current Kafka Outputs..."
    exit 1
fi

# Get the current list of Kafka outputs & hash them
CURRENT_LIST=$(jq -c -r '.item.hosts' <<< "$RAW_JSON")
CURRENT_HASH=$(sha1sum <<< "$CURRENT_LIST" | awk '{print $1}')

declare -a NEW_LIST=()

# Query for the current Grid Nodes that are running Kafka
KAFKANODES=$(salt-call --out=json pillar.get kafka:nodes | jq '.local')

# Query for Kafka nodes with Broker role and add hostname to list
while IFS= read -r line; do
    NEW_LIST+=("$line")
done < <(jq -r 'to_entries | .[] | select(.value.role | contains("broker")) | .key + ":9092"' <<< $KAFKANODES)

{# If global pipeline isn't set to KAFKA then assume default of REDIS / logstash #}
{% else %}
# Get current list of Logstash Outputs
RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/outputs/so-manager_logstash')

# Check to make sure that the server responded with good data - else, bail from script
CHECKSUM=$(jq -r '.item.id' <<< "$RAW_JSON")
if [ "$CHECKSUM" != "so-manager_logstash" ]; then
printf "Failed to query for current Logstash Outputs..."
exit 1
fi

# Get the current list of Logstash outputs & hash them
CURRENT_LIST=$(jq -c -r '.item.hosts' <<< "$RAW_JSON")
CURRENT_HASH=$(sha1sum <<< "$CURRENT_LIST" | awk '{print $1}')

declare -a NEW_LIST=()

{# If we select to not send to manager via SOC, then omit the code that adds manager to NEW_LIST #}
{% if ELASTICFLEETMERGED.enable_manager_output %}
# Create array & add initial elements
if [ "{{ GLOBALS.hostname }}" = "{{ GLOBALS.url_base }}" ]; then
NEW_LIST+=("{{ GLOBALS.url_base }}:5055")
else
NEW_LIST+=("{{ GLOBALS.url_base }}:5055" "{{ GLOBALS.hostname }}:5055")
fi
{% endif %}

# Query for FQDN entries & add them to the list
{% if ELASTICFLEETMERGED.config.server.custom_fqdn | length > 0 %}
CUSTOMFQDNLIST=('{{ ELASTICFLEETMERGED.config.server.custom_fqdn | join(' ') }}')
readarray -t -d ' ' CUSTOMFQDN < <(printf '%s' "$CUSTOMFQDNLIST")
for CUSTOMNAME in "${CUSTOMFQDN[@]}"
do
    NEW_LIST+=("$CUSTOMNAME:5055")
done
{% endif %}

# Query for the current Grid Nodes that are running Logstash
LOGSTASHNODES=$(salt-call --out=json pillar.get logstash:nodes | jq '.local')

# Query for Receiver Nodes & add them to the list
if grep -q "receiver" <<< $LOGSTASHNODES; then
readarray -t RECEIVERNODES < <(jq -r ' .receiver | keys_unsorted[]' <<< $LOGSTASHNODES)
for NODE in "${RECEIVERNODES[@]}"
do
NEW_LIST+=("$NODE:5055")
done
fi

# Query for Fleet Nodes & add them to the list
if grep -q "fleet" <<< $LOGSTASHNODES; then
readarray -t FLEETNODES < <(jq -r ' .fleet | keys_unsorted[]' <<< $LOGSTASHNODES)
for NODE in "${FLEETNODES[@]}"
do
NEW_LIST+=("$NODE:5055")
done
fi

{# If we select to not send to manager via SOC, then omit the code that adds manager to NEW_LIST #}
{% if ELASTICFLEETMERGED.enable_manager_output %}
# Create array & add initial elements
if [ "{{ GLOBALS.hostname }}" = "{{ GLOBALS.url_base }}" ]; then
NEW_LIST+=("{{ GLOBALS.url_base }}:5055")
else
NEW_LIST+=("{{ GLOBALS.url_base }}:5055" "{{ GLOBALS.hostname }}:5055")
fi
{% endif %}

# Query for FQDN entries & add them to the list
{% if ELASTICFLEETMERGED.config.server.custom_fqdn | length > 0 %}
CUSTOMFQDNLIST=('{{ ELASTICFLEETMERGED.config.server.custom_fqdn | join(' ') }}')
readarray -t -d ' ' CUSTOMFQDN < <(printf '%s' "$CUSTOMFQDNLIST")
for CUSTOMNAME in "${CUSTOMFQDN[@]}"
do
    NEW_LIST+=("$CUSTOMNAME:5055")
done
{% endif %}

# Query for the current Grid Nodes that are running Logstash
LOGSTASHNODES=$(salt-call --out=json pillar.get logstash:nodes | jq '.local')

# Query for Receiver Nodes & add them to the list
if grep -q "receiver" <<< $LOGSTASHNODES; then
readarray -t RECEIVERNODES < <(jq -r ' .receiver | keys_unsorted[]' <<< $LOGSTASHNODES)
for NODE in "${RECEIVERNODES[@]}"
do
NEW_LIST+=("$NODE:5055")
done
fi

# Query for Fleet Nodes & add them to the list
if grep -q "fleet" <<< $LOGSTASHNODES; then
readarray -t FLEETNODES < <(jq -r ' .fleet | keys_unsorted[]' <<< $LOGSTASHNODES)
for NODE in "${FLEETNODES[@]}"
do
NEW_LIST+=("$NODE:5055")
done
fi

# Sort & hash the new list of Logstash Outputs
NEW_LIST_JSON=$(jq --compact-output --null-input '$ARGS.positional' --args -- "${NEW_LIST[@]}")
NEW_HASH=$(sha1sum <<< "$NEW_LIST_JSON" | awk '{print $1}')
@@ -87,9 +127,28 @@ NEW_HASH=$(sha1sum <<< "$NEW_LIST_JSON" | awk '{print $1}')
if [ "$NEW_HASH" = "$CURRENT_HASH" ]; then
printf "\nHashes match - no update needed.\n"
printf "Current List: $CURRENT_LIST\nNew List: $NEW_LIST_JSON\n"

# Since output can be KAFKA or LOGSTASH, we need to check if the policy set as default matches the value set in GLOBALS.pipeline and update if needed
printf "Checking if the correct output policy is set as default\n"
OUTPUT_DEFAULT=$(jq -r '.item.is_default' <<< $RAW_JSON)
OUTPUT_DEFAULT_MONITORING=$(jq -r '.item.is_default_monitoring' <<< $RAW_JSON)
if [[ "$OUTPUT_DEFAULT" = "false" || "$OUTPUT_DEFAULT_MONITORING" = "false" ]]; then
printf "Default output policy needs to be updated.\n"
{%- if GLOBALS.pipeline == "KAFKA" and 'gmd' in salt['pillar.get']('features', []) %}
update_kafka_outputs
{%- else %}
update_logstash_outputs
{%- endif %}
else
printf "Default output policy is set - no update needed.\n"
fi
exit 0
else
printf "\nHashes don't match - update needed.\n"
printf "Current List: $CURRENT_LIST\nNew List: $NEW_LIST_JSON\n"
{%- if GLOBALS.pipeline == "KAFKA" and 'gmd' in salt['pillar.get']('features', []) %}
update_kafka_outputs
{%- else %}
update_logstash_outputs
{%- endif %}
fi
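
As a sketch (not part of the commit), the Kafka output that Fleet currently holds can be spot-checked from the manager with the same endpoint and curl config the script uses:

    curl -K /opt/so/conf/elasticsearch/curl.config \
      'http://localhost:5601/api/fleet/outputs/so-manager_kafka' \
      | jq '{id: .item.id, hosts: .item.hosts, is_default: .item.is_default}'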
5 changes: 5 additions & 0 deletions salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-setup
@@ -77,6 +77,11 @@ curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fl
printf "\n\n"
{%- endif %}

printf "\nCreate Kafka Output Config if node is not an Import or Eval install\n"
{% if grains.role not in ['so-import', 'so-eval'] %}
salt-call state.apply kafka.elasticfleet queue=True
{% endif %}

# Add Manager Hostname & URL Base to Fleet Host URLs
printf "\nAdd SO-Manager Fleet URL\n"
if [ "{{ GLOBALS.hostname }}" = "{{ GLOBALS.url_base }}" ]; then
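
The Jinja guard above keys off the Salt role grain, so the Kafka output config is skipped on import and eval installs. To confirm the grain a given node renders with (the value varies per install type):

    salt-call grains.get role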
2 changes: 1 addition & 1 deletion salt/elasticsearch/ca.sls
@@ -4,7 +4,7 @@
# Elastic License 2.0.

{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
{% if sls.split('.')[0] in allowed_states or sls in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
# Move our new CA over so Elastic and Logstash can use SSL with the internal CA
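
This loosened guard matters because the so-sensor allowed-states list above adds 'elasticsearch.ca' rather than the whole 'elasticsearch' tree: when the state is applied directly, sls is 'elasticsearch.ca', which the new "or sls in allowed_states" clause now matches. For example, on a sensor:

    sudo salt-call state.apply elasticsearch.ca queue=True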
1 change: 1 addition & 0 deletions salt/elasticsearch/files/ingest/.fleet_final_pipeline-1
@@ -84,6 +84,7 @@
{ "community_id":{ "if": "ctx.event?.dataset == 'endpoint.events.network'", "ignore_failure":true } },
{ "set": { "if": "ctx.event?.module == 'fim'", "override": true, "field": "event.module", "value": "file_integrity" } },
{ "rename": { "if": "ctx.winlog?.provider_name == 'Microsoft-Windows-Windows Defender'", "ignore_missing": true, "field": "winlog.event_data.Threat Name", "target_field": "winlog.event_data.threat_name" } },
{ "set": { "if": "ctx?.metadata?.kafka != null" , "field": "kafka.id", "value": "{{metadata.kafka.partition}}{{metadata.kafka.offset}}{{metadata.kafka.timestamp}}", "ignore_failure": true } },
{ "remove": { "field": [ "message2", "type", "fields", "category", "module", "dataset", "event.dataset_temp", "dataset_tag_temp", "module_temp" ], "ignore_missing": true, "ignore_failure": true } }
],
"on_failure": [
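
The new set processor stamps a kafka.id assembled from the Kafka partition, offset, and timestamp whenever that metadata is present, giving each event a stable identifier derived from its Kafka coordinates. A hedged way to observe it is Elasticsearch's pipeline simulate API (the document values below are invented):

    curl -K /opt/so/conf/elasticsearch/curl.config -X POST \
      'localhost:9200/_ingest/pipeline/.fleet_final_pipeline-1/_simulate' \
      -H 'Content-Type: application/json' \
      -d '{"docs":[{"_source":{"metadata":{"kafka":{"partition":0,"offset":42,"timestamp":1718236800000}}}}]}' | jq
    # kafka.id should render as "0421718236800000" if the rest of the pipeline applies cleanly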
2 changes: 2 additions & 0 deletions salt/firewall/containers.map.jinja
@@ -27,6 +27,7 @@
    'so-elastic-fleet',
    'so-elastic-fleet-package-registry',
    'so-influxdb',
    'so-kafka',
    'so-kibana',
    'so-kratos',
    'so-logstash',
@@ -80,6 +81,7 @@
{% set NODE_CONTAINERS = [
    'so-logstash',
    'so-redis',
    'so-kafka'
] %}

{% elif GLOBALS.role == 'so-idh' %}
(Diffs for the remaining 42 changed files are not shown.)

0 comments on commit 9ac7e05
