diff --git a/.github/workflows/ci-lite.yaml b/.github/workflows/ci-lite.yaml index 96cc4e7b29..5ebcf2532f 100644 --- a/.github/workflows/ci-lite.yaml +++ b/.github/workflows/ci-lite.yaml @@ -86,7 +86,7 @@ jobs: type=ref,event=tag - name: matrix id: matrix - uses: splunk/addonfactory-test-matrix-action@v2.1.8 + uses: splunk/addonfactory-test-matrix-action@v2.1.9 security-fossa-scan: continue-on-error: true diff --git a/.github/workflows/ci-main.yaml b/.github/workflows/ci-main.yaml index 9d02ba3fc7..0a44116cd7 100644 --- a/.github/workflows/ci-main.yaml +++ b/.github/workflows/ci-main.yaml @@ -86,7 +86,7 @@ jobs: type=ref,event=tag - name: matrix id: matrix - uses: splunk/addonfactory-test-matrix-action@v2.1.8 + uses: splunk/addonfactory-test-matrix-action@v2.1.9 security-fossa-scan: continue-on-error: true diff --git a/ansible/inventory/inventory_rke2.yml b/ansible/inventory/inventory_rke2.yml new file mode 100644 index 0000000000..0d350106f6 --- /dev/null +++ b/ansible/inventory/inventory_rke2.yml @@ -0,0 +1,5 @@ +control_nodes: + hosts: + token_node: + ansible_host: + config_file: \ No newline at end of file diff --git a/ansible/inventory/inventory_rke2_ha.yml b/ansible/inventory/inventory_rke2_ha.yml new file mode 100644 index 0000000000..b3692a4d9a --- /dev/null +++ b/ansible/inventory/inventory_rke2_ha.yml @@ -0,0 +1,23 @@ +control_nodes: + hosts: + token_node: + ansible_host: + config_file: + optional_control_node_1: + ansible_host: + config_file: + optional_control_node_2: + ansible_host: + config_file: + +agent_nodes: + hosts: + optional_agent_1: + ansible_host: + config_file: + optional_agent_2: + ansible_host: + config_file: + optional_agent_3: + ansible_host: + config_file: \ No newline at end of file diff --git a/ansible/playbooks/rke2.yml b/ansible/playbooks/rke2.yml new file mode 100644 index 0000000000..73bf27e1d4 --- /dev/null +++ b/ansible/playbooks/rke2.yml @@ -0,0 +1,65 @@ +--- +- name: Copy rke2 configuration files + hosts: all + become: true + 
tasks: + - include_tasks: ../tasks/rke2/copy_config.yml + +- name: Install and run rke2-server.service on first control node + hosts: control_nodes + become: true + tasks: + - include_tasks: ../tasks/rke2/install_first_server.yml + +- name: Get node-token from a control node + hosts: control_nodes + become: true + tasks: + - include_tasks: ../tasks/rke2/get_registration_token.yml + +- name: Add node-token to other control nodes and agent nodes configuration + hosts: control_nodes:agent_nodes + become: true + tasks: + - include_tasks: ../tasks/rke2/add_token_to_config.yml + +- name: Install and run rke2-server.service on rest of the control nodes + hosts: control_nodes + become: true + tasks: + - include_tasks: ../tasks/rke2/install_other_servers.yml + +- name: Install and run rke2-agent.service on agent nodes + hosts: agent_nodes + become: true + tasks: + - include_tasks: ../tasks/rke2/install_agents.yml + +- name: Make kubectl executable available for ansible_user + hosts: control_nodes + become: true + tasks: + - include_tasks: ../tasks/rke2/provide_kubectl.yml + +- name: Deploy k8s secrets + hosts: control_nodes + become: true + tasks: + - include_tasks: ../tasks/rke2/deploy_secrets.yml + +- name: Install metallb + hosts: control_nodes + tasks: + - include_tasks: ../tasks/rke2/install_metallb.yml + +- name: Install SC4S helm repo + hosts: control_nodes + tasks: + - include_tasks: ../tasks/rke2/install_helm_repo.yml + +- name: Deploy SC4S app + hosts: control_nodes + tasks: + - include_tasks: ../tasks/rke2/deploy_app.yml + + diff --git a/ansible/resources/metallb-config.yaml b/ansible/resources/metallb-config.yaml new file mode 100644 index 0000000000..6adde98b1a --- /dev/null +++ b/ansible/resources/metallb-config.yaml @@ -0,0 +1,15 @@ +apiVersion: metallb.io/v1beta1 +kind: IPAddressPool +metadata: + namespace: metallb + name: my-ip-pool +spec: + addresses: + # Configure address pool for metallb + #- 1.2.3.4/32 +--- +apiVersion: metallb.io/v1beta1 +kind: 
L2Advertisement +metadata: + namespace: metallb + name: l2-advertisement diff --git a/ansible/tasks/rke2/add_token_to_config.yml b/ansible/tasks/rke2/add_token_to_config.yml new file mode 100644 index 0000000000..b002a84e29 --- /dev/null +++ b/ansible/tasks/rke2/add_token_to_config.yml @@ -0,0 +1,8 @@ +--- +- name: Add rke2 token to config + lineinfile: + path: /etc/rancher/rke2/config.yaml + regexp: '^token:' + line: "token: {{ hostvars['token_node'].rke2_token }}" + create: yes + when: inventory_hostname != "token_node" \ No newline at end of file diff --git a/ansible/tasks/rke2/copy_config.yml b/ansible/tasks/rke2/copy_config.yml new file mode 100644 index 0000000000..ab580383d1 --- /dev/null +++ b/ansible/tasks/rke2/copy_config.yml @@ -0,0 +1,14 @@ +--- +- name: Create /etc/rancher/rke2 directory + file: + path: /etc/rancher/rke2 + state: directory + mode: u=rw,g=rw,o=r + +- name: Copy the configuration file to the remote location + copy: + src: "{{ config_file }}" + dest: /etc/rancher/rke2/config.yaml + owner: "{{ ansible_user }}" + group: "{{ ansible_user }}" + mode: u=rw,g=rw,o=r \ No newline at end of file diff --git a/ansible/tasks/rke2/deploy_app.yml b/ansible/tasks/rke2/deploy_app.yml new file mode 100644 index 0000000000..506062fbb7 --- /dev/null +++ b/ansible/tasks/rke2/deploy_app.yml @@ -0,0 +1,21 @@ +--- +- name: Copying values.yml file on the server + copy: + src: /opt/charts/splunk-connect-for-syslog/values.yaml + dest: "/home/{{ ansible_user }}/values.yaml" + owner: "{{ ansible_user }}" + group: "{{ ansible_user }}" + mode: u=rw,g=rw,o=r + +- name: Deploy app or update it with new values if already deployed + block: + - name: Deploy sc4s app from templates with overwrites from values.yml + ansible.builtin.shell: helm install sc4s splunk-connect-for-syslog/splunk-connect-for-syslog -f values.yaml + args: + chdir: "/home/{{ ansible_user }}/" + rescue: + - name: Update app with new values.yml + ansible.builtin.shell: helm upgrade sc4s 
splunk-connect-for-syslog/splunk-connect-for-syslog -f values.yaml + args: + chdir: "/home/{{ ansible_user }}/" + when: inventory_hostname == "token_node" \ No newline at end of file diff --git a/ansible/tasks/rke2/deploy_secrets.yml b/ansible/tasks/rke2/deploy_secrets.yml new file mode 100644 index 0000000000..ed71e4a233 --- /dev/null +++ b/ansible/tasks/rke2/deploy_secrets.yml @@ -0,0 +1,29 @@ +--- +- name: Load k8s secrets + include_vars: + file: "{{ item }}" + with_first_found: + - files: + - /opt/ansible/resources/k8s_secrets.yaml + - /opt/charts/splunk-connect-for-syslog/secrets.yaml + +- name: Export kubectl bin path + shell: export PATH=$PATH:/var/lib/rancher/rke2/bin/ + +- name: Create mTLS secret + ansible.builtin.shell: | + /var/lib/rancher/rke2/bin/kubectl apply -f - < - index=* sc4s_container=$sc4s_instance$ + | tstats count where index=* sc4s_container=$sc4s_instance$ by index _time $time_range.earliest$ $time_range.latest$ @@ -43,8 +43,8 @@ - rt-15m - rt + -15m + now @@ -220,6 +220,7 @@ + @@ -310,7 +311,7 @@ Total volume of actual syslog traffic delivered by this SC4S instance to Splunk - | stats count + | stats sum(count) @@ -318,6 +319,7 @@ + @@ -336,7 +338,7 @@ Distributions of events by index - | stats count by index + | stats sum(count) as count by index @@ -366,6 +368,7 @@ + @@ -375,7 +378,7 @@ Trends of events by index - | chart sparkline(count) AS "Indexes Trend" count AS Total BY index + | stats sparkline(sum(count)) as "Indexes Trend" sum(count) as Total by index @@ -393,7 +396,7 @@ - index=* sc4s_container=$sc4s_instance$ | eval tags=split(sc4s_tags,"|") | mvexpand tags | search tags=".app.*" | timechart count by tags + | tstats count where index=* sc4s_container=$sc4s_instance$ by sc4s_tags _time | eval tags=split(sc4s_tags,"|") | mvexpand tags | search tags=".app.*" | timechart sum(count) by tags $time_range.earliest$ $time_range.latest$ @@ -439,7 +442,7 @@
- index=* sc4s_container=$sc4s_instance$ | eval tags=split(sc4s_tags,"|") | mvexpand tags | chart count by tags + | tstats count where index=* sc4s_container=$sc4s_instance$ by sc4s_tags _time | eval tags=split(sc4s_tags,"|") | mvexpand tags | stats sum(count) as eventCount by tags | sort - eventCount $time_range.earliest$ $time_range.latest$ @@ -449,4 +452,4 @@
- \ No newline at end of file + diff --git a/docs/gettingstarted/ansible-rke2.md b/docs/gettingstarted/ansible-rke2.md new file mode 100644 index 0000000000..cee9f6ec97 --- /dev/null +++ b/docs/gettingstarted/ansible-rke2.md @@ -0,0 +1,117 @@ +# Rancher Kubernetes Engine 2 + +SC4S can be deployed on Rancher Kubernetes Engine 2 (RKE). Provisioning of the infrastructure can be automated using +Ansible. This instruction assumes knowledge of RKE2 and assumes that [helm](https://helm.sh/) is installed on all control nodes. + +## Step 1: Prepare your initial configuration + +1. Before you run SC4S with Ansible, update `values.yaml` with your Splunk endpoint and HEC token. +You can find the [values.yaml file here](https://github.com/splunk/splunk-connect-for-syslog/blob/main/charts/splunk-connect-for-syslog/values.yaml). + +2. In the inventory file, provide a list of hosts on which you want to run your cluster and the host application. In case of single node deployment inventory should look like this: +``` yaml +--8<---- "ansible/inventory/inventory_rke2.yml" +``` + +- `token_node` is the host name which must be kept unchanged. In case of high availability cluster this node is used to generate registration token. For single node deployment this feature is not used, but the `token_node` hostname is referenced in the ansible playbook. + +- `ansible_host` - address of the node + +- `config_file` - absolute path to the RKE2 configuration file. In most basic configuration this can be empty `.yaml` file. Different configuration variables can be found in [RKE2 documentation](https://docs.rke2.io/). + +3. Alternatively, you can spin up a high-availability cluster. 
Variables for all hosts are the same as for single node deployment with one difference, that ansible host names for other nodes apart from `token_node` can be custom: +``` yaml +--8<---- "ansible/inventory/inventory_rke2_ha.yml" +``` + +For high-availability cluster some basic configuration is required inside `config_file`. Examples below show how these files should be structured; more details can be found in [RKE2 documentation](https://docs.rke2.io/install/ha): + +- `token_node` configuration: +``` yaml +# The following configuration is needed only when multiple control nodes are installed inside the cluster. +tls-san: + - my-kubernetes-domain.com +``` + +- Other control nodes configuration: +``` yaml +# 'token' variable should be left empty. It will be updated automatically by ansible playbook. +server: https://my-kubernetes-domain.com:9345 +token: +tls-san: + - my-kubernetes-domain.com +``` + +- Agent nodes configuration: +``` yaml +# 'token' variable should be left empty. It will be updated automatically by ansible playbook. +server: https://my-kubernetes-domain.com:9345 +token: +``` + +4. Configure address pool used by Metallb in [ansible/resources/metallb-config.yaml](https://github.com/splunk/splunk-connect-for-syslog/blob/main/ansible/resources/metallb-config.yaml) file. + +## Step 2: Deploy SC4S on your configuration +1. If you have Ansible installed on your host, run the Ansible playbook to deploy SC4S. Otherwise, use the Docker Ansible image provided in the package: +```bash +# From repository root +docker-compose -f ansible/docker-compose.yml build +docker-compose -f ansible/docker-compose.yml up -d +docker exec -it ansible_sc4s /bin/bash +``` +2. If you used the Docker Ansible image, then from your container remote shell, authenticate to and run the RKE2 playbook. 
+ +* To authenticate with username and password: +``` bash +ansible-playbook -i path/to/inventory_rke2.yaml -u <username> --ask-pass path/to/playbooks/rke2.yml +``` + +* To authenticate if you are running a high-availability cluster: +``` bash +ansible-playbook -i path/to/inventory_rke2_ha.yaml -u <username> --ask-pass path/to/playbooks/rke2.yml +``` + +* To authenticate using a key pair: +``` bash +ansible-playbook -i path/to/inventory_rke2.yaml -u <username> --key-file <path/to/key-file> path/to/playbooks/rke2.yml +``` + +## Step 3: Validate your configuration + +SC4S performs checks to ensure that the container starts properly and that the syntax of the underlying syslog-ng +configuration is correct. Once the checks are complete, validate that SC4S properly communicates with Splunk. To do this, execute the following search in Splunk: + +```ini +index=* sourcetype=sc4s:events "starting up" +``` + +This should yield an event similar to the following: + +```ini +syslog-ng starting up; version='3.28.1' +``` + +You can verify whether all services in the cluster work by checking the ```sc4s_container``` in Splunk. Each service should have a different container ID. All other fields should be the same. + +The startup process should proceed normally without syntax errors. If it does not, +follow the steps below before proceeding to deeper-level troubleshooting: + +1. Verify that the URL, token, and TLS/SSL settings are correct, and that the appropriate firewall ports are open (8088 or 443). +2. Verify that your indexes are created in Splunk, and that your token has access to them. +3. If you are using a load balancer, verify that it is operating properly. +4. Execute the following command to check the SC4S startup process running in the container. + +```bash +kubectl get pods +kubectl logs <pod-name> +``` + +You should see events similar to those below in the output: + +```ini +SC4S_ENV_CHECK_HEC: Splunk HEC connection test successful to index=main for sourcetype=sc4s:fallback... 
+SC4S_ENV_CHECK_HEC: Splunk HEC connection test successful to index=main for sourcetype=sc4s:events... +syslog-ng checking config +sc4s version=v1.36.0 +starting syslog-ng +``` diff --git a/docs/sources/vendor/Cisco/cisco_asa.md b/docs/sources/vendor/Cisco/cisco_asa.md index adf03eab8c..8cafb37184 100644 --- a/docs/sources/vendor/Cisco/cisco_asa.md +++ b/docs/sources/vendor/Cisco/cisco_asa.md @@ -11,7 +11,6 @@ | Ref | Link | |----------------|---------------------------------------------------------------------------------------------------------| | Splunk Add-on for ASA (No long supports FWSM and PIX) | | -| Cisco eStreamer for Splunk | | | Product Manual | | ## Sourcetypes diff --git a/mkdocs.yml b/mkdocs.yml index 54f426b628..cefe30386e 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -52,6 +52,7 @@ nav: - Docker Swarm: "gettingstarted/ansible-docker-swarm.md" - Podman/Docker: "gettingstarted/ansible-docker-podman.md" - mk8s: "gettingstarted/ansible-mk8s.md" + - RKE2: "gettingstarted/ansible-rke2.md" - Create a parser: "create-parser.md" - Configuration: "configuration.md" - Destinations: "destinations.md" diff --git a/package/etc/conf.d/conflib/post-filter/app-postfilter-cisco_ise.conf b/package/etc/conf.d/conflib/post-filter/app-postfilter-cisco_ise.conf index d47f3ce445..640705d906 100644 --- a/package/etc/conf.d/conflib/post-filter/app-postfilter-cisco_ise.conf +++ b/package/etc/conf.d/conflib/post-filter/app-postfilter-cisco_ise.conf @@ -57,7 +57,8 @@ block parser app-postfilter-cisco_ise() { application app-postfilter-cisco_ise[sc4s-finalfilter] { filter { program('CISE_' type(string) flags(prefix)) - and "${.values.num}" != 1; + and "${.values.num}" != 1 + and not program('CISE_Alarm'); }; parser { app-postfilter-cisco_ise(); }; }; diff --git a/package/etc/conf.d/conflib/syslog/app-syslog-cisco_ise.conf b/package/etc/conf.d/conflib/syslog/app-syslog-cisco_ise.conf index d4f2ff2079..ebdb9136bd 100644 --- 
a/package/etc/conf.d/conflib/syslog/app-syslog-cisco_ise.conf +++ b/package/etc/conf.d/conflib/syslog/app-syslog-cisco_ise.conf @@ -18,6 +18,27 @@ parser ise_event_time { block parser app-syslog-cisco_ise() { channel { + + if { + parser { + regexp-parser( + template("${MESSAGE}") + patterns("^(?\\d{2}) (?\\d{2}:\\d{2}:\\d{2}) (?[^ ]+) (?[^ ]+) (?.*)") + prefix(".parsed.") + ); + + date-parser-nofilter( + format('%b %d %H:%M:%S') + template("${PROGRAM} ${.parsed.real_day} ${.parsed.real_hour}") + ); + }; + rewrite { + set("${.parsed.real_host}" value("HOST")); + set("${.parsed.real_program}" value("PROGRAM")); + set("${.parsed.rest_of_message}" value("MESSAGE")); + }; + }; + parser { csv-parser( columns(serial, num, seq, message) @@ -44,13 +65,13 @@ block parser app-syslog-cisco_ise() { product('ise') ); }; - - - }; + }; }; + application app-syslog-cisco_ise[sc4s-syslog-pgm] { filter { - program('CISE_' type(string) flags(prefix)); + program('CISE_' type(string) flags(prefix)) + or message('CISE_' type(string) flags(substring)); }; parser { app-syslog-cisco_ise(); }; }; diff --git a/package/lite/etc/addons/cisco/app-postfilter-cisco_ise.conf b/package/lite/etc/addons/cisco/app-postfilter-cisco_ise.conf index d47f3ce445..640705d906 100644 --- a/package/lite/etc/addons/cisco/app-postfilter-cisco_ise.conf +++ b/package/lite/etc/addons/cisco/app-postfilter-cisco_ise.conf @@ -57,7 +57,8 @@ block parser app-postfilter-cisco_ise() { application app-postfilter-cisco_ise[sc4s-finalfilter] { filter { program('CISE_' type(string) flags(prefix)) - and "${.values.num}" != 1; + and "${.values.num}" != 1 + and not program('CISE_Alarm'); }; parser { app-postfilter-cisco_ise(); }; }; diff --git a/package/lite/etc/addons/cisco/app-syslog-cisco_ise.conf b/package/lite/etc/addons/cisco/app-syslog-cisco_ise.conf index d4f2ff2079..ebdb9136bd 100644 --- a/package/lite/etc/addons/cisco/app-syslog-cisco_ise.conf +++ b/package/lite/etc/addons/cisco/app-syslog-cisco_ise.conf @@ -18,6 +18,27 
@@ parser ise_event_time { block parser app-syslog-cisco_ise() { channel { + + if { + parser { + regexp-parser( + template("${MESSAGE}") + patterns("^(?\\d{2}) (?\\d{2}:\\d{2}:\\d{2}) (?[^ ]+) (?[^ ]+) (?.*)") + prefix(".parsed.") + ); + + date-parser-nofilter( + format('%b %d %H:%M:%S') + template("${PROGRAM} ${.parsed.real_day} ${.parsed.real_hour}") + ); + }; + rewrite { + set("${.parsed.real_host}" value("HOST")); + set("${.parsed.real_program}" value("PROGRAM")); + set("${.parsed.rest_of_message}" value("MESSAGE")); + }; + }; + parser { csv-parser( columns(serial, num, seq, message) @@ -44,13 +65,13 @@ block parser app-syslog-cisco_ise() { product('ise') ); }; - - - }; + }; }; + application app-syslog-cisco_ise[sc4s-syslog-pgm] { filter { - program('CISE_' type(string) flags(prefix)); + program('CISE_' type(string) flags(prefix)) + or message('CISE_' type(string) flags(substring)); }; parser { app-syslog-cisco_ise(); }; }; diff --git a/poetry.lock b/poetry.lock index b057aa3a3c..def4dd7cec 100644 --- a/poetry.lock +++ b/poetry.lock @@ -680,13 +680,13 @@ mkdocs = ">=1.0.4" [[package]] name = "mkdocs-material" -version = "9.5.41" +version = "9.5.42" description = "Documentation that simply works" optional = false python-versions = ">=3.8" files = [ - {file = "mkdocs_material-9.5.41-py3-none-any.whl", hash = "sha256:990bc138c33342b5b73e7545915ebc0136e501bfbd8e365735144f5120891d83"}, - {file = "mkdocs_material-9.5.41.tar.gz", hash = "sha256:30fa5d459b4b8130848ecd8e1c908878345d9d8268f7ddbc31eebe88d462d97b"}, + {file = "mkdocs_material-9.5.42-py3-none-any.whl", hash = "sha256:452a7c5d21284b373f36b981a2cbebfff59263feebeede1bc28652e9c5bbe316"}, + {file = "mkdocs_material-9.5.42.tar.gz", hash = "sha256:92779b5e9b5934540c574c11647131d217dc540dce72b05feeda088c8eb1b8f2"}, ] [package.dependencies] diff --git a/tests/test_cisco_ise.py b/tests/test_cisco_ise.py index 8958efb963..2b4ee45819 100644 --- a/tests/test_cisco_ise.py +++ b/tests/test_cisco_ise.py @@ -207,7 +207,7 @@ 
def test_cisco_ise_cise_alarm_single( sendsingle(message, setup_sc4s[0], setup_sc4s[1][514]) st = env.from_string( - 'search index=netauth host="{{ host }}" sourcetype="cisco:ise:syslog" "Server=10.0.0.5"' + 'search index=netauth host="{{ host }}" sourcetype="cisco:ise:syslog" "CISE_Alarm WARN: RADIUS Authentication Request dropped : Server=10.0.0.5;"' ) search = st.render(epoch=epoch, host=host) @@ -218,3 +218,102 @@ def test_cisco_ise_cise_alarm_single( record_property("message", message) assert result_count == 1 + +@pytest.mark.addons("cisco") +def test_cisco_ise_double_timestamp_and_hostname( + record_property, setup_splunk, setup_sc4s +): + host = f"{shortuuid.ShortUUID().random(length=5).lower()}-{shortuuid.ShortUUID().random(length=5).lower()}" + + dt = datetime.datetime.now() + _, bsd, time, date, tzoffset, _, epoch = time_operations(dt) + + # Tune time functions for Cisco ISE + time = time[:-3] + tzoffset = tzoffset[0:3] + ":" + tzoffset[3:] + epoch = epoch[:-3] + + mt = env.from_string( + "{{ mark }}{{ bsd }} wrong_host {{ bsd }} {{ host }} CISE_System_Statistics 0000001313 1 4 2020-01-01 10:00:00.000000 +00:00 0000015291 70501 NOTICE System-Stats: ISE Counters, ConfigVersionId=1, OperationCounters=Counter=1_LocalEndPointReads:1]\n" + ) + message = mt.render( + mark="<165>", bsd=bsd, host=host, date=date, time=time, tzoffset=tzoffset + ) + sendsingle(message, setup_sc4s[0], setup_sc4s[1][514]) + + st = env.from_string( + 'search index=netauth host="{{ host }}" sourcetype="cisco:ise:syslog" "CISE_System_Statistics: 0000001313 1 4 2020-01-01 10:00:00.000000"' + ) + search = st.render(epoch=epoch, host=host) + + result_count, _ = splunk_single(setup_splunk, search) + + record_property("host", host) + record_property("resultCount", result_count) + record_property("message", message) + + assert result_count == 1 + +@pytest.mark.addons("cisco") +def test_cisco_ise_double_timestamp_and_hostname_sequence_eq_0( + record_property, setup_splunk, setup_sc4s +): + 
host = f"{shortuuid.ShortUUID().random(length=5).lower()}-{shortuuid.ShortUUID().random(length=5).lower()}" + + dt = datetime.datetime.now() + _, bsd, time, date, tzoffset, _, epoch = time_operations(dt) + + # Tune time functions for Cisco ISE + time = time[:-3] + tzoffset = tzoffset[0:3] + ":" + tzoffset[3:] + epoch = epoch[:-3] + + mt = env.from_string( + "{{ mark }}{{ bsd }} wrong_host {{ bsd }} {{ host }} CISE_System_Statistics 0000001313 4 0 {{ date }} {{ time }} {{ tzoffset }} 0000015291 70501 NOTICE System-Stats: part one,\n" + ) + + message = mt.render( + mark="<165>", bsd=bsd, host=host, date=date, time=time, tzoffset=tzoffset + ) + sendsingle(message, setup_sc4s[0], setup_sc4s[1][514]) + + # Generate new datetime for subsequent messages; not used in log path parser so actually could be anything + dt = datetime.datetime.now() + datetime.timedelta(seconds=1) + bsd = dt.strftime("%b %d %H:%M:%S") + + mt = env.from_string( + "{{ mark }}{{ bsd }} wrong_host {{ bsd }} {{ host }} CISE_System_Statistics 0000001313 4 1 part two,\n" + ) + message = mt.render( + mark="<111>", bsd=bsd, host=host, date=date, time=time, tzoffset=tzoffset + ) + sendsingle(message, setup_sc4s[0], setup_sc4s[1][514]) + + mt = env.from_string( + "{{ mark }}{{ bsd }} wrong_host {{ bsd }} {{ host }} CISE_System_Statistics 0000001313 4 2 part three,\n" + ) + message = mt.render( + mark="<111>", bsd=bsd, host=host, date=date, time=time, tzoffset=tzoffset + ) + sendsingle(message, setup_sc4s[0], setup_sc4s[1][514]) + + mt = env.from_string( + "{{ mark }}{{ bsd }} wrong_host {{ bsd }} {{ host }} CISE_System_Statistics 0000001313 4 3 part four,\n" + ) + message = mt.render( + mark="<111>", bsd=bsd, host=host, date=date, time=time, tzoffset=tzoffset + ) + sendsingle(message, setup_sc4s[0], setup_sc4s[1][514]) + + st = env.from_string( + 'search _time={{ epoch }} index=netauth host="{{ host }}" sourcetype="cisco:ise:syslog" one two three four' + ) + search = st.render(epoch=epoch, host=host) + + 
result_count, _ = splunk_single(setup_splunk, search) + + record_property("host", host) + record_property("resultCount", result_count) + record_property("message", message) + + assert result_count == 1 \ No newline at end of file