Index: pom.xml
===================================================================
diff -u -r2c57e5c1575442a8cd43c1e1739986728badd369 -r65f92f6c8c69e7878c00096760348b55997ed1b1
--- pom.xml (.../pom.xml) (revision 2c57e5c1575442a8cd43c1e1739986728badd369)
+++ pom.xml (.../pom.xml) (revision 65f92f6c8c69e7878c00096760348b55997ed1b1)
@@ -10,7 +10,7 @@
<modelVersion>4.0.0</modelVersion>
<groupId>313devgrp</groupId>
<artifactId>java-service-tree-framework-backend-server</artifactId>
- <version>22.06.22</version>
+ <version>22.06.23</version>
<packaging>pom</packaging>
<name>java-service-tree-framework</name>
Index: web-module/Dockerfile
===================================================================
diff -u -r442095cf5926d01f32d63fb79595212baa2b2019 -r65f92f6c8c69e7878c00096760348b55997ed1b1
--- web-module/Dockerfile (.../Dockerfile) (revision 442095cf5926d01f32d63fb79595212baa2b2019)
+++ web-module/Dockerfile (.../Dockerfile) (revision 65f92f6c8c69e7878c00096760348b55997ed1b1)
@@ -10,23 +10,23 @@
RUN wget http://www.313.co.kr/nexus/content/repositories/StandardProject/313devgrp/packetbeat/7.4.2-linux/packetbeat-7.4.2-linux-x86_64.tar.gz
RUN tar zxvf packetbeat-7.4.2-linux-x86_64.tar.gz
-ADD script/yml/monitoring/client/packetbeat.yml ./packetbeat-7.4.2-linux-x86_64/packetbeat.yml
+ADD src/main/resources/script/yml/elfk/client/packetbeat.yml ./packetbeat-7.4.2-linux-x86_64/packetbeat.yml
RUN wget http://www.313.co.kr/nexus/content/repositories/StandardProject/313devgrp/topbeat/1.3.1/topbeat-1.3.1-x86_64.tar.gz
RUN tar zxvf topbeat-1.3.1-x86_64.tar.gz
-ADD script/yml/monitoring/client/topbeat.yml ./topbeat-1.3.1-x86_64/topbeat.yml
+ADD src/main/resources/script/yml/monitoring/client/topbeat.yml ./topbeat-1.3.1-x86_64/topbeat.yml
RUN wget http://www.313.co.kr/nexus/content/repositories/StandardProject/313devgrp/metricbeat/7.4.2-linux/metricbeat-7.4.2-linux-x86_64.tar.gz
RUN tar zxvf metricbeat-7.4.2-linux-x86_64.tar.gz
-ADD script/yml/monitoring/client/metricbeat.yml ./metricbeat-7.4.2-linux-x86_64/metricbeat.yml
+ADD src/main/resources/script/yml/monitoring/client/metricbeat.yml ./metricbeat-7.4.2-linux-x86_64/metricbeat.yml
RUN wget http://www.313.co.kr/nexus/content/repositories/StandardProject/313devgrp/heartbeat/7.4.2-linux/heartbeat-7.4.2-linux-x86_64.tar.gz
RUN tar zxvf heartbeat-7.4.2-linux-x86_64.tar.gz
-ADD script/yml/monitoring/client/heartbeat.yml ./heartbeat-7.4.2-linux-x86_64/heartbeat.yml
+ADD src/main/resources/script/yml/monitoring/client/heartbeat.yml ./heartbeat-7.4.2-linux-x86_64/heartbeat.yml
RUN wget http://www.313.co.kr/nexus/content/repositories/StandardProject/313devgrp/filebeat/7.4.2-linux/filebeat-7.4.2-linux-x86_64.tar.gz
RUN tar zxvf filebeat-7.4.2-linux-x86_64.tar.gz
-ADD script/yml/monitoring/client/filebeat.yml ./filebeat-7.4.2-linux-x86_64/filebeat.yml
+ADD src/main/resources/script/yml/monitoring/client/filebeat.yml ./filebeat-7.4.2-linux-x86_64/filebeat.yml
RUN wget http://www.313.co.kr/nexus/content/repositories/StandardProject/313devgrp/elastic-apm-agent/1.18.1/elastic-apm-agent-1.18.1.jar
RUN mv elastic-apm-agent-1.18.1.jar /usr/local/tomcat/lib/elastic-apm-agent.jar
Index: web-module/pom.xml
===================================================================
diff -u -r2c57e5c1575442a8cd43c1e1739986728badd369 -r65f92f6c8c69e7878c00096760348b55997ed1b1
--- web-module/pom.xml (.../pom.xml) (revision 2c57e5c1575442a8cd43c1e1739986728badd369)
+++ web-module/pom.xml (.../pom.xml) (revision 65f92f6c8c69e7878c00096760348b55997ed1b1)
@@ -7,7 +7,7 @@
<groupId>313devgrp</groupId>
<artifactId>java-service-tree-framework-backend-server</artifactId>
- <version>22.06.22</version>
+ <version>22.06.23</version>
<relativePath>../pom.xml</relativePath>
Index: web-module/src/main/resources/script/yml/elfk/client/filebeat.yml
===================================================================
diff -u
--- web-module/src/main/resources/script/yml/elfk/client/filebeat.yml (revision 0)
+++ web-module/src/main/resources/script/yml/elfk/client/filebeat.yml (revision 65f92f6c8c69e7878c00096760348b55997ed1b1)
@@ -0,0 +1,217 @@
+###################### Filebeat Configuration Example #########################
+
+# This file is an example configuration file highlighting only the most common
+# options. The filebeat.reference.yml file from the same directory contains all the
+# supported options with more comments. You can use it as a reference.
+#
+# You can find the full configuration reference here:
+# https://www.elastic.co/guide/en/beats/filebeat/index.html
+
+# For more available modules and options, please see the filebeat.reference.yml sample
+# configuration file.
+
+#=========================== Filebeat inputs =============================
+
+filebeat.inputs:
+
+ # Each - is an input. Most options can be set at the input level, so
+ # you can use different inputs for various configurations.
+ # Below are the input specific configurations.
+
+ - type: log
+
+ # Change to true to enable this input configuration.
+ enabled: true
+
+ # Paths that should be crawled and fetched. Glob based paths.
+ paths:
+ - /usr/local/tomcat/logs/*.log
+ - /usr/local/tomcat/logs/*.txt
+ #- c:\programdata\elasticsearch\logs\*
+
+ # Exclude lines. A list of regular expressions to match. It drops the lines that are
+ # matching any regular expression from the list.
+ #exclude_lines: ['^DBG']
+
+ # Include lines. A list of regular expressions to match. It exports the lines that are
+ # matching any regular expression from the list.
+ #include_lines: ['^ERR', '^WARN']
+
+ # Exclude files. A list of regular expressions to match. Filebeat drops the files that
+ # are matching any regular expression from the list. By default, no files are dropped.
+ #exclude_files: ['.gz$']
+
+ # Optional additional fields. These fields can be freely picked
+ # to add additional information to the crawled log files for filtering
+ #fields:
+ # level: debug
+ # review: 1
+
+ ### Multiline options
+
+ # Multiline can be used for log messages spanning multiple lines. This is common
+ # for Java Stack Traces or C-Line Continuation
+
+ # The regexp Pattern that has to be matched. The example pattern matches all lines starting with [
+ #multiline.pattern: ^\[
+
+ # Defines if the pattern set under pattern should be negated or not. Default is false.
+ #multiline.negate: false
+
+ # Match can be set to "after" or "before". It is used to define if lines should be appended to a pattern
+ # that was (not) matched before or after, or as long as a pattern is not matched based on negate.
+ # Note: After is the equivalent to previous and before is the equivalent to next in Logstash
+ #multiline.match: after
+
+
+#============================= Filebeat modules ===============================
+
+filebeat.config.modules:
+ # Glob pattern for configuration loading
+ path: ${path.config}/modules.d/*.yml
+
+ # Set to true to enable config reloading
+ reload.enabled: false
+
+ # Period on which files under path should be checked for changes
+ #reload.period: 10s
+
+#==================== Elasticsearch template setting ==========================
+
+setup.template.settings:
+ index.number_of_shards: 1
+ #index.codec: best_compression
+ #_source.enabled: false
+
+#================================ General =====================================
+
+# The name of the shipper that publishes the network data. It can be used to group
+# all the transactions sent by a single shipper in the web interface.
+#name:
+
+# The tags of the shipper are included in their own field with each
+# transaction published.
+#tags: ["service-X", "web-tier"]
+
+# Optional fields that you can specify to add additional information to the
+# output.
+#fields:
+# env: staging
+
+
+#============================== Dashboards =====================================
+# These settings control loading the sample dashboards to the Kibana index. Loading
+# the dashboards is disabled by default and can be enabled either by setting the
+# options here or by using the `setup` command.
+#setup.dashboards.enabled: false
+
+# The URL from where to download the dashboards archive. By default this URL
+# has a value which is computed based on the Beat name and version. For released
+# versions, this URL points to the dashboard archive on the artifacts.elastic.co
+# website.
+#setup.dashboards.url:
+
+#============================== Kibana =====================================
+
+# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API.
+# This requires a Kibana endpoint configuration.
+setup.kibana:
+
+# Kibana Host
+# Scheme and port can be left out and will be set to the default (http and 5601)
+# In case you specify an additional path, the scheme is required: http://localhost:5601/path
+# IPv6 addresses should always be defined as: https://[2001:db8::1]:5601
+#host: "localhost:5601"
+
+# Kibana Space ID
+# ID of the Kibana Space into which the dashboards should be loaded. By default,
+# the Default Space will be used.
+#space.id:
+
+#============================= Elastic Cloud ==================================
+
+# These settings simplify using Filebeat with the Elastic Cloud (https://cloud.elastic.co/).
+
+# The cloud.id setting overwrites the `output.elasticsearch.hosts` and
+# `setup.kibana.host` options.
+# You can find the `cloud.id` in the Elastic Cloud web UI.
+#cloud.id:
+
+# The cloud.auth setting overwrites the `output.elasticsearch.username` and
+# `output.elasticsearch.password` settings. The format is `<user>:<pass>`.
+#cloud.auth:
+
+#================================ Outputs =====================================
+
+# Configure what output to use when sending the data collected by the beat.
+
+#-------------------------- Elasticsearch output ------------------------------
+#output.elasticsearch:
+# Array of hosts to connect to.
+#hosts: ["localhost:9200"]
+
+# Optional protocol and basic auth credentials.
+#protocol: "https"
+#username: "elastic"
+#password: "changeme"
+
+#----------------------------- Logstash output --------------------------------
+output.logstash:
+ # The Logstash hosts
+ hosts: ["192.168.25.46:5044"]
+
+ # Optional SSL. By default is off.
+ # List of root certificates for HTTPS server verifications
+ #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
+
+ # Certificate for SSL client authentication
+ #ssl.certificate: "/etc/pki/client/cert.pem"
+
+ # Client Certificate Key
+ #ssl.key: "/etc/pki/client/cert.key"
+
+#================================ Processors =====================================
+
+# Configure processors to enhance or manipulate events generated by the beat.
+
+processors:
+ - add_host_metadata: ~
+ - add_cloud_metadata: ~
+
+#================================ Logging =====================================
+
+# Sets log level. The default log level is info.
+# Available log levels are: error, warning, info, debug
+#logging.level: debug
+
+# At debug level, you can selectively enable logging only for some components.
+# To enable all selectors use ["*"]. Examples of other selectors are "beat",
+# "publish", "service".
+#logging.selectors: ["*"]
+
+#============================== X-Pack Monitoring ===============================
+# filebeat can export internal metrics to a central Elasticsearch monitoring
+# cluster. This requires xpack monitoring to be enabled in Elasticsearch. The
+# reporting is disabled by default.
+
+# Set to true to enable the monitoring reporter.
+#monitoring.enabled: false
+
+# Sets the UUID of the Elasticsearch cluster under which monitoring data for this
+# Filebeat instance will appear in the Stack Monitoring UI. If output.elasticsearch
+# is enabled, the UUID is derived from the Elasticsearch cluster referenced by output.elasticsearch.
+#monitoring.cluster_uuid:
+
+# Uncomment to send the metrics to Elasticsearch. Most settings from the
+# Elasticsearch output are accepted here as well.
+# Note that the settings should point to your Elasticsearch *monitoring* cluster.
+# Any setting that is not set is automatically inherited from the Elasticsearch
+# output configuration, so if you have the Elasticsearch output configured such
+# that it is pointing to your Elasticsearch monitoring cluster, you can simply
+# uncomment the following line.
+#monitoring.elasticsearch:
+
+#================================= Migration ==================================
+
+# This allows to enable 6.7 migration aliases
+#migration.6_to_7.enabled: true
\ No newline at end of file
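For reference (not part of the change set above): the multiline options left commented in this filebeat.yml are what would capture Java stack traces from the Tomcat logs as single events. A minimal sketch, assuming a log line begins with a date in the usual Tomcat format (the pattern is an assumption and would need adjusting to the actual layout):

    filebeat.inputs:
      - type: log
        enabled: true
        paths:
          - /usr/local/tomcat/logs/*.log
        # Treat any line that does not start with a date as a continuation of the
        # previous event (the timestamp format here is assumed).
        multiline.pattern: '^[0-9]{2}-[A-Za-z]{3}-[0-9]{4}'
        multiline.negate: true
        multiline.match: after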
Index: web-module/src/main/resources/script/yml/elfk/client/heartbeat.yml
===================================================================
diff -u
--- web-module/src/main/resources/script/yml/elfk/client/heartbeat.yml (revision 0)
+++ web-module/src/main/resources/script/yml/elfk/client/heartbeat.yml (revision 65f92f6c8c69e7878c00096760348b55997ed1b1)
@@ -0,0 +1,165 @@
+################### Heartbeat Configuration Example #########################
+
+# This file is an example configuration file highlighting only some common options.
+# The heartbeat.reference.yml file in the same directory contains all the supported options
+# with detailed comments. You can use it for reference.
+#
+# You can find the full configuration reference here:
+# https://www.elastic.co/guide/en/beats/heartbeat/index.html
+
+############################# Heartbeat ######################################
+
+# Define a directory to load monitor definitions from. Definitions take the form
+# of individual yaml files.
+heartbeat.config.monitors:
+ # Directory + glob pattern to search for configuration files
+ path: ${path.config}/monitors.d/*.yml
+ # If enabled, heartbeat will periodically check the config.monitors path for changes
+ reload.enabled: false
+ # How often to check for changes
+ reload.period: 5s
+
+# Configure monitors inline
+heartbeat.monitors:
+ - type: http
+
+ # List of urls to query
+ urls: ["http://www.313.co.kr/nas","http://www.313.co.kr/nexus"]
+
+ # Configure task schedule
+ schedule: '@every 10s'
+
+ # Total test connection and data exchange timeout
+ #timeout: 16s
+
+#==================== Elasticsearch template setting ==========================
+
+setup.template.settings:
+ index.number_of_shards: 1
+ index.codec: best_compression
+ #_source.enabled: false
+
+#================================ General =====================================
+
+# The name of the shipper that publishes the network data. It can be used to group
+# all the transactions sent by a single shipper in the web interface.
+#name:
+
+# The tags of the shipper are included in their own field with each
+# transaction published.
+#tags: ["service-X", "web-tier"]
+
+# Optional fields that you can specify to add additional information to the
+# output.
+#fields:
+# env: staging
+
+
+#============================== Kibana =====================================
+
+# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API.
+# This requires a Kibana endpoint configuration.
+setup.kibana:
+
+# Kibana Host
+# Scheme and port can be left out and will be set to the default (http and 5601)
+# In case you specify an additional path, the scheme is required: http://localhost:5601/path
+# IPv6 addresses should always be defined as: https://[2001:db8::1]:5601
+#host: "localhost:5601"
+
+# Kibana Space ID
+# ID of the Kibana Space into which the dashboards should be loaded. By default,
+# the Default Space will be used.
+#space.id:
+
+#============================= Elastic Cloud ==================================
+
+# These settings simplify using Heartbeat with the Elastic Cloud (https://cloud.elastic.co/).
+
+# The cloud.id setting overwrites the `output.elasticsearch.hosts` and
+# `setup.kibana.host` options.
+# You can find the `cloud.id` in the Elastic Cloud web UI.
+#cloud.id:
+
+# The cloud.auth setting overwrites the `output.elasticsearch.username` and
+# `output.elasticsearch.password` settings. The format is `<user>:<pass>`.
+#cloud.auth:
+
+#================================ Outputs =====================================
+
+# Configure what output to use when sending the data collected by the beat.
+
+#-------------------------- Elasticsearch output ------------------------------
+#output.elasticsearch:
+# Array of hosts to connect to.
+#hosts: ["localhost:9200"]
+
+# Optional protocol and basic auth credentials.
+#protocol: "https"
+#username: "elastic"
+#password: "changeme"
+
+#----------------------------- Logstash output --------------------------------
+output.logstash:
+ # The Logstash hosts
+ hosts: ["192.168.25.46:5044"]
+
+ # Optional SSL. By default is off.
+ # List of root certificates for HTTPS server verifications
+ #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
+
+ # Certificate for SSL client authentication
+ #ssl.certificate: "/etc/pki/client/cert.pem"
+
+ # Client Certificate Key
+ #ssl.key: "/etc/pki/client/cert.key"
+
+#================================ Processors =====================================
+
+processors:
+ - add_observer_metadata:
+ # Optional, but recommended geo settings for the location Heartbeat is running in
+ #geo:
+ # Token describing this location
+ #name: us-east-1a
+
+ # Lat, Lon
+ #location: "37.926868, -78.024902"
+
+#================================ Logging =====================================
+
+# Sets log level. The default log level is info.
+# Available log levels are: error, warning, info, debug
+#logging.level: debug
+
+# At debug level, you can selectively enable logging only for some components.
+# To enable all selectors use ["*"]. Examples of other selectors are "beat",
+# "publish", "service".
+#logging.selectors: ["*"]
+
+#============================== X-Pack Monitoring ===============================
+# heartbeat can export internal metrics to a central Elasticsearch monitoring
+# cluster. This requires xpack monitoring to be enabled in Elasticsearch. The
+# reporting is disabled by default.
+
+# Set to true to enable the monitoring reporter.
+#monitoring.enabled: false
+
+# Sets the UUID of the Elasticsearch cluster under which monitoring data for this
+# Heartbeat instance will appear in the Stack Monitoring UI. If output.elasticsearch
+# is enabled, the UUID is derived from the Elasticsearch cluster referenced by output.elasticsearch.
+#monitoring.cluster_uuid:
+
+# Uncomment to send the metrics to Elasticsearch. Most settings from the
+# Elasticsearch output are accepted here as well.
+# Note that the settings should point to your Elasticsearch *monitoring* cluster.
+# Any setting that is not set is automatically inherited from the Elasticsearch
+# output configuration, so if you have the Elasticsearch output configured such
+# that it is pointing to your Elasticsearch monitoring cluster, you can simply
+# uncomment the following line.
+#monitoring.elasticsearch:
+
+#================================= Migration ==================================
+
+# This allows to enable 6.7 migration aliases
+#migration.6_to_7.enabled: true
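For reference (not part of the change set above): further monitors can be declared alongside the http monitor in this heartbeat.yml, for example a TCP check against the Logstash endpoint that all the Beats in this revision ship to. The host and schedule below simply reuse values already present in these files; treat them as an illustrative sketch.

    heartbeat.monitors:
      - type: http
        urls: ["http://www.313.co.kr/nas", "http://www.313.co.kr/nexus"]
        schedule: '@every 10s'
      - type: tcp
        # Assumed: watch the Logstash beats port used by the outputs in these configs.
        hosts: ["192.168.25.46:5044"]
        schedule: '@every 30s'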
Index: web-module/src/main/resources/script/yml/elfk/client/metricbeat.yml
===================================================================
diff -u
--- web-module/src/main/resources/script/yml/elfk/client/metricbeat.yml (revision 0)
+++ web-module/src/main/resources/script/yml/elfk/client/metricbeat.yml (revision 65f92f6c8c69e7878c00096760348b55997ed1b1)
@@ -0,0 +1,160 @@
+###################### Metricbeat Configuration Example #######################
+
+# This file is an example configuration file highlighting only the most common
+# options. The metricbeat.reference.yml file from the same directory contains all the
+# supported options with more comments. You can use it as a reference.
+#
+# You can find the full configuration reference here:
+# https://www.elastic.co/guide/en/beats/metricbeat/index.html
+
+#========================== Modules configuration ============================
+
+metricbeat.config.modules:
+ # Glob pattern for configuration loading
+ path: ${path.config}/modules.d/*.yml
+
+ # Set to true to enable config reloading
+ reload.enabled: false
+
+ # Period on which files under path should be checked for changes
+ #reload.period: 10s
+
+#==================== Elasticsearch template setting ==========================
+
+setup.template.settings:
+ index.number_of_shards: 1
+ index.codec: best_compression
+ #_source.enabled: false
+
+#================================ General =====================================
+
+# The name of the shipper that publishes the network data. It can be used to group
+# all the transactions sent by a single shipper in the web interface.
+#name:
+
+# The tags of the shipper are included in their own field with each
+# transaction published.
+#tags: ["service-X", "web-tier"]
+
+# Optional fields that you can specify to add additional information to the
+# output.
+#fields:
+# env: staging
+
+
+#============================== Dashboards =====================================
+# These settings control loading the sample dashboards to the Kibana index. Loading
+# the dashboards is disabled by default and can be enabled either by setting the
+# options here or by using the `setup` command.
+#setup.dashboards.enabled: false
+
+# The URL from where to download the dashboards archive. By default this URL
+# has a value which is computed based on the Beat name and version. For released
+# versions, this URL points to the dashboard archive on the artifacts.elastic.co
+# website.
+#setup.dashboards.url:
+
+#============================== Kibana =====================================
+
+# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API.
+# This requires a Kibana endpoint configuration.
+setup.kibana:
+
+# Kibana Host
+# Scheme and port can be left out and will be set to the default (http and 5601)
+# In case you specify an additional path, the scheme is required: http://localhost:5601/path
+# IPv6 addresses should always be defined as: https://[2001:db8::1]:5601
+#host: "localhost:5601"
+
+# Kibana Space ID
+# ID of the Kibana Space into which the dashboards should be loaded. By default,
+# the Default Space will be used.
+#space.id:
+
+#============================= Elastic Cloud ==================================
+
+# These settings simplify using Metricbeat with the Elastic Cloud (https://cloud.elastic.co/).
+
+# The cloud.id setting overwrites the `output.elasticsearch.hosts` and
+# `setup.kibana.host` options.
+# You can find the `cloud.id` in the Elastic Cloud web UI.
+#cloud.id:
+
+# The cloud.auth setting overwrites the `output.elasticsearch.username` and
+# `output.elasticsearch.password` settings. The format is `<user>:<pass>`.
+#cloud.auth:
+
+#================================ Outputs =====================================
+
+# Configure what output to use when sending the data collected by the beat.
+
+#-------------------------- Elasticsearch output ------------------------------
+#output.elasticsearch:
+# Array of hosts to connect to.
+#hosts: ["localhost:9200"]
+
+# Optional protocol and basic auth credentials.
+#protocol: "https"
+#username: "elastic"
+#password: "changeme"
+
+#----------------------------- Logstash output --------------------------------
+output.logstash:
+ # The Logstash hosts
+ hosts: ["192.168.25.46:5044"]
+
+ # Optional SSL. By default is off.
+ # List of root certificates for HTTPS server verifications
+ #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
+
+ # Certificate for SSL client authentication
+ #ssl.certificate: "/etc/pki/client/cert.pem"
+
+ # Client Certificate Key
+ #ssl.key: "/etc/pki/client/cert.key"
+
+#================================ Processors =====================================
+
+# Configure processors to enhance or manipulate events generated by the beat.
+
+processors:
+ - add_host_metadata: ~
+ - add_cloud_metadata: ~
+
+#================================ Logging =====================================
+
+# Sets log level. The default log level is info.
+# Available log levels are: error, warning, info, debug
+#logging.level: debug
+
+# At debug level, you can selectively enable logging only for some components.
+# To enable all selectors use ["*"]. Examples of other selectors are "beat",
+# "publish", "service".
+#logging.selectors: ["*"]
+
+#============================== X-Pack Monitoring ===============================
+# metricbeat can export internal metrics to a central Elasticsearch monitoring
+# cluster. This requires xpack monitoring to be enabled in Elasticsearch. The
+# reporting is disabled by default.
+
+# Set to true to enable the monitoring reporter.
+#monitoring.enabled: false
+
+# Sets the UUID of the Elasticsearch cluster under which monitoring data for this
+# Metricbeat instance will appear in the Stack Monitoring UI. If output.elasticsearch
+# is enabled, the UUID is derived from the Elasticsearch cluster referenced by output.elasticsearch.
+#monitoring.cluster_uuid:
+
+# Uncomment to send the metrics to Elasticsearch. Most settings from the
+# Elasticsearch output are accepted here as well.
+# Note that the settings should point to your Elasticsearch *monitoring* cluster.
+# Any setting that is not set is automatically inherited from the Elasticsearch
+# output configuration, so if you have the Elasticsearch output configured such
+# that it is pointing to your Elasticsearch monitoring cluster, you can simply
+# uncomment the following line.
+#monitoring.elasticsearch:
+
+#================================= Migration ==================================
+
+# This allows to enable 6.7 migration aliases
+#migration.6_to_7.enabled: true
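For reference (not part of the change set above): metricbeat.config.modules in this file loads module definitions from modules.d/*.yml, which are not included in this revision. A minimal sketch of such a file, assuming the stock system module with illustrative metricsets and period:

    # modules.d/system.yml (illustrative)
    - module: system
      period: 10s
      metricsets:
        - cpu
        - memory
        - filesystem
        - process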
Index: web-module/src/main/resources/script/yml/elfk/client/packetbeat.yml
===================================================================
diff -u
--- web-module/src/main/resources/script/yml/elfk/client/packetbeat.yml (revision 0)
+++ web-module/src/main/resources/script/yml/elfk/client/packetbeat.yml (revision 65f92f6c8c69e7878c00096760348b55997ed1b1)
@@ -0,0 +1,242 @@
+#################### Packetbeat Configuration Example #########################
+
+# This file is an example configuration file highlighting only the most common
+# options. The packetbeat.reference.yml file from the same directory contains all the
+# supported options with more comments. You can use it as a reference.
+#
+# You can find the full configuration reference here:
+# https://www.elastic.co/guide/en/beats/packetbeat/index.html
+
+#============================== Network device ================================
+
+# Select the network interface to sniff the data. On Linux, you can use the
+# "any" keyword to sniff on all connected interfaces.
+packetbeat.interfaces.device: any
+
+#================================== Flows =====================================
+
+# Set `enabled: false` or comment out all options to disable flows reporting.
+packetbeat.flows:
+ # Set network flow timeout. Flow is killed if no packet is received before being
+ # timed out.
+ timeout: 30s
+
+ # Configure reporting period. If set to -1, only killed flows will be reported
+ period: 10s
+
+#========================== Transaction protocols =============================
+
+packetbeat.protocols:
+ - type: icmp
+ # Enable ICMPv4 and ICMPv6 monitoring. Default: false
+ enabled: true
+
+ - type: amqp
+ # Configure the ports where to listen for AMQP traffic. You can disable
+ # the AMQP protocol by commenting out the list of ports.
+ ports: [5672]
+
+ - type: cassandra
+ #Cassandra port for traffic monitoring.
+ ports: [9042]
+
+ - type: dhcpv4
+ # Configure the DHCP for IPv4 ports.
+ ports: [67, 68]
+
+ - type: dns
+ # Configure the ports where to listen for DNS traffic. You can disable
+ # the DNS protocol by commenting out the list of ports.
+ ports: [53]
+
+ - type: http
+ # Configure the ports where to listen for HTTP traffic. You can disable
+ # the HTTP protocol by commenting out the list of ports.
+ ports: [80, 8080, 8000, 5000, 8002]
+
+ - type: memcache
+ # Configure the ports where to listen for memcache traffic. You can disable
+ # the Memcache protocol by commenting out the list of ports.
+ ports: [11211]
+
+ - type: mysql
+ # Configure the ports where to listen for MySQL traffic. You can disable
+ # the MySQL protocol by commenting out the list of ports.
+ ports: [3306,3307]
+
+ - type: pgsql
+ # Configure the ports where to listen for Pgsql traffic. You can disable
+ # the Pgsql protocol by commenting out the list of ports.
+ ports: [5432]
+
+ - type: redis
+ # Configure the ports where to listen for Redis traffic. You can disable
+ # the Redis protocol by commenting out the list of ports.
+ ports: [6379]
+
+ - type: thrift
+ # Configure the ports where to listen for Thrift-RPC traffic. You can disable
+ # the Thrift-RPC protocol by commenting out the list of ports.
+ ports: [9090]
+
+ - type: mongodb
+ # Configure the ports where to listen for MongoDB traffic. You can disable
+ # the MongoDB protocol by commenting out the list of ports.
+ ports: [27017]
+
+ - type: nfs
+ # Configure the ports where to listen for NFS traffic. You can disable
+ # the NFS protocol by commenting out the list of ports.
+ ports: [2049]
+
+ - type: tls
+ # Configure the ports where to listen for TLS traffic. You can disable
+ # the TLS protocol by commenting out the list of ports.
+ ports:
+ - 443 # HTTPS
+ - 993 # IMAPS
+ - 995 # POP3S
+ - 5223 # XMPP over SSL
+ - 8443
+ - 8883 # Secure MQTT
+ - 9243 # Elasticsearch
+
+#==================== Elasticsearch template setting ==========================
+
+setup.template.settings:
+ index.number_of_shards: 1
+ #index.codec: best_compression
+ #_source.enabled: false
+
+#================================ General =====================================
+
+# The name of the shipper that publishes the network data. It can be used to group
+# all the transactions sent by a single shipper in the web interface.
+#name:
+
+# The tags of the shipper are included in their own field with each
+# transaction published.
+#tags: ["service-X", "web-tier"]
+
+# Optional fields that you can specify to add additional information to the
+# output.
+#fields:
+# env: staging
+
+
+#============================== Dashboards =====================================
+# These settings control loading the sample dashboards to the Kibana index. Loading
+# the dashboards is disabled by default and can be enabled either by setting the
+# options here or by using the `setup` command.
+#setup.dashboards.enabled: false
+
+# The URL from where to download the dashboards archive. By default this URL
+# has a value which is computed based on the Beat name and version. For released
+# versions, this URL points to the dashboard archive on the artifacts.elastic.co
+# website.
+#setup.dashboards.url:
+
+#============================== Kibana =====================================
+
+# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API.
+# This requires a Kibana endpoint configuration.
+setup.kibana:
+
+# Kibana Host
+# Scheme and port can be left out and will be set to the default (http and 5601)
+# In case you specify an additional path, the scheme is required: http://localhost:5601/path
+# IPv6 addresses should always be defined as: https://[2001:db8::1]:5601
+#host: "localhost:5601"
+
+# Kibana Space ID
+# ID of the Kibana Space into which the dashboards should be loaded. By default,
+# the Default Space will be used.
+#space.id:
+
+#============================= Elastic Cloud ==================================
+
+# These settings simplify using Packetbeat with the Elastic Cloud (https://cloud.elastic.co/).
+
+# The cloud.id setting overwrites the `output.elasticsearch.hosts` and
+# `setup.kibana.host` options.
+# You can find the `cloud.id` in the Elastic Cloud web UI.
+#cloud.id:
+
+# The cloud.auth setting overwrites the `output.elasticsearch.username` and
+# `output.elasticsearch.password` settings. The format is `<user>:<pass>`.
+#cloud.auth:
+
+#================================ Outputs =====================================
+
+# Configure what output to use when sending the data collected by the beat.
+
+#-------------------------- Elasticsearch output ------------------------------
+#output.elasticsearch:
+# Array of hosts to connect to.
+#hosts: ["localhost:9200"]
+
+# Optional protocol and basic auth credentials.
+#protocol: "https"
+#username: "elastic"
+#password: "changeme"
+
+#----------------------------- Logstash output --------------------------------
+output.logstash:
+ # The Logstash hosts
+ hosts: ["192.168.25.46:5044"]
+
+ # Optional SSL. By default is off.
+ # List of root certificates for HTTPS server verifications
+ #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
+
+ # Certificate for SSL client authentication
+ #ssl.certificate: "/etc/pki/client/cert.pem"
+
+ # Client Certificate Key
+ #ssl.key: "/etc/pki/client/cert.key"
+
+#================================ Processors =====================================
+
+# Configure processors to enhance or manipulate events generated by the beat.
+
+processors:
+ - add_host_metadata: ~
+ - add_cloud_metadata: ~
+
+#================================ Logging =====================================
+
+# Sets log level. The default log level is info.
+# Available log levels are: error, warning, info, debug
+#logging.level: debug
+
+# At debug level, you can selectively enable logging only for some components.
+# To enable all selectors use ["*"]. Examples of other selectors are "beat",
+# "publish", "service".
+#logging.selectors: ["*"]
+
+#============================== X-Pack Monitoring ===============================
+# packetbeat can export internal metrics to a central Elasticsearch monitoring
+# cluster. This requires xpack monitoring to be enabled in Elasticsearch. The
+# reporting is disabled by default.
+
+# Set to true to enable the monitoring reporter.
+#monitoring.enabled: false
+
+# Sets the UUID of the Elasticsearch cluster under which monitoring data for this
+# Packetbeat instance will appear in the Stack Monitoring UI. If output.elasticsearch
+# is enabled, the UUID is derived from the Elasticsearch cluster referenced by output.elasticsearch.
+#monitoring.cluster_uuid:
+
+# Uncomment to send the metrics to Elasticsearch. Most settings from the
+# Elasticsearch output are accepted here as well.
+# Note that the settings should point to your Elasticsearch *monitoring* cluster.
+# Any setting that is not set is automatically inherited from the Elasticsearch
+# output configuration, so if you have the Elasticsearch output configured such
+# that it is pointing to your Elasticsearch monitoring cluster, you can simply
+# uncomment the following line.
+#monitoring.elasticsearch:
+
+#================================= Migration ==================================
+
+# This allows to enable 6.7 migration aliases
+#migration.6_to_7.enabled: true
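For reference (not part of the change set above): each of the 7.x Beats added here sends to Logstash over a plain connection. If the SSL options left commented in these outputs were enabled, the section would look roughly like the sketch below; the certificate paths are assumptions and would have to match the actual PKI layout.

    output.logstash:
      hosts: ["192.168.25.46:5044"]
      # Assumed certificate locations; adjust to the real PKI layout.
      ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
      ssl.certificate: "/etc/pki/client/cert.pem"
      ssl.key: "/etc/pki/client/cert.key"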
Index: web-module/src/main/resources/script/yml/elfk/client/topbeat.yml
===================================================================
diff -u
--- web-module/src/main/resources/script/yml/elfk/client/topbeat.yml (revision 0)
+++ web-module/src/main/resources/script/yml/elfk/client/topbeat.yml (revision 65f92f6c8c69e7878c00096760348b55997ed1b1)
@@ -0,0 +1,280 @@
+################### Topbeat Configuration Example #########################
+
+############################# Input ############################################
+input:
+ # In seconds, defines how often to read server statistics
+ period: 10
+
+ # Regular expression to match the processes that are monitored
+ # By default, all the processes are monitored
+ procs: [".*"]
+
+ # Statistics to collect (all enabled by default)
+ stats:
+ # per system statistics, by default is true
+ system: true
+
+ # per process statistics, by default is true
+ process: true
+
+ # file system information, by default is true
+ filesystem: true
+
+ # cpu usage per core, by default is false
+ cpu_per_core: false
+
+
+###############################################################################
+############################# Libbeat Config ##################################
+# Base config file used by all other beats for using libbeat features
+
+############################# Output ##########################################
+
+# Configure what outputs to use when sending the data collected by the beat.
+# Multiple outputs may be used.
+output:
+
+ ### Elasticsearch as output
+ #elasticsearch:
+ # Array of hosts to connect to.
+ # Scheme and port can be left out and will be set to the default (http and 9200)
+ # In case you specify an additional path, the scheme is required: http://localhost:9200/path
+ # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200
+ #hosts: ["localhost:9200"]
+
+ # Optional protocol and basic auth credentials.
+ #protocol: "https"
+ #username: "admin"
+ #password: "s3cr3t"
+
+ # Number of workers per Elasticsearch host.
+ #worker: 1
+
+ # Optional index name. The default is "topbeat" and generates
+ # [topbeat-]YYYY.MM.DD keys.
+ #index: "topbeat"
+
+ # A template is used to set the mapping in Elasticsearch
+ # By default template loading is disabled and no template is loaded.
+ # These settings can be adjusted to load your own template or overwrite existing ones
+ #template:
+
+ # Template name. By default the template name is topbeat.
+ #name: "topbeat"
+
+ # Path to template file
+ #path: "topbeat.template.json"
+
+ # Overwrite existing template
+ #overwrite: false
+
+ # Optional HTTP Path
+ #path: "/elasticsearch"
+
+ # Proxy server url
+ #proxy_url: http://proxy:3128
+
+ # The number of times a particular Elasticsearch index operation is attempted. If
+ # the indexing operation doesn't succeed after this many retries, the events are
+ # dropped. The default is 3.
+ #max_retries: 3
+
+ # The maximum number of events to bulk in a single Elasticsearch bulk API index request.
+ # The default is 50.
+ #bulk_max_size: 50
+
+ # Configure http request timeout before failing a request to Elasticsearch.
+ #timeout: 90
+
+ # The number of seconds to wait for new events between two bulk API index requests.
+ # If `bulk_max_size` is reached before this interval expires, additional bulk index
+ # requests are made.
+ #flush_interval: 1
+
+ # Boolean that sets if the topology is kept in Elasticsearch. The default is
+ # false. This option makes sense only for Packetbeat.
+ #save_topology: false
+
+ # The time to live in seconds for the topology information that is stored in
+ # Elasticsearch. The default is 15 seconds.
+ #topology_expire: 15
+
+ # tls configuration. By default is off.
+ #tls:
+ # List of root certificates for HTTPS server verifications
+ #certificate_authorities: ["/etc/pki/root/ca.pem"]
+
+ # Certificate for TLS client authentication
+ #certificate: "/etc/pki/client/cert.pem"
+
+ # Client Certificate Key
+ #certificate_key: "/etc/pki/client/cert.key"
+
+ # Controls whether the client verifies server certificates and host name.
+ # If insecure is set to true, all server host names and certificates will be
+ # accepted. In this mode TLS based connections are susceptible to
+ # man-in-the-middle attacks. Use only for testing.
+ #insecure: true
+
+ # Configure cipher suites to be used for TLS connections
+ #cipher_suites: []
+
+ # Configure curve types for ECDHE based cipher suites
+ #curve_types: []
+
+ # Configure minimum TLS version allowed for connection to logstash
+ #min_version: 1.0
+
+ # Configure maximum TLS version allowed for connection to logstash
+ #max_version: 1.2
+
+
+ ### Logstash as output
+ logstash:
+ # The Logstash hosts
+ hosts: ["192.168.25.46:5044"]
+
+ # Number of workers per Logstash host.
+ #worker: 1
+
+ # The maximum number of events to bulk into a single batch window. The
+ # default is 2048.
+ #bulk_max_size: 2048
+
+ # Set gzip compression level.
+ #compression_level: 3
+
+ # Optionally load balance the events between the Logstash hosts
+ #loadbalance: true
+
+ # Optional index name. The default index name depends on each beat.
+ # For Packetbeat, the default is set to packetbeat, for Topbeat
+ # to topbeat and for Filebeat to filebeat.
+ #index: topbeat
+
+ # Optional TLS. By default is off.
+ #tls:
+ # List of root certificates for HTTPS server verifications
+ #certificate_authorities: ["/etc/pki/root/ca.pem"]
+
+ # Certificate for TLS client authentication
+ #certificate: "/etc/pki/client/cert.pem"
+
+ # Client Certificate Key
+ #certificate_key: "/etc/pki/client/cert.key"
+
+ # Controls whether the client verifies server certificates and host name.
+ # If insecure is set to true, all server host names and certificates will be
+ # accepted. In this mode TLS based connections are susceptible to
+ # man-in-the-middle attacks. Use only for testing.
+ #insecure: true
+
+ # Configure cipher suites to be used for TLS connections
+ #cipher_suites: []
+
+ # Configure curve types for ECDHE based cipher suites
+ #curve_types: []
+
+
+ ### File as output
+ #file:
+ # Path to the directory where to save the generated files. The option is mandatory.
+ #path: "/tmp/topbeat"
+
+ # Name of the generated files. The default is `topbeat` and it generates files: `topbeat`, `topbeat.1`, `topbeat.2`, etc.
+ #filename: topbeat
+
+ # Maximum size in kilobytes of each file. When this size is reached, the files are
+ # rotated. The default value is 10 MB.
+ #rotate_every_kb: 10000
+
+ # Maximum number of files under path. When this number of files is reached, the
+ # oldest file is deleted and the rest are shifted from last to first. The default
+ # is 7 files.
+ #number_of_files: 7
+
+
+ ### Console output
+ # console:
+ # Pretty print json event
+ #pretty: false
+
+
+############################# Shipper #########################################
+
+shipper:
+# The name of the shipper that publishes the network data. It can be used to group
+# all the transactions sent by a single shipper in the web interface.
+# If this option is not defined, the hostname is used.
+#name:
+
+# The tags of the shipper are included in their own field with each
+# transaction published. Tags make it easy to group servers by different
+# logical properties.
+#tags: ["service-X", "web-tier"]
+
+# Uncomment the following if you want to ignore transactions created
+# by the server on which the shipper is installed. This option is useful
+# to remove duplicates if shippers are installed on multiple servers.
+#ignore_outgoing: true
+
+# How often (in seconds) shippers are publishing their IPs to the topology map.
+# The default is 10 seconds.
+#refresh_topology_freq: 10
+
+# Expiration time (in seconds) of the IPs published by a shipper to the topology map.
+# All the IPs will be deleted afterwards. Note that the value must be higher than
+# refresh_topology_freq. The default is 15 seconds.
+#topology_expire: 15
+
+# Internal queue size for single events in processing pipeline
+#queue_size: 1000
+
+# Configure local GeoIP database support.
+# If no paths are configured, geoip is disabled.
+#geoip:
+#paths:
+# - "/usr/share/GeoIP/GeoLiteCity.dat"
+# - "/usr/local/var/GeoIP/GeoLiteCity.dat"
+
+
+############################# Logging #########################################
+
+# There are three options for the log output: syslog, file, stderr.
+# Under Windows systems, the log files are sent to the file output by default;
+# on all other systems, they go to syslog by default.
+logging:
+
+ # Send all logging output to syslog. On Windows default is false, otherwise
+ # default is true.
+ #to_syslog: true
+
+ # Write all logging output to files. Beats automatically rotate files if rotateeverybytes
+ # limit is reached.
+ #to_files: false
+
+ # To enable logging to files, to_files option has to be set to true
+ files:
+ # The directory where the log files will be written to.
+ #path: /var/log/mybeat
+
+ # The name of the files where the logs are written to.
+ #name: mybeat
+
+ # Configure log file size limit. If limit is reached, log file will be
+ # automatically rotated
+ rotateeverybytes: 10485760 # = 10MB
+
+ # Number of rotated log files to keep. Oldest files will be deleted first.
+ #keepfiles: 7
+
+ # Enable debug output for selected components. To enable all selectors use ["*"]
+ # Other available selectors are beat, publish, service
+ # Multiple selectors can be chained.
+ #selectors: [ ]
+
+ # Sets log level. The default log level is error.
+ # Available log levels are: critical, error, warning, info, debug
+ #level: error
+
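For reference (not part of the change set above): Topbeat 1.3 predates the 7.x `ssl.*` output options used by the other Beats in this revision, so securing its Logstash connection uses the legacy `tls` block shown commented earlier in this file. A minimal sketch, with the certificate paths as assumptions:

    output:
      logstash:
        hosts: ["192.168.25.46:5044"]
        tls:
          # Assumed certificate locations; adjust to the real PKI layout.
          certificate_authorities: ["/etc/pki/root/ca.pem"]
          certificate: "/etc/pki/client/cert.pem"
          certificate_key: "/etc/pki/client/cert.key"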
+