Index: pom.xml =================================================================== diff -u -rec0dc317deb82dd5c06f549ebf4fe0dc4408495b -r3e2c8b7634389a7bc0ddee58812b44d1e711aecc --- pom.xml (.../pom.xml) (revision ec0dc317deb82dd5c06f549ebf4fe0dc4408495b) +++ pom.xml (.../pom.xml) (revision 3e2c8b7634389a7bc0ddee58812b44d1e711aecc) @@ -10,7 +10,7 @@ 4.0.0 313devgrp standard-project - 20.08.30 + 20.09.12 pom standard-project Index: web-module/pom.xml =================================================================== diff -u -rec0dc317deb82dd5c06f549ebf4fe0dc4408495b -r3e2c8b7634389a7bc0ddee58812b44d1e711aecc --- web-module/pom.xml (.../pom.xml) (revision ec0dc317deb82dd5c06f549ebf4fe0dc4408495b) +++ web-module/pom.xml (.../pom.xml) (revision 3e2c8b7634389a7bc0ddee58812b44d1e711aecc) @@ -7,7 +7,7 @@ 313devgrp standard-project - 20.08.30 + 20.09.12 ../pom.xml @@ -135,7 +135,7 @@ 313devgrp scouter - 2.8.3 + 2.8.2 conf false ${project.build.directory} Index: web-module/script/yml/monitoring/allinone.yml =================================================================== diff -u --- web-module/script/yml/monitoring/allinone.yml (revision 0) +++ web-module/script/yml/monitoring/allinone.yml (revision 3e2c8b7634389a7bc0ddee58812b44d1e711aecc) @@ -0,0 +1,529 @@ +version: '3.6' + +services: + + #https://github.com/jakubhajek/elasticsearch-docker-swarm/blob/master/stack-elastic.yml + # elasticsearch coordinating node + # /etc/security/limits.conf + # * hard memlock unlimited + # * soft memlock unlimited + # * hard nofile 65536 + # * soft nofile 65536 + # * hard nproc 65536 + # * soft nproc 65536 + + # sysctl -w vm.max_map_count=262144 + # /etc/sysctl.conf + # vm.max_map_count=262144 + + # /lib/systemd/system/docker.service + # LimitMEMLOCK=infinity + + # elasticsearch master node + es-master-01: + image: docker.elastic.co/elasticsearch/elasticsearch:7.4.0 + hostname: es-master-01 + volumes: + - es-master-01-data:/usr/share/elasticsearch/data + - /etc/timezone:/etc/timezone:ro + - /etc/localtime:/etc/localtime:ro + environment: &es-master-env + cluster.name: es-swarm-cluster + node.name: "es-master-01" + discovery.seed_hosts: "es-master-01,es-master-02,es-master-03" + cluster.initial_master_nodes: "es-master-01,es-master-02,es-master-03" + node.master: "true" + node.voting_only: "false" + node.data: "true" + node.ingest: "false" + node.ml: "false" + xpack.ml.enabled: "true" + cluster.remote.connect: "false" + MAX_LOCKED_MEMORY: unlimited + bootstrap.memory_lock: "true" + ES_JAVA_OPTS: "-Xms512m -Xmx512m" + configs: &es-limits-conf + - source: es-limits.conf + target: /etc/security/limits.conf + networks: &efk-network + - efk + deploy: + endpoint_mode: dnsrr + mode: replicated + replicas: 1 + + # elasticsearch master node + es-master-02: + image: docker.elastic.co/elasticsearch/elasticsearch:7.4.0 + hostname: es-master-02 + volumes: + - es-master-02-data:/usr/share/elasticsearch/data + - /etc/timezone:/etc/timezone:ro + - /etc/localtime:/etc/localtime:ro + environment: + cluster.name: es-swarm-cluster + node.name: "es-master-02" + discovery.seed_hosts: "es-master-01,es-master-02,es-master-03" + cluster.initial_master_nodes: "es-master-01,es-master-02,es-master-03" + node.master: "true" + node.voting_only: "false" + node.data: "true" + node.ingest: "false" + node.ml: "false" + xpack.ml.enabled: "true" + cluster.remote.connect: "false" + MAX_LOCKED_MEMORY: unlimited + bootstrap.memory_lock: "true" + ES_JAVA_OPTS: "-Xms512m -Xmx512m" + configs: *es-limits-conf + networks: *efk-network + deploy: + 
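# Note: `endpoint_mode: dnsrr` makes Swarm DNS return the task IPs directly instead of a load-balanced virtual IP; Elasticsearch transport-layer clustering is generally assumed to need the real peer addresses, which is likely also why the master services publish no host ports. +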
endpoint_mode: dnsrr + mode: replicated + replicas: 1 + + es-master-03: + image: docker.elastic.co/elasticsearch/elasticsearch:7.4.0 + hostname: es-master-03 + volumes: + - es-master-03-data:/usr/share/elasticsearch/data + - /etc/timezone:/etc/timezone:ro + - /etc/localtime:/etc/localtime:ro + environment: + cluster.name: es-swarm-cluster + node.name: "es-master-03" + discovery.seed_hosts: "es-master-01,es-master-02,es-master-03" + cluster.initial_master_nodes: "es-master-01,es-master-02,es-master-03" + node.master: "true" + node.voting_only: "false" + node.data: "true" + node.ingest: "false" + node.ml: "false" + xpack.ml.enabled: "true" + cluster.remote.connect: "false" + MAX_LOCKED_MEMORY: unlimited + bootstrap.memory_lock: "true" + ES_JAVA_OPTS: "-Xms512m -Xmx512m" + configs: *es-limits-conf + networks: *efk-network + deploy: + endpoint_mode: dnsrr + mode: replicated + replicas: 1 + + es-coordinating: + image: docker.elastic.co/elasticsearch/elasticsearch:7.4.0 + hostname: es-coordinating + ports: + - 9200:9200 + volumes: + - /etc/timezone:/etc/timezone:ro + - /etc/localtime:/etc/localtime:ro + environment: + ES_JAVA_OPTS: "-Xms1g -Xmx1g" + cluster.name: es-swarm-cluster + discovery.seed_hosts: "es-master-01,es-master-02,es-master-03" + node.name: "es-coordinating" + node.master: "false" + node.voting_only: "false" + node.data: "false" + node.ingest: "false" + node.ml: "false" + cluster.remote.connect: "false" + MAX_LOCKED_MEMORY: unlimited + bootstrap.memory_lock: "true" + configs: *es-limits-conf + networks: *efk-network + depends_on: + - es-master-01 + - es-master-02 + - es-master-03 + deploy: + mode: replicated + replicas: 1 + + kibana: + image: kibana:7.4.0 + hostname: kibana + ports: + - 5601:5601 + volumes: + - /etc/timezone:/etc/timezone:ro + - /etc/localtime:/etc/localtime:ro + environment: + ELASTICSEARCH_HOSTS: "http://es-coordinating:9200" + networks: *efk-network + depends_on: + - es-coordinating + deploy: + mode: replicated + replicas: 1 + + logstash: + image: docker.elastic.co/logstash/logstash:7.4.0 + ports: + - "5000:5000" + - "9600:9600" + configs: + - source: logstash_config + target: /usr/share/logstash/config/logstash.yml + - source: logstash_pipeline + target: /usr/share/logstash/pipeline/logstash.conf + environment: + LS_JAVA_OPTS: "-Xmx256m -Xms256m" + networks: *efk-network + depends_on: + - kibana + deploy: + mode: replicated + replicas: 1 + + #https://gist.github.com/dkurzaj/2a899de8cb5ae698919f0a9bbf7685f0 + zookeeper1: + image: wurstmeister/zookeeper:3.4.6 + volumes: + - zookeeper1-data:/data + - zookeeper1-logs:/datalog + ports: + - "2181:2181" + networks: *efk-network + environment: + - ZOO_SERVER_ID=1 + - ALLOW_ANONYMOUS_LOGIN=yes + - ZOO_SERVERS=zookeeper1:2888:3888,zookeeper2:2888:3888,zookeeper3:2888:3888 + + zookeeper2: + image: wurstmeister/zookeeper:3.4.6 + volumes: + - zookeeper2-data:/data + - zookeeper2-logs:/datalog + ports: + - "2182:2181" + networks: *efk-network + environment: + - ZOO_SERVER_ID=2 + - ALLOW_ANONYMOUS_LOGIN=yes + - ZOO_SERVERS=zookeeper1:2888:3888,zookeeper2:2888:3888,zookeeper3:2888:3888 + + zookeeper3: + image: wurstmeister/zookeeper:3.4.6 + volumes: + - zookeeper3-data:/data + - zookeeper3-logs:/datalog + ports: + - "2183:2181" + networks: *efk-network + environment: + - ZOO_SERVER_ID=3 + - ALLOW_ANONYMOUS_LOGIN=yes + - ZOO_SERVERS=zookeeper1:2888:3888,zookeeper2:2888:3888,zookeeper3:2888:3888 + + kafka1: + image: wurstmeister/kafka:2.12-2.3.0 + ports: + - "9095:9092" + volumes: + - kafka1-data:/kafka + - /var/run/docker.sock:/var/run/docker.sock
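+ # Note: the brokers reach ZooKeeper over the overlay network, where every zookeeperN container listens on 2181; the host-published ports 2182/2183 exist only for clients outside the swarm. + # Downstream, Logstash is expected to consume the `msgTopic` topic that apm-server produces to (see allinone/apm-server.yml). The referenced ./allinone/logstash.conf is not part of this change set; a minimal, hypothetical pipeline sketch: + # input { kafka { bootstrap_servers => "kafka1:9092,kafka2:9092,kafka3:9092" + #                 topics => ["msgTopic"] codec => "json" } } + # output { elasticsearch { hosts => ["es-coordinating:9200"] } } +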
environment: + KAFKA_BROKER_ID: 1 + KAFKA_ZOOKEEPER_CONNECT: zookeeper1:2181,zookeeper2:2181,zookeeper3:2181 + JMX_PORT: 9093 + KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=kafka1 -Dcom.sun.management.jmxremote.rmi.port=9093 + KAFKA_ADVERTISED_HOST_NAME: kafka1 + KAFKA_ADVERTISED_PORT: 9092 + KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka1:9092 + KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 2 + networks: *efk-network + depends_on: + - zookeeper1 + - zookeeper2 + - zookeeper3 + + kafka2: + image: wurstmeister/kafka:2.12-2.3.0 + ports: + - "9096:9092" + volumes: + - kafka2-data:/kafka + - /var/run/docker.sock:/var/run/docker.sock + environment: + KAFKA_BROKER_ID: 2 + KAFKA_ZOOKEEPER_CONNECT: zookeeper1:2181,zookeeper2:2181,zookeeper3:2181 + JMX_PORT: 9093 + KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=kafka2 -Dcom.sun.management.jmxremote.rmi.port=9093 + KAFKA_ADVERTISED_HOST_NAME: kafka2 + KAFKA_ADVERTISED_PORT: 9092 + KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka2:9092 + KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 2 + networks: *efk-network + depends_on: + - zookeeper1 + - zookeeper2 + - zookeeper3 + + kafka3: + image: wurstmeister/kafka:2.12-2.3.0 + ports: + - "9097:9092" + volumes: + - kafka3-data:/kafka + - /var/run/docker.sock:/var/run/docker.sock + environment: + KAFKA_BROKER_ID: 3 + KAFKA_ZOOKEEPER_CONNECT: zookeeper1:2181,zookeeper2:2181,zookeeper3:2181 + JMX_PORT: 9093 + KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=kafka3 -Dcom.sun.management.jmxremote.rmi.port=9093 + KAFKA_ADVERTISED_HOST_NAME: kafka3 + KAFKA_ADVERTISED_PORT: 9092 + KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka3:9092 + KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 2 + networks: *efk-network + depends_on: + - zookeeper1 + - zookeeper2 + - zookeeper3 + + # https://github.com/hleb-albau/kafka-manager-docker + kafka-manager: + image: hlebalbau/kafka-manager:2.0.0.2 + depends_on: + - kafka1 + - kafka2 + - kafka3 + environment: + ZK_HOSTS: zookeeper1:2181,zookeeper2:2181,zookeeper3:2181 + APPLICATION_SECRET: "random-secret" + KM_ARGS: -Djava.net.preferIPv4Stack=true + networks: *efk-network + ports: + - "9000:9000" + + apm-server: + image: docker.elastic.co/apm/apm-server:7.4.0 + ports: + - "8200:8200" + networks: *efk-network + depends_on: + - kafka-manager + configs: + - source: apmserver.conf + target: /usr/share/apm-server/apm-server.yml + + influxdb: + image: influxdb:1.7.10 + ports: + - 8086:8086 + - 8083:8083 + - 8089:8089/udp + networks: *efk-network + configs: + - source: influxdb.conf + target: /etc/influxdb/influxdb.conf + volumes: + - influxdb-data:/var/lib/influxdb + + chronograf: + image: chronograf:1.7.17 + ports: + - 8888:8888 + volumes: + - chronograf:/var/lib/chronograf + depends_on: + - influxdb + environment: + INFLUXDB_URL: http://influxdb:8086 + networks: *efk-network + + grafana: + image: grafana/grafana:6.7.4 + networks: *efk-network + user: "$UID:$GID" + ports: + - 3000:3000 + depends_on: + - chronograf + volumes: + - grafana:/var/lib/grafana + + scouter: + image: scouterapm/scouter-server:2.7.0 + environment: + - SC_SERVER_ID=SCOUTER-COLLECTOR + - NET_HTTP_SERVER_ENABLED=true + - 
NET_HTTP_API_SWAGGER_ENABLED=true + - NET_HTTP_API_ENABLED=true + - MGR_PURGE_PROFILE_KEEP_DAYS=2 + - MGR_PURGE_XLOG_KEEP_DAYS=5 + - MGR_PURGE_COUNTER_KEEP_DAYS=15 + - JAVA_OPT=-Xms1024m -Xmx1024m + volumes: + - sc-logs:/home/scouter-server/logs + - sc-data:/home/scouter-server/database + - sc-libs:/home/scouter-server/lib + - sc-conf:/home/scouter-server/conf + - sc-extweb:/home/scouter-server/extweb + networks: *efk-network + depends_on: + - influxdb + ports: + - 6180:6180 + - 6100:6100 + - 6100:6100/udp + +configs: + apmserver.conf: + file: ./allinone/apm-server.yml + influxdb.conf: + file: ./allinone/influxdb.conf + es-limits.conf: + file: ./allinone/es-limits.conf + logstash_config: + file: ./allinone/logstash.yml + logstash_pipeline: + file: ./allinone/logstash.conf + +volumes: + kafka3-data: + driver: local + driver_opts: + type: "nfs" + o: "addr=192.168.25.42,nolock,soft,rw,sync" + device: ":/volume1/design/allinone/kafka3" + + kafka2-data: + driver: local + driver_opts: + type: "nfs" + o: "addr=192.168.25.42,nolock,soft,rw,sync" + device: ":/volume1/design/allinone/kafka2" + + kafka1-data: + driver: local + driver_opts: + type: "nfs" + o: "addr=192.168.25.42,nolock,soft,rw,sync" + device: ":/volume1/design/allinone/kafka1" + + chronograf: + driver: local + driver_opts: + type: "nfs" + o: "addr=192.168.25.42,nolock,soft,rw,sync" + device: ":/volume1/design/allinone/chronograf" + + grafana: + driver: local + driver_opts: + type: "nfs" + o: "addr=192.168.25.42,nolock,soft,rw,sync" + device: ":/volume1/design/allinone/grafana" + + sc-extweb: + driver: local + driver_opts: + type: "nfs" + o: "addr=192.168.25.42,nolock,soft,rw,sync" + device: ":/volume1/design/allinone/scouter/sc-extweb" + + sc-conf: + driver: local + driver_opts: + type: "nfs" + o: "addr=192.168.25.42,nolock,soft,rw,sync" + device: ":/volume1/design/allinone/scouter/sc-conf" + + sc-libs: + driver: local + driver_opts: + type: "nfs" + o: "addr=192.168.25.42,nolock,soft,rw,sync" + device: ":/volume1/design/allinone/scouter/sc-libs" + + sc-data: + driver: local + driver_opts: + type: "nfs" + o: "addr=192.168.25.42,nolock,soft,rw,sync" + device: ":/volume1/design/allinone/scouter/sc-data" + + sc-logs: + driver: local + driver_opts: + type: "nfs" + o: "addr=192.168.25.42,nolock,soft,rw,sync" + device: ":/volume1/design/allinone/scouter/sc-logs" + + zookeeper3-logs: + driver: local + driver_opts: + type: "nfs" + o: "addr=192.168.25.42,nolock,soft,rw,sync" + device: ":/volume1/design/allinone/zookeeper3/logs" + + zookeeper3-data: + driver: local + driver_opts: + type: "nfs" + o: "addr=192.168.25.42,nolock,soft,rw,sync" + device: ":/volume1/design/allinone/zookeeper3/data" + + zookeeper2-logs: + driver: local + driver_opts: + type: "nfs" + o: "addr=192.168.25.42,nolock,soft,rw,sync" + device: ":/volume1/design/allinone/zookeeper2/logs" + + zookeeper2-data: + driver: local + driver_opts: + type: "nfs" + o: "addr=192.168.25.42,nolock,soft,rw,sync" + device: ":/volume1/design/allinone/zookeeper2/data" + + zookeeper1-logs: + driver: local + driver_opts: + type: "nfs" + o: "addr=192.168.25.42,nolock,soft,rw,sync" + device: ":/volume1/design/allinone/zookeeper1/logs" + + zookeeper1-data: + driver: local + driver_opts: + type: "nfs" + o: "addr=192.168.25.42,nolock,soft,rw,sync" + device: ":/volume1/design/allinone/zookeeper1/data" + + influxdb-data: + driver: local + driver_opts: + type: "nfs" + o: "addr=192.168.25.42,nolock,soft,rw,sync" + device: ":/volume1/design/allinone/influxdb/data" + + es-master-01-data: + driver: local + 
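# Like the other volumes in this section, the es-master data volumes are NFS mounts from the NAS at 192.168.25.42; the exported directories are assumed to already exist on the share. A CLI equivalent of one of these definitions, useful for testing a mount by hand: + #   docker volume create --driver local --opt type=nfs \ + #     --opt o=addr=192.168.25.42,nolock,soft,rw,sync \ + #     --opt device=:/volume1/design/allinone/es-master-01/data es-master-01-data + # Note that soft,nolock favour availability over strict consistency; hard mounts are usually considered safer for Elasticsearch data directories. +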
driver_opts: + type: "nfs" + o: "addr=192.168.25.42,nolock,soft,rw,sync" + device: ":/volume1/design/allinone/es-master-01/data" + + es-master-02-data: + driver: local + driver_opts: + type: "nfs" + o: "addr=192.168.25.42,nolock,soft,rw,sync" + device: ":/volume1/design/allinone/es-master-02/data" + + es-master-03-data: + driver: local + driver_opts: + type: "nfs" + o: "addr=192.168.25.42,nolock,soft,rw,sync" + device: ":/volume1/design/allinone/es-master-03/data" + +networks: + efk: + attachable: true Index: web-module/script/yml/monitoring/allinone/apm-server.yml =================================================================== diff -u --- web-module/script/yml/monitoring/allinone/apm-server.yml (revision 0) +++ web-module/script/yml/monitoring/allinone/apm-server.yml (revision 3e2c8b7634389a7bc0ddee58812b44d1e711aecc) @@ -0,0 +1,896 @@ +################### APM Server Configuration ######################### + +############################# APM Server ###################################### + +apm-server: + # Defines the host and port the server is listening on. Use "unix:/path/to.sock" to listen on a unix domain socket. + host: "0.0.0.0:8200" + + # Maximum permitted size in bytes of a request's header accepted by the server to be processed. + #max_header_size: 1048576 + + # Maximum permitted duration for reading an entire request. + #read_timeout: 30s + + # Maximum permitted duration for writing a response. + #write_timeout: 30s + + # Maximum duration in seconds before releasing resources when shutting down the server. + #shutdown_timeout: 5s + + #-- v1 Intake API (deprecated) + + # Maximum permitted size in bytes of an unzipped request accepted by the server to be processed. + #max_unzipped_size: 31457280 + + # Maximum duration a request will be queued before being read. + #max_request_queue_time: 2s + + # Maximum number of requests permitted to be sent to the server concurrently. + #concurrent_requests: 5 + + #-- v2 Intake API + + # Maximum allowed size in bytes of a single event + #max_event_size: 307200 + + #-- + + # Maximum number of new connections to accept simultaneously (0 means unlimited) + # max_connections: 0 + + # Authorization token to be checked. If a token is set here the agents must + # send their token in the following format: Authorization: Bearer <secret-token>. + # It is recommended to use an authorization token in combination with SSL enabled, + # and save the token in the beats keystore. + #secret_token: + + # Enable secure communication between APM agents and the server. By default ssl is disabled. + #ssl: + #enabled: false + + # Configure a list of root certificate authorities for verifying client certificates + #certificate_authorities: [] + + # Path to file containing the certificate for server authentication + # Needs to be configured when ssl is enabled + #certificate: '' + + # Path to file containing server certificate key + # Needs to be configured when ssl is enabled + #key: '' + + # Optional configuration options for ssl communication + + # Passphrase for decrypting the Certificate Key. + # It is recommended to use the provided keystore instead of entering the passphrase in plain text. + #key_passphrase: ''
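+ + # For reference: with secret_token and ssl left unset above, agents attach over plain HTTP. A hedged example of pointing the Elastic APM Java agent at this server (jar path and service name are placeholders): + #   java -javaagent:/opt/elastic-apm-agent.jar \ + #        -Delastic.apm.service_name=my-service \ + #        -Delastic.apm.server_urls=http://apm-server:8200 \ + #        -jar app.jar +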
# List of supported/valid protocol versions. By default TLS versions 1.0 up to 1.2 are enabled. + #supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + + # Configure cipher suites to be used for SSL connections + #cipher_suites: [] + + # Configure curve types for ECDHE based cipher suites + #curve_types: [] + + # Configure which type of client authentication is supported. + # Options are `none`, `optional`, and `required`. Default is `optional`. + #client_authentication: "optional" + + + #rum: + # To enable real user monitoring (RUM) support set this to true. + #enabled: false + + #-- v1 RUM endpoint (deprecated) + + # Rate limit per second and IP address for requests sent to the RUM endpoint. + #rate_limit: 10 + + #-- v2 RUM endpoint + + #event_rate: + + # Defines the maximum amount of events allowed to be sent to the APM Server v2 RUM + # endpoint per ip per second. Defaults to 300. + #limit: 300 + + # An LRU cache is used to keep a rate limit per IP for the most recently seen IPs. + # This setting defines the number of unique IPs that can be tracked in the cache. + # Sites with many concurrent clients should consider increasing this limit. Defaults to 1000. + #lru_size: 1000 + + #-- General RUM settings + + # Comma separated list of permitted origins for real user monitoring. + # User-agents will send an origin header that will be validated against this list. + # An origin is made of a protocol scheme, host and port, without the url path. + # Allowed origins in this setting can have * to match anything (e.g.: http://*.example.com) + # If an item in the list is a single '*', everything will be allowed + #allow_origins : ['*'] + + # Regexp to be matched against a stacktrace frame's `file_name` and `abs_path` attributes. + # If the regexp matches, the stacktrace frame is considered to be a library frame. + #library_pattern: "node_modules|bower_components|~" + + # Regexp to be matched against a stacktrace frame's `file_name`. + # If the regexp matches, the stacktrace frame is not used for calculating error groups. + # The default pattern excludes stacktrace frames that have a filename starting with '/webpack' + #exclude_from_grouping: "^/webpack" + + # If a source map has previously been uploaded, source mapping is automatically applied + # to all error and transaction documents sent to the RUM endpoint. + #source_mapping: + + # Source maps are always fetched from Elasticsearch, by default using the output.elasticsearch configuration. + # A different instance must be configured when using any other output. + # This setting only affects sourcemap reads - the output determines where sourcemaps are written. + #elasticsearch: + # Array of hosts to connect to. + # Scheme and port can be left out and will be set to the default (http and 9200) + # In case you specify an additional path, the scheme is required: http://localhost:9200/path + # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200 + # hosts: ["localhost:9200"] + + # Optional protocol and basic auth credentials. + #protocol: "https" + #username: "elastic" + #password: "changeme" + + # The `cache.expiration` determines how long a source map should be cached before fetching it again from Elasticsearch. + # Note that values configured without a time unit will be interpreted as seconds. + #cache: + #expiration: 5m + + # Source maps are stored in a separate index. + # If the default index pattern for source maps at 'outputs.elasticsearch.indices' + # is changed, a matching index pattern needs to be specified here.
+ #index_pattern: "apm-*-sourcemap*" + + + # If set to true, APM Server augments data received by the agent with the original IP of the backend server, + # or the IP and User Agent of the real user (RUM requests). It defaults to true. + #capture_personal_data: true + + # golang expvar support - https://golang.org/pkg/expvar/ + #expvar: + # Set to true to expose expvar + #enabled: false + + # URL to expose expvar + #url: "/debug/vars" + + # Instrumentation support for the server's HTTP endpoints and event publisher. + #instrumentation: + # Set to true to enable instrumentation of the APM server itself. + #enabled: false + # Environment in which the APM Server is running (e.g. staging, production) + #environment: "" + # Remote host to report instrumentation results to. Single entry permitted until + # https://github.com/elastic/apm-agent-go/issues/200 is resolved. + #hosts: + # - http://remote-apm-server:8200 + # Remote apm-servers' secret_token + #secret_token: + + # Metrics endpoint + #metrics: + # Set to false to disable the metrics endpoint + #enabled: true + + # A pipeline is a definition of processors applied to documents when writing them to Elasticsearch. + # Using pipelines involves two steps: + # (1) registering a pipeline + # (2) applying a pipeline during data ingestion (see `output.elasticsearch.pipelines`) + # + # You can manually register pipelines, or use this configuration option to ensure + # pipelines are loaded and registered at the configured Elasticsearch instances. + # Automatic pipeline registration requires + # * `output.elasticsearch` to be enabled and configured. + # * having the required Elasticsearch Processor Plugins installed. + # APM Server default pipelines require you to have the `Ingest User Agent Plugin` installed. + # Find the default pipeline configuration at `ingest/pipeline/definition.json`. + # + #register.ingest.pipeline: + # Registers pipeline definitions in Elasticsearch on APM Server startup. Defaults to false. + #enabled: false + + # Overwrites existing pipeline definitions in Elasticsearch. Defaults to true. + #overwrite: true + +#================================ General ====================================== + +# Internal queue configuration for buffering events to be published. +#queue: + # Queue type by name (default 'mem') + # The memory queue will present all available events (up to the outputs + # bulk_max_size) to the output, the moment the output is ready to serve + # another batch of events. + #mem: + # Max number of events the queue can buffer. + #events: 4096 + + # Hints the minimum number of events stored in the queue, + # before providing a batch of events to the outputs. + # A value of 0 (the default) ensures events are immediately available + # to be sent to the outputs. + #flush.min_events: 2048 + + # Maximum duration after which events are available to the outputs, + # if the number of events stored in the queue is < min_flush_events. + #flush.timeout: 1s + +# Sets the maximum number of CPUs that can be executing simultaneously. The +# default is the number of logical CPUs available in the system. +#max_procs: + + +#============================== Template ===================================== + +# A template is used to set the mapping in Elasticsearch +# By default template loading is enabled and the template is loaded. +# These settings can be adjusted to load your own template or overwrite existing ones. + +# Set to false to disable template loading. +#setup.template.enabled: true + +# Template name. 
By default the template name is "apm-%{[beat.version]}" +# The template name and pattern have to be set in case the elasticsearch index pattern is modified. +#setup.template.name: "apm-%{[beat.version]}" + +# Template pattern. By default the template pattern is "apm-%{[beat.version]}-*" to apply to the default index settings. +# The first part is the version of the beat and then -* is used to match all daily indices. +# The template name and pattern have to be set in case the elasticsearch index pattern is modified. +#setup.template.pattern: "apm-%{[beat.version]}-*" + +# Path to fields.yml file to generate the template +#setup.template.fields: "${path.config}/fields.yml" + +# Overwrite existing template +#setup.template.overwrite: false + +# Elasticsearch template settings +#setup.template.settings: + + # A dictionary of settings to place into the settings.index dictionary + # of the Elasticsearch template. For more details, please check + # https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping.html + #index: + #number_of_shards: 1 + #codec: best_compression + #number_of_routing_shards: 30 + #mapping.total_fields.limit: 2000 + + # A dictionary of settings for the _source field. For more details, please check + # https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-source-field.html + #_source: + #enabled: false + + +#============================== Deprecated: Dashboards ===================================== +# +# Deprecated: Loading dashboards from the APM Server into Kibana is deprecated from 6.4 on. +# We suggest using the Kibana UI to load APM Server dashboards and index pattern instead. +# +# These settings control loading the sample dashboards to the Kibana index. Loading +# the dashboards is disabled by default and can be enabled either by setting the +# options here, or by using the `-setup` CLI flag or the `setup` command. +#setup.dashboards.enabled: false + +# The directory from where to read the dashboards. The default is the `kibana` +# folder in the home path. +#setup.dashboards.directory: ${path.home}/kibana + +# The URL from where to download the dashboards archive. It is used instead of +# the directory if it has a value. +#setup.dashboards.url: + +# The file archive (zip file) from where to read the dashboards. It is used instead +# of the directory when it has a value. +#setup.dashboards.file: + +# The name of the Kibana index to use for setting the configuration. Default is ".kibana" +#setup.dashboards.kibana_index: .kibana + +# The Elasticsearch index name. This overwrites the index name defined in the +# dashboards and index pattern. Example: testbeat-* +# The dashboards.index needs to be changed in case the elasticsearch index pattern is modified. +#setup.dashboards.index: + +# Always use the Kibana API for loading the dashboards instead of autodetecting +# how to install the dashboards by first querying Elasticsearch. +#setup.dashboards.always_kibana: false + +# If true and Kibana is not reachable at the time when dashboards are loaded, +# it will retry to reconnect to Kibana instead of exiting with an error. +#setup.dashboards.retry.enabled: false + +# Duration interval between Kibana connection retries. +#setup.dashboards.retry.interval: 1s + +# Maximum number of retries before exiting with an error, 0 for unlimited retrying. 
+#setup.dashboards.retry.maximum: 0 + + +#============================== Deprecated: Kibana ===================================== + +# Deprecated: Starting with APM Server version 6.4, loading dashboards and index pattern +# from the APM Server into Kibana is deprecated. +# We suggest using the Kibana UI to load APM Server dashboards and index pattern instead. +# +# Setting up a Kibana endpoint is not necessary when loading the index pattern and dashboards via the UI. + +#setup.kibana: + + # Kibana Host + # Scheme and port can be left out and will be set to the default (http and 5601) + # In case you specify an additional path, the scheme is required: http://localhost:5601/path + # IPv6 addresses should always be defined as: https://[2001:db8::1]:5601 + #host: "localhost:5601" + + # Optional protocol and basic auth credentials. + #protocol: "https" + #username: "elastic" + #password: "changeme" + + # Optional HTTP Path + #path: "" + + # Use SSL settings for HTTPS. Default is false. + #ssl.enabled: false + + # Configure SSL verification mode. If `none` is configured, all server hosts + # and certificates will be accepted. In this mode, SSL based connections are + # susceptible to man-in-the-middle attacks. Use only for testing. Default is + # `full`. + #ssl.verification_mode: full + + # List of supported/valid TLS versions. By default all TLS versions 1.0 up to + # 1.2 are enabled. + #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + + # SSL configuration. By default is off. + # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client Certificate Key + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the Certificate Key. + # It is recommended to use the provided keystore instead of entering the passphrase in plain text. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE based cipher suites + #ssl.curve_types: [] + + +#============================= Elastic Cloud ================================== + +# These settings simplify using APM Server with the Elastic Cloud (https://cloud.elastic.co/). + +# The cloud.id setting overwrites the `output.elasticsearch.hosts` and +# [deprecated] `setup.kibana.host` options. +# You can find the `cloud.id` in the Elastic Cloud web UI. +#cloud.id: + +# The cloud.auth setting overwrites the `output.elasticsearch.username` and +# `output.elasticsearch.password` settings. The format is `<user>:<pass>`. +#cloud.auth: + +#================================ Outputs ===================================== + +# Configure what output to use when sending the data collected by the beat. + +#-------------------------- Elasticsearch output ------------------------------ +#output.elasticsearch: + # Array of hosts to connect to. + # Scheme and port can be left out and will be set to the default (http and 9200) + # In case you specify an additional path, the scheme is required: http://localhost:9200/path + # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200 + #hosts: ["localhost:9200"] + + # Boolean flag to enable or disable the output module. + #enabled: true + + # Set gzip compression level. + #compression_level: 0
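+ + # In this stack the Elasticsearch output stays commented out: apm-server publishes to Kafka (see output.kafka below) and Logstash is expected to do the indexing into Elasticsearch. +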
+ # Optional protocol and basic auth credentials. + #protocol: "https" + #username: "elastic" + #password: "changeme" + + # Dictionary of HTTP parameters to pass within the url with index operations. + #parameters: + #param1: value1 + #param2: value2 + + # Number of workers per Elasticsearch host. + #worker: 1 + + # By using the configuration below, apm documents are stored to separate indices, + # depending on their `processor.event`: + # - error + # - transaction + # - span + # - sourcemap + # + # The indices are all prefixed with `apm-%{[beat.version]}`. + # To allow managing indices based on their age, all indices (except for sourcemaps) + # end with the information of the day they got indexed. + # e.g. "apm-6.3.0-transaction-2018.03.20" + # + # Be aware that you can only specify one Elasticsearch template and one Kibana index pattern. + # If you modify the index patterns, you must also update those configurations accordingly, + # as they need to be aligned: + # * `setup.template.name` + # * `setup.template.pattern` + # * `setup.dashboards.index` + #index: "apm-%{[beat.version]}-%{+yyyy.MM.dd}" + #indices: + # - index: "apm-%{[beat.version]}-sourcemap" + # when.contains: + # processor.event: "sourcemap" + + # - index: "apm-%{[beat.version]}-error-%{+yyyy.MM.dd}" + # when.contains: + # processor.event: "error" + + # - index: "apm-%{[beat.version]}-transaction-%{+yyyy.MM.dd}" + # when.contains: + # processor.event: "transaction" + + # - index: "apm-%{[beat.version]}-span-%{+yyyy.MM.dd}" + # when.contains: + # processor.event: "span" + + # - index: "apm-%{[beat.version]}-metric-%{+yyyy.MM.dd}" + # when.contains: + # processor.event: "metric" + + # - index: "apm-%{[beat.version]}-onboarding-%{+yyyy.MM.dd}" + # when.contains: + # processor.event: "onboarding" + + # A pipeline is a definition of processors applied to documents when writing them to Elasticsearch. + # APM Server comes with a default pipeline definition, located at `ingest/pipeline/definition.json`. + # Pipelines are disabled by default. To make use of them you have to: + # (1) ensure pipelines are registered in Elasticsearch, see `apm-server.register.ingest.pipeline` + # (2) enable the following: + #pipelines: + #- pipeline: "apm_user_agent" + + # Optional HTTP Path + #path: "/elasticsearch" + + # Custom HTTP headers to add to each request + #headers: + # X-My-Header: Contents of the header + + # Proxy server url + #proxy_url: http://proxy:3128 + + # The number of times a particular Elasticsearch index operation is attempted. If + # the indexing operation doesn't succeed after this many retries, the events are + # dropped. The default is 3. + #max_retries: 3 + + # The maximum number of events to bulk in a single Elasticsearch bulk API index request. + # The default is 50. + #bulk_max_size: 50 + + # The number of seconds to wait before trying to reconnect to Elasticsearch + # after a network error. After waiting backoff.init seconds, apm-server + # tries to reconnect. If the attempt fails, the backoff timer is increased + # exponentially up to backoff.max. After a successful connection, the backoff + # timer is reset. The default is 1s. + #backoff.init: 1s + + # The maximum number of seconds to wait before attempting to connect to + # Elasticsearch after a network error. The default is 60s. + #backoff.max: 60s + + # Configure http request timeout before failing a request to Elasticsearch. + #timeout: 90 + + # Use SSL settings for HTTPS. Default is false. + #ssl.enabled: false + + # Configure SSL verification mode. If `none` is configured, all server hosts + # and certificates will be accepted. 
In this mode, SSL based connections are + # susceptible to man-in-the-middle attacks. Use only for testing. Default is + # `full`. + #ssl.verification_mode: full + + # List of supported/valid TLS versions. By default all TLS versions 1.0 up to + # 1.2 are enabled. + #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + + # SSL configuration. By default is off. + # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client Certificate Key + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the Certificate Key. + # It is recommended to use the provided keystore instead of entering the passphrase in plain text. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE based cipher suites + #ssl.curve_types: [] + + # Configure what types of renegotiation are supported. Valid options are + # never, once, and freely. Default is never. + #ssl.renegotiation: never + +#----------------------------- Console output --------------------------------- +#output.console: + # Boolean flag to enable or disable the output module. + #enabled: false + + # Pretty print json event + #pretty: false + +#----------------------------- Logstash output --------------------------------- +#output.logstash: + # Boolean flag to enable or disable the output module. + #enabled: false + + # The Logstash hosts + #hosts: ["localhost:5044"] + + # Number of workers per Logstash host. + #worker: 1 + + # Set gzip compression level. + #compression_level: 3 + + # Configure escaping html symbols in strings. + #escape_html: true + + # Optional maximum time to live for a connection to Logstash, after which the + # connection will be re-established. A value of `0s` (the default) will + # disable this feature. + # + # Not yet supported for async connections (i.e. with the "pipelining" option set) + #ttl: 30s + + # Optionally load-balance events between the Logstash hosts. Default is false. + #loadbalance: false + + # Number of batches to be sent asynchronously to Logstash while processing + # new batches. + #pipelining: 2 + + # If enabled, only a subset of events in a batch of events is transferred per + # group. The number of events to be sent increases up to `bulk_max_size` + # if no error is encountered. + #slow_start: false + + # The number of seconds to wait before trying to reconnect to Logstash + # after a network error. After waiting backoff.init seconds, apm-server + # tries to reconnect. If the attempt fails, the backoff timer is increased + # exponentially up to backoff.max. After a successful connection, the backoff + # timer is reset. The default is 1s. + #backoff.init: 1s + + # The maximum number of seconds to wait before attempting to connect to + # Logstash after a network error. The default is 60s. + #backoff.max: 60s + + # Optional index name. The default index name is set to apm + # in all lowercase. + #index: 'apm' + + # SOCKS5 proxy server URL + #proxy_url: socks5://user:password@socks5-server:2233 + + # Resolve names locally when using a proxy server. Defaults to false. + #proxy_use_local_resolver: false + + # Enable SSL support. SSL is automatically enabled, if any SSL setting is set. + #ssl.enabled: true + + # Configure SSL verification mode. If `none` is configured, all server hosts + # and certificates will be accepted. 
In this mode, SSL based connections are + # susceptible to man-in-the-middle attacks. Use only for testing. Default is + # `full`. + #ssl.verification_mode: full + + # List of supported/valid TLS versions. By default all TLS versions 1.0 up to + # 1.2 are enabled. + #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + + # Optional SSL configuration options. SSL is off by default. + # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client Certificate Key + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the Certificate Key. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE based cipher suites + #ssl.curve_types: [] + + # Configure what types of renegotiation are supported. Valid options are + # never, once, and freely. Default is never. + #ssl.renegotiation: never + +#------------------------------- Kafka output ---------------------------------- +output.kafka: + # Boolean flag to enable or disable the output module. + enabled: true + + # The list of Kafka broker addresses from where to fetch the cluster metadata. + # The cluster metadata contain the actual Kafka brokers events are published + # to. + hosts: ["kafka1:9092","kafka2:9092","kafka3:9092"] + + # The Kafka topic used for produced events. The setting can be a format string + # using any event field. To set the topic from document type use `%{[type]}`. + topic: msgTopic + + # The Kafka event key setting. Use format string to create unique event key. + # By default no event key will be generated. + #key: '' + + # The Kafka event partitioning strategy. Default hashing strategy is `hash` + # using the `output.kafka.key` setting or randomly distributes events if + # `output.kafka.key` is not configured. + #partition.hash: + # If enabled, events will only be published to partitions with reachable + # leaders. Default is false. + #reachable_only: false + + # Configure alternative event field names used to compute the hash value. + # If empty `output.kafka.key` setting will be used. + # Default value is empty list. + #hash: [] + + # Authentication details. Password is required if username is set. + #username: '' + #password: '' + + # Kafka version libbeat is assumed to run against. Defaults to "1.0.0". + #version: '1.0.0' + + # Configure JSON encoding + #codec.json: + # Pretty print json event + #pretty: false + + # Configure escaping html symbols in strings. + #escape_html: true + + # Metadata update configuration. The metadata contains the leader information + # used to decide which broker to use when publishing. + #metadata: + # Max metadata request retry attempts when cluster is in middle of leader + # election. Defaults to 3 retries. + #retry.max: 3 + + # Waiting time between retries during leader elections. Default is 250ms. + #retry.backoff: 250ms + + # Refresh metadata interval. Defaults to every 10 minutes. + #refresh_frequency: 10m + + # The number of concurrent load-balanced Kafka output workers. + #worker: 1 + + # The number of times to retry publishing an event after a publishing failure. + # After the specified number of retries, the events are typically dropped. + # Some Beats, such as Filebeat, ignore the max_retries setting and retry until + # all events are published. 
Set max_retries to a value less than 0 to retry + # until all events are published. The default is 3. + #max_retries: 3 + + # The maximum number of events to bulk in a single Kafka request. The default + # is 2048. + #bulk_max_size: 2048 + + # The number of seconds to wait for responses from the Kafka brokers before + # timing out. The default is 30s. + #timeout: 30s + + # The maximum duration a broker will wait for number of required ACKs. The + # default is 10s. + #broker_timeout: 10s + + # The number of messages buffered for each Kafka broker. The default is 256. + #channel_buffer_size: 256 + + # The keep-alive period for an active network connection. If 0s, keep-alives + # are disabled. The default is 0 seconds. + #keep_alive: 0 + + # Sets the output compression codec. Must be one of none, snappy and gzip. The + # default is gzip. + #compression: gzip + + # The maximum permitted size of JSON-encoded messages. Bigger messages will be + # dropped. The default value is 1000000 (bytes). This value should be equal to + # or less than the broker's message.max.bytes. + #max_message_bytes: 1000000 + + # The ACK reliability level required from broker. 0=no response, 1=wait for + # local commit, -1=wait for all replicas to commit. The default is 1. Note: + # If set to 0, no ACKs are returned by Kafka. Messages might be lost silently + # on error. + #required_acks: 1 + + # The configurable ClientID used for logging, debugging, and auditing + # purposes. The default is "beats". + #client_id: beats + + # Enable SSL support. SSL is automatically enabled, if any SSL setting is set. + ssl.enabled: false + + # Optional SSL configuration options. SSL is off by default. + # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Configure SSL verification mode. If `none` is configured, all server hosts + # and certificates will be accepted. In this mode, SSL based connections are + # susceptible to man-in-the-middle attacks. Use only for testing. Default is + # `full`. + #ssl.verification_mode: full + + # List of supported/valid TLS versions. By default all TLS versions 1.0 up to + # 1.2 are enabled. + #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client Certificate Key + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the Certificate Key. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE based cipher suites + #ssl.curve_types: [] + + # Configure what types of renegotiation are supported. Valid options are + # never, once, and freely. Default is never. + #ssl.renegotiation: never + +#================================= Paths ====================================== + +# The home path for the apm-server installation. This is the default base path +# for all other path settings and for miscellaneous files that come with the +# distribution (for example, the sample dashboards). +# If not set by a CLI flag or in the configuration file, the default for the +# home path is the location of the binary. +#path.home: + +# The configuration path for the apm-server installation. This is the default +# base path for configuration files, including the main YAML configuration file +# and the Elasticsearch template file. 
If not set by a CLI flag or in the +# configuration file, the default for the configuration path is the home path. +#path.config: ${path.home} + +# The data path for the apm-server installation. This is the default base path +# for all the files in which apm-server needs to store its data. If not set by a +# CLI flag or in the configuration file, the default for the data path is a data +# subdirectory inside the home path. +#path.data: ${path.home}/data + +# The logs path for an apm-server installation. This is the default location for +# the Beat's log files. If not set by a CLI flag or in the configuration file, +# the default for the logs path is a logs subdirectory inside the home path. +#path.logs: ${path.home}/logs + + +#================================ Logging ====================================== +# +# There are three options for the log output: syslog, file, stderr. +# On Windows systems, log files are sent to the file output by default; +# on all other systems, to syslog by default. + +# Sets log level. The default log level is info. +# Available log levels are: error, warning, info, debug +#logging.level: info + +# Enable debug output for selected components. To enable all selectors use ["*"] +# Other available selectors are "beat", "publish", "service" +# Multiple selectors can be chained. +#logging.selectors: [ ] + +# Send all logging output to syslog. The default is false. +#logging.to_syslog: true + +# If enabled, apm-server periodically logs its internal metrics that have changed +# in the last period. For each metric that changed, the delta from the value at +# the beginning of the period is logged. Also, the total values for +# all non-zero internal metrics are logged on shutdown. The default is true. +#logging.metrics.enabled: false + +# The period after which to log the internal metrics. The default is 30s. +#logging.metrics.period: 30s + +# Logging to rotating files. Set logging.to_files to false to disable logging to +# files. +#logging.to_files: true +#logging.files: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). + #path: /var/log/apm-server + + # The name of the files where the logs are written to. + #name: apm-server + + # Configure log file size limit. If limit is reached, log file will be + # automatically rotated + #rotateeverybytes: 10485760 # = 10MB + + # Number of rotated log files to keep. Oldest files will be deleted first. + #keepfiles: 7 + + # The permissions mask to apply when rotating log files. The default value is 0600. + # Must be a valid Unix-style file permissions mask expressed in octal notation. + #permissions: 0600 + +# Set to true to log messages in json format. +#logging.json: false + + +#================================ HTTP Endpoint ====================================== +# +# Each beat can expose internal metrics through an HTTP endpoint. For security +# reasons the endpoint is disabled by default. This feature is currently experimental. +# Stats can be accessed at http://localhost:5066/stats . For pretty JSON output +# append ?pretty to the URL. + +# Defines if the HTTP endpoint is enabled. +#http.enabled: false + +# The HTTP endpoint will bind to this hostname or IP address. It is recommended to use only localhost. +#http.host: localhost + +# Port on which the HTTP endpoint will bind. Default is 5066. +#http.port: 5066
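+ + # A quick sanity check once http.enabled is set to true (endpoint documented above): + #   curl -s 'http://localhost:5066/stats?pretty' +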
+#============================== Xpack Monitoring =============================== +# APM server can export internal metrics to a central Elasticsearch monitoring +# cluster. This requires xpack monitoring to be enabled in Elasticsearch. The +# reporting is disabled by default. + +# Set to true to enable the monitoring reporter. +#xpack.monitoring.enabled: false + +# Uncomment to send the metrics to Elasticsearch. Most settings from the +# Elasticsearch output are accepted here as well. Any setting that is not set is +# automatically inherited from the Elasticsearch output configuration, so if you +# have the Elasticsearch output configured, you can simply uncomment the +# following line. +#xpack.monitoring.elasticsearch: +# username: "apm_system" +# password: "" Index: web-module/script/yml/monitoring/allinone/es-limits.conf =================================================================== diff -u --- web-module/script/yml/monitoring/allinone/es-limits.conf (revision 0) +++ web-module/script/yml/monitoring/allinone/es-limits.conf (revision 3e2c8b7634389a7bc0ddee58812b44d1e711aecc) @@ -0,0 +1,4 @@ +elasticsearch hard nofile 65536 +elasticsearch soft nofile 65536 +elasticsearch hard memlock unlimited +elasticsearch soft memlock unlimited Index: web-module/script/yml/monitoring/allinone/influxdb.conf =================================================================== diff -u --- web-module/script/yml/monitoring/allinone/influxdb.conf (revision 0) +++ web-module/script/yml/monitoring/allinone/influxdb.conf (revision 3e2c8b7634389a7bc0ddee58812b44d1e711aecc) @@ -0,0 +1,586 @@ +### Welcome to the InfluxDB configuration file. + +# The values in this file override the default values used by the system if +# a config option is not specified. The commented out lines are the configuration +# field and the default value used. Uncommenting a line and changing the value +# will change the value used at runtime when the process is restarted. + +# Once every 24 hours InfluxDB will report usage data to usage.influxdata.com +# The data includes a random ID, os, arch, version, the number of series and other +# usage data. No data from user databases is ever transmitted. +# Change this option to true to disable reporting. +# reporting-disabled = false + +# Bind address to use for the RPC service for backup and restore. +# bind-address = "127.0.0.1:8088" + +### +### [meta] +### +### Controls the parameters for the Raft consensus group that stores metadata +### about the InfluxDB cluster. +### + +[meta] + # Where the metadata/raft database is stored + dir = "/var/lib/influxdb/meta" + + # Automatically create a default retention policy when creating a database. + # retention-autocreate = true + + # If log messages are printed for the meta service + # logging-enabled = true + +### +### [data] +### +### Controls where the actual shard data for InfluxDB lives and how it is +### flushed from the WAL. "dir" may need to be changed to a suitable place +### for your system, but the WAL settings are an advanced configuration. The +### defaults should work for most systems. +### + +[data] + # The directory where the TSM storage engine stores TSM files. + dir = "/var/lib/influxdb/data" + + # The directory where the TSM storage engine stores WAL files. + wal-dir = "/var/lib/influxdb/wal" + + # The amount of time that a write will wait before fsyncing. A duration + # greater than 0 can be used to batch up multiple fsync calls. This is useful for slower + # disks or when WAL write contention is seen. 
A value of 0s fsyncs every write to the WAL. + # Values in the range of 0-100ms are recommended for non-SSD disks. + # wal-fsync-delay = "0s" + + + # The type of shard index to use for new shards. The default is an in-memory index that is + # recreated at startup. A value of "tsi1" will use a disk based index that supports higher + # cardinality datasets. + # index-version = "inmem" + + # Trace logging provides more verbose output around the tsm engine. Turning + # this on can provide more useful output for debugging tsm engine issues. + # trace-logging-enabled = false + + # Whether queries should be logged before execution. Very useful for troubleshooting, but will + # log any sensitive data contained within a query. + # query-log-enabled = true + + # Validates incoming writes to ensure keys only have valid unicode characters. + # This setting will incur a small overhead because every key must be checked. + # validate-keys = false + + # Settings for the TSM engine + + # CacheMaxMemorySize is the maximum size a shard's cache can + # reach before it starts rejecting writes. + # Valid size suffixes are k, m, or g (case insensitive, 1024 = 1k). + # Values without a size suffix are in bytes. + # cache-max-memory-size = "1g" + + # CacheSnapshotMemorySize is the size at which the engine will + # snapshot the cache and write it to a TSM file, freeing up memory + # Valid size suffixes are k, m, or g (case insensitive, 1024 = 1k). + # Values without a size suffix are in bytes. + # cache-snapshot-memory-size = "25m" + + # CacheSnapshotWriteColdDuration is the length of time at + # which the engine will snapshot the cache and write it to + # a new TSM file if the shard hasn't received writes or deletes + # cache-snapshot-write-cold-duration = "10m" + + # CompactFullWriteColdDuration is the duration at which the engine + # will compact all TSM files in a shard if it hasn't received a + # write or delete + # compact-full-write-cold-duration = "4h" + + # The maximum number of concurrent full and level compactions that can run at one time. A + # value of 0 results in 50% of runtime.GOMAXPROCS(0) used at runtime. Any number greater + # than 0 limits compactions to that value. This setting does not apply + # to cache snapshotting. + # max-concurrent-compactions = 0 + + # CompactThroughput is the rate limit in bytes per second that we + # will allow TSM compactions to write to disk. Note that short bursts are allowed + # to happen at a possibly larger value, set by CompactThroughputBurst + # compact-throughput = "48m" + + # CompactThroughputBurst is the rate limit in bytes per second that we + # will allow TSM compactions to write to disk. + # compact-throughput-burst = "48m" + + # If true, then the mmap advise value MADV_WILLNEED will be provided to the kernel with respect to + # TSM files. This setting has been found to be problematic on some kernels, and defaults to off. + # It might help users who have slow disks in some cases. + # tsm-use-madv-willneed = false + + # Settings for the inmem index + + # The maximum series allowed per database before writes are dropped. This limit can prevent + # high cardinality issues at the database level. This limit can be disabled by setting it to + # 0. + # max-series-per-database = 1000000 + + # The maximum number of tag values per tag that are allowed before writes are dropped. This limit + # can prevent high cardinality tag values from being written to a measurement. This limit can be + # disabled by setting it to 0. 
+ # max-values-per-tag = 100000 + + # Settings for the tsi1 index + + # The threshold, in bytes, when an index write-ahead log file will compact + # into an index file. Lower sizes will cause log files to be compacted more + # quickly and result in lower heap usage at the expense of write throughput. + # Higher sizes will be compacted less frequently, store more series in-memory, + # and provide higher write throughput. + # Valid size suffixes are k, m, or g (case insensitive, 1024 = 1k). + # Values without a size suffix are in bytes. + # max-index-log-file-size = "1m" + + # The size of the internal cache used in the TSI index to store previously + # calculated series results. Cached results will be returned quickly from the cache rather + # than needing to be recalculated when a subsequent query with a matching tag key/value + # predicate is executed. Setting this value to 0 will disable the cache, which may + # lead to query performance issues. + # This value should only be increased if it is known that the set of regularly used + # tag key/value predicates across all measurements for a database is larger than 100. An + # increase in cache size may lead to an increase in heap usage. + series-id-set-cache-size = 100 + +### +### [coordinator] +### +### Controls the clustering service configuration. +### + +[coordinator] + # The default time a write request will wait until a "timeout" error is returned to the caller. + # write-timeout = "10s" + + # The maximum number of concurrent queries allowed to be executing at one time. If a query is + # executed and exceeds this limit, an error is returned to the caller. This limit can be disabled + # by setting it to 0. + # max-concurrent-queries = 0 + + # The maximum time a query is allowed to execute before being killed by the system. This limit + # can help prevent runaway queries. Setting the value to 0 disables the limit. + # query-timeout = "0s" + + # The time threshold when a query will be logged as a slow query. This limit can be set to help + # discover slow or resource intensive queries. Setting the value to 0 disables the slow query logging. + # log-queries-after = "0s" + + # The maximum number of points a SELECT can process. A value of 0 will make + # the maximum point count unlimited. This will only be checked every second so queries will not + # be aborted immediately when hitting the limit. + # max-select-point = 0 + + # The maximum number of series a SELECT can run. A value of 0 will make the maximum series + # count unlimited. + # max-select-series = 0 + + # The maximum number of GROUP BY time buckets a SELECT can create. A value of zero will make the + # maximum number of buckets unlimited. + # max-select-buckets = 0 + +### +### [retention] +### +### Controls the enforcement of retention policies for evicting old data. +### + +[retention] + # Determines whether retention policy enforcement is enabled. + enabled = true + + # The interval of time when retention policy enforcement checks run. + check-interval = "30m" + +### +### [shard-precreation] +### +### Controls the precreation of shards, so they are available before data arrives. +### Only shards that, after creation, will have both a start- and end-time in the +### future, will ever be created. Shards are never precreated that would be wholly +### or partially in the past. + +[shard-precreation] + # Determines whether shard pre-creation service is enabled. + # enabled = true + + # The interval of time when the check to pre-create new shards runs. + # check-interval = "10m"
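+ + # For the [retention] service above to have something to enforce, a database needs a retention policy; a hypothetical InfluxQL example (the database name is a placeholder): + #   CREATE RETENTION POLICY "two_weeks" ON "scouterdb" DURATION 14d REPLICATION 1 DEFAULT +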
+  # check-interval = "10m"
+
+  # The default period ahead of the endtime of a shard group that its successor
+  # group is created.
+  # advance-period = "30m"
+
+###
+### [monitor]
+###
+### Controls the system self-monitoring, statistics and diagnostics.
+###
+### The internal database for monitoring data is created automatically if
+### it does not already exist. The target retention within this database
+### is called 'monitor' and is also created with a retention period of 7 days
+### and a replication factor of 1, if it does not exist. In all cases
+### this retention policy is configured as the default for the database.
+
+[monitor]
+  # Whether to record statistics internally.
+  store-enabled = true
+
+  # The destination database for recorded statistics.
+  store-database = "_internal"
+
+  # The interval at which to record statistics.
+  store-interval = "10s"
+
+###
+### [http]
+###
+### Controls how the HTTP endpoints are configured. These are the primary
+### mechanism for getting data into and out of InfluxDB.
+###
+
+[http]
+  # Determines whether the HTTP endpoint is enabled.
+  enabled = true
+
+  # Determines whether the Flux query endpoint is enabled.
+  # flux-enabled = false
+
+  # Determines whether Flux query logging is enabled.
+  # flux-log-enabled = false
+
+  # The bind address used by the HTTP service.
+  bind-address = ":8086"
+
+  # Determines whether user authentication is enabled over HTTP/HTTPS.
+  auth-enabled = false
+
+  # The default realm sent back when issuing a basic auth challenge.
+  realm = "InfluxDB"
+
+  # Determines whether HTTP request logging is enabled.
+  log-enabled = true
+
+  # Determines whether the HTTP write request logs should be suppressed when the log is enabled.
+  # suppress-write-log = false
+
+  # When HTTP request logging is enabled, this option specifies the path where
+  # log entries should be written. If unspecified, the default is to write to stderr, which
+  # intermingles HTTP logs with internal InfluxDB logging.
+  #
+  # If influxd is unable to access the specified path, it will log an error and fall back to writing
+  # the request log to stderr.
+  # access-log-path = ""
+
+  # Filters which requests should be logged. Each filter is of the pattern NNN, NNX, or NXX where N is
+  # a number and X is a wildcard for any number. To filter all 5xx responses, use the string 5xx.
+  # If multiple filters are used, then only one has to match. The default is to have no filters,
+  # which will cause every request to be printed.
+  # access-log-status-filters = []
+
+  # Determines whether detailed write logging is enabled.
+  write-tracing = false
+
+  # Determines whether the pprof endpoint is enabled. This endpoint is used for
+  # troubleshooting and monitoring.
+  pprof-enabled = true
+
+  # Enables authentication on pprof endpoints. Users will need admin permissions
+  # to access the pprof endpoints when this setting is enabled. This setting has
+  # no effect if either auth-enabled or pprof-enabled are set to false.
+  # pprof-auth-enabled = false
+
+  # Enables a pprof endpoint that binds to localhost:6060 immediately on startup.
+  # This is only needed to debug startup issues.
+  # debug-pprof-enabled = false
+
+  # Enables authentication on the /ping, /metrics, and deprecated /status
+  # endpoints. This setting has no effect if auth-enabled is set to false.
+  # ping-auth-enabled = false
+
+  # Determines whether HTTPS is enabled.
+  https-enabled = false
+
+  # The SSL certificate to use when HTTPS is enabled.
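+  # A self-signed certificate for testing can be generated along these lines
+  # (an illustrative command; adjust paths and the subject to your environment):
+  #   openssl req -x509 -nodes -newkey rsa:2048 -days 365 \
+  #     -keyout /etc/ssl/influxdb-key.pem -out /etc/ssl/influxdb-cert.pem \
+  #     -subj "/CN=influxdb"
+  # Point https-certificate at the certificate and https-private-key at the key.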
+  https-certificate = "/etc/ssl/influxdb.pem"
+
+  # Use a separate private key location.
+  # https-private-key = ""
+
+  # The JWT auth shared secret to validate requests using JSON web tokens.
+  # shared-secret = ""
+
+  # The default chunk size for result sets that should be chunked.
+  # max-row-limit = 0
+
+  # The maximum number of HTTP connections that may be open at once. New connections that
+  # would exceed this limit are dropped. Setting this value to 0 disables the limit.
+  # max-connection-limit = 0
+
+  # Enable the HTTP service over a unix domain socket.
+  # unix-socket-enabled = false
+
+  # The path of the unix domain socket.
+  # bind-socket = "/var/run/influxdb.sock"
+
+  # The maximum size of a client request body, in bytes. Setting this value to 0 disables the limit.
+  # max-body-size = 25000000
+
+  # The maximum number of writes processed concurrently.
+  # Setting this to 0 disables the limit.
+  # max-concurrent-write-limit = 0
+
+  # The maximum number of writes queued for processing.
+  # Setting this to 0 disables the limit.
+  # max-enqueued-write-limit = 0
+
+  # The maximum duration for a write to wait in the queue to be processed.
+  # Setting this to 0 or setting max-concurrent-write-limit to 0 disables the limit.
+  # enqueued-write-timeout = 0
+
+###
+### [logging]
+###
+### Controls how the logger emits logs to the output.
+###
+
+[logging]
+  # Determines which log encoder to use for logs. Available options
+  # are auto, logfmt, and json. auto will use a more user-friendly
+  # output format if the output terminal is a TTY, but the format is not as
+  # easily machine-readable. When the output is a non-TTY, auto will use
+  # logfmt.
+  # format = "auto"
+
+  # Determines which level of logs will be emitted. The available levels
+  # are error, warn, info, and debug. Logs that are equal to or above the
+  # specified level will be emitted.
+  # level = "info"
+
+  # Suppresses the logo output that is printed when the program is started.
+  # The logo is always suppressed if STDOUT is not a TTY.
+  # suppress-logo = false
+
+###
+### [subscriber]
+###
+### Controls the subscriptions, which can be used to fork a copy of all data
+### received by the InfluxDB host.
+###
+
+[subscriber]
+  # Determines whether the subscriber service is enabled.
+  # enabled = true
+
+  # The default timeout for HTTP writes to subscribers.
+  # http-timeout = "30s"
+
+  # Allows insecure HTTPS connections to subscribers. This is useful when testing with self-
+  # signed certificates.
+  # insecure-skip-verify = false
+
+  # The path to the PEM encoded CA certs file. If empty, the default system certs will be used.
+  # ca-certs = ""
+
+  # The number of writer goroutines processing the write channel.
+  # write-concurrency = 40
+
+  # The number of in-flight writes buffered in the write channel.
+  # write-buffer-size = 1000
+
+
+###
+### [[graphite]]
+###
+### Controls one or many listeners for Graphite data.
+###
+
+[[graphite]]
+  # Determines whether the graphite endpoint is enabled.
+  enabled = false
+  # database = "graphite"
+  # retention-policy = ""
+  # bind-address = ":2003"
+  # protocol = "tcp"
+  # consistency-level = "one"
+
+  # These next lines control how batching works. You should have this enabled;
+  # otherwise you could get dropped metrics or poor performance. Batching
+  # will buffer points in memory if you have many coming in.
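+  # For example (illustrative): with protocol = "tcp" on :2003, a plaintext
+  # Graphite metric such as
+  #   servers.web01.cpu.load 0.45 1599900000
+  # is buffered in memory and flushed according to the batch settings below.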
+
+  # Flush if this many points get buffered.
+  # batch-size = 5000
+
+  # Number of batches that may be pending in memory.
+  # batch-pending = 10
+
+  # Flush at least this often even if we haven't hit the buffer limit.
+  # batch-timeout = "1s"
+
+  # UDP read buffer size, 0 means OS default. The UDP listener will fail if set above the OS max.
+  # udp-read-buffer = 0
+
+  ### This string joins multiple matching 'measurement' values, providing more control over the final measurement name.
+  # separator = "."
+
+  ### Default tags that will be added to all metrics. These can be overridden at the template level
+  ### or by tags extracted from the metric.
+  # tags = ["region=us-east", "zone=1c"]
+
+  ### Each template line requires a template pattern. It can have an optional
+  ### filter before the template, separated by spaces. It can also have optional extra
+  ### tags following the template. Multiple tags should be separated by commas and no spaces,
+  ### similar to the line protocol format. There can be only one default template.
+  # templates = [
+  #   "*.app env.service.resource.measurement",
+  #   # Default template
+  #   "server.*",
+  # ]
+
+###
+### [[collectd]]
+###
+### Controls one or many listeners for collectd data.
+###
+
+[[collectd]]
+  enabled = false
+  # bind-address = ":25826"
+  # database = "collectd"
+  # retention-policy = ""
+  #
+  # The collectd service supports either scanning a directory for multiple types.db
+  # files, or specifying a single db file.
+  # typesdb = "/usr/local/share/collectd"
+  #
+  # security-level = "none"
+  # auth-file = "/etc/collectd/auth_file"
+
+  # These next lines control how batching works. You should have this enabled;
+  # otherwise you could get dropped metrics or poor performance. Batching
+  # will buffer points in memory if you have many coming in.
+
+  # Flush if this many points get buffered.
+  # batch-size = 5000
+
+  # Number of batches that may be pending in memory.
+  # batch-pending = 10
+
+  # Flush at least this often even if we haven't hit the buffer limit.
+  # batch-timeout = "10s"
+
+  # UDP read buffer size, 0 means OS default. The UDP listener will fail if set above the OS max.
+  # read-buffer = 0
+
+  # Multi-value plugins can be handled two ways.
+  # "split" will parse and store the multi-value plugin data into separate measurements.
+  # "join" will parse and store the multi-value plugin as a single multi-value measurement.
+  # "split" is the default behavior for backward compatibility with previous versions of influxdb.
+  # parse-multivalue-plugin = "split"
+
+###
+### [[opentsdb]]
+###
+### Controls one or many listeners for OpenTSDB data.
+###
+
+[[opentsdb]]
+  enabled = false
+  # bind-address = ":4242"
+  # database = "opentsdb"
+  # retention-policy = ""
+  # consistency-level = "one"
+  # tls-enabled = false
+  # certificate = "/etc/ssl/influxdb.pem"
+
+  # Log an error for every malformed point.
+  # log-point-errors = true
+
+  # These next lines control how batching works. You should have this enabled;
+  # otherwise you could get dropped metrics or poor performance. Only metrics
+  # received over the telnet protocol undergo batching.
+
+  # Flush if this many points get buffered.
+  # batch-size = 1000
+
+  # Number of batches that may be pending in memory.
+  # batch-pending = 5
+
+  # Flush at least this often even if we haven't hit the buffer limit.
+  # batch-timeout = "1s"
+
+###
+### [[udp]]
+###
+### Controls the listeners for InfluxDB line protocol data via UDP.
+###
+
+[[udp]]
+  enabled = true
+  bind-address = ":8089"
+  database = "scouterCounter"
+  retention-policy = "default"
+
+  # InfluxDB precision for timestamps on received points ("" or "n", "u", "ms", "s", "m", "h")
+  # precision = ""
+
+  # These next lines control how batching works. You should have this enabled;
+  # otherwise you could get dropped metrics or poor performance. Batching
+  # will buffer points in memory if you have many coming in.
+
+  # Flush if this many points get buffered.
+  batch-size = 5000
+
+  # Number of batches that may be pending in memory.
+  batch-pending = 10
+
+  # Flush at least this often even if we haven't hit the buffer limit.
+  batch-timeout = "1s"
+
+  # UDP read buffer size, 0 means OS default. The UDP listener will fail if set above the OS max.
+  read-buffer = 0
+
+###
+### [continuous_queries]
+###
+### Controls how continuous queries are run within InfluxDB.
+###
+
+[continuous_queries]
+  # Determines whether the continuous query service is enabled.
+  enabled = true
+
+  # Controls whether queries are logged when executed by the CQ service.
+  log-enabled = true
+
+  # Controls whether queries are logged to the self-monitoring data store.
+  # query-stats-enabled = false
+
+  # The interval at which continuous queries are checked to see whether they need to run.
+  run-interval = "5s"
+
+###
+### [tls]
+###
+### Global configuration settings for TLS in InfluxDB.
+###
+
+[tls]
+  # Determines the available set of cipher suites. See https://golang.org/pkg/crypto/tls/#pkg-constants
+  # for a list of available ciphers, which depends on the version of Go (use the query
+  # SHOW DIAGNOSTICS to see the version of Go used to build InfluxDB). If not specified, uses
+  # the default settings from Go's crypto/tls package.
+  # ciphers = [
+  #   "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305",
+  #   "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
+  # ]
+
+  # Minimum version of the tls protocol that will be negotiated. If not specified, uses the
+  # default settings from Go's crypto/tls package.
+  # min-version = "tls1.2"
+
+  # Maximum version of the tls protocol that will be negotiated. If not specified, uses the
+  # default settings from Go's crypto/tls package.
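+  # For example (illustrative): pinning the listener to TLS 1.2 only would set
+  #   min-version = "tls1.2"
+  #   max-version = "tls1.2"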
+  # max-version = "tls1.2"
Index: web-module/script/yml/monitoring/allinone/logstash.conf
===================================================================
diff -u
--- web-module/script/yml/monitoring/allinone/logstash.conf	(revision 0)
+++ web-module/script/yml/monitoring/allinone/logstash.conf	(revision 3e2c8b7634389a7bc0ddee58812b44d1e711aecc)
@@ -0,0 +1,42 @@
+input {
+  beats {
+    port => 5044
+  }
+
+  kafka {
+    bootstrap_servers => "kafka1:9092,kafka2:9092,kafka3:9092"
+    topics => ["msgTopic"]
+    consumer_threads => 1
+    decorate_events => true
+    group_id => "logstash"
+    add_field => {
+      "[@metadata][beat]" => "apm-%{+YYYY.MM.dd}"
+      "[@metadata][type]" => "logs"
+    }
+  }
+}
+filter {
+  if [type] == "catalinalog" {
+
+    grok {
+      match => { "message" => "%{COMBINEDAPACHELOG}" }
+    }
+    date {
+      match => [ "timestamp" , "dd/MMM/yyyy:HH:mm:ss Z" ]
+    }
+  }
+
+}
+output {
+
+  stdout {
+    codec => rubydebug
+  }
+
+  elasticsearch {
+    hosts => ["es-coordinating:9200"]
+    index => "%{[@metadata][beat]}"
+    # document_type is intentionally not set: Elasticsearch 7.x rejects custom mapping types
+  }
+
+}
Index: web-module/script/yml/monitoring/allinone/logstash.yml
===================================================================
diff -u
--- web-module/script/yml/monitoring/allinone/logstash.yml	(revision 0)
+++ web-module/script/yml/monitoring/allinone/logstash.yml	(revision 3e2c8b7634389a7bc0ddee58812b44d1e711aecc)
@@ -0,0 +1,216 @@
+# Settings file in YAML
+#
+# Settings can be specified either in hierarchical form, e.g.:
+#
+#   pipeline:
+#     batch:
+#       size: 125
+#       delay: 5
+#
+# Or as flat keys:
+#
+#   pipeline.batch.size: 125
+#   pipeline.batch.delay: 5
+#
+# ------------ Node identity ------------
+#
+# Use a descriptive name for the node:
+#
+# node.name: test
+#
+# If omitted, the node name will default to the machine's host name.
+#
+# ------------ Data path ------------------
+#
+# Which directory should be used by logstash and its plugins
+# for any persistent needs. Defaults to LOGSTASH_HOME/data
+#
+# path.data:
+#
+# ------------ Pipeline Settings --------------
+#
+# Set the number of workers that will, in parallel, execute the filters+outputs
+# stage of the pipeline.
+#
+# This defaults to the number of the host's CPU cores.
+#
+# pipeline.workers: 2
+#
+# How many workers should be used per output plugin instance.
+#
+# pipeline.output.workers: 1
+#
+# How many events to retrieve from inputs before sending to filters+workers.
+#
+# pipeline.batch.size: 125
+#
+# How long to wait before dispatching an undersized batch to filters+workers.
+# Value is in milliseconds.
+#
+# pipeline.batch.delay: 5
+#
+# Force Logstash to exit during shutdown even if there are still inflight
+# events in memory. By default, logstash will refuse to quit until all
+# received events have been pushed to the outputs.
+#
+# WARNING: enabling this can lead to data loss during shutdown.
+#
+# pipeline.unsafe_shutdown: false
+#
+# ------------ Pipeline Configuration Settings --------------
+#
+# Where to fetch the pipeline configuration for the main pipeline
+#
+# path.config:
+#
+# Pipeline configuration string for the main pipeline
+#
+# config.string:
+#
+# At startup, test if the configuration is valid and exit (dry run)
+#
+# config.test_and_exit: false
+#
+# Periodically check if the configuration has changed and reload the pipeline.
+# This can also be triggered manually through the SIGHUP signal.
+#
+# config.reload.automatic: false
+#
+# How often to check if the pipeline configuration has changed (a time value, e.g. 3s)
+#
+# config.reload.interval: 3s
+#
+# Show fully compiled configuration as debug log message
+# NOTE: --log.level must be 'debug'
+#
+# config.debug: false
+#
+# When enabled, process escaped characters such as \n and \" in strings in the
+# pipeline configuration files.
+#
+# config.support_escapes: false
+#
+# ------------ Module Settings ---------------
+# Define modules here. Module definitions must be defined as an array.
+# The simple way to see this is to prepend each `name` with a `-`, and keep
+# all associated variables under the `name` they are associated with, and
+# above the next, like this:
+#
+# modules:
+#   - name: MODULE_NAME
+#     var.PLUGINTYPE1.PLUGINNAME1.KEY1: VALUE
+#     var.PLUGINTYPE1.PLUGINNAME1.KEY2: VALUE
+#     var.PLUGINTYPE2.PLUGINNAME1.KEY1: VALUE
+#     var.PLUGINTYPE3.PLUGINNAME3.KEY1: VALUE
+#
+# Module variable names must be in the format of
+#
+# var.PLUGIN_TYPE.PLUGIN_NAME.KEY
+#
+# modules:
+#
+# ------------ Queuing Settings --------------
+#
+# Internal queuing model, "memory" for legacy in-memory based queuing and
+# "persisted" for disk-based acked queueing. Default is memory.
+#
+# queue.type: memory
+#
+# If using queue.type: persisted, the directory path where the data files will be stored.
+# Default is path.data/queue
+#
+# path.queue:
+#
+# If using queue.type: persisted, the page data files size. The queue data consists of
+# append-only data files separated into pages. Default is 250mb
+#
+# queue.page_capacity: 250mb
+#
+# If using queue.type: persisted, the maximum number of unread events in the queue.
+# Default is 0 (unlimited)
+#
+# queue.max_events: 0
+#
+# If using queue.type: persisted, the total capacity of the queue in number of bytes.
+# If you would like more unacked events to be buffered in Logstash, you can increase the
+# capacity using this setting. Please make sure your disk drive has capacity greater than
+# the size specified here. If both max_bytes and max_events are specified, Logstash will pick
+# whichever criterion is reached first.
+# Default is 1024mb or 1gb
+#
+# queue.max_bytes: 1024mb
+#
+# If using queue.type: persisted, the maximum number of acked events before forcing a checkpoint.
+# Default is 1024, 0 for unlimited
+#
+# queue.checkpoint.acks: 1024
+#
+# If using queue.type: persisted, the maximum number of written events before forcing a checkpoint.
+# Default is 1024, 0 for unlimited
+#
+# queue.checkpoint.writes: 1024
+#
+# If using queue.type: persisted, the interval in milliseconds when a checkpoint is forced on the head page.
+# Default is 1000, 0 for no periodic checkpoint.
+#
+# queue.checkpoint.interval: 1000
+#
+# ------------ Dead-Letter Queue Settings --------------
+# Flag to turn on the dead-letter queue.
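+# Entries recorded here can be reprocessed later with the dead_letter_queue
+# input plugin, e.g. (an illustrative pipeline snippet, not part of this file):
+#
+#   input {
+#     dead_letter_queue {
+#       path => "/usr/share/logstash/data/dead_letter_queue"
+#       commit_offsets => true
+#     }
+#   }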
+#
+# dead_letter_queue.enable: false
+
+# If using dead_letter_queue.enable: true, the maximum size of each dead letter queue. Entries
+# will be dropped if they would increase the size of the dead letter queue beyond this setting.
+# Default is 1024mb
+# dead_letter_queue.max_bytes: 1024mb
+
+# If using dead_letter_queue.enable: true, the directory path where the data files will be stored.
+# Default is path.data/dead_letter_queue
+#
+# path.dead_letter_queue:
+#
+# ------------ Metrics Settings --------------
+#
+# Bind address for the metrics REST endpoint
+#
+# http.host: "127.0.0.1"
+http.host: "0.0.0.0"
+#
+# Bind port for the metrics REST endpoint. This option also accepts a range
+# (9600-9700), and Logstash will pick up the first available port.
+#
+# http.port: 9600-9700
+#
+# ------------ Debugging Settings --------------
+#
+# Options for log.level:
+#   * fatal
+#   * error
+#   * warn
+#   * info (default)
+#   * debug
+#   * trace
+#
+# log.level: info
+# path.logs:
+#
+# ------------ Other Settings --------------
+#
+# Where to find custom plugins
+# path.plugins: []
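+#
+# Example (illustrative only, not active configuration): enabling the
+# persisted queue described above could look like:
+#
+#   queue.type: persisted
+#   queue.max_bytes: 2gb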