# List of pipelines to be loaded by Logstash
#
# This document must be a list of dictionaries/hashes, where the keys/values are pipeline settings.
# Default values for omitted settings are read from the `logstash.yml` file.
# When declaring multiple pipelines, each MUST have its own `pipeline.id`.
#
# Example of two pipelines:
#
# - pipeline.id: test
#   pipeline.workers: 1
#   pipeline.batch.size: 1
#   config.string: "input { generator {} } filter { sleep { time => 1 } } output { stdout { codec => dots } }"
# - pipeline.id: another_test
#   queue.type: persisted
#   path.config: "/tmp/logstash/*.config"
#
# Available options:
#
#   # name of the pipeline
#   pipeline.id: mylogs
#
#   # The configuration string to be used by this pipeline
#   config.string: "input { generator {} } filter { sleep { time => 1 } } output { stdout { codec => dots } }"
#
#   # The path from where to read the configuration text
#   path.config: "/etc/conf.d/logstash/myconfig.cfg"
#
#   # How many worker threads execute the Filters+Outputs stage of the pipeline
#   pipeline.workers: 1 (actually defaults to number of CPUs)
#
#   # How many events to retrieve from inputs before sending to filters+workers
#   pipeline.batch.size: 125
#
#   # How long to wait before dispatching an undersized batch to filters+workers
#   pipeline.batch.delay: 5
#
#   # How many workers should be used per output plugin instance
#   pipeline.output.workers: 1
#
#   # Internal queuing model, "memory" for legacy in-memory based queuing and
#   # "persisted" for disk-based acked queueing. Default is memory
#   queue.type: memory
#
#   # If using queue.type: persisted, the page data files size. The queue data consists of
#   # append-only data files separated into pages. Default is 250mb
#   queue.page_capacity: 250mb
#
#   # If using queue.type: persisted, the maximum number of unread events in the queue.
#   # Default is 0 (unlimited)
#   queue.max_events: 0
#
#   # If using queue.type: persisted, the total capacity of the queue in number of bytes.
#   # Default is 1024mb or 1gb
#   queue.max_bytes: 1024mb
#
#   # If using queue.type: persisted, the maximum number of acked events before forcing a checkpoint
#   # Default is 1024, 0 for unlimited
#   queue.checkpoint.acks: 1024
#
#   # If using queue.type: persisted, the maximum number of written events before forcing a checkpoint
#   # Default is 1024, 0 for unlimited
#   queue.checkpoint.writes: 1024
#
#   # If using queue.type: persisted, the interval in milliseconds when a checkpoint is forced on the head page
#   # Default is 1000, 0 for no periodic checkpoint.
#   queue.checkpoint.interval: 1000
#
#   # Enable Dead Letter Queueing for this pipeline.
#   dead_letter_queue.enable: false
#
#   # If using dead_letter_queue.enable: true, the maximum size of dead letter queue for this pipeline. Entries
#   # will be dropped if they would increase the size of the dead letter queue beyond this setting.
#   # Default is 1024mb
#   dead_letter_queue.max_bytes: 1024mb
#
#   # If using dead_letter_queue.enable: true, the directory path where the data files will be stored.
#   # Default is path.data/dead_letter_queue
#
#   path.dead_letter_queue: