# List of pipelines to be loaded by Logstash
#
# This document must be a list of dictionaries/hashes, where the keys/values are pipeline settings.
# Default values for omitted settings are read from the `logstash.yml` file.
# When declaring multiple pipelines, each MUST have its own `pipeline.id`.
#
# Example of two pipelines:
#
# - pipeline.id: test
#   pipeline.workers: 1
#   pipeline.batch.size: 1
#   config.string: "input { generator {} } filter { sleep { time => 1 } } output { stdout { codec => dots } }"
# - pipeline.id: another_test
#   queue.type: persisted
#   path.config: "/tmp/logstash/*.config"
#
# Available options:
#
#   # name of the pipeline
#   pipeline.id: mylogs
#
#   # The configuration string to be used by this pipeline
#   config.string: "input { generator {} } filter { sleep { time => 1 } } output { stdout { codec => dots } }"
#
#   # The path from where to read the configuration text
#   path.config: "/etc/conf.d/logstash/myconfig.cfg"
#
#   # How many worker threads execute the Filters+Outputs stage of the pipeline
#   pipeline.workers: 1 (actually defaults to number of CPUs)
#
#   # How many events to retrieve from inputs before sending to filters+workers
#   pipeline.batch.size: 125
#
#   # How long to wait (in milliseconds) before dispatching an undersized batch to filters+workers
#   pipeline.batch.delay: 5
#
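# Example (an illustrative sketch, not a default): one way to tune a pipeline
# for bulk throughput by raising workers and batch size together. The id
# "bulk_ingest", the path, and the values shown are placeholders.
#
# - pipeline.id: bulk_ingest
#   pipeline.workers: 4
#   pipeline.batch.size: 500
#   pipeline.batch.delay: 5
#   path.config: "/etc/conf.d/logstash/bulk.cfg"
#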
#   # How many workers should be used per output plugin instance
#   pipeline.output.workers: 1
#
#   # Internal queuing model, "memory" for legacy in-memory based queuing and
#   # "persisted" for disk-based acked queueing. Default is memory
#   queue.type: memory
#
#
#   # If using queue.type: persisted, the size of the page data files. The queue data consists of
#   # append-only data files separated into pages. Default is 250mb
#   queue.page_capacity: 250mb
#
#   # If using queue.type: persisted, the maximum number of unread events in the queue.
#   # Default is 0 (unlimited)
#   queue.max_events: 0
#
#   # If using queue.type: persisted, the total capacity of the queue in number of bytes.
#   # Default is 1024mb (1gb)
#   queue.max_bytes: 1024mb
#
#   # If using queue.type: persisted, the maximum number of acked events before forcing a checkpoint
#   # Default is 1024, 0 for unlimited
#   queue.checkpoint.acks: 1024
#
#   # If using queue.type: persisted, the maximum number of written events before forcing a checkpoint
#   # Default is 1024, 0 for unlimited
#   queue.checkpoint.writes: 1024
#
#   # If using queue.type: persisted, the interval in milliseconds when a checkpoint is forced on the head page
#   # Default is 1000, 0 for no periodic checkpoint.
#   queue.checkpoint.interval: 1000
#
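# Example (an illustrative sketch): a pipeline that trades some throughput for
# durability via the persisted queue, with a larger queue and less frequent
# write checkpoints. The id "durable_events" and the values shown are
# placeholders, not recommended settings.
#
# - pipeline.id: durable_events
#   queue.type: persisted
#   queue.max_bytes: 4gb
#   queue.checkpoint.writes: 4096
#   path.config: "/etc/conf.d/logstash/durable.cfg"
#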
#   # Enable Dead Letter Queueing for this pipeline.
#   dead_letter_queue.enable: false
#
#   # If using dead_letter_queue.enable: true, the maximum size of the dead letter queue for this pipeline.
#   # Entries will be dropped if they would increase the size of the dead letter queue beyond this setting.
#   # Default is 1024mb
#   dead_letter_queue.max_bytes: 1024mb
#
#   # If using dead_letter_queue.enable: true, the directory path where the data files will be stored.
#   # Default is path.data/dead_letter_queue
#   path.dead_letter_queue:
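#
# Example (an illustrative sketch): a pipeline with the dead letter queue
# enabled and a custom storage path. The id "dlq_demo", the paths, and the
# size shown are placeholders.
#
# - pipeline.id: dlq_demo
#   dead_letter_queue.enable: true
#   dead_letter_queue.max_bytes: 2048mb
#   path.dead_letter_queue: "/var/lib/logstash/dlq"
#   path.config: "/etc/conf.d/logstash/dlq_demo.cfg"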