# base data receiver config  ---------------------------------------------------------------------
collector.receiver.base.ip=0.0.0.0
collector.receiver.base.port=9994
# number of tcp worker threads
collector.receiver.base.worker.threadSize=8
# capacity of tcp worker queue
collector.receiver.base.worker.queueSize=1024
# monitoring for tcp worker
collector.receiver.base.worker.monitor=true
# stat receiver config  ---------------------------------------------------------------------
collector.receiver.stat.udp=true
collector.receiver.stat.udp.ip=0.0.0.0
collector.receiver.stat.udp.port=9995
collector.receiver.stat.udp.receiveBufferSize=4194304
# Keep in mind that TCP transport load balancing is per connection (UDP transport load balancing is per packet).
collector.receiver.stat.tcp=false
collector.receiver.stat.tcp.ip=0.0.0.0
collector.receiver.stat.tcp.port=9995
# number of udp stat worker threads
collector.receiver.stat.worker.threadSize=8
# capacity of udp stat worker queue
collector.receiver.stat.worker.queueSize=64
# monitoring for udp stat worker
collector.receiver.stat.worker.monitor=true
# span receiver config  ---------------------------------------------------------------------
# Keep in mind that TCP transport load balancing is per connection (UDP transport load balancing is per packet).
collector.receiver.span.tcp=false
collector.receiver.span.tcp.ip=0.0.0.0
collector.receiver.span.tcp.port=9996
# number of udp span worker threads
collector.receiver.span.worker.threadSize=32
# capacity of udp span worker queue
collector.receiver.span.worker.queueSize=256
# monitoring for udp span worker
collector.receiver.span.worker.monitor=true
# configure L4 load balancer IP addresses so that their health check logs are ignored
collector.l4.ip=
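# For example (illustrative placeholder addresses; assumes multiple values can be given comma-separated):
#   collector.l4.ip=10.0.1.1,10.0.1.2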
# number of agent event worker threads
collector.agentEventWorker.threadSize=4
# capacity of agent event worker queue
collector.agentEventWorker.queueSize=1024
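# flush period for statistics data, in milliseconds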
statistics.flushPeriod=1000
# -------------------------------------------------------------------------------------------------
# The cluster related options are used to establish connections between the agent, collector, and web in order to send/receive data between them in real time.
# You may enable additional features using this option (Ex : RealTime Active Thread Chart).
# -------------------------------------------------------------------------------------------------
# Usage : Set the following options for collector/web components that reside in the same cluster in order to enable this feature.
# 1. cluster.enable (pinpoint-web.properties, pinpoint-collector.properties) - "true" to enable
# 2. cluster.zookeeper.address (pinpoint-web.properties, pinpoint-collector.properties) - address of the ZooKeeper instance that will be used to manage the cluster
# 3. cluster.web.tcp.port (pinpoint-web.properties) - any available port number (used to establish the connection between web and collector)
# -------------------------------------------------------------------------------------------------
# Please be aware of the following:
# 1. If the network between the web, collector, and agents is not stable, it is advisable not to use this feature.
# 2. We recommend using the cluster.web.tcp.port option. However, in cases where the collector is unable to establish a connection to the web, you may reverse this and make the web establish the connection to the collector.
#    In this case, you must set cluster.connect.address (pinpoint-web.properties), and cluster.listen.ip, cluster.listen.port (pinpoint-collector.properties) accordingly.
cluster.enable=false
cluster.zookeeper.address=localhost
cluster.zookeeper.sessiontimeout=30000
cluster.listen.ip=
cluster.listen.port=
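# Illustrative example with placeholder values, assuming a ZooKeeper instance at zk1.example.com
# and port 9997 chosen for the web/collector connection. The matching settings on the web side
# (pinpoint-web.properties) would be:
#   cluster.enable=true
#   cluster.zookeeper.address=zk1.example.com
#   cluster.web.tcp.port=9997
# with the corresponding settings in this file:
#   cluster.enable=true
#   cluster.zookeeper.address=zk1.example.com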
# enable hbase async operation. default: false
hbase.client.async.enable=false
# max number of buffered asyncPut ops per region. default: 10000
hbase.client.async.in.queuesize=10000
# flush period for buffered asyncPut ops, in ms. default: 100
hbase.client.async.flush.period.ms=100
# max number of retry attempts before dropping the request. default: 10
hbase.client.async.max.retries.in.queue=10