# If you are running more than one instance of graylog2-server, you have to select one of these
# instances as master. The master will perform some periodic tasks that non-masters won't perform.
is_master = true

# Set plugin directory here (relative or absolute)
plugin_dir = plugin

# On which port should we listen for syslog messages? (Standard: 514)
syslog_listen_port = 514
syslog_listen_address = 127.0.0.1
syslog_enable_udp = true
syslog_enable_tcp = false
# The standard delimiter is LF. You can force a NUL byte delimiter with this option.
syslog_use_nul_delimiter = false
# The raw syslog message is stored as the full_message of the message unless disabled here.
syslog_store_full_message = true

# Embedded ElasticSearch configuration file
# Pay attention to the working directory of the server; an absolute path is safest here.
elasticsearch_config_file = /etc/graylog2-elasticsearch.yml

elasticsearch_max_docs_per_index = 20000000
elasticsearch_url = http://localhost:9350/
elasticsearch_index_prefix = graylog2

# How many indices do you want to keep? If the number of indices exceeds this number, older indices will be dropped.
# elasticsearch_max_number_of_indices * elasticsearch_max_docs_per_index = total number of messages in your setup
# (see the worked example at the end of this block)
elasticsearch_max_number_of_indices = 20

# How many ElasticSearch shards and replicas should be used per index?
# Note that this only applies to newly created indices.
elasticsearch_shards = 4
elasticsearch_replicas = 0

# Analyzer (tokenizer) to use for the message and full_message fields. The "standard" analyzer is usually a good choice.
# All supported analyzers are: standard, simple, whitespace, stop, keyword, pattern, language, snowball, custom
# ElasticSearch documentation: http://www.elasticsearch.org/guide/reference/index-modules/analysis/
# Note that this setting only takes effect on newly created indices.
elasticsearch_analyzer = standard

# How many minutes of messages do you want to keep in the recent index? This index lives in memory only
# and is used to build the overview and stream pages. Raise this value if you want to see more messages
# on the overview pages. It does not affect searches, which always target *all* indices.
recent_index_ttl_minutes = 60

# Storage type of the recent index. Allowed values: niofs, simplefs, mmapfs, memory
# Standard: niofs. Set to memory for best speed, but keep in mind that the whole recent index has to fit
# into the memory of your ElasticSearch machines. Set recent_index_ttl_minutes to a reasonable amount
# that lets the messages fit into memory.
recent_index_store_type = niofs

# Always try a reverse DNS lookup instead of parsing the hostname from the syslog message?
force_syslog_rdns = false

# Set the time to NOW if parsing the date/time from a syslog message fails, instead of rejecting the message?
allow_override_syslog_date = true

# Batch size for all outputs. This is the maximum (!) number of messages an output module will get at once.
# For example, if this is set to 5000 (default), the ElasticSearch output will not index more than 5000 messages
# at once. After that index operation is performed, the next batch will be indexed. If there is only 1 message
# waiting, it will index only that single message. Raise this parameter if you send in so many messages that
# indexing 5000 at a time is not enough to keep up. (Only at *really* high message rates.)
output_batch_size = 5000
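# Worked example for the retention settings above (illustrative note, not part of the shipped config):
# with the values configured here, total retention caps out at
#   elasticsearch_max_number_of_indices * elasticsearch_max_docs_per_index
#   = 20 * 20,000,000
#   = 400,000,000 messages
# before the oldest index is dropped.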
# The number of parallel running processors.
# Raise this number if your buffers are filling up.
processbuffer_processors = 5
outputbuffer_processors = 5

# Wait strategy describing how buffer processors wait on a cursor sequence. (default: sleeping)
# Possible types:
#  - yielding
#      Compromise between performance and CPU usage.
#  - sleeping
#      Compromise between performance and CPU usage. Latency spikes can occur after quiet periods.
#  - blocking
#      High throughput, low latency, higher CPU usage.
#  - busy_spinning
#      Avoids syscalls which could introduce latency jitter. Best when threads can be bound to specific CPU cores.
processor_wait_strategy = sleeping

# Size of internal ring buffers. Raise this if raising outputbuffer_processors does not help anymore.
# For optimum performance your LogMessage objects in the ring buffer should fit in your CPU L3 cache.
# Start the server with the --statistics flag to see buffer utilization.
# Must be a power of 2. (512, 1024, 2048, ...)
ring_size = 1024

# MongoDB configuration
mongodb_useauth = true
mongodb_user = grayloguser
mongodb_password = Password
mongodb_host = 127.0.0.1
mongodb_database = graylog2
mongodb_port = 27017

# Raise this according to the maximum number of connections your MongoDB server can handle
# if you encounter MongoDB connection problems.
mongodb_max_connections = 100

# Number of threads allowed to block waiting for a MongoDB connection, expressed as a multiplier
# of mongodb_max_connections. Default: 5
# If mongodb_max_connections is 100 and mongodb_threads_allowed_to_block_multiplier is 5,
# then 500 threads can block. More than that and an exception will be thrown.
# http://api.mongodb.org/java/current/com/mongodb/MongoOptions.html#threadsAllowedToBlockForConnectionMultiplier
mongodb_threads_allowed_to_block_multiplier = 5

# Graylog Extended Log Format (GELF)
use_gelf = true
gelf_listen_address = 0.0.0.0
gelf_listen_port = 12201

# Drools rule file (use it to rewrite incoming log messages)
# rules_file = /etc/graylog2.d/rules/graylog2.drl

# AMQP
amqp_enabled = false
amqp_host = localhost
amqp_port = 5672
amqp_username = guest
amqp_password = guest
amqp_virtualhost = /

# HTTP input
# The server will accept PUT requests to /gelf or /gelf/raw:
# /gelf can process all standard GELF messages containing the two header bytes.
# /gelf/raw can only process uncompressed GELF messages without any header bytes.
# The HTTP server allows keep-alive connections and supports compression.
# (See the example request after the transport settings below.)
http_enabled = false
http_listen_address = 0.0.0.0
http_listen_port = 12202

# Email transport
transport_email_enabled = false
transport_email_hostname = logger.edi.su
transport_email_port = 587
transport_email_use_auth = true
transport_email_use_tls = true
transport_email_auth_username = logger@logger.edi.su
transport_email_auth_password = secretpass
transport_email_subject_prefix = [graylog2]
transport_email_from_email = logger@logger.edi.su
transport_email_from_name = Graylog2

# Jabber/XMPP transport
transport_jabber_enabled = false
transport_jabber_hostname = jabber.example.com
transport_jabber_port = 5222
transport_jabber_use_sasl_auth = true
transport_jabber_allow_selfsigned_certs = false
transport_jabber_auth_username = your_user
transport_jabber_auth_password = secret
transport_jabber_message_prefix = [graylog2]
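# Example request for the HTTP input above (illustrative sketch, not part of the shipped config;
# assumes http_enabled = true and the server reachable on 127.0.0.1). "version", "host" and
# "short_message" are standard GELF fields; "_user_id" is a hypothetical additional field:
#
#   curl -X PUT \
#        -d '{"version":"1.0","host":"app01","short_message":"Login failed","_user_id":"42"}' \
#        http://127.0.0.1:12202/gelf/raw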
# Filters
# Enable the filter that tries to extract additional fields from k=v values in the log message?
enable_tokenizer_filter = true

# Additional modules

# Graphite
#enable_graphite_output = false
#graphite_carbon_host = 127.0.0.1
#graphite_carbon_tcp_port = 2003
#graphite_prefix = logs

# Librato Metrics (https://github.com/Graylog2/graylog2-server/wiki/Librato-Metrics-output)
#enable_libratometrics_output = false
#libratometrics_api_user = you@example.com
#libratometrics_api_token = abcdefg12345
#libratometrics_prefix = gl2
#libratometrics_interval = 10
#libratometrics_stream_filter =
#libratometrics_host_filter =
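# Illustrative note on the Graphite output above (an assumption about the wire format, not part of the
# shipped config): Carbon's plaintext protocol takes one "<path> <value> <timestamp>" line per metric
# over TCP port 2003, so with graphite_prefix = logs a metric line sent to graphite_carbon_host could
# look like the following (the metric name is hypothetical):
#
#   logs.total_messages 5230 1361713000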