From 3a2cc9c62b9563c08c368efefd9554b7c86774cf Mon Sep 17 00:00:00 2001 From: Karen Metts Date: Wed, 2 Apr 2025 16:47:58 -0400 Subject: [PATCH 1/4] TEST esinput content against 8.x- initial commit --- docs/plugins/inputs/elasticsearch.asciidoc | 276 ++++++++++++++++----- 1 file changed, 217 insertions(+), 59 deletions(-) diff --git a/docs/plugins/inputs/elasticsearch.asciidoc b/docs/plugins/inputs/elasticsearch.asciidoc index 19167302..4ef1e2d6 100644 --- a/docs/plugins/inputs/elasticsearch.asciidoc +++ b/docs/plugins/inputs/elasticsearch.asciidoc @@ -24,7 +24,7 @@ include::{include_path}/plugin_header.asciidoc[] Read from an Elasticsearch cluster, based on search query results. This is useful for replaying test logs, reindexing, etc. -You can periodically schedule ingestion using a cron syntax +You can periodically schedule ingestion using a cron syntax (see `schedule` setting) or run the query one time to load data into Logstash. @@ -49,7 +49,7 @@ This would create an Elasticsearch query with the following format: "sort": [ "_doc" ] }' - +[id="plugins-{type}s-{plugin}-scheduling"] ==== Scheduling Input from this plugin can be scheduled to run periodically according to a specific @@ -94,10 +94,140 @@ The plugin logs a warning when ECS is enabled and `target` isn't set. TIP: Set the `target` option to avoid potential schema conflicts. +[id="plugins-{type}s-{plugin}-failure-handling"] +==== Failure handling + +When this input plugin cannot create a structured `Event` from a hit result, it will instead create an `Event` that is tagged with `_elasticsearch_input_failure` whose `[event][original]` is a JSON-encoded string representation of the entire hit. + +Common causes are: + + - When the hit result contains top-level fields that are {logstash-ref}/processing.html#reserved-fields[reserved in Logstash] but do not have the expected shape. Use the <> directive to avoid conflicts with the top-level namespace. + - When <> is enabled and the docinfo fields cannot be merged into the hit result. Combine <> and <> to avoid conflict. + +[id="plugins-{type}s-{plugin}-cursor"] +==== Tracking a field's value across runs + +NOTE: experimental:[] `tracking_field` and related settings are experimental and subject to change in the future + +It is sometimes desirable to track the value of a particular field between two jobs: +* avoid re-processing the entire result set of a long query after an unplanned restart +* only grab new data from an index instead of processing the entire set on each job + +For this, the Elasticsearch input plugin provides the <> and <> options. +When <> is set, the plugin will record the value of that field for the last document retrieved in a run into +a file (location defaults to <>). + +The user can then inject this value in the query using the placeholder `:last_value`. The value will be injected into the query +before execution, and the updated after the query completes if new data was found. + +This feature works best when: + +. the query sorts by the tracking field; +. the timestamp field is added by {es}; +. the field type has enough resolution so that two events are unlikely to have the same value. + +It is recommended to use a tracking field whose type is https://www.elastic.co/guide/en/elasticsearch/reference/current/date_nanos.html[date nanoseconds]. +If the tracking field is of this data type, an extra placeholder called `:present` can be used to inject the nano-second based value of "now-30s". 
+This placeholder is useful as the right-hand side of a range filter, allowing the collection of
+new data but leaving partially-searcheable bulk request data to the next scheduled job.
+
+Below is a series of steps to help set up the "tailing" of data being written to a set of indices, using a date nanosecond field
+added by an Elasticsearch ingest pipeline, and the `tracking_field` capability of this plugin.
+
+. create ingest pipeline that adds Elasticsearch's `_ingest.timestamp` field to the documents as `event.ingested`:
+
+[source, json]
+    PUT _ingest/pipeline/my-pipeline
+    {
+      "processors": [
+        {
+          "script": {
+            "lang": "painless",
+            "source": "ctx.putIfAbsent(\"event\", [:]); ctx.event.ingested = metadata().now.format(DateTimeFormatter.ISO_INSTANT);"
+          }
+        }
+      ]
+    }
+
+[start=2]
+. create an index mapping where the tracking field is of date nanosecond type and invokes the defined pipeline:
+
+[source, json]
+    PUT /_template/my_template
+    {
+      "index_patterns": ["test-*"],
+      "settings": {
+        "index.default_pipeline": "my-pipeline"
+      },
+      "mappings": {
+        "properties": {
+          "event": {
+            "properties": {
+              "ingested": {
+                "type": "date_nanos",
+                "format": "strict_date_optional_time_nanos"
+              }
+            }
+          }
+        }
+      }
+    }
+
+[start=3]
+. define a query that looks at all data of the indices, sorted by the tracking field, and with a range filter since the last value seen until present:
+
+[source,json]
+{
+  "query": {
+    "range": {
+      "event.ingested": {
+        "gt": ":last_value",
+        "lt": ":present"
+      }
+    }
+  },
+  "sort": [
+    {
+      "event.ingested": {
+        "order": "asc",
+        "format": "strict_date_optional_time_nanos",
+        "numeric_type": "date_nanos"
+      }
+    }
+  ]
+}
+
+[start=4]
+. configure the Elasticsearch input to query the indices with the query defined above, every minute, and track the `event.ingested` field:
+
+[source, ruby]
+    input {
+      elasticsearch {
+        id => "tail_test_index"
+        hosts => [ 'https://..']
+        api_key => '....'
+        index => 'test-*'
+        query => '{ "query": { "range": { "event.ingested": { "gt": ":last_value", "lt": ":present"}}}, "sort": [ { "event.ingested": {"order": "asc", "format": "strict_date_optional_time_nanos", "numeric_type" : "date_nanos" } } ] }'
+        tracking_field => "[event][ingested]"
+        slices => 5 # optional use of slices to speed data processing; should be equal to or less than the number of primary shards
+        schedule => '* * * * *' # every minute
+        schedule_overlap => false # don't accumulate jobs if one takes longer than 1 minute
+      }
+    }
+
+With this setup, as new documents are indexed an `test-*` index, the next scheduled run will:
+
+. select all new documents since the last observed value of the tracking field;
+. use {ref}/point-in-time-api.html#point-in-time-api[Point in time (PIT)] + {ref}/paginate-search-results.html#search-after[Search after] to paginate through all the data;
+. update the value of the field at the end of the pagination.
+
 [id="plugins-{type}s-{plugin}-options"]
 ==== Elasticsearch Input configuration options
 
-This plugin supports the following configuration options plus the <> and the <> described later.
+This plugin supports these configuration options plus the <> described later.
+
+NOTE: As of version `5.0.0` of this plugin, a number of previously deprecated settings related to SSL have been removed.
+Please check out <> for details.
 [cols="<,<,<",options="header",]
 |=======================================================================
@@ -107,18 +237,21 @@ This plugin supports the following configuration options plus the <>
 |<>|No
 | <> |<>|No
 | <> | <>|No
+| <> |<>|No
 | <> |<>|No
 | <> |<>|No
 | <> |<>|No
 | <> |<>|No
 | <> |<>|No
 | <> |<>|No
+| <> |<>|No
 | <> |<>|No
 | <> |<>|No
 | <> |<>|No
 | <> |<>, one of `["hits","aggregations"]`|No
 | <> | <>|No
 | <> |<>|No
+| <> |<>|No
 | <> |<>|No
 | <> |<>, one of `["auto", "search_after", "scroll"]`|No
 | <> |<>|No
@@ -138,6 +271,8 @@ This plugin supports the following configuration options plus the <>
 |<>, one of `["full", "none"]`|No
 | <> | <>|No
 | <> | {logstash-ref}/field-references-deepdive.html[field reference] | No
+| <> |<>|No
+| <> |<>|No
 | <> | <>|No
 | <> |<>|No
 |=======================================================================
@@ -200,8 +335,18 @@ For more info, check out the
 The maximum amount of time, in seconds, to wait while establishing a connection
 to Elasticsearch. Connect timeouts tend to occur when Elasticsearch or an
 intermediate proxy is overloaded with requests and has exhausted its connection pool.
 
+[id="plugins-{type}s-{plugin}-custom_headers"]
+===== `custom_headers`
+
+ * Value type is <>
+ * Default value is empty
+
+Pass a set of key-value pairs as the headers sent in each request to an Elasticsearch node.
+The headers will be used for any kind of request.
+These custom headers will override any headers previously set by the plugin, such as the User Agent or Authorization headers.
+
 [id="plugins-{type}s-{plugin}-docinfo"]
-===== `docinfo` 
+===== `docinfo`
 
 * Value type is <>
 * Default value is `false`
 
@@ -252,7 +397,7 @@ Example
 
 [id="plugins-{type}s-{plugin}-docinfo_fields"]
-===== `docinfo_fields` 
+===== `docinfo_fields`
 
 * Value type is <>
 * Default value is `["_index", "_type", "_id"]`
 
option lists the metadata fields to save in the current event. See
 more information.
 
 [id="plugins-{type}s-{plugin}-docinfo_target"]
-===== `docinfo_target` 
+===== `docinfo_target`
 
 * Value type is <>
 * Default value depends on whether <> is enabled:
 
this option names the field under which to store the metadata fields as subfield
 
Controls this plugin's compatibility with the {ecs-ref}[Elastic Common Schema (ECS)].
 
 [id="plugins-{type}s-{plugin}-hosts"]
-===== `hosts` 
+===== `hosts`
 
 * Value type is <>
 * There is no default value for this setting.
 
can be either IP, HOST, IP:port, or HOST:port. The port defaults to 9200.
 
 [id="plugins-{type}s-{plugin}-index"]
-===== `index` 
+===== `index`
 
 * Value type is <>
 * Default value is `"logstash-*"`
 
-The index or alias to search. 
+The index or alias to search.
 Check out {ref}/api-conventions.html#api-multi-index[Multi Indices documentation] in the Elasticsearch
 documentation for info on referencing multiple indices.
 
+[id="plugins-{type}s-{plugin}-last_run_metadata_path"]
+===== `last_run_metadata_path`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The path to store the last observed value of the tracking field, when used.
+By default this file is stored as `<path.data>/plugins/inputs/elasticsearch/<pipeline_id>/last_run_value`.
+
+This setting should point to a file, not a directory, and Logstash must have read+write access to this file.
+
 [id="plugins-{type}s-{plugin}-password"]
-===== `password` 
+===== `password`
 
 * Value type is <>
 * There is no default value for this setting.
@@ -328,7 +484,7 @@ An empty string is treated as if proxy was not set, this is useful when using
 environment variables e.g. `proxy => '${LS_PROXY:}'`.
 
 [id="plugins-{type}s-{plugin}-query"]
-===== `query` 
+===== `query`
 
 * Value type is <>
 * Default value is `'{ "sort": [ "_doc" ] }'`
 
@@ -376,7 +532,7 @@ The default is 0 (no retry). This value should be equal to or greater than zero.
 
 NOTE: Partial failures - such as errors in a subset of all slices - can result in the entire query being retried, which can lead to duplication of data. Avoiding this would require Logstash to store the entire result set of a query in memory which is often not possible.
 
 [id="plugins-{type}s-{plugin}-schedule"]
-===== `schedule` 
+===== `schedule`
 
 * Value type is <>
 * There is no default value for this setting.
 
@@ -387,8 +543,21 @@ for example: "* * * * *" (execute query every minute, on the minute)
 There is no schedule by default. If no schedule is given, then the statement is run
 exactly once.
 
+[id="plugins-{type}s-{plugin}-schedule_overlap"]
+===== `schedule_overlap`
+
+ * Value type is <>
+ * Default value is `true`
+
+Whether to allow queuing of a scheduled run if a run is occurring.
+While this is ideal for ensuring that a new run happens immediately after the previous one finishes when there
+is a lot of work to do, the queue is unbounded and may lead to an out-of-memory error over long periods of time
+if it grows continuously.
+
+When in doubt, set `schedule_overlap` to `false` (it may become the default value in the future).
+
 [id="plugins-{type}s-{plugin}-scroll"]
-===== `scroll` 
+===== `scroll`
 
 * Value type is <>
 * Default value is `"1m"`
 
@@ -411,7 +580,7 @@ The query requires at least one `sort` field, as described in the <>
 
 * Value type is <>
 * Default value is `1000`
 
@@ -479,6 +648,8 @@ Enable SSL/TLS secured communication to Elasticsearch cluster. Leaving this
 unspecified will use whatever scheme is specified in the URLs listed in <> or extracted from
 the <>. If no explicit protocol is specified plain HTTP will be used.
 
+When not explicitly set, SSL will be automatically enabled if any of the specified hosts use HTTPS.
+
 [id="plugins-{type}s-{plugin}-ssl_key"]
 ===== `ssl_key`
 * Value type is <>
@@ -597,9 +768,31 @@ When the `target` is set to a field reference, the `_source` of the hit is place
 This option can be useful to avoid populating unknown fields when a downstream schema such as ECS is enforced.
 It is also possible to target an entry in the event's metadata, which will be available during event processing but not exported to your outputs (e.g., `target \=> "[@metadata][_source]"`).
 
+[id="plugins-{type}s-{plugin}-tracking_field"]
+===== `tracking_field`
+
+* Value type is <>
+* There is no default value for this setting.
+
+Which field from the last event of a previous run will be used as a cursor value for the following run.
+The value of this field is injected into each query if the query uses the placeholder `:last_value`.
+For the first query after a pipeline is started, the value used is either read from the <> file,
+or taken from the <> setting.
+
+Note: The tracking value is updated after each page is read and at the end of each Point in Time. In case of a crash, the last saved value will be used, so some duplication of data can occur. For this reason, the use of unique document IDs for each event is recommended in the downstream destination.
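+
+As an illustrative sketch only (the index name and the hypothetical `[event][ingested]` date_nanos field are placeholders, not defaults), a pipeline that uses this option might look like:
+
+[source, ruby]
+    input {
+      elasticsearch {
+        hosts => [ 'https://..']
+        index => 'test-*'
+        query => '{ "query": { "range": { "event.ingested": { "gt": ":last_value" }}}, "sort": [ { "event.ingested": { "order": "asc", "format": "strict_date_optional_time_nanos", "numeric_type": "date_nanos" } } ] }'
+        tracking_field => "[event][ingested]"
+        schedule => '* * * * *' # run every minute so the cursor advances regularly
+      }
+    }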
+ +[id="plugins-{type}s-{plugin}-tracking_field_seed"] +===== `tracking_field_seed` + +* Value type is <> +* Default value is `"1970-01-01T00:00:00.000000000Z"` + +The starting value for the <> if there is no <> already. +This field defaults to the nanosecond precision ISO8601 representation of `epoch`, or "1970-01-01T00:00:00.000000000Z", given nano-second precision timestamps are the +most reliable data format to use for this feature. [id="plugins-{type}s-{plugin}-user"] -===== `user` +===== `user` * Value type is <> * There is no default value for this setting. @@ -609,56 +802,21 @@ option when authenticating to the Elasticsearch server. If set to an empty string authentication will be disabled. -[id="plugins-{type}s-{plugin}-deprecated-options"] -==== Elasticsearch Input deprecated configuration options +[id="plugins-{type}s-{plugin}-obsolete-options"] +==== Elasticsearch Input Obsolete Configuration Options -This plugin supports the following deprecated configurations. +WARNING: As of version `5.0.0` of this plugin, some configuration options have been replaced. +The plugin will fail to start if it contains any of these obsolete options. -WARNING: Deprecated options are subject to removal in future releases. -[cols="<,<,<",options="header",] +[cols="<,<",options="header",] |======================================================================= -|Setting|Input type|Replaced by -| <> |a valid filesystem path|<> -| <> |<>|<> -| <> |<>|<> +|Setting|Replaced by +| ca_file | <> +| ssl | <> +| ssl_certificate_verification | <> |======================================================================= -[id="plugins-{type}s-{plugin}-ca_file"] -===== `ca_file` -deprecated[4.17.0, Replaced by <>] - -* Value type is <> -* There is no default value for this setting. - -SSL Certificate Authority file in PEM encoded format, must also include any chain certificates as necessary. - -[id="plugins-{type}s-{plugin}-ssl"] -===== `ssl` -deprecated[4.17.0, Replaced by <>] - -* Value type is <> -* Default value is `false` - -If enabled, SSL will be used when communicating with the Elasticsearch -server (i.e. HTTPS will be used instead of plain HTTP). - - -[id="plugins-{type}s-{plugin}-ssl_certificate_verification"] -===== `ssl_certificate_verification` -deprecated[4.17.0, Replaced by <>] - -* Value type is <> -* Default value is `true` - -Option to validate the server's certificate. Disabling this severely compromises security. -When certificate validation is disabled, this plugin implicitly trusts the machine -resolved at the given address without validating its proof-of-identity. -In this scenario, the plugin can transmit credentials to or process data from an untrustworthy -man-in-the-middle or other compromised infrastructure. -More information on the importance of certificate verification: -**https://www.cs.utexas.edu/~shmat/shmat_ccs12.pdf**. 
- [id="plugins-{type}s-{plugin}-common-options"] include::{include_path}/{type}.asciidoc[] From cce732a6f283c04e5516cbca98f212f37410d45a Mon Sep 17 00:00:00 2001 From: Karen Metts Date: Thu, 3 Apr 2025 15:10:48 -0400 Subject: [PATCH 2/4] Diff validate view suggestions --- docs/plugins/inputs/elasticsearch.asciidoc | 66 +++++++++++++--------- 1 file changed, 39 insertions(+), 27 deletions(-) diff --git a/docs/plugins/inputs/elasticsearch.asciidoc b/docs/plugins/inputs/elasticsearch.asciidoc index 4ef1e2d6..9f6b705f 100644 --- a/docs/plugins/inputs/elasticsearch.asciidoc +++ b/docs/plugins/inputs/elasticsearch.asciidoc @@ -109,33 +109,45 @@ Common causes are: NOTE: experimental:[] `tracking_field` and related settings are experimental and subject to change in the future -It is sometimes desirable to track the value of a particular field between two jobs: -* avoid re-processing the entire result set of a long query after an unplanned restart -* only grab new data from an index instead of processing the entire set on each job +.Technical Preview: Tracking a field's value +**** +The feature that allows tracking a field's value across runs is in _Technical Preview_. +Configuration options and implementation details are subject to change in minor releases without being preceded by deprecation warnings. +**** -For this, the Elasticsearch input plugin provides the <> and <> options. -When <> is set, the plugin will record the value of that field for the last document retrieved in a run into -a file (location defaults to <>). +Some uses cases require tracking the value of a particular field between two jobs. +Examples include: -The user can then inject this value in the query using the placeholder `:last_value`. The value will be injected into the query -before execution, and the updated after the query completes if new data was found. +* avoiding the need to re-process the entire result set of a long query after an unplanned restart +* grabbing only new data from an index instead of processing the entire set on each job. + +The Elasticsearch input plugin provides the <> and <> options. +When <> is set, the plugin records the value of that field for the last document retrieved in a run into +a file. +(The file location defaults to <>). + +You can then inject this value in the query using the placeholder `:last_value`. +The value will be injected into the query before execution, and then updated after the query completes if new data was found. This feature works best when: -. the query sorts by the tracking field; -. the timestamp field is added by {es}; -. the field type has enough resolution so that two events are unlikely to have the same value. +* the query sorts by the tracking field, +* the timestamp field is added by {es}, and +* the field type has enough resolution so that two events are unlikely to have the same value. -It is recommended to use a tracking field whose type is https://www.elastic.co/guide/en/elasticsearch/reference/current/date_nanos.html[date nanoseconds]. -If the tracking field is of this data type, an extra placeholder called `:present` can be used to inject the nano-second based value of "now-30s". +Consider using a tracking field whose type is https://www.elastic.co/guide/en/elasticsearch/reference/current/date_nanos.html[date nanoseconds]. +If the tracking field is of this data type, you can use an extra placeholder called `:present` to inject the nano-second based value of "now-30s". 
This placeholder is useful as the right-hand side of a range filter, allowing the collection of -new data but leaving partially-searcheable bulk request data to the next scheduled job. +new data but leaving partially-searchable bulk request data to the next scheduled job. -Below is a series of steps to help set up the "tailing" of data being written to a set of indices, using a date nanosecond field -added by an Elasticsearch ingest pipeline, and the `tracking_field` capability of this plugin. +[id="plugins-{type}s-{plugin}-tracking-sample"] +===== Sample configuration: Track field value across runs -. create ingest pipeline that adds Elasticsearch's `_ingest.timestamp` field to the documents as `event.ingested`: +This section contains a series of steps to help you set up the "tailing" of data being written to a set of indices, using a date nanosecond field +added by an Elasticsearch ingest pipeline and the `tracking_field` capability of this plugin. +. Create ingest pipeline that adds Elasticsearch's `_ingest.timestamp` field to the documents as `event.ingested`: ++ [source, json] PUT _ingest/pipeline/my-pipeline { @@ -150,8 +162,7 @@ added by an Elasticsearch ingest pipeline, and the `tracking_field` capability o } [start=2] -. create an index mapping where the tracking field is of date nanosecond type and invokes the defined pipeline: - +. Create an index mapping where the tracking field is of date nanosecond type and invokes the defined pipeline:+ [source, json] PUT /_template/my_template { @@ -174,8 +185,8 @@ added by an Elasticsearch ingest pipeline, and the `tracking_field` capability o } [start=3] -. define a query that looks at all data of the indices, sorted by the tracking field, and with a range filter since the last value seen until present: - +. Define a query that looks at all data of the indices, sorted by the tracking field, and with a range filter since the last value seen until present: ++ [source,json] { "query": { @@ -198,8 +209,8 @@ added by an Elasticsearch ingest pipeline, and the `tracking_field` capability o } [start=4] -. configure the Elasticsearch input to query the indices with the query defined above, every minute, and track the `event.ingested` field: - +. Configure the Elasticsearch input to query the indices with the query defined above, every minute, and track the `event.ingested` field: ++ [source, ruby] input { elasticsearch { @@ -215,11 +226,12 @@ added by an Elasticsearch ingest pipeline, and the `tracking_field` capability o } } -With this setup, as new documents are indexed an `test-*` index, the next scheduled run will: +With this sample setup, new documents are indexed into a `test-*` index. +The next scheduled run: -. select all new documents since the last observed value of the tracking field; -. use {ref}/point-in-time-api.html#point-in-time-api[Point in time (PIT)] + {ref}/paginate-search-results.html#search-after[Search after] to paginate through all the data; -. update the value of the field at the end of the pagination. +* selects all new documents since the last observed value of the tracking field, +* uses {ref}/point-in-time-api.html#point-in-time-api[Point in time (PIT)] + {ref}/paginate-search-results.html#search-after[Search after] to paginate through all the data, and +* updates the value of the field at the end of the pagination. 
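+
+For illustration, suppose the last run stored `2025-01-01T10:00:00.123456789Z` as the last observed value and the next run starts at 10:05:00 UTC (both timestamps are hypothetical).
+The `:last_value` and `:present` (now-30s) placeholders would then expand so that the query sent to {es} looks roughly like:
+
+[source,json]
+{
+  "query": {
+    "range": {
+      "event.ingested": {
+        "gt": "2025-01-01T10:00:00.123456789Z",
+        "lt": "2025-01-01T10:04:30.000000000Z"
+      }
+    }
+  },
+  "sort": [ { "event.ingested": { "order": "asc", "format": "strict_date_optional_time_nanos", "numeric_type": "date_nanos" } } ]
+}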
[id="plugins-{type}s-{plugin}-options"] ==== Elasticsearch Input configuration options From 8fabfff02d604a5392c04dad40aab13a90864f93 Mon Sep 17 00:00:00 2001 From: Karen Metts Date: Fri, 4 Apr 2025 11:51:43 -0400 Subject: [PATCH 3/4] Validate post-review changes --- docs/plugins/inputs/elasticsearch.asciidoc | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/docs/plugins/inputs/elasticsearch.asciidoc b/docs/plugins/inputs/elasticsearch.asciidoc index 9f6b705f..7476c377 100644 --- a/docs/plugins/inputs/elasticsearch.asciidoc +++ b/docs/plugins/inputs/elasticsearch.asciidoc @@ -107,8 +107,6 @@ Common causes are: [id="plugins-{type}s-{plugin}-cursor"] ==== Tracking a field's value across runs -NOTE: experimental:[] `tracking_field` and related settings are experimental and subject to change in the future - .Technical Preview: Tracking a field's value **** The feature that allows tracking a field's value across runs is in _Technical Preview_. @@ -124,7 +122,7 @@ Examples include: The Elasticsearch input plugin provides the <> and <> options. When <> is set, the plugin records the value of that field for the last document retrieved in a run into a file. -(The file location defaults to <>). +(The file location defaults to <>.) You can then inject this value in the query using the placeholder `:last_value`. The value will be injected into the query before execution, and then updated after the query completes if new data was found. @@ -140,11 +138,10 @@ If the tracking field is of this data type, you can use an extra placeholder cal This placeholder is useful as the right-hand side of a range filter, allowing the collection of new data but leaving partially-searchable bulk request data to the next scheduled job. -[id="plugins-{type}s-{plugin}-tracking-sample"] +id="plugins-{type}s-{plugin}-tracking-sample"] ===== Sample configuration: Track field value across runs -This section contains a series of steps to help you set up the "tailing" of data being written to a set of indices, using a date nanosecond field -added by an Elasticsearch ingest pipeline and the `tracking_field` capability of this plugin. +This section contains a series of steps to help you set up the "tailing" of data being written to a set of indices, using a date nanosecond field added by an Elasticsearch ingest pipeline and the `tracking_field` capability of this plugin. . Create ingest pipeline that adds Elasticsearch's `_ingest.timestamp` field to the documents as `event.ingested`: + @@ -162,7 +159,8 @@ added by an Elasticsearch ingest pipeline and the `tracking_field` capability of } [start=2] -. Create an index mapping where the tracking field is of date nanosecond type and invokes the defined pipeline:+ +. Create an index mapping where the tracking field is of date nanosecond type and invokes the defined pipeline: ++ [source, json] PUT /_template/my_template { @@ -832,4 +830,4 @@ The plugin will fail to start if it contains any of these obsolete options. 
[id="plugins-{type}s-{plugin}-common-options"] include::{include_path}/{type}.asciidoc[] -:no_codec!: +:no_codec!: \ No newline at end of file From 564c056c58eb4dbef5a26f7e70f55bb40640c11c Mon Sep 17 00:00:00 2001 From: Karen Metts Date: Fri, 4 Apr 2025 13:46:42 -0400 Subject: [PATCH 4/4] Validate formatting correcting --- docs/plugins/inputs/elasticsearch.asciidoc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/plugins/inputs/elasticsearch.asciidoc b/docs/plugins/inputs/elasticsearch.asciidoc index 7476c377..ddf9e3a6 100644 --- a/docs/plugins/inputs/elasticsearch.asciidoc +++ b/docs/plugins/inputs/elasticsearch.asciidoc @@ -138,7 +138,7 @@ If the tracking field is of this data type, you can use an extra placeholder cal This placeholder is useful as the right-hand side of a range filter, allowing the collection of new data but leaving partially-searchable bulk request data to the next scheduled job. -id="plugins-{type}s-{plugin}-tracking-sample"] +[id="plugins-{type}s-{plugin}-tracking-sample"] ===== Sample configuration: Track field value across runs This section contains a series of steps to help you set up the "tailing" of data being written to a set of indices, using a date nanosecond field added by an Elasticsearch ingest pipeline and the `tracking_field` capability of this plugin. @@ -830,4 +830,4 @@ The plugin will fail to start if it contains any of these obsolete options. [id="plugins-{type}s-{plugin}-common-options"] include::{include_path}/{type}.asciidoc[] -:no_codec!: \ No newline at end of file +:no_codec!: