diff --git a/cid-redirects.json b/cid-redirects.json
index 16693f997b..063f82b3af 100644
--- a/cid-redirects.json
+++ b/cid-redirects.json
@@ -330,6 +330,7 @@
"/03Send-Data/Sources/04Reference-Information-for-Sources/Collecting-Multiline-Logs": "/docs/send-data/reference-information/collect-multiline-logs",
"/03Send-Data/Sources/04Reference-Information-for-Sources/Source-Log-Encoding": "/docs/send-data/reference-information/source-log-encoding",
"/03Send-Data/Sources/04Reference-Information-for-Sources/Timestamps,-Time-Zones,-Time-Ranges,-and-Date-Formats": "/docs/send-data/reference-information/time-reference",
+ "/03Send-Data/Sources/04Reference-Information-for-Sources/Timestamps%252C-Time-Zones%252C-Time-Ranges%252C-and-Date-Formats": "/docs/send-data/reference-information/time-reference",
"/03Send-Data/Sources/04Reference-Information-for-Sources/Using-Wildcards-in-Paths": "/docs/send-data/reference-information/use-wildcards-paths",
"/03Send-Data/Sumo_Logic_Distribution_for_OpenTelemetry": "/docs/send-data/opentelemetry-collector",
"/03Send-Data/Sumo_Logic_OpenTelemetry_Distribution": "/docs/send-data/opentelemetry-collector",
@@ -1019,6 +1020,7 @@
"/07Sumo-Logic-Apps/14Hosts_and_Operating_Systems/Host_Metrics/Host-Metrics-App-Dashboards": "/docs/integrations/hosts-operating-systems/host-metrics",
"/07Sumo-Logic-Apps/14Hosts_and_Operating_Systems/Linux_Performance": "/docs/integrations/hosts-operating-systems/linux-performance",
"/07Sumo-Logic-Apps/14Hosts_and_Operating_Systems/Linux": "/docs/integrations/hosts-operating-systems/linux",
+ "/07Sumo_Logic_Apps/14Hosts_and_Operating_Systems/Linux": "/docs/integrations/hosts-operating-systems/linux",
"/07Sumo-Logic-Apps/14Hosts_and_Operating_Systems/Linux/Collect-Logs-for-Linux": "/docs/integrations/hosts-operating-systems/linux",
"/07Sumo-Logic-Apps/14Hosts_and_Operating_Systems/Linux/Linux-App-Dashboards": "/docs/integrations/hosts-operating-systems/linux",
"/07Sumo-Logic-Apps/14Hosts_and_Operating_Systems/Linux/Suggested_Searches_for_Linux_OS": "/docs/integrations/hosts-operating-systems/linux",
@@ -1054,6 +1056,7 @@
"/07Sumo-Logic-Apps/18SAAS_and_Cloud_Apps/Istio/Install_the_Istio_App_and_view_the_Dashboards": "/docs/integrations/saas-cloud/istio",
"/docs/integrations/saas-cloud/microsoft-exchange-tracelogs": "/docs/integrations/saas-cloud/microsoft-exchange-trace-logs",
"/07Sumo-Logic-Apps/18SAAS_and_Cloud_Apps/Opsgenie": "/docs/integrations/saas-cloud/opsgenie",
+ "/docs/platform-services/automation-service/app-central/integrations/atlassian-oppsgenie": "/docs/integrations/saas-cloud/opsgenie",
"/07Sumo-Logic-Apps/18SAAS_and_Cloud_Apps/Opsgenie/Collect_Logs_for_Opsgenie": "/docs/integrations/saas-cloud/opsgenie",
"/07Sumo-Logic-Apps/18SAAS_and_Cloud_Apps/Opsgenie/Install_the_Opsgenie_App_and_view_the_Dashboards": "/docs/integrations/saas-cloud/opsgenie",
"/07Sumo-Logic-Apps/18SAAS_and_Cloud_Apps/PagerDuty_V1": "/docs/integrations/saas-cloud/pagerduty-v2",
@@ -1229,6 +1232,7 @@
"/07Sumo-Logic-Apps/24Web_Servers/Varnish/Collect_Varnish_Logs/Collect_Varnish_Logs_and_Metrics_for_Kubernetes_environments": "/docs/integrations/web-servers/varnish",
"/07Sumo-Logic-Apps/24Web_Servers/Varnish/Collect_Varnish_Logs/Collect_Varnish_Logs_and_Metrics_for_Non-Kubernetes_environments": "/docs/integrations/web-servers/varnish",
"/07Sumo-Logic-Apps/24Web_Servers/Varnish/Varnish_Alerts": "/docs/integrations/web-servers/varnish",
+ "/07Sumo-Logic-Apps/08App_Development/Varnish/Varnish_Alerts": "/docs/integrations/web-servers/varnish",
"/07Sumo-Logic-Apps/26Apps_for_Sumo": "/docs/integrations/sumo-apps",
"/07Sumo-Logic-Apps/26Apps_for_Sumo/Audit_App": "/docs/integrations/sumo-apps/audit",
"/07Sumo-Logic-Apps/26Apps_for_Sumo/Audit_App/Install-the-Audit-App-and-View-the-Dashboards": "/docs/integrations/sumo-apps/audit",
@@ -2844,6 +2848,7 @@
"/Manage/Collection/Collector-Upgrade-Best-Practices": "/docs/send-data/collection/upgrade-collectors",
"/Manage/Collection/Download_Customized_Collector_Configuration_File": "/docs/send-data/collection",
"/Manage/Collection/Pause-and-Resume-a-Source": "/docs/send-data/collection/pause-resume-source",
+ "/docs/send-data/collection/pause-and-resume-source": "/docs/send-data/collection/pause-resume-source",
"/Manage/Collection/Processing-Rules": "/docs/send-data/collection/processing-rules",
"/Manage/Collection/Processing-Rules/Create-a-Processing-Rule": "/docs/send-data/collection/processing-rules/create-processing-rule",
"/Manage/Collection/Processing-Rules/Data-Forwarding-Rules": "/docs/send-data/collection/processing-rules/data-forwarding-rules",
@@ -3202,9 +3207,11 @@
"/Observability_Solution/AWS_Observability_Solution/01_Deploy_and_Use_AWS_Observability/13Update_AWS_Observability_Stack": "/docs/observability/aws/deploy-use-aws-observability/update-aws-observability-stack",
"/Observability_Solution/AWS_Observability_Solution/01_Deploy_and_Use_AWS_Observability/01_About_AWS_Observability/AWS_Observability_Resources": "/docs/observability/aws/deploy-use-aws-observability/resources",
"/Observability_Solution/AWS_Observability_Solution/01_Deploy_and_Use_AWS_Observability/16AWS_Observability_-_Change_Log": "/docs/observability/aws/deploy-use-aws-observability/changelog",
+ "/Observability_Solution/AWS_Observability_Solution/01_Deploy_and_Use_AWS_Observability/AWS_Observability_-_Change_Log": "/docs/observability/aws/deploy-use-aws-observability/changelog",
"/Observability_Solution/AWS_Observability_Solution/03_Other_Configurations_and_Tools": "/docs/observability/aws/other-configurations-tools",
"/Observability_Solution/AWS_Observability_Solution/03_Other_Configurations_and_Tools/Add_a_New_AWS_Service_to_the_AWS_Observability_Explore_Hierarchy": "/docs/observability/aws/other-configurations-tools/add-new-aws-service",
"/Observability_Solution/AWS_Observability_Solution/03_Other_Configurations_and_Tools/Add_Fields_to_Existing_Host_Metrics_Sources": "/docs/observability/aws/other-configurations-tools/add-fields-to-existing-host-metrics-sources",
+ "/Observability_Solution/AWS_Observability_Solution/Add_Fields_to_Existing_Host_Metrics_Sources": "/docs/observability/aws/other-configurations-tools/add-fields-to-existing-host-metrics-sources",
"/Observability_Solution/AWS_Observability_Solution/03_Other_Configurations_and_Tools/Integrate_Control_Tower_Accounts_with_AWS_Observability": "/docs/observability/aws/other-configurations-tools/integrate-control-tower-accounts",
"/Observability_Solution/AWS_Observability_Solution/AWS_Observability_Apps": "/docs/observability/aws/integrations",
"/Observability_Solution/AWS_Observability_Solution/AWS_Observability_Apps/AWS_Classic_Load_Balancer": "/docs/observability/aws/integrations/aws-classic-load-balancer",
@@ -3472,6 +3479,7 @@
"/07Sumo-Logic-Apps/10Containers_and_Orchestration/Kubernetes/Install_the_Kubernetes_App_and_view_the_Dashboards": "/docs/integrations/containers-orchestration/kubernetes",
"/07Sumo-Logic-Apps/10Containers_and_Orchestration/Kubernetes/Install_the_Kubernetes_App,_Alerts,_and_view_the_Dashboards": "/docs/integrations/containers-orchestration/kubernetes",
"/07Sumo-Logic-Apps/10Containers_and_Orchestration/Kubernetes/Kubernetes_Alerts": "/docs/integrations/containers-orchestration/kubernetes",
+ "/07Sumo_Logic_Apps/10Containers_and_Orchestration/Kubernetes": "/docs/integrations/containers-orchestration/kubernetes",
"/07Sumo-Logic-Apps/12Databases": "/docs/integrations/databases",
"/07Sumo-Logic-Apps/12Databases/Couchbase/Install_the_Couchbase_Monitors,_App,_and_view_the_Dashboards": "/docs/integrations/databases/couchbase",
"/07Sumo_Logic_Apps/12Databases/MongoDB": "/docs/integrations/databases/mongodb",
@@ -3703,6 +3711,7 @@
"/Send-Data/Applications-and-Other-Data-Sources/Oracle/00Collect_Logs_for_Oracle": "/docs/send-data/collect-from-other-data-sources/collect-logs-oracle-cloud-infrastructure",
"/Send-Data/Applications-and-Other-Data-Sources/Threat-Intel-Quick-Analysis": "/docs/integrations/security-threat-detection/threat-intel-quick-analysis",
"/Send-Data/Applications-and-Other-Data-Sources/Threat-Intel-Quick-Analysis/Threat-Intel-FAQ": "/docs/integrations/security-threat-detection/threat-intel-quick-analysis",
+ "/Send-Data/Applications-and-Other-Data-Types/PCI-Compliance-for-Windows/Collecting-Logs-for-PCI-Compliance-for-Windows-App": "/docs/integrations/microsoft-azure/windows-json-pci-compliance",
"/Send-Data/Collect-from-Other-Data-Sources/Azure_Blob_Storage": "/docs/send-data/collect-from-other-data-sources/azure-blob-storage/block-blob/collect-logs",
"/Send-Data/Collect-from-Other-Data-Sources/Azure_Blob_Storage/Collect_Logs_from_Azure_Blob_Storage": "/docs/send-data/collect-from-other-data-sources/azure-blob-storage/block-blob/collect-logs",
"/Send-Data/Data-Types-and-Applications/Docker/01-Collect-Events-and-Statistics-for-the-Docker-App": "/docs/send-data/installed-collectors/sources/docker-sources",
diff --git a/docs/send-data/hosted-collectors/amazon-aws/aws-kinesis-firehose-metrics-source.md b/docs/send-data/hosted-collectors/amazon-aws/aws-kinesis-firehose-metrics-source.md
index d97f7d1986..7a38200396 100644
--- a/docs/send-data/hosted-collectors/amazon-aws/aws-kinesis-firehose-metrics-source.md
+++ b/docs/send-data/hosted-collectors/amazon-aws/aws-kinesis-firehose-metrics-source.md
@@ -35,13 +35,13 @@ The key difference between the sources is how they get metrics. The CloudWatch s
The benefits of a streaming source over a polling source include:
-* No API throttling—The Kinesis Firehose for Metrics source doesn’t consume your AWS quota by making calls to the AWS CloudWatch APIs. This offers both efficiency and cost benefits.
-* Automatic retry mechanism—Kinesis Firehose has an automatic retry mechanism for delivering metrics to the Kinesis Firehose for Metrics source. In the event of a glitch, the metrics are re-sent after the service is restored. If that fails, Firehose stores all failed messages in a customer-owned S3 bucket for later recovery.
-* Latency is the same for all metrics, whether new, old, sparse, or continuous. This is a benefit over the AWS CloudWatch Metrics source, which doesn’t reliably ingest old or sparsely published metrics.
-* High resolution—The Kinesis Firehose streams all metrics at a 1-minute resolution. The AWS CloudWatch Metrics source supports scans as low as 1 minute, but that resolution can result in AWS account throttling and higher AWS bills
+* **No API throttling**. The Kinesis Firehose for Metrics source doesn’t consume your AWS quota by making calls to the AWS CloudWatch APIs. This offers both efficiency and cost benefits.
+* **Automatic retry mechanism**. Kinesis Firehose has an automatic retry mechanism for delivering metrics to the Kinesis Firehose for Metrics source. In the event of a glitch, the metrics are re-sent after the service is restored. If that fails, Firehose stores all failed messages in a customer-owned S3 bucket for later recovery.
+* **Consistent speed**. Latency is the same for all metrics, whether new, old, sparse, or continuous. This is a benefit over the AWS CloudWatch Metrics source, which doesn’t reliably ingest old or sparsely published metrics.
+* **High resolution**. The Kinesis Firehose streams all metrics at a 1-minute resolution. The AWS CloudWatch Metrics source supports scans as low as 1 minute, but that resolution can result in AWS account throttling and higher AWS bills.
:::note
-The AWS CloudWatch Metrics source uses AWS’s [GetMetricStatistics](https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_GetMetricStatistics.html) API and as a result, supports the `Unit` parameter. (When a request includes the `Unit` parameter, only metrics with the unit specified, for example, bytes, Microseconds, and so on, are reported. The Kinesis Firehose for Metrics does not currently support the `Unit` parameter.
+The AWS CloudWatch Metrics source uses AWS’s [GetMetricStatistics](https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_GetMetricStatistics.html) API and as a result, supports the `Unit` parameter. When a request includes the `Unit` parameter, only metrics with the unit specified, for example, bytes, Microseconds, and so on, are reported. The Kinesis Firehose for Metrics does not currently support the `Unit` parameter.
:::
## Step 1: Set up the source
@@ -51,12 +51,9 @@ In this step, you create the AWS Kinesis Firehose for Metrics source.
1. In the main Sumo Logic menu, select **Manage Data > Collection > Collection**.
1. Click **Add Source** next to a Hosted Collector.
1. Select **AWS Kinesis Firehose** for Metrics.
-
- 
-
1. Enter a **Name** for the source.
1. (Optional) Enter a **Description**.
-1. For **Source Category**, enter any string to tag the output collected from this Source. Category metadata is stored in a searchable field called `_sourceCategory`.
+1. For **Source Category**, enter any string to tag the output collected from this Source. Category metadata is stored in a searchable field called `_sourceCategory`.
1. For **AWS Access** of a Kinesis Metric source, the role requires `tag:GetResources` permission. The Kinesis Log source does not require permissions.
1. Click **Save**.
@@ -65,36 +62,20 @@ In this step, you create the AWS Kinesis Firehose for Metrics source.
In this step, you set up the AWS Metric Streams service to stream metrics to Kinesis Data Firehose using a [CloudFormation template](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cfn-whatis-concepts.html#w2ab1b5c15b7):
1. Go to **Services > CloudFormation** in the AWS console.
-1. On the **CloudFormation > Stack** page, click **Create stack**.
-
- 
-
+1. On the **CloudFormation > Stack** page, click **Create stack**.

1. On the **Create stack** page:
-
1. Click **Template is ready**.
1. Click **Amazon S3 URL** and paste this URL into the URL field: https://sumologic-appdev-aws-sam-apps.s3.amazonaws.com/KinesisFirehoseCWMetrics.template.yaml.
- 1. Click **Next**.
-
- 
-
+ 1. Click **Next**.

1. On the **Specify stack details** page:
-
* **Stack name**. Enter a name for the stack.
* **Sumo Logic Kinesis Firehose Metrics Configuration.** (Required) Enter the URL of the AWS Kinesis Firehose for Metrics source.
* **Select Namespaces to collect AWS CloudWatch Metrics**. Enter a comma-delimited list of the namespaces from which you want to collect AWS CloudWatch metrics.
- * **Failed Data AWS S3 Bucket Configuration**. Enter "Yes" to create a new bucket, or "No" if you want to use an existing bucket.
+ * **Failed Data AWS S3 Bucket Configuration**. Enter **Yes** to create a new bucket, or **No** if you want to use an existing bucket.
* **AWS S3 Bucket Name for Failed Data**. Provide the name of Amazon S3 bucket to create, or the name of an existing bucket in the current AWS Account.
- * Click **Next**.
-
- 
-
-1. Click **Create stack**.
-
- 
-
-1. The AWS console displays the resources in the newly created stack.
-
- 
+ * Click **Next**.

+1. Click **Create stack**.

+1. The AWS console displays the resources in the newly created stack.

## Filter CloudWatch metrics during ingestion
@@ -106,40 +87,20 @@ Inclusive and exclusive filters can’t be combined. You can choose namespaces t
### Include metrics by namespace
-1. Open the CloudWatch console at https://console.aws.amazon.com/cloudwatch.
+1. Open the [CloudWatch console](https://console.aws.amazon.com/cloudwatch).
1. In the navigation pane, choose **Metrics**.
-1. Under **Metrics**, select **Streams**.
-
- 
-
-1. Select the metric stream and click **Edit**.
-
- 
-
-1. Click **Selected namespaces**.
-
- 
-
-1. From the list of AWS namespaces, select the namespaces whose metrics you want to receive. In the screenshot below “S3” and “Billing” are selected.
-
- 
-
-1. Click **Save changes** at the bottom of the page.
-
- 
+1. Under **Metrics**, select **Streams**.

+1. Select the metric stream and click **Edit**.

+1. Click **Selected namespaces**.

+1. From the list of AWS namespaces, select the namespaces whose metrics you want to receive (for example, “S3” and “Billing”).

+1. Click **Save changes** at the bottom of the page.

### Exclude metrics by namespace
-1. Open the CloudWatch console at https://console.aws.amazon.com/cloudwatch.
+1. Open the [CloudWatch console](https://console.aws.amazon.com/cloudwatch).
1. In the navigation pane, choose **Metrics**.
-1. Under **Metrics**, select **Streams**.
-
- 
-
-1. Select the metric stream and click **Edit**.
-
- 
-
+1. Under **Metrics**, select **Streams**.

+1. Select the metric stream and click **Edit**.

1. Click **All metrics** and select the **Exclude metric namespaces** option.
1. From the list of AWS namespaces, select the namespaces whose metrics you do not want to receive.
1. Click **Save changes** at the bottom of the page.
diff --git a/docs/send-data/reference-information/time-reference.md b/docs/send-data/reference-information/time-reference.md
index 4348c7b76f..90bad63511 100644
--- a/docs/send-data/reference-information/time-reference.md
+++ b/docs/send-data/reference-information/time-reference.md
@@ -128,10 +128,10 @@ Our Collectors can automatically parse most timestamps without any issues. Howe
1. Do one of the following:
* If you're configuring a new Source, proceed to step 2.
- * To edit the timestamp settings for an existing Source, navigate to the [**Collection**](/docs/send-data/collection/) page. Then click **Edit** to the right of the Source name and go to step 2.
-1. Navigate to the **Advanced Options for Logs (Optional)** section.
-1. Under **Timestamp Format**, select **Specify a format** > **+ Add Timestamp Format**.
-1. In the **Format** field, enter the timestamp format the Collector should use to parse timestamps in your log.
})
+ * To edit the timestamp settings for an existing Source, navigate to the [**Collection**](/docs/send-data/collection/) page. Then click **Edit** to the right of the Source name and go to step 2.
+1. Navigate to the **Advanced Options for Logs (Optional)** section.
+1. Under **Timestamp Format**, select **Specify a format** > **+ Add Timestamp Format**.
+1. In the **Format** field, enter the timestamp format the Collector should use to parse timestamps in your log.
})
:::note
If the timestamp format is in epoch time, enter **epoch** in the **Format** field.
:::
@@ -141,7 +141,7 @@ Our Collectors can automatically parse most timestamps without any issues. Howe
* When providing multiple custom formats, specify the most common format first. The Collector will process each custom format in the order provided. Once a timestamp is located, no further timestamp checking is done.
* If no timestamps are located that match your custom formats, the Collector will still attempt to automatically locate the log's timestamp.
:::
-1. The **Timestamp locator** is a regular expression with a capture group matching the timestamp in your log messages.
 The timestamp locator must:
+1. The **Timestamp locator** is a regular expression with a capture group matching the timestamp in your log messages.
})
The timestamp locator must:
* be provided for 16-digit epoch or 19-digit epoch timestamps. Otherwise, this field is not necessary.
* be a valid Java regular expression. Otherwise, this error message will be displayed: `Unable to validate timestamp formats. The timestamp locator regex your-regex is invalid. The timestamp locator regex your-regex uses matching features which are not supported.`
* be an [RE2-compliant](https://github.com/google/re2/wiki/Syntax) regular expression, for example: `\[time=(.*?)\]`. Otherwise, this error message will be displayed: `Unable to validate timestamp formats. The timestamp locator regex your-regex uses matching features which are not supported.`
@@ -150,8 +150,8 @@ Our Collectors can automatically parse most timestamps without any issues. Howe
If you use quotes in the timestamp locator regular expression, you may see issues in the display after you save. The regular expression is not actually changed and can still be used to locate your timestamp.
:::
1. If you have more than one custom timestamp format that you want to add, click **+ Add**. The ordering of formats is significant. Each provided timestamp format is tested, in the order specified, until a matching format is found. The first matching format determines the final message timestamp. If none of the provided formats match a particular message, the Collector will attempt to automatically determine the message's timestamp.
-1. Next, we recommend testing a few log lines from your data against your specified formats and locators. Enter sample log messages to test the timestamp formats you want to extract.

-1. Click **Test** once your log lines are entered. The results display with the timestamp parsed and format matches (if any).

+1. Next, we recommend testing a few log lines from your data against your specified formats and locators. Enter sample log messages to test the timestamp formats you want to extract.
+1. Click **Test** once your log lines are entered. The results display with the timestamp parsed and format matches (if any).
})
You should see one of the following messages:
* **Format matched**. In this example, the format of `yyyy/MM/dd HH:mm:ss` was matched and highlighted in green. This was the first format provided, so it returns as `1(format: yyyy/MM/dd HH:mm:ss locator: \[time=(.*?)\])` The **Effective message time** would be `2017-01-15 02:12.000 +0000`.
* **None of the custom timestamp format was matched**. While the custom formats were not found in the log, there's still an auto detected timestamp highlighted in orange, `2017-06-01 02:12:12.259667` that we can use. **The Effective message** time is going to be `2017-06-01 02:12:12.259 +0000`.
@@ -191,7 +191,7 @@ _sourceCategory=PaloAltoNetworks
| _format as timestampformat
```
-The result would look like this:

+The result would look like this:
### Large time between message time and receipt time
@@ -257,17 +257,17 @@ Changing the **Default Timezone** setting affects how the UI displays messages
For example, the following screenshot shows the time zone set to **PST** in the UI, in the **Time** column. The logs were collected from a system that was also configured to use the **PST** time zone, which is displayed in the timestamp of the **Message** column. The timestamps in both columns match as they are set to the same time zone.
-
+
The next screenshot shows the same search result after changing the Default Timezone setting to UTC. Now the Time column is displayed in UTC, while the Message column retains the original timestamp, in PST.
-
+
In another example, if your time zone is set to **UTC**, and you share a Dashboard with another user who has their time zone set to **PST**, what will they see?
They will see the same data, just displayed using their custom set time zone. For example, if you have a Panel that uses a time series, the timeline on the X axis of your chart is displayed in your time zone, **UTC**. The other user will see the timeline on the X axis displayed in their time zone, **PST**. But the data displayed in the chart is exactly the same.
-
+
## Time ranges
diff --git a/static/robots.txt b/static/robots.txt
index 9a7b8e4dd3..5bde82aead 100644
--- a/static/robots.txt
+++ b/static/robots.txt
@@ -5,4 +5,5 @@ Allow: /
Disallow:
/docs/reuse/
/docs/tags/
+Disallow: /ja/docs/
Sitemap: https://help.sumologic.com/sitemap.xml