diff --git a/.github/workflows/docs-parity-updates.yml b/.github/workflows/docs-parity-updates.yml index 2219419db8..2a6493b74c 100644 --- a/.github/workflows/docs-parity-updates.yml +++ b/.github/workflows/docs-parity-updates.yml @@ -30,6 +30,11 @@ jobs: with: python-version: "3.11" + - name: Setup Node + uses: actions/setup-node@v4 + with: + node-version: '20' + - name: Download metrics data from Moto Integration test pipeline (GitHub) working-directory: docs run: ./scripts/get_latest_github_metrics.sh ./target main @@ -86,6 +91,13 @@ jobs: cp -r target/updated_coverage/md/* content/en/references/coverage && rm -R target/updated_coverage/md/ mv -f target/updated_coverage/data/*.json data/coverage + - name: Format Parity Coverage Docs + working-directory: docs + run: | + npm install + npm install markdownlint-cli2 --global + markdownlint-cli2 --config .markdownlint-cli2.yaml --fix + - name: Check for changes id: check-for-changes working-directory: docs diff --git a/.github/workflows/markdownlint.yml b/.github/workflows/markdownlint.yml new file mode 100644 index 0000000000..3c6e82d837 --- /dev/null +++ b/.github/workflows/markdownlint.yml @@ -0,0 +1,38 @@ +name: Lint Markdown Files +on: [push, pull_request] + +jobs: + markdownlint: + name: 'Markdown Lint' + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 2 + + - name: Setup Hugo + uses: peaceiris/actions-hugo@v3 + with: + hugo-version: '0.125.6' + extended: true + + - name: Setup Node + uses: actions/setup-node@v4 + with: + node-version: '20' + + - name: Install dependencies + run: | + npm install + + - uses: tj-actions/changed-files@v24 + id: changed-files + with: + files: 'content/**/*.md' + separator: "\n" + + - uses: DavidAnson/markdownlint-cli2-action@v6 + if: steps.changed-files.outputs.any_changed == 'true' + with: + globs: ${{ steps.changed-files.outputs.all_changed_files }} + config: '.markdownlint-cli2.yaml' diff --git a/.markdownlint-cli2.yaml b/.markdownlint-cli2.yaml new file mode 100644 index 0000000000..63157653b3 --- /dev/null +++ b/.markdownlint-cli2.yaml @@ -0,0 +1,26 @@ +globs: + - 'content/**/*.md' +ignores: + - 'node_modules' + # Weird behavior with markdownlint; TODO: Fix + - 'content/en/tutorials/cloud-pods-collaborative-debugging/*' + - 'content/en/user-guide/integrations/terraform/*' + - 'content/en/user-guide/aws/events/*' +customRules: + - markdownlint-rule-max-one-sentence-per-line +config: + MD029: false # Ordered list item prefix + MD046: false # Code block style + MD025: false # Single H1 + MD001: false # Header levels increment by one + MD024: false # Multiple headers with the same content + MD055: false # Inconsistent leading and trailing pipe characters + MD056: false # Inconsistent Table column count + MD036: false # Emphasis used instead of a header + MD003: false # Header style + MD033: false # Inline HTML + MD013: false # Line length + MD034: false # Bare URL used + MD032: false # Lists should be surrounded by blank lines + MD018: false # No space after hash on atx style header + MD022: false # Headers should be surrounded by blank lines diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000000..6c8ecd8fd2 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,6 @@ +repos: + - repo: https://github.com/DavidAnson/markdownlint-cli2 + rev: v0.13.0 + hooks: + - id: markdownlint-cli2 + args: ['--fix'] \ No newline at end of file diff --git a/README.md b/README.md index 7b165e5ac8..06575161de 100644 --- a/README.md +++ 
b/README.md @@ -2,7 +2,6 @@ Repository for [docs.localstack.cloud](https://docs.localstack.cloud). - ## Getting Started ### Basics @@ -12,7 +11,6 @@ LocalStack Docs is using the following technology stack: - [Docsy](https://docsy.dev) as a theme for Hugo. - [GitHub Pages](https://pages.github.com/) to automatically deploy every commit on the `main` branch of this repository on [docs.localstack.cloud](https://docs.localstack.cloud). - ### Clone the repo Clone this repository and initialize the Git submodules recursively (`themes/docsy` is a submodule that again has submodules for vendored assets like fontawesome). @@ -30,14 +28,13 @@ or: cd docs git submodule update --init --recursive - ### Install Hugo LocalStack Docs is based on the [Hugo static site generator](https://gohugo.io). -In order to contribute to LocalStack Docs, you need to [install Hugo](https://gohugo.io/getting-started/installing) in order to verify your changes. Make sure to install the _extended_ version of Hugo. -You also need to make sure that `go` is installed in order to run hugo scripts. - +In order to contribute to LocalStack Docs, you need to [install Hugo](https://gohugo.io/getting-started/installing) in order to verify your changes. +Make sure to install the _extended_ version of Hugo. +You also need to make sure that `go` is installed in order to run hugo scripts. ### Run locally @@ -51,7 +48,6 @@ or run in developer mode with automatic reload: Once the server is started, the locally served Docs are available at http://localhost:1313. - ### Writing content The whole site is generated with Hugo, a powerful static-site generator. @@ -61,12 +57,24 @@ You can find an extensive documentation on how to use Hugo [in their docs](https Make sure to follow the best practices below when contributing content. #### Updating developer hub applications + While contributing to the developer hub applications page i.e. editing `data/developerhub/applications.json` file, make sure to run the `create-applications.js` script in the `scripts` folder to create new application pages. Example usage in the project root: node scripts/create-applications.js +### Running pre-commit checks + +You can run pre-commit checks to ensure that your changes are compliant with the repository's standards. + +```bash +pip install pre-commit +pre-commit install +``` + +pre-commit will run automatically before each commit. +If you want to run it manually, use `pre-commit run`. ## Best Practices @@ -84,19 +92,20 @@ Please follow these best practices when writing documentation in this repository Use `bash` only for Bash scripts, and use `text` for shell outputs or command examples. The full list of the supported languages [here](https://gohugo.io/content-management/syntax-highlighting/). If needed, you can also [highlight a specific line](https://gohugo.io/content-management/syntax-highlighting/#highlighting-in-code-fences) in the snippet. -- **Images:** If you want to use images in your post, create a new [leaf bundle directory](https://github.com/gohugoio/hugo/issues/1240) and put the image and the post (named `index.md`) in there (you can find examples in the docs already, f.e. the cognito service docs). +- **Images:** If you want to use images in your post, create a new [leaf bundle directory](https://github.com/gohugoio/hugo/issues/1240) and put the image and the post (named `index.md`) in there (you can find examples in the docs already, f.e. + the cognito service docs). 
Then you can use the usual markdown syntax with a relative path (f.e.: `![Alternative_Text](file_next_to_post.png)`). If you want to resize the image, use the `figure` or `img` shortcode, for example: `{{< img src="cockpit-init-check.png" class="img-fluid shadow rounded" width="150px" >}}` - **Callouts:** Use these to make content stand out. The `callout` shortcode supports `note` (default), `tip` and `warning` levels. Use it like so: - ``` + + ```markdown {{< callout "warning" >}} This will make your computer halt and catch fire! {{< /callout >}} ``` - ## Troubleshooting This section covers common issues when working with LocalStack Docs: @@ -104,12 +113,14 @@ This section covers common issues when working with LocalStack Docs: ### Missing shortcodes Example error: -``` + +```bash Start building sites … hugo v0.88.1-5BC54738+extended linux/amd64 BuildDate=2021-09-04T09:39:19Z VendorInfo=gohugoio Error: Error building site: "/home/localstack/Repos/docs-test/content/en/get-started/_index.md:57:1": failed to extract shortcode: template for shortcode "alert" not found Built in 45 ms ``` -1. Make sure to correctly clone and initialize the git submodules of this repo. For details see the section "[Clone the repo](#clone-the-repo)" above. +1. Make sure to correctly clone and initialize the git submodules of this repo. + For details see the section "[Clone the repo](#clone-the-repo)" above. 2. Delete the Hugo Module cache using `hugo mod clean` or `make clean`. diff --git a/content/en/academy/localstack-101/lesson-2/index.md b/content/en/academy/localstack-101/lesson-2/index.md index ea93f24cd7..60a6f30032 100644 --- a/content/en/academy/localstack-101/lesson-2/index.md +++ b/content/en/academy/localstack-101/lesson-2/index.md @@ -21,7 +21,7 @@ In this lesson, we'll talk about LocalStack: - Tackles sophisticated and exotic APIs (QLDB, Athena, Glue) and helps enhance your skill set. - Contains advanced collaboration features and seamless CI integrations that foster team productivity. - Our mission is to empower developers with control over their environments, eliminating time-consuming cloud dev and test loops. -- You can focus on developing exceptional products to solve real-world challenges with LocalStack as your game-changing companion. +- You can focus on developing exceptional products to solve real-world challenges with LocalStack as your game-changing companion. Embrace the future of cloud development and have fun exploring! @@ -32,6 +32,3 @@ Further reading: - [LocalStack Service Coverage](https://docs.localstack.cloud/user-guide/aws/feature-coverage/) - [Learning Resources for Everything AWS](https://aws.amazon.com/developer/learning/) - [Documentation for AWS Services](https://docs.aws.amazon.com/) - - - diff --git a/content/en/academy/localstack-101/lesson-3/index.md b/content/en/academy/localstack-101/lesson-3/index.md index 3af0c3da0c..4f0e73b4a4 100644 --- a/content/en/academy/localstack-101/lesson-3/index.md +++ b/content/en/academy/localstack-101/lesson-3/index.md @@ -13,13 +13,14 @@ type: lessons url: "/academy/localstack-101/why-localstack/" --- -Let's imagine this scenario: Alice, a software developer, takes on the task of creating a serverless -Web application on AWS Cloud. However, she faces slow and tedious development due to cloud dependencies (DBs, VMs, MQs, etc.). +Let's imagine this scenario: Alice, a software developer, takes on the task of creating a serverless +Web application on AWS Cloud. 
+However, she faces slow and tedious development due to cloud dependencies (DBs, VMs, MQs, etc.). Every local change needs to be packaged and uploaded to the cloud for testing. The solution for her trouble is LocalStack — a revolutionary platform that brings cloud resources to her local machine, enabling efficient development and testing. -LocalStack is the ultimate platform for cloud developers, offering a wide array of benefits to enhance productivity, reduce costs, and simplify maintenance. +LocalStack is the ultimate platform for cloud developers, offering a wide array of benefits to enhance productivity, reduce costs, and simplify maintenance. This comprehensive course will equip you with the knowledge and skills to harness LocalStack's power and revolutionize your cloud development workflow. Key Takeaways: @@ -42,4 +43,3 @@ Further reading: - [How Understanding Billing is Complicated and Risky](https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/billing-what-is.html) - [LocalStack in CI](https://docs.localstack.cloud/user-guide/ci/) - [Third Party Integrations](https://docs.localstack.cloud/user-guide/integrations/) - diff --git a/content/en/academy/localstack-101/lesson-4/index.md b/content/en/academy/localstack-101/lesson-4/index.md index ccd1beb1bb..ee0396da28 100644 --- a/content/en/academy/localstack-101/lesson-4/index.md +++ b/content/en/academy/localstack-101/lesson-4/index.md @@ -13,13 +13,14 @@ type: lessons url: "/academy/localstack-101/getting-started/" --- -There are several LocalStack installation methods to kickstart your cloud development journey. +There are several LocalStack installation methods to kickstart your cloud development journey. Discover multiple pathways to initiate your LocalStack experience: You'll understand the diverse approaches to LocalStack installation: 1. Quickstart with LocalStack CLI: -- Install `awscli-local` and `localstack` via `pip install`. On macOS you can use `brew install`. +- Install `awscli-local` and `localstack` via `pip install`. + On macOS you can use `brew install`. - Start LocalStack using `localstack start`. - Create a bucket and list buckets using `awslocal s3 mb s3://test` and `awslocal s3 ls`. 2. Alternative - Docker: Dive into an alternate installation method using Docker: pull the image and run it, it's that easy. @@ -33,6 +34,3 @@ Further reading: - [What is AWS CLI local](https://docs.localstack.cloud/user-guide/integrations/aws-cli/) - [Pip Documentation](https://pypi.org/project/pip/) - [Docker Compose Docs](https://docs.docker.com/get-started/08_using_compose/) - - - diff --git a/content/en/academy/localstack-101/lesson-5/index.md b/content/en/academy/localstack-101/lesson-5/index.md index 68032b35cd..4a9cc3fcf1 100644 --- a/content/en/academy/localstack-101/lesson-5/index.md +++ b/content/en/academy/localstack-101/lesson-5/index.md @@ -18,9 +18,10 @@ In this informative video we guide you through the essential steps of the LocalS - Witness the seamless login flow and discover how to configure the web app. - Learn how to effortlessly connect your LocalStack Account to enable a smooth integration experience. - Explore the Resource Browser as we demonstrate how to list and create fundamental resources. -- To create an account for LocalStack, visit [app.localstack.cloud/sign-up](https://app.localstack.cloud/sign-up). You can sign up with your email address or one of our supported social identity providers (such as GitHub). 
+- To create an account for LocalStack, visit [app.localstack.cloud/sign-up](https://app.localstack.cloud/sign-up). + You can sign up with your email address or one of our supported social identity providers (such as GitHub). Further reading: - [Web App Documentation](https://docs.localstack.cloud/user-guide/web-application/) -- [Resource Browser](https://docs.localstack.cloud/user-guide/web-application/resource-browser/) \ No newline at end of file +- [Resource Browser](https://docs.localstack.cloud/user-guide/web-application/resource-browser/) diff --git a/content/en/academy/localstack-101/lesson-6/index.md b/content/en/academy/localstack-101/lesson-6/index.md index 1f62704b59..a359c6ccf3 100644 --- a/content/en/academy/localstack-101/lesson-6/index.md +++ b/content/en/academy/localstack-101/lesson-6/index.md @@ -12,12 +12,13 @@ type: lessons url: "/academy/localstack-101/full-project-demo/" --- -Discover the ins and outs of one of our Developer Hub's practical sample applications—a note-taking platform. +Discover the ins and outs of one of our Developer Hub's practical sample applications—a note-taking platform. This video tutorial offers a comprehensive walk-through, beginning with the checkout of the GitHub project and -culminating in deploying the operational application on LocalStack. Covering the essentials, we'll guide you +culminating in deploying the operational application on LocalStack. +Covering the essentials, we'll guide you through dependency installation, backend and frontend build, and a detailed exploration of the application's service diagram to unveil the integral components at play. Further exploring: -- [LocalStack Developer Hub Applications](https://docs.localstack.cloud/applications) \ No newline at end of file +- [LocalStack Developer Hub Applications](https://docs.localstack.cloud/applications) diff --git a/content/en/academy/localstack-101/lesson-7/index.md b/content/en/academy/localstack-101/lesson-7/index.md index 3bdb274834..6f6d1cc805 100644 --- a/content/en/academy/localstack-101/lesson-7/index.md +++ b/content/en/academy/localstack-101/lesson-7/index.md @@ -13,7 +13,7 @@ type: lessons url: "/academy/localstack-101/cloud-pods/" --- -This video will cover: +This video will cover: What are Cloud Pods ? @@ -23,7 +23,7 @@ Deviate from the default ephemeral state by leveraging Cloud Pods' capability to How are cloud pods useful ? - Gain insight into the advantages they offer, including the creation of shareable local cloud sandboxes. -Learn about the collaborative potential of distributing and sharing pods among team members and discover +Learn about the collaborative potential of distributing and sharing pods among team members and discover how Cloud Pods contribute to reproducibility in applications and pre-seeding test environments. Demo diff --git a/content/en/academy/localstack-development/lesson-2/index.md b/content/en/academy/localstack-development/lesson-2/index.md index 9da2115d72..31d2604e01 100644 --- a/content/en/academy/localstack-development/lesson-2/index.md +++ b/content/en/academy/localstack-development/lesson-2/index.md @@ -11,15 +11,16 @@ type: lessons url: "/academy/localstack-deployment/deploy-app-ls/" --- -In this lesson, we'll guide you through deploying a [continer-based application](https://github.com/localstack/localstack-workshop/tree/main/02-serverless-api-ecs-apigateway), which mimics the complexity of a real-world application. 
We are using the following AWS services and their features to build our infrastructure: +In this lesson, we'll guide you through deploying a [container-based application](https://github.com/localstack/localstack-workshop/tree/main/02-serverless-api-ecs-apigateway), which mimics the complexity of a real-world application. +We are using the following AWS services and their features to build our infrastructure: - - [Elastic Container Service](https://docs.localstack.cloud/user-guide/aws/elastic-container-service/) to create and deploy our containerized application. - - [DynamoDB](https://docs.localstack.cloud/user-guide/aws/dynamodb/) as a key-value and document database to persist our data. - - [API Gateway](https://docs.localstack.cloud/user-guide/aws/apigatewayv2/) to expose the containerized services to the user through HTTP APIs. - - [Cognito User Pools](https://docs.localstack.cloud/user-guide/aws/cognito/) for user authentication and authorizing requests to container APIs. - - [Amplify](https://docs.localstack.cloud/user-guide/aws/amplify/) to create the user client with ReactJS to send requests to container APIs. - - [S3](https://docs.localstack.cloud/user-guide/aws/s3/) to deploy the Amplify application to make the web application available to users. - - [IAM](https://docs.localstack.cloud/user-guide/aws/iam/) to create policies to specify roles and permissions for various AWS services. +- [Elastic Container Service](https://docs.localstack.cloud/user-guide/aws/elastic-container-service/) to create and deploy our containerized application. +- [DynamoDB](https://docs.localstack.cloud/user-guide/aws/dynamodb/) as a key-value and document database to persist our data. +- [API Gateway](https://docs.localstack.cloud/user-guide/aws/apigatewayv2/) to expose the containerized services to the user through HTTP APIs. +- [Cognito User Pools](https://docs.localstack.cloud/user-guide/aws/cognito/) for user authentication and authorizing requests to container APIs. +- [Amplify](https://docs.localstack.cloud/user-guide/aws/amplify/) to create the user client with ReactJS to send requests to container APIs. +- [S3](https://docs.localstack.cloud/user-guide/aws/s3/) to deploy the Amplify application to make the web application available to users. +- [IAM](https://docs.localstack.cloud/user-guide/aws/iam/) to create policies to specify roles and permissions for various AWS services. Additionally, we'll explore the **Resource Browser**, that enables you to perform basic management operations for the locally deployed AWS resources during the development and testing process, in a fashion similar to the AWS Management Console.
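As a quick sanity check after the stack from this lesson is deployed, the core resources can be listed with `awslocal`. This is an illustrative sketch added for clarity, not part of the lesson's repository; it assumes LocalStack is running on the default endpoint, `awslocal` is installed, and the resources above have already been created:

```bash
# List the ECS cluster(s) backing the containerized services
awslocal ecs list-clusters

# Confirm the DynamoDB table(s) used for persistence
awslocal dynamodb list-tables

# Show the HTTP APIs exposed through API Gateway v2
awslocal apigatewayv2 get-apis

# Check the Cognito user pool used for authentication
awslocal cognito-idp list-user-pools --max-results 10

# Verify the S3 bucket hosting the Amplify frontend
awslocal s3 ls
```

The same information is visible in the Resource Browser; the CLI calls are simply a scriptable alternative.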
@@ -30,4 +31,3 @@ Further reading: - [Field Notes: Serverless Container-based APIs with Amazon ECS and Amazon API Gateway](https://aws.amazon.com/blogs/architecture/field-notes-serverless-container-based-apis-with-amazon-ecs-and-amazon-api-gateway/) - [What is `awslocal` CLI?](https://docs.localstack.cloud/user-guide/integrations/aws-cli/#localstack-aws-cli-awslocal) - [LocalStack Resource Browser](https://docs.localstack.cloud/user-guide/web-application/resource-browser/) - diff --git a/content/en/academy/localstack-development/lesson-3/index.md b/content/en/academy/localstack-development/lesson-3/index.md index ab4f6c2e31..e2ecc2bd64 100644 --- a/content/en/academy/localstack-development/lesson-3/index.md +++ b/content/en/academy/localstack-development/lesson-3/index.md @@ -11,9 +11,11 @@ type: lessons url: "/academy/localstack-deployment/ls-integrations/" --- -LocalStack integrates with various Infrastructure as Code tools like [Terraform](https://docs.localstack.cloud/user-guide/integrations/terraform/) or [Pulumi](https://docs.localstack.cloud/user-guide/integrations/pulumi/) assist in configuration management with added advantages like version control, ease of editing, and reproducibility. Additionally, LocalStack integrates with various CI platforms, such as [GitHub Actions](https://docs.localstack.cloud/user-guide/ci/github-actions/) or [CircleCI](https://docs.localstack.cloud/user-guide/ci/circle-ci/), to enable the cloud integrations tests before pushing changes to production. +LocalStack integrates with various Infrastructure as Code tools like [Terraform](https://docs.localstack.cloud/user-guide/integrations/terraform/) or [Pulumi](https://docs.localstack.cloud/user-guide/integrations/pulumi/) assist in configuration management with added advantages like version control, ease of editing, and reproducibility. +Additionally, LocalStack integrates with various CI platforms, such as [GitHub Actions](https://docs.localstack.cloud/user-guide/ci/github-actions/) or [CircleCI](https://docs.localstack.cloud/user-guide/ci/circle-ci/), to enable the cloud integrations tests before pushing changes to production. -We discuss how LocalStack integrates with infrastructure-as-code (IaC) and continuous integration (CI) tools. Towards the end, we'll demonstrate a [Terraform deployment of a PostgreSQL Aurora cluster](https://github.com/terraform-aws-modules/terraform-aws-rds-aurora/tree/v8.1.1/examples/postgresql) on LocalStack, highlighting the time and resource savings compared to deploying directly on AWS. +We discuss how LocalStack integrates with infrastructure-as-code (IaC) and continuous integration (CI) tools. +Towards the end, we'll demonstrate a [Terraform deployment of a PostgreSQL Aurora cluster](https://github.com/terraform-aws-modules/terraform-aws-rds-aurora/tree/v8.1.1/examples/postgresql) on LocalStack, highlighting the time and resource savings compared to deploying directly on AWS. Additionally, we'll provide examples of using LocalStack with Terraform and Pulumi for reference. diff --git a/content/en/academy/localstack-development/lesson-4/index.md b/content/en/academy/localstack-development/lesson-4/index.md index d4743834a2..6a2a4579aa 100644 --- a/content/en/academy/localstack-development/lesson-4/index.md +++ b/content/en/academy/localstack-development/lesson-4/index.md @@ -11,14 +11,16 @@ type: lessons url: "/academy/localstack-deployment/infra-terraform/" --- -In this video, we'll utilize Terraform to deploy AWS resources locally through LocalStack. 
These resources include DynamoDB tables, API Gateway, and VPC. We'll use `tflocal`, a wrapper CLI that serves as a wrapper on the `terraform` CLI to execute Terraform commands against LocalStack. +In this video, we'll utilize Terraform to deploy AWS resources locally through LocalStack. +These resources include DynamoDB tables, API Gateway, and VPC. +We'll use `tflocal`, a wrapper around the `terraform` CLI that executes Terraform commands against LocalStack. Here's a breakdown of the steps we'll take: -- We'll create a `main.tf` file and then apply the Terraform configuration on LocalStack. -- We'll demonstrate various configuration options for setting up the infrastructure for our application. -- After applying Terraform, we'll inspect the output, showcasing deployed resources such as API URLs, ECS cluster name, and VPC ID. -- Towards the end, we'll verify the deployment and resource creation using the Resource Browser. +- We'll create a `main.tf` file and then apply the Terraform configuration on LocalStack. +- We'll demonstrate various configuration options for setting up the infrastructure for our application. +- After applying Terraform, we'll inspect the output, showcasing deployed resources such as API URLs, ECS cluster name, and VPC ID. +- Towards the end, we'll verify the deployment and resource creation using the Resource Browser. Further reading: diff --git a/content/en/academy/localstack-development/lesson-5/index.md b/content/en/academy/localstack-development/lesson-5/index.md index 8c11de439c..5264adbc97 100644 --- a/content/en/academy/localstack-development/lesson-5/index.md +++ b/content/en/academy/localstack-development/lesson-5/index.md @@ -11,7 +11,9 @@ type: lessons url: "/academy/localstack-deployment/infra-cloudformation/" --- -In this video, we'll utilize [AWS CloudFormation](https://docs.localstack.cloud/user-guide/aws/cloudformation/) to deploy AWS resources locally through LocalStack. These resources include DynamoDB tables, API Gateway, and VPC. We'll use `awslocal`, a wrapper CLI that serves as a wrapper on the `aws` CLI to execute Terraform commands against LocalStack. +In this video, we'll utilize [AWS CloudFormation](https://docs.localstack.cloud/user-guide/aws/cloudformation/) to deploy AWS resources locally through LocalStack. +These resources include DynamoDB tables, API Gateway, and VPC. +We'll use `awslocal`, a wrapper around the `aws` CLI that executes AWS CLI commands against LocalStack. Here's a breakdown of the steps we'll take: diff --git a/content/en/academy/localstack-development/lesson-6/index.md b/content/en/academy/localstack-development/lesson-6/index.md index 1b922bd7ca..8fa56ee13b 100644 --- a/content/en/academy/localstack-development/lesson-6/index.md +++ b/content/en/academy/localstack-development/lesson-6/index.md @@ -11,14 +11,15 @@ type: lessons url: "/academy/localstack-deployment/iam-policy-stream/" --- -In this video, we'll explore the [IAM Policy Stream](https://docs.localstack.cloud/user-guide/security-testing/iam-policy-stream/) that assists in assigning precise IAM permissions to a resource. This ensures accurate and secure access to the resource. +In this video, we'll explore the [IAM Policy Stream](https://docs.localstack.cloud/user-guide/security-testing/iam-policy-stream/) that assists in assigning precise IAM permissions to a resource. +This ensures accurate and secure access to the resource. Here's a breakdown of the steps we'll take: -1.
Enable IAM Policy Stream on the [LocalStack Web Application](https://app.localstack.cloud/policy-stream). -2. Trigger an AWS API request from the CLI, triggering the generation of the necessary policy for the request. -3. Submit another request to generate the corresponding policy for the resource. -4. Explore the **Summary Policy** section to view the consolidated policy for both requests. +1. Enable IAM Policy Stream on the [LocalStack Web Application](https://app.localstack.cloud/policy-stream). +2. Trigger an AWS API request from the CLI, triggering the generation of the necessary policy for the request. +3. Submit another request to generate the corresponding policy for the resource. +4. Explore the **Summary Policy** section to view the consolidated policy for both requests. Further reading: diff --git a/content/en/academy/localstack-development/lesson-7/index.md b/content/en/academy/localstack-development/lesson-7/index.md index fa692e8237..c318b2d93e 100644 --- a/content/en/academy/localstack-development/lesson-7/index.md +++ b/content/en/academy/localstack-development/lesson-7/index.md @@ -11,12 +11,16 @@ type: lessons url: "/academy/localstack-deployment/github-action-ls" --- -LocalStack allows organizations to automate their application testing and integration process using continuous integration (CI). You can seamlessly integrate LocalStack with your current CI platform. LocalStack offers native plugin for [CircleCI](https://circleci.com/developer/orbs/orb/localstack/platform) & [GitHub Actions](https://github.com/marketplace/actions/setup-localstack), and a universal driver for other CI platforms. This integration enables you to include LocalStack's local AWS cloud emulation in your CI pipelines, leverage advanced features such as Cloud Pods and CI analytics, and execute your test and integration suite before deploying to production. +LocalStack allows organizations to automate their application testing and integration process using continuous integration (CI). +You can seamlessly integrate LocalStack with your current CI platform. +LocalStack offers native plugins for [CircleCI](https://circleci.com/developer/orbs/orb/localstack/platform) & [GitHub Actions](https://github.com/marketplace/actions/setup-localstack), and a universal driver for other CI platforms. +This integration enables you to include LocalStack's local AWS cloud emulation in your CI pipelines, leverage advanced features such as Cloud Pods and CI analytics, and execute your test and integration suite before deploying to production. Here's a breakdown of the steps we'll take: -- We'll look the `main.yml` file located in the `.github` directory. This file sets up LocalStack on the GitHub Action runner. -We install `awslocal` and `tflocal` to deploy the local infrastructure on LocalStack's cloud emulator running in the CI pipeline. +- We'll look at the `main.yml` file located in the `.github` directory. + This file sets up LocalStack on the GitHub Action runner. +- We install `awslocal` and `tflocal` to deploy the local infrastructure on LocalStack's cloud emulator running in the CI pipeline. - Following this deployment, we utilize `awslocal` to validate the deployed resources and conduct a diagnostic test on LocalStack to ensure everything is functioning correctly.
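The CI flow outlined above boils down to a handful of shell commands. The following is a rough sketch of what such a CI job might run, not the lesson's actual workflow; it assumes an auth token is available as `LOCALSTACK_AUTH_TOKEN` and uses the publicly documented `localstack`, `awslocal`, and `tflocal` CLIs:

```bash
# Install the LocalStack CLI plus the awslocal/tflocal wrapper CLIs
pip install localstack awscli-local terraform-local

# Start the LocalStack container in detached mode on the CI runner
localstack start -d

# Deploy the local infrastructure defined in main.tf against LocalStack
tflocal init && tflocal apply -auto-approve

# Validate a deployed resource, e.g. list the DynamoDB tables
awslocal dynamodb list-tables
```

In the lesson itself, the equivalent steps are expressed as GitHub Actions workflow steps using the setup-localstack action linked above.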
Further reading: diff --git a/content/en/academy/localstack-development/lesson-8/index.md b/content/en/academy/localstack-development/lesson-8/index.md index d8a55fbbe9..9bcfc08e08 100644 --- a/content/en/academy/localstack-development/lesson-8/index.md +++ b/content/en/academy/localstack-development/lesson-8/index.md @@ -11,7 +11,10 @@ type: lessons url: "/academy/localstack-deployment/cloud-pods" --- -LocalStack is an ephemeral environment by nature. It means that when you stop your LocalStack instance, all data is removed. However, by using [Cloud Pods](https://docs.localstack.cloud/user-guide/state-management/cloud-pods/), you can preserve the LocalStack state. Cloud Pods are snapshots of your LocalStack instance's state that can be saved, versioned, shared, and restored. +LocalStack is an ephemeral environment by nature. +It means that when you stop your LocalStack instance, all data is removed. +However, by using [Cloud Pods](https://docs.localstack.cloud/user-guide/state-management/cloud-pods/), you can preserve the LocalStack state. +Cloud Pods are snapshots of your LocalStack instance's state that can be saved, versioned, shared, and restored. In this video, we'll follow the [quickstart](https://app.localstack.cloud/quickstart) to import Cloud Pods shared by our team member into our LocalStack instance and observe how this process supports local development and deployment of cloud applications. diff --git a/content/en/getting-started/_index.md b/content/en/getting-started/_index.md index cb197b4113..8d390568dd 100644 --- a/content/en/getting-started/_index.md +++ b/content/en/getting-started/_index.md @@ -21,4 +21,5 @@ LocalStack supports a growing number of [AWS services]({{< ref "aws" >}}), like You can find a comprehensive list of supported APIs on our [Feature Coverage]({{< ref "feature-coverage" >}}) page. -LocalStack also provides additional features to make your life as a cloud developer easier! Check out LocalStack's [Cloud Developer Tools]({{< ref "user-guide/tools" >}}). +LocalStack also provides additional features to make your life as a cloud developer easier! +Check out LocalStack's [Cloud Developer Tools]({{< ref "user-guide/tools" >}}). diff --git a/content/en/getting-started/auth-token/index.md b/content/en/getting-started/auth-token/index.md index 1beaf28871..b2644cfb9c 100644 --- a/content/en/getting-started/auth-token/index.md +++ b/content/en/getting-started/auth-token/index.md @@ -7,14 +7,19 @@ description: > ## Introduction -The Auth Token is a personal identifier used for user authentication outside the LocalStack Web Application, particularly in conjunction with the LocalStack core cloud emulator. Its primary functions are to retrieve the user's license and enable access to advanced features, effectively replacing the older developer API keys. +The Auth Token is a personal identifier used for user authentication outside the LocalStack Web Application, particularly in conjunction with the LocalStack core cloud emulator. +Its primary functions are to retrieve the user's license and enable access to advanced features, effectively replacing the older developer API keys. -The Auth Token remains unchanged unless manually rotated by the user, regardless of any license assignment changes. You can locate your Auth Token on the [Getting Started page](https://app.localstack.cloud/getting-started) or the [Auth Token page](https://app.localstack.cloud/workspace/auth-token) in the LocalStack Web Application. 
+The Auth Token remains unchanged unless manually rotated by the user, regardless of any license assignment changes. +You can locate your Auth Token on the [Getting Started page](https://app.localstack.cloud/getting-started) or the [Auth Token page](https://app.localstack.cloud/workspace/auth-token) in the LocalStack Web Application. {{< callout "warning" >}} -- Previously, API keys were required to activate the LocalStack core cloud emulator. These API keys are now being replaced by Auth Tokens. -- Currently, LocalStack supports both API Keys and Auth Tokens. However, API Keys will be discontinued in the upcoming major release of LocalStack. -- To update your LocalStack configuration, replace your API Key with an Auth Token. Use the `LOCALSTACK_AUTH_TOKEN` environment variable in place of `LOCALSTACK_API_KEY`. +- Previously, API keys were required to activate the LocalStack core cloud emulator. + These API keys are now being replaced by Auth Tokens. +- Currently, LocalStack supports both API Keys and Auth Tokens. + However, API Keys will be discontinued in the upcoming major release of LocalStack. +- To update your LocalStack configuration, replace your API Key with an Auth Token. + Use the `LOCALSTACK_AUTH_TOKEN` environment variable in place of `LOCALSTACK_API_KEY`. {{< /callout >}} ## Managing your License @@ -28,29 +33,37 @@ After initiating your trial or acquiring a license, proceed to assign it to a us - Visit the [Users & Licenses page](https://app.localstack.cloud/workspace/members). - Select a user in the **Workspace Members** section for license assignment. -- Define user's role via the **Member Role** dropdown. Single users automatically receive the **Admin** role. -- Toggle **Advanced Permissions** to set specific permissions. Single users automatically receive full permissions. -- Click **Save** to complete the assignment. Single users assign licenses to themselves. +- Define user's role via the **Member Role** dropdown. + Single users automatically receive the **Admin** role. +- Toggle **Advanced Permissions** to set specific permissions. + Single users automatically receive full permissions. +- Click **Save** to complete the assignment. + Single users assign licenses to themselves. {{< img src="assigning-a-license.png" class="img-fluid shadow rounded" width="800" >}}

-If you have joined a workspace, you need to be assigned a license by the workspace administrator. In case of switching workspaces or licenses, you need to make sure that you are assigned to the correct license. +If you have joined a workspace, you need to be assigned a license by the workspace administrator. +In case of switching workspaces or licenses, you need to make sure that you are assigned to the correct license. {{< callout "note" >}} If you do not assign a license, you will not be able to use LocalStack even if you have a valid Auth token. {{< /callout >}} -To view your own assigned license, visit the [My License page](https://app.localstack.cloud/workspace/my-license). You can further navigate to the [Auth Token page](https://app.localstack.cloud/workspace/auth-token) to view your Auth Token. +To view your own assigned license, visit the [My License page](https://app.localstack.cloud/workspace/my-license). +You can further navigate to the [Auth Token page](https://app.localstack.cloud/workspace/auth-token) to view your Auth Token. ## Configuring your Auth Token -LocalStack requires the `LOCALSTACK_AUTH_TOKEN` environment variable to contain your Auth Token. You can configure your Auth Token in several ways, depending on your use case. The following sections describe the various methods of setting your Auth Token. +LocalStack requires the `LOCALSTACK_AUTH_TOKEN` environment variable to contain your Auth Token. +You can configure your Auth Token in several ways, depending on your use case. +The following sections describe the various methods of setting your Auth Token. {{< callout "warning">}} -- It's crucial to keep your Auth Token confidential. Do not include it in source code management systems, such as Git repositories. -- Be aware that if an Auth Token is committed to a public repository, it's at risk of exposure, and could remain in the repository's history, even if attempts are made to rewrite it. -- In case your Auth Token is accidentally published, immediately rotate it on the [Auth Token page](https://app.localstack.cloud/workspace/auth-token). +- It's crucial to keep your Auth Token confidential. + Do not include it in source code management systems, such as Git repositories. +- Be aware that if an Auth Token is committed to a public repository, it's at risk of exposure, and could remain in the repository's history, even if attempts are made to rewrite it. +- In case your Auth Token is accidentally published, immediately rotate it on the [Auth Token page](https://app.localstack.cloud/workspace/auth-token). {{< /callout >}} ### Configuring your CI environment @@ -76,7 +89,8 @@ $env:LOCALSTACK_AUTH_TOKEN=""; localstack start {{< /tabpane >}} {{< callout "note" >}} -1. You can alternatively set the `LOCALSTACK_AUTH_TOKEN` environment variable in your shell session. This ensures the Auth Token is transmitted to your LocalStack container, enabling key activation. +1. You can alternatively set the `LOCALSTACK_AUTH_TOKEN` environment variable in your shell session. + This ensures the Auth Token is transmitted to your LocalStack container, enabling key activation. 2. The `localstack auth set-token` command is only available for `localstack` CLI and cannot be used with a Docker/Docker Compose setup. {{< /callout >}} @@ -112,11 +126,13 @@ environment: - LOCALSTACK_AUTH_TOKEN=${LOCALSTACK_AUTH_TOKEN- } ``` -You can manually set the Auth Token, or use the `export` command to establish the Auth Token in your current shell session. 
This ensures the Auth Token is transmitted to your LocalStack container, enabling key activation. +You can manually set the Auth Token, or use the `export` command to establish the Auth Token in your current shell session. +This ensures the Auth Token is transmitted to your LocalStack container, enabling key activation. ## Licensing-related configuration -To avoid logging any licensing-related error messages, set `LOG_LICENSE_ISSUES=0` in your environment. Refer to our [configuration guide](https://docs.localstack.cloud/references/configuration/#localstack-pro) for more information. +To avoid logging any licensing-related error messages, set `LOG_LICENSE_ISSUES=0` in your environment. +Refer to our [configuration guide](https://docs.localstack.cloud/references/configuration/#localstack-pro) for more information. ## Checking license activation @@ -139,7 +155,6 @@ $ Invoke-WebRequest -Uri http://localhost:4566/_localstack/info | ConvertFrom-Js {{< /tab >}} {{< /tabpane >}} - The following output would be retrieved: ```bash @@ -156,7 +171,8 @@ The following output would be retrieved: } ```` -You can notice the `edition` field is set to `pro` and the `is_license_activated` field is set to `true`. Another way to confirm this is by checking the logs of the LocalStack container for a message indicating successful license activation: +You can notice the `edition` field is set to `pro` and the `is_license_activated` field is set to `true`. +Another way to confirm this is by checking the logs of the LocalStack container for a message indicating successful license activation: {{< command >}} [...] Successfully activated license @@ -166,15 +182,19 @@ Otherwise, check our collected most [common activation issues](#common-activatio ## Rotating the Auth Token -Your personal Auth Token provides full access to your workspace and LocalStack license. It's important to treat auth tokens as confidential, avoiding sharing or storing them in source control management systems (SCMs) like Git. +Your personal Auth Token provides full access to your workspace and LocalStack license. +It's important to treat auth tokens as confidential, avoiding sharing or storing them in source control management systems (SCMs) like Git. -If you believe your Auth Token has been compromised or becomes known to someone else, reset it without delay. When you reset a token, the old one is immediately deactivated, losing its ability to access your license or workspace. It is not possible to restore previous tokens. +If you believe your Auth Token has been compromised or becomes known to someone else, reset it without delay. +When you reset a token, the old one is immediately deactivated, losing its ability to access your license or workspace. +It is not possible to restore previous tokens. To rotate your Auth Token, go to the [Auth Token page](https://app.localstack.cloud/workspace/auth-token) and select the **Reset Auth Token** option. ## Common activation issues -Starting from version 2.0.0, the `localstack/localstack-pro` image in LocalStack demands a successful license activation for startup. If the activation of the license is unsuccessful, LocalStack will exit and display error messages. +Starting from version 2.0.0, the `localstack/localstack-pro` image in LocalStack demands a successful license activation for startup. +If the activation of the license is unsuccessful, LocalStack will exit and display error messages. 
```bash =============================================== @@ -193,7 +213,8 @@ The key activation in LocalStack may fail for several reasons, and the most comm ### Missing Credentials -You need to provide either an Auth Token or an API Key to start the LocalStack Pro image successfully. You can find your Auth Token or API Key on the [Auth Token page](https://app.localstack.cloud/workspace/auth-token) or the [Legacy API Key page](https://app.localstack.cloud/workspace/api-keys) in the LocalStack Web Application. +You need to provide either an Auth Token or an API Key to start the LocalStack Pro image successfully. +You can find your Auth Token or API Key on the [Auth Token page](https://app.localstack.cloud/workspace/auth-token) or the [Legacy API Key page](https://app.localstack.cloud/workspace/api-keys) in the LocalStack Web Application. If you are using the `localstack` CLI, you can set the `LOCALSTACK_AUTH_TOKEN` environment variable to your Auth Token or use the following command to set it up: @@ -203,16 +224,20 @@ $ localstack auth set-token ### Invalid License -The issue may occur if there is no valid license linked to your account due to expiration or if the license has not been assigned. You can check your license status in the LocalStack Web Application on the [My License page](https://app.localstack.cloud/workspace/my-license). +The issue may occur if there is no valid license linked to your account due to expiration or if the license has not been assigned. +You can check your license status in the LocalStack Web Application on the [My License page](https://app.localstack.cloud/workspace/my-license). ### License Server Unreachable -LocalStack initiates offline activation when the license server is unreachable, requiring re-activation every 24 hours. Log output may indicate issues with your machine resolving the LocalStack API domain, which can be verified using a tool like `dig`: +LocalStack initiates offline activation when the license server is unreachable, requiring re-activation every 24 hours. +Log output may indicate issues with your machine resolving the LocalStack API domain, which can be verified using a tool like `dig`: {{< command >}} $ dig api.localstack.cloud {{< / command >}} -If the result shows a status other than `status: NOERROR`, your machine is unable to resolve this domain. Certain corporate DNS servers may filter requests to specific domains. Kindly reach out to your network administrator to safelist `localstack.cloud` domain. +If the result shows a status other than `status: NOERROR`, your machine is unable to resolve this domain. +Certain corporate DNS servers may filter requests to specific domains. +Kindly reach out to your network administrator to safelist `localstack.cloud` domain. If you have any further problems concerning your license activation, or if the steps do not help, do not hesitate to [contact us](https://localstack.cloud/contact/). diff --git a/content/en/getting-started/faq.md b/content/en/getting-started/faq.md index abc736ed77..3859426d26 100644 --- a/content/en/getting-started/faq.md +++ b/content/en/getting-started/faq.md @@ -49,28 +49,28 @@ The diagnose endpoint is only available if you run LocalStack with `DEBUG=1`. We do push a set of different image tags for the LocalStack Docker images. When using LocalStack, you can decide which tag you want to use. These tags have different semantics and will be updated on different occasions: -- `latest` (default) - - This is our default tag. +* `latest` (default) + * This is our default tag. 
It refers to the latest commit which has been fully tested using our extensive integration test suite. - - This also entails changes that are part of major releases, which means that this tag can contain breaking changes. - - This tag should be used if you want to stay up-to-date with the latest changes. -- `stable` - - This tag refers to the latest tagged release. + * This also entails changes that are part of major releases, which means that this tag can contain breaking changes. + * This tag should be used if you want to stay up-to-date with the latest changes. +* `stable` + * This tag refers to the latest tagged release. It will be updated with every release of LocalStack. - - This also entails major releases, which means that this tag can contain breaking changes. - - This tag should be used if you want to stay up-to-date with releases, but don't necessarily need the latest and greatest changes right away. -- `` (e.g. `3`) - - These tags can be used to refer to the latest release of a specific major release. + * This also entails major releases, which means that this tag can contain breaking changes. + * This tag should be used if you want to stay up-to-date with releases, but don't necessarily need the latest and greatest changes right away. +* `` (e.g. `3`) + * These tags can be used to refer to the latest release of a specific major release. It will be updated with every minor and patch release within this major release. - - This tag should be used if you want to avoid any potential breaking changes. -- `.` (e.g. `3.0`) - - These tags can be used to refer to the latest release of a specific minor release. + * This tag should be used if you want to avoid any potential breaking changes. +* `.` (e.g. `3.0`) + * These tags can be used to refer to the latest release of a specific minor release. It will be updated with every patch release within this minor release. - - This tag can be used if you want to avoid any bigger changes, like new features, but still want to update to the latest bugfix release. -- `..` (e.g. `3.0.2`) - - These tags can be used if you want to use a very specific release. + * This tag can be used if you want to avoid any bigger changes, like new features, but still want to update to the latest bugfix release. +* `..` (e.g. `3.0.2`) + * These tags can be used if you want to use a very specific release. It will not be updated. - - This tag can be used if you really want to avoid any changes to the image (not even minimal bug fixes). + * This tag can be used if you really want to avoid any changes to the image (not even minimal bug fixes). ### How can I access LocalStack from an alternative computer? @@ -110,12 +110,12 @@ To fix this, set the following environment variables: Set the system locale (language for non-Unicode programs) to UTF-8 to avoid Unicode errors. Follow these steps: -- Open the Control Panel. -- Go to "Clock and Region" or "Region and Language." -- Click on the "Administrative" tab. -- Click on the "Change system locale" button. -- Select "Beta: Use Unicode UTF-8 for worldwide language support" and click "OK." -- Restart your computer to apply the changes. +* Open the Control Panel. +* Go to "Clock and Region" or "Region and Language." +* Click on the "Administrative" tab. +* Click on the "Change system locale" button. +* Select "Beta: Use Unicode UTF-8 for worldwide language support" and click "OK." +* Restart your computer to apply the changes. 
If you would like to keep the system locale as it is, you can mitigate the issue by using the command `localstack --no-banner`. @@ -304,7 +304,7 @@ $ dig api.localstack.cloud If the result has some other status than `status: NOERROR,` your machine cannot resolve this domain. Some corporate DNS servers might filter requests to certain domains. -Contact your network administrator to safelist` localstack.cloud` domains. +Contact your network administrator to safelist`localstack.cloud` domains. ### How does LocalStack Pro handle security patches and bug fixes? @@ -326,4 +326,6 @@ For more details, please take a look at our [Enterprise offering](https://locals ### How does the LocalStack Web Application communicate with the LocalStack container? -The LocalStack Web Application connects to your LocalStack container running on your local machine and retrieves the information directly via the `localhost` without using the internet. Features such as Resource Browsers, IAM Policy Stream, Chaos Engineering dashboard, and others communicate directly with the LocalStack container using your browser. None of the information is sent to the internet, or stored on any external servers maintained by LocalStack. +The LocalStack Web Application connects to your LocalStack container running on your local machine and retrieves the information directly via the `localhost` without using the internet. +Features such as Resource Browsers, IAM Policy Stream, Chaos Engineering dashboard, and others communicate directly with the LocalStack container using your browser. +None of the information is sent to the internet, or stored on any external servers maintained by LocalStack. diff --git a/content/en/getting-started/help-and-support/index.md b/content/en/getting-started/help-and-support/index.md index 0c76574d50..b0edd0510a 100644 --- a/content/en/getting-started/help-and-support/index.md +++ b/content/en/getting-started/help-and-support/index.md @@ -10,7 +10,9 @@ cascade: ## Introduction -We strive to make it as easy as possible for you to use LocalStack, and we are very grateful for any feedback. We provide different levels of support to help you with your queries and issues. The support you receive depends on the plan you are on. +We strive to make it as easy as possible for you to use LocalStack, and we are very grateful for any feedback. +We provide different levels of support to help you with your queries and issues. +The support you receive depends on the plan you are on. | Plan | Support Level | |------|---------------| @@ -22,30 +24,37 @@ We strive to make it as easy as possible for you to use LocalStack, and we are v ## Community Support -LocalStack's Community support is available to all users of the LocalStack Community Edition & Hobby Plan users. You can avail community support through the following channels: +LocalStack's Community support is available to all users of the LocalStack Community Edition & Hobby Plan users. +You can avail community support through the following channels: - [LocalStack Discuss](https://discuss.localstack.cloud/) - [LocalStack Slack Community](https://localstack.cloud/slack) - [GitHub Issue](https://github.com/localstack/docs/issues/new) -Community support is provided on a best-effort basis and is not guaranteed. We also encourage you to help others in the community by answering questions and sharing your experiences. +Community support is provided on a best-effort basis and is not guaranteed. 
+We also encourage you to help others in the community by answering questions and sharing your experiences. ### LocalStack Discuss -LocalStack Discuss allows our community users to ask questions, share ideas, and discuss topics related to LocalStack. To create a new topic on Discuss, follow these steps: +LocalStack Discuss allows our community users to ask questions, share ideas, and discuss topics related to LocalStack. +To create a new topic on Discuss, follow these steps: - Create a new account on [LocalStack Discuss](https://discuss.localstack.cloud/) by clicking the **Sign Up** button. - Once you have created an account, you can create a new topic by clicking the **New Topic** button. - Choose the appropriate category for your topic and provide a title and description. - Click the **Create Topic** button to submit your topic. -LocalStack Discuss is public, allowing us to keep a record of these questions and answers for the larger community to use over time. However, you should avoid sharing any sensitive information on the platform (such as Auth Tokens, private configuration, etc.). +LocalStack Discuss is public, allowing us to keep a record of these questions and answers for the larger community to use over time. +However, you should avoid sharing any sensitive information on the platform (such as Auth Tokens, private configuration, etc.). ### LocalStack Slack Community -LocalStack Slack Community includes LocalStack users, contributors, and maintainers. If you need help with the community version of LocalStack, please use the `#help` channel. You can sign up for the [LocalStack Slack Community](https://localstack.cloud/slack) by creating an account. +LocalStack Slack Community includes LocalStack users, contributors, and maintainers. +If you need help with the community version of LocalStack, please use the `#help` channel. +You can sign up for the [LocalStack Slack Community](https://localstack.cloud/slack) by creating an account. -However, the messages on Slack are not accessible after three months, so it is not the best place to ask questions that may be useful to others in the future. For that, we recommend using LocalStack Discuss. +However, the messages on Slack are not accessible after three months, so it is not the best place to ask questions that may be useful to others in the future. +For that, we recommend using LocalStack Discuss. ### GitHub Issue @@ -54,7 +63,8 @@ You can use GitHub Issue to: - [Request new features](https://github.com/localstack/localstack/issues/new?assignees=&labels=type%3A+feature%2Cstatus%3A+triage+needed&template=feature-request.yml&title=feature+request%3A+%3Ctitle%3E) - [Report existing bugs](https://github.com/localstack/localstack/issues/new?assignees=&labels=type%3A+bug%2Cstatus%3A+triage+needed&template=bug-report.yml&title=bug%3A+%3Ctitle%3E) -Make sure to follow the issue templates and provide as much information as possible. If you have encountered outdated documentation, please report it on our [documentation GitHub page](https://github.com/localstack/docs). +Make sure to follow the issue templates and provide as much information as possible. +If you have encountered outdated documentation, please report it on our [documentation GitHub page](https://github.com/localstack/docs). ## Dedicated support @@ -101,7 +111,8 @@ To create a support ticket: You can optionally choose to continue the conversation via email or via the Web Application. 
{{< callout "note" >}} -In many scenarios, we ask our customers to use Diagnosis endpoint to help us retrieve additional information. To use LocalStack's Diagnosis endpoint: +In many scenarios, we ask our customers to use Diagnosis endpoint to help us retrieve additional information. +To use LocalStack's Diagnosis endpoint: - Set the environment variable `LS_LOG=trace` - Start LocalStack @@ -114,7 +125,8 @@ Ensure that you avoid sending the diagnostic output to public channels or forums ## Enterprise Support -A customer portal is a home behind a login where customers can view, open, and reply to their support tickets. Currently, the **customer portal** is only **available to Enterprise customers**. +A customer portal is a home behind a login where customers can view, open, and reply to their support tickets. +Currently, the **customer portal** is only **available to Enterprise customers**. You can find the customer portal here: [https://support.localstack.cloud/portal](https://support.localstack.cloud/portal) @@ -126,32 +138,39 @@ You can find the customer portal here: [https://support.localstack.cloud/portal] If you are a member of an organization with an enterprise LocalStack subscription, you will receive an invitation to create an account and join the LocalStack Support Portal via email. -Follow the instructions in the email and set up your account by clicking on the **Sign up** button. You will be asked to create a password. Once you do so, you will be able to log in and start using the customer portal to create, view, and engage with tickets. +Follow the instructions in the email and set up your account by clicking on the **Sign up** button. +You will be asked to create a password. +Once you do so, you will be able to log in and start using the customer portal to create, view, and engage with tickets. ### Creating a Support Ticket -You can open a new ticket with LocalStack support by going to the **Create a Support Ticket** link. You will be redirected to a form where you will have to provide certain information to file a new support ticket. +You can open a new ticket with LocalStack support by going to the **Create a Support Ticket** link. +You will be redirected to a form where you will have to provide certain information to file a new support ticket. -

-{{< img src="file-a-support-ticket.png" alt="Filing a support ticket" class="img-fluid shadow rounded" width="800px" >}} +

+{{< img src="file-a-support-ticket.png" alt="Filing a support ticket" class="img-fluid shadow rounded" width="800px" >}}

-The form consists of two parts. One is basic information, which is mandatory to fill out, and additional information, which adds more context to your issue but is not mandatory. Once all the mandatory fields are filled out, you can create a new support ticket by clicking on the Submit button. Once the ticket is submitted, it will be reported to LocalStack support, who will get back to you on that query as soon as possible. A ticket will show up in the ticket list as soon as it’s submitted. +The form consists of two parts. +One is basic information, which is mandatory to fill out, and additional information, which adds more context to your issue but is not mandatory. +Once all the mandatory fields are filled out, you can create a new support ticket by clicking on the Submit button. +Once the ticket is submitted, it will be reported to LocalStack support, who will get back to you on that query as soon as possible. +A ticket will show up in the ticket list as soon as it’s submitted. #### Basic Information You need to fill out the following fields, which are mandatory to open a new ticket: -- **Type** - Choose the type of your query from the following options: - - **Issue** - Select this when you are facing an issue using LocalStack. - - **General inquiry** - Select this when you have a general question regarding LocalStack. - - **Feature request** - Select this when you are looking for a feature that is not yet implemented in LocalStack. -- **Ticket name** - Provide a descriptive name for the ticket that summarizes your inquiry. -- **Description** - Provide a comprehensive description of your inquiry, explaining all the details that will help us understand your query. +- **Type** - Choose the type of your query from the following options: + - **Issue** - Select this when you are facing an issue using LocalStack. + - **General inquiry** - Select this when you have a general question regarding LocalStack. + - **Feature request** - Select this when you are looking for a feature that is not yet implemented in LocalStack. +- **Ticket name** - Provide a descriptive name for the ticket that summarizes your inquiry. +- **Description** - Provide a comprehensive description of your inquiry, explaining all the details that will help us understand your query. #### Additional Information -- **CI Issue?** - If the query is related to a CI issue, select the one that best fits your query from the dropdown. -- **Operating system** - From the dropdown, select the operating system you are using. -- **Affected Services** - From the dropdown, select the AWS service that is affected in your query. -- **File upload** - Here you can provide any additional files that you believe would be helpful for LocalStack support (e.g., screenshots, log files, etc.). \ No newline at end of file +- **CI Issue?** - If the query is related to a CI issue, select the one that best fits your query from the dropdown. +- **Operating system** - From the dropdown, select the operating system you are using. +- **Affected Services** - From the dropdown, select the AWS service that is affected in your query. +- **File upload** - Here you can provide any additional files that you believe would be helpful for LocalStack support (e.g., screenshots, log files, etc.). 
diff --git a/content/en/getting-started/installation.md b/content/en/getting-started/installation.md index ab7cd89cd7..f399e639da 100644 --- a/content/en/getting-started/installation.md +++ b/content/en/getting-started/installation.md @@ -11,7 +11,8 @@ cascade: ## LocalStack CLI -The quickest way get started with LocalStack is by using the LocalStack CLI. It allows you to start LocalStack from your command line. +The quickest way to get started with LocalStack is by using the LocalStack CLI. +It allows you to start LocalStack from your command line. Please make sure that you have a working [Docker installation](https://docs.docker.com/get-docker/) on your machine before moving on. The CLI starts and manages the LocalStack Docker container. @@ -357,7 +358,8 @@ $ docker run \ {{< callout "note" >}} - This command pulls the current nightly build from the `master` branch (if you don't have the image locally) and **not** the latest supported version. - If you want to use a specific version of LocalStack, use the appropriate tag: `docker run --rm -it -p 4566:4566 -p 4510-4559:4510-4559 localstack/localstack:`. Check-out the [LocalStack releases](https://github.com/localstack/localstack/releases) to know more about specific LocalStack versions. + If you want to use a specific version of LocalStack, use the appropriate tag: `docker run --rm -it -p 4566:4566 -p 4510-4559:4510-4559 localstack/localstack:`. + Check out the [LocalStack releases](https://github.com/localstack/localstack/releases) to learn more about specific LocalStack versions. - If you are using LocalStack with an [auth token]({{< ref "auth-token" >}}), you need to specify the image tag as `localstack/localstack-pro` in your Docker setup. Going forward, `localstack/localstack-pro` image will contain our Pro-supported services and APIs. @@ -371,7 +373,8 @@ $ docker run \ This could be seen as the "expert mode" of starting LocalStack. If you are looking for a simpler method of starting LocalStack, please use the [LocalStack CLI]({{< ref "#localstack-cli" >}}). -- To facilitate interoperability, configuration variables can be prefixed with `LOCALSTACK_` in docker. For instance, setting `LOCALSTACK_PERSISTENCE=1` is equivalent to `PERSISTENCE=1`. +- To facilitate interoperability, configuration variables can be prefixed with `LOCALSTACK_` in Docker. + For instance, setting `LOCALSTACK_PERSISTENCE=1` is equivalent to `PERSISTENCE=1`. - To configure an auth token, refer to the [auth token]({{< ref "auth-token" >}}) documentation. {{< /callout >}} @@ -395,7 +398,6 @@ $ helm upgrade --install localstack localstack-repo/localstack The Helm charts are not maintained in the main repository, but in a [separate one](https://github.com/localstack/helm-charts). - ## Updating The LocalStack CLI allows you to easily update the different components of LocalStack. @@ -445,7 +447,8 @@ $ DNS_ADDRESS=0 localstack start #### How should I access the LocalStack logs on my local machine? -You can now avail logging output and error reporting using LocalStack logs. To access the logs, run the following command: +You can now view logging output and error reporting using LocalStack logs. +To access the logs, run the following command: {{< command >}} $ localstack logs @@ -453,9 +456,11 @@ $ localstack logs AWS requests are now logged uniformly in the INFO log level (set by default or when `DEBUG=0`). The format is: + ```text AWS . 
=> () ``` + Requests to HTTP endpoints are logged in a similar way: ```text diff --git a/content/en/getting-started/quickstart/index.md b/content/en/getting-started/quickstart/index.md index f2ba1878b1..07a06a22cd 100644 --- a/content/en/getting-started/quickstart/index.md +++ b/content/en/getting-started/quickstart/index.md @@ -10,7 +10,9 @@ cascade: ## Introduction -In this quickstart guide, we'll walk you through the process of starting LocalStack on your local machine and deploying a [serverless image resizer application](https://github.com/localstack-samples/sample-serverless-image-resizer-s3-lambda) that utilizes several AWS services. This guide aims to help you understand how to use LocalStack for the development and testing of your AWS applications locally. It introduces you to the following key concepts: +In this quickstart guide, we'll walk you through the process of starting LocalStack on your local machine and deploying a [serverless image resizer application](https://github.com/localstack-samples/sample-serverless-image-resizer-s3-lambda) that utilizes several AWS services. +This guide aims to help you understand how to use LocalStack for the development and testing of your AWS applications locally. +It introduces you to the following key concepts: - Starting a LocalStack instance on your local machine. - Deploying an AWS serverless application infrastructure locally. @@ -46,7 +48,8 @@ An internal SES LocalStack testing endpoint (`/_localstack/aws/ses`) is configur - [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html) & [`awslocal` wrapper](https://docs.localstack.cloud/user-guide/integrations/aws-cli/#localstack-aws-cli-awslocal) - `jq`, `zip` & `curl` -You can start LocalStack using the `localstack` CLI. Start the LocalStack Pro container with your `LOCALSTACK_AUTH_TOKEN` pre-configured: +You can start LocalStack using the `localstack` CLI. +Start the LocalStack Pro container with your `LOCALSTACK_AUTH_TOKEN` pre-configured: {{< tabpane >}} {{< tab header="macOS/Linux" lang="shell" >}} @@ -74,7 +77,9 @@ You can now follow the instructions below to start LocalStack, deploy the sample ### Setup a virtual environment -To deploy the sample application, you need to have specific Python packages are installed. It is advisable to utilize a virtual environment for the installation process, allowing the packages to be installed in an isolated environment. Execute the following commands to create a virtual environment and install the packages in `requirements-dev.txt`: +To deploy the sample application, you need to have specific Python packages installed. +It is advisable to utilize a virtual environment for the installation process, allowing the packages to be installed in an isolated environment. +Execute the following commands to create a virtual environment and install the packages in `requirements-dev.txt`: {{< tabpane >}} {{< tab header="macOS/Linux" lang="shell" >}} @@ -90,7 +95,8 @@ pip install -r requirements-dev.txt {{< /tabpane >}} {{< callout "tip" >}} -If you are encountering issues with the installation of the packages, such as Pillow, ensure you use the same version as the Python Lambdas (3.9) for Pillow to work. 
+If you're using pyenv, install and activate Python 3.9 with the following commands: {{< command >}} $ pyenv install 3.9.0 $ pyenv global 3.9.0 @@ -99,9 +105,14 @@ $ pyenv global 3.9.0 ### Setup the serverless image resizer -This application enables serverless image resizing using [S3](https://docs.localstack.cloud/user-guide/aws/s3/), [SSM](https://docs.localstack.cloud/user-guide/aws/ssm/), [Lambda](https://docs.localstack.cloud/user-guide/aws/lambda/), [SNS](https://docs.localstack.cloud/user-guide/aws/sns/), and [SES](https://docs.localstack.cloud/user-guide/aws/ses/). A simple web interface allows users to upload and view resized images. A Lambda function generates S3 pre-signed URLs for direct uploads, while S3 bucket notifications trigger image resizing. Another Lambda function lists and provides pre-signed URLs for browser display. The application also handles Lambda failures through SNS and SES email notifications. +This application enables serverless image resizing using [S3](https://docs.localstack.cloud/user-guide/aws/s3/), [SSM](https://docs.localstack.cloud/user-guide/aws/ssm/), [Lambda](https://docs.localstack.cloud/user-guide/aws/lambda/), [SNS](https://docs.localstack.cloud/user-guide/aws/sns/), and [SES](https://docs.localstack.cloud/user-guide/aws/ses/). +A simple web interface allows users to upload and view resized images. +A Lambda function generates S3 pre-signed URLs for direct uploads, while S3 bucket notifications trigger image resizing. +Another Lambda function lists and provides pre-signed URLs for browser display. +The application also handles Lambda failures through SNS and SES email notifications. -The sample application uses AWS CLI and our `awslocal` wrapper to deploy the application to LocalStack. You can build and deploy the sample application on LocalStack by running the following command: +The sample application uses AWS CLI and our `awslocal` wrapper to deploy the application to LocalStack. +You can build and deploy the sample application on LocalStack by running the following command: {{< command >}} $ bin/deploy.sh @@ -110,7 +121,8 @@ $ bin/deploy.sh Alternatively, you can follow these instructions to deploy the sample application manually step-by-step. {{< callout "tip" >}} -In absence of the `awslocal` wrapper, you can use the `aws` CLI directly, by configuring an [endpoint URL](https://docs.localstack.cloud/user-guide/integrations/aws-cli/#configuring-an-endpoint-url) or a [custom profile](https://docs.localstack.cloud/user-guide/integrations/aws-cli/#configuring-a-custom-profile) like `localstack`. You can then swap `awslocal` with `aws --endpoint-url=http://localhost:4566` or `aws --profile=localstack` in the commands below. +In absence of the `awslocal` wrapper, you can use the `aws` CLI directly, by configuring an [endpoint URL](https://docs.localstack.cloud/user-guide/integrations/aws-cli/#configuring-an-endpoint-url) or a [custom profile](https://docs.localstack.cloud/user-guide/integrations/aws-cli/#configuring-a-custom-profile) like `localstack`. +You can then swap `awslocal` with `aws --endpoint-url=http://localhost:4566` or `aws --profile=localstack` in the commands below. {{< /callout >}} #### Create the S3 buckets @@ -139,7 +151,8 @@ $ awslocal ssm put-parameter \ $ awslocal sns create-topic --name failed-resize-topic {{< / command >}} -To receive immediate alerts in case of image resize failures, subscribe an email address to the system. 
You can use the following command to subscribe an email address to the SNS topic: +To receive immediate alerts in case of image resize failures, subscribe an email address to the system. +You can use the following command to subscribe an email address to the SNS topic: {{< command >}} $ awslocal sns subscribe \ @@ -213,7 +226,7 @@ mkdir package pip install -r requirements.txt -t package zip lambda.zip handler.py cd package -zip -r ../lambda.zip *; +zip -r ../lambda.zip *; cd ../.. {{< /tab >}} {{< /tabpane >}} @@ -270,17 +283,22 @@ To access the application, go to [**https://webapp.s3-website.localhost.localsta Serverless image resizer application -Paste the `presign` and `list` Lambda function URLs into the application and click **Apply**. Alternatively, click on **Load from API** to automatically load the URLs. +Paste the `presign` and `list` Lambda function URLs into the application and click **Apply**. +Alternatively, click on **Load from API** to automatically load the URLs. -Upload an image, and click **Upload**. The upload form uses the `presign` Lambda to request an S3 pre-signed POST URL, forwarding the POST request to S3. Asynchronous resizing (maximum 400x400 pixels) occurs through S3 bucket notifications. +Upload an image, and click **Upload**. +The upload form uses the `presign` Lambda to request an S3 pre-signed POST URL, forwarding the POST request to S3. +Asynchronous resizing (maximum 400x400 pixels) occurs through S3 bucket notifications. -If successful, the application displays a **success!** alert. Click **Refresh** to trigger your browser to request the `list` Lambda URL, returning a JSON document of all items in the images (`localstack-thumbnails-app-images`) and resized images (`localstack-thumbnails-app-resized`) bucket. +If successful, the application displays a **success!** alert. +Click **Refresh** to trigger your browser to request the `list` Lambda URL, returning a JSON document of all items in the images (`localstack-thumbnails-app-images`) and resized images (`localstack-thumbnails-app-resized`) bucket. Serverless image resizer application displaying a resized image ### View the deployed resources -You can inspect the resources deployed as part of the sample application by accessing the [**LocalStack Web Application**](https://app.localstack.cloud/). Navigate to your [**Default Instance**](https://app.localstack.cloud/inst/default/status) to view the deployed resources. +You can inspect the resources deployed as part of the sample application by accessing the [**LocalStack Web Application**](https://app.localstack.cloud/). +Navigate to your [**Default Instance**](https://app.localstack.cloud/inst/default/status) to view the deployed resources. Status Page of the LocalStack Web Application @@ -296,13 +314,15 @@ To run automated integration tests against the sample application, use the follo $ pytest -v {{< / command >}} -Additionally, you can verify that when the `resize` Lambda fails, an SNS message is sent to a topic that an SES subscription listens to, triggering an email with the raw failure message. 
+Since there's no real email server involved, you can use the LocalStack SES developer endpoint to list messages sent via SES: {{< command >}} $ curl -s http://localhost.localstack.cloud:4566/_aws/ses | jq {{< / command >}} -An alternative option is to use a service like MailHog or `smtp4dev`. Start LocalStack with `SMTP_HOST=host.docker.internal:1025`, pointing to the mock SMTP server. +An alternative option is to use a service like MailHog or `smtp4dev`. +Start LocalStack with `SMTP_HOST=host.docker.internal:1025`, pointing to the mock SMTP server. ### Destroy the local infrastructure @@ -312,13 +332,15 @@ Now that you've learned how to deploy a local AWS infrastructure for your sample $ localstack stop {{< / command >}} -LocalStack is ephemeral, meaning it doesn't persist any data across restarts. It runs inside a Docker container, and once it's stopped, all locally created resources are automatically removed. +LocalStack is ephemeral, meaning it doesn't persist any data across restarts. +It runs inside a Docker container, and once it's stopped, all locally created resources are automatically removed. To persist the local cloud resources across restarts, navigate to our [persistence documentation]({{< ref "user-guide/state-management/persistence" >}}) or learn about [Cloud Pods]({{< ref "user-guide/state-management/cloud-pods" >}}), our next generation state management utility. ## Next Steps -Congratulations on deploying an AWS application locally using LocalStack! To expand your LocalStack capabilities, explore the following based on your expertise: +Congratulations on deploying an AWS application locally using LocalStack! +To expand your LocalStack capabilities, explore the following based on your expertise: - [Tutorials]({{< ref "tutorials" >}}): Check out our tutorials to learn how to use LocalStack across various AWS services and application stacks. - [User Guide]({{< ref "user-guide" >}}): Explore LocalStack's emulated AWS services, third-party integrations, tooling, CI service providers, and more in our User Guide. diff --git a/content/en/legal/third-party-software-tools/index.md b/content/en/legal/third-party-software-tools/index.md index b71a0d3f8b..11c93e3d6c 100644 --- a/content/en/legal/third-party-software-tools/index.md +++ b/content/en/legal/third-party-software-tools/index.md @@ -27,4 +27,4 @@ requests | Apache License 2.0 subprocess32 | PSF License **Other tools:** | Elasticsearch | Apache License 2.0 -kinesis-mock | MIT License \ No newline at end of file +kinesis-mock | MIT License diff --git a/content/en/references/api-key.md b/content/en/references/api-key.md index b0dcdeccc5..6e5c99f5cc 100644 --- a/content/en/references/api-key.md +++ b/content/en/references/api-key.md @@ -10,9 +10,11 @@ aliases: --- {{< callout "warning" >}} -- LocalStack is transitioning from API Keys to Auth Tokens for activation. Auth Tokens streamline license management and remove the need for developers to adjust their setup when license changes occur. -- For detailed information and guidance on migrating your LocalStack setup to Auth Tokens, please consult our [Auth Token documentation]({{< ref "auth-token" >}}). -- API Keys will remain functional for LocalStack Pro and Enterprise users until the next major release. Following this release, LocalStack Pro and Enterprise will exclusively use Auth Tokens. +- LocalStack is transitioning from API Keys to Auth Tokens for activation. 
+ Auth Tokens streamline license management and remove the need for developers to adjust their setup when license changes occur. +- For detailed information and guidance on migrating your LocalStack setup to Auth Tokens, please consult our [Auth Token documentation]({{< ref "auth-token" >}}). +- API Keys will remain functional for LocalStack Pro and Enterprise users until the next major release. + Following this release, LocalStack Pro and Enterprise will exclusively use Auth Tokens. {{< /callout >}} The LocalStack API key is a unique identifier to activate your LocalStack license needed to start LocalStack Pro. @@ -20,7 +22,8 @@ You can find your API key in the [LocalStack Web app](https://app.localstack.clo This guide demonstrates how you can use your new LocalStack licenses and go over some best practices regarding the usage, activation, and safety of your LocalStack API key. {{< callout "warning" >}} -- Avoid sharing your API key with anyone. Ensure that you do not commit it to any source code management systems (like Git repositories). +- Avoid sharing your API key with anyone. + Ensure that you do not commit it to any source code management systems (like Git repositories). - If you push an API key to a public repository, it has potentially been exposed and might remain in the history (even if you try to rewrite it). - If you accidentally publish your API key, please [contact us](https://localstack.cloud/contact/) immediately to get your API key rotated! - If you want to use your API key in your CI environment, check out our [CI documentation]({{< ref "user-guide/ci" >}}) to see the proper way to handle secrets in your CI environment to store your API key securely. @@ -28,7 +31,8 @@ This guide demonstrates how you can use your new LocalStack licenses and go over ### Starting LocalStack via CLI -LocalStack expects your API key to be present in the environment variable `LOCALSTACK_API_KEY`. You can define the `LOCALSTACK_API_KEY` environment variable before or while starting LocalStack using the `localstack` CLI. +LocalStack expects your API key to be present in the environment variable `LOCALSTACK_API_KEY`. +You can define the `LOCALSTACK_API_KEY` environment variable before or while starting LocalStack using the `localstack` CLI. {{< tabpane >}} {{< tab header="macOS/Linux" lang="shell" >}} @@ -72,7 +76,8 @@ environment: - LOCALSTACK_API_KEY=${LOCALSTACK_API_KEY- } ``` -You can set the API key manually, or you can use the `export` command to set the API key in your current shell session. The API key will be passed into your LocalStack container, such that the key activation can take place. +You can set the API key manually, or you can use the `export` command to set the API key in your current shell session. +The API key will be passed into your LocalStack container, such that the key activation can take place. ## Licensing-related configuration @@ -86,7 +91,8 @@ The easiest way to check if LocalStack is activated is to query the health endpo $ curl localhost:4566/_localstack/health | jq {{< / command >}} -If a Pro-only [service]({{< ref "aws" >}}) -- like [XRay]({{< ref "xray" >}}) -- is running, LocalStack has started successfully. You can also check the logs of the LocalStack container to see if the activation was successful. +If a Pro-only [service]({{< ref "aws" >}}) -- like [XRay]({{< ref "xray" >}}) -- is running, LocalStack has started successfully. +You can also check the logs of the LocalStack container to see if the activation was successful. {{< command >}} [...] 
Successfully activated API key @@ -99,7 +105,7 @@ Otherwise, check our collected most [common activation issues](#common-activatio Since LocalStack v2.0.0, the image `localstack/localstack-pro` requires a successful key activation to start. If the key activation fails, LocalStack will quit with an error messages that may look something like this: -``` +```text =============================================== API key activation failed! 🔑❌ @@ -113,9 +119,9 @@ Due to this error, Localstack has quit. LocalStack pro features can only be used ``` There are several reasons a key activation can fail: -* Missing credentials: Using `localstack/localstack-pro` requires per default to have an API key set. -* Invalid key: there is no valid license associated with the key, for example because the license has expired. -* License server cannot be reached: LocalStack will try to perform an offline license activation if the license server cannot be reached, but will require a re-activation every 24 hours. +- Missing credentials: Using `localstack/localstack-pro` requires an API key to be set by default. +- Invalid key: there is no valid license associated with the key, for example because the license has expired. +- License server cannot be reached: LocalStack will try to perform an offline license activation if the license server cannot be reached, but will require a re-activation every 24 hours. If you are using the `localstack/localstack-pro` image, but cannot activate your license key, we recommend falling back to the community image `localstack/localstack`. If that is not an option, you can set `ACTIVATE_PRO=0` which will attempt to start LocalStack without pro features. diff --git a/content/en/references/arm64-support/index.md b/content/en/references/arm64-support/index.md index c5baeb8139..5a83d2b355 100644 --- a/content/en/references/arm64-support/index.md +++ b/content/en/references/arm64-support/index.md @@ -119,8 +119,8 @@ pyenv global 3.11.9 Then clone LocalStack to your machine, run `make install` and then `make start`. - ### Raspberry Pi + If you want to run LocalStack on your Raspberry Pi, make sure to use a 64bit operating system. In our experience, it works best on a Raspberry Pi 4 8GB with [Ubuntu Server 20.04 64Bit for Raspberry Pi](https://ubuntu.com/download/raspberry-pi). diff --git a/content/en/references/configuration.md b/content/en/references/configuration.md index 06bfdcda5c..e6b1ef43cc 100644 --- a/content/en/references/configuration.md +++ b/content/en/references/configuration.md @@ -20,7 +20,8 @@ For instance, setting `LOCALSTACK_PERSISTENCE=1` is equivalent to `PERSISTENCE=1 You can also use [Profiles](#profiles). -Configurations marked as **Deprecated** will be removed in the next major version. You can find previously removed configuration variables under [Legacy](#legacy). +Configurations marked as **Deprecated** will be removed in the next major version. +You can find previously removed configuration variables under [Legacy](#legacy). ## Core @@ -43,8 +44,6 @@ Options that affect the core LocalStack system. | `ALLOW_NONSTANDARD_REGIONS` | `0` (default) | Allows the use of non-standard AWS regions. By default, LocalStack only accepts [standard AWS regions](https://docs.aws.amazon.com/general/latest/gr/rande.html). | | `PARITY_AWS_ACCESS_KEY_ID` | `0` (default) | Enables the use production-like access key IDs. By default, LocalStack issues keys with `LSIA...` and `LKIA...` prefix, and will reject keys that start with `ASIA...` or `AKIA...`. 
| -[1]: http://docs.aws.amazon.com/cli/latest/reference/#available-services - ## CLI These options are applicable when using the CLI to start LocalStack. @@ -92,9 +91,10 @@ This section covers configuration options that are specific to certain AWS servi | `BIGDATA_DOCKER_FLAGS` | | Additional flags for the bigdata container. Same restrictions as `LAMBDA_DOCKER_FLAGS`. ### CloudFormation + | Variable | Example Values | Description | | - | - | - | -| `CFN_LEGACY_TEMPLATE_DEPLOYER` | `0` (default) \|`1` | Switch back to the old deployment engine. Note that this is only available temporarily to allow for a smoother roll-out of the new deployment order. +| `CFN_LEGACY_TEMPLATE_DEPLOYER` | `0` (default) \|`1` | Switch back to the old deployment engine. Note that this is only available temporarily to allow for a smoother roll-out of the new deployment order. | `CFN_PER_RESOURCE_TIMEOUT` | `300` (default) | Set the timeout to deploy each individual CloudFormation resource. | `CFN_VERBOSE_ERRORS` | `0` (default) \|`1` | Show exceptions for CloudFormation deploy errors. | `CFN_STRING_REPLACEMENT_DENY_LIST` | `""` (default) \|`https://api-1.execute-api.us-east-2.amazonaws.com/test-resource,https://api-2.execute-api.us-east-2.amazonaws.com/test-resource` | Comma-separated list of AWS URLs that should not be modified to point to Localstack. For example, when deploying a CloudFormation template we might want to leave certain resources pointing to actual AWS URLs, or even leave environment variables with URLs like that untouched. @@ -124,7 +124,7 @@ This section covers configuration options that are specific to certain AWS servi | `DYNAMODB_CORS` | `*` | Enable CORS support for specific allow-list list the domains separated by `,` use `*` for public access (default is `*`) | | `DYNAMODB_REMOVE_EXPIRED_ITEMS` | `0`\|`1` | Enables [Time to Live (TTL)](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/TTL.html) feature | -### ECR +### ECR | Variable | Example Values | Description | | - | - | - | @@ -165,7 +165,6 @@ This section covers configuration options that are specific to certain AWS servi | `PROVIDER_OVERRIDE_ELASTICACHE` | `legacy` | Use the legacy ElastiCache provider. | | `REDIS_CONTAINER_MODE` | `1`\|`0` (default) | Start ElastiCache cache nodes in separate containers instead of in the LocalStack container | - ### Elasticsearch {{< callout >}} @@ -180,6 +179,7 @@ See [here](#opensearch). | `PROVIDER_OVERRIDE_EVENTS` | `v2` | Use the new EventBridge provider. | ### IAM + | Variable | Example Values | Description | | - | - | - | | `ENFORCE_IAM` | `0` (default)\|`1` | Enable IAM policy evaluation and enforcement. If this is disabled (the default), IAM policies will have no effect to your requests. | @@ -313,7 +313,6 @@ Please be aware that the following options may have severe security implications | `EXTRA_CORS_EXPOSE_HEADERS` | | Comma-separated list of header names to be be added to Access-Control-Expose-Headers CORS header. | | `ENABLE_CONFIG_UPDATES` | `0` (default) | Whether to enable dynamic configuration updates at runtime. | - ## Emails Please check with your SMTP email service provider for the following settings. @@ -375,7 +374,6 @@ To learn more about these configuration options, see [Cloud Pods]({{< ref "user- | `DEVELOP_PORT` | | Port number for debugpy server | `WAIT_FOR_DEBUGGER` | | Forces LocalStack to wait for a debugger to start the services - ## DNS To learn more about these configuration options, see [DNS Server]({{< ref "dns-server" >}}). 
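As a minimal, illustrative sketch of how the options documented in the tables above are usually applied (the specific combination of `ENFORCE_IAM` and `CFN_VERBOSE_ERRORS` is only an example, not a recommended default), a configuration variable can simply be exported before starting LocalStack via the CLI:

```bash
# Example only: pick whichever variables from the tables above you need.
export ENFORCE_IAM=1          # enable IAM policy evaluation and enforcement
export CFN_VERBOSE_ERRORS=1   # show exceptions for CloudFormation deploy errors
localstack start
```

In Docker-based setups the same variables can be passed through the container environment, optionally with the `LOCALSTACK_` prefix noted earlier for interoperability.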
@@ -403,7 +401,6 @@ To learn more about these configuration options, see [DNS Server]({{< ref "dns-s | `LOCALSTACK_API_KEY` | | **Deprecated since 3.0.0** [API key]({{< ref "api-key" >}}) to activate LocalStack Pro.
**Use the `LOCALSTACK_AUTH_TOKEN` instead (except for [CI environments]({{< ref "user-guide/ci/" >}})).** | | `LOG_LICENSE_ISSUES` | `0` \| `1` (default) | Whether to log issues with the license activation to the console. | - ## Legacy These configurations have already been removed and **won't have any effect** on newer versions of LocalStack. @@ -416,7 +413,7 @@ These configurations have already been removed and **won't have any effect** on | `_BACKEND` | 3.0.0 | `http://localhost:7577` | Custom endpoint URL to use for a specific service, where `` is the uppercase service name. | | `_PORT_EXTERNAL` | 3.0.0 | `4567` | Port number to expose a specific service externally . `SQS_PORT_EXTERNAL`, e.g. , is used when returning queue URLs from the SQS service to the client. | | `ACTIVATE_NEW_POD_CLIENT` | 3.0.0 | `0`\|`1` (default) | Whether to use the new Cloud Pods client leveraging LocalStack container's APIs. | -| `BIGDATA_MONO_CONTAINER` | 3.0.0 |`0`\|`1` (default) | Whether to spin Big Data services inside the LocalStack main container. Glue jobs breaks when using `BIGDATA_MONO_CONTAINER=0`. | +| `BIGDATA_MONO_CONTAINER` | 3.0.0 |`0`\|`1` (default) | Whether to spin Big Data services inside the LocalStack main container. Glue jobs break when using `BIGDATA_MONO_CONTAINER=0`. | | `DEFAULT_REGION` | 3.0.0 | `us-east-1` (default) | AWS region to use when talking to the API (needs to be activated via `USE_SINGLE_REGION=1`). LocalStack now has full multi-region support. | | `EDGE_BIND_HOST` | 3.0.0 | `127.0.0.1` (default), `0.0.0.0` (docker)| Address the edge service binds to. Use `GATEWAY_LISTEN` instead. | | `EDGE_FORWARD_URL` | 3.0.0 | `http://10.0.10.5678` | Optional target URL to forward all edge requests to (e.g., for distributed deployments) | diff --git a/content/en/references/coverage/_index.md b/content/en/references/coverage/_index.md index f2c30713ef..3e4f671c2d 100644 --- a/content/en/references/coverage/_index.md +++ b/content/en/references/coverage/_index.md @@ -18,7 +18,7 @@ function searchForServiceNameInLink() { var input, filter, div, elements, a, i, txtValue; input = document.getElementById('serviceNameCoverageInput'); filter = input.value.toUpperCase(); - div = document.getElementsByClassName('section-index')[0] + div = document.getElementsByClassName('section-index')[0] elements = div.getElementsByClassName('entry'); // Loop through all list items, and hide those who don't match the search query diff --git a/content/en/references/coverage/coverage_account/index.md b/content/en/references/coverage/coverage_account/index.md index ba948edb13..e8ac970b6f 100644 --- a/content/en/references/coverage/coverage_account/index.md +++ b/content/en/references/coverage/coverage_account/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="account" >}} ## Testing Details + {{< localstack_coverage_details service="account" >}} diff --git a/content/en/references/coverage/coverage_acm-pca/index.md b/content/en/references/coverage/coverage_acm-pca/index.md index bccaf32ee2..00ee7c6f7a 100644 --- a/content/en/references/coverage/coverage_acm-pca/index.md +++ b/content/en/references/coverage/coverage_acm-pca/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="acm-pca" >}} ## Testing Details + {{< localstack_coverage_details service="acm-pca" >}} diff --git a/content/en/references/coverage/coverage_acm/index.md b/content/en/references/coverage/coverage_acm/index.md index 
83880babfb..eaf8627e6d 100644 --- a/content/en/references/coverage/coverage_acm/index.md +++ b/content/en/references/coverage/coverage_acm/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="acm" >}} ## Testing Details + {{< localstack_coverage_details service="acm" >}} diff --git a/content/en/references/coverage/coverage_amplify/index.md b/content/en/references/coverage/coverage_amplify/index.md index 1bc1befa04..c22d178e2e 100644 --- a/content/en/references/coverage/coverage_amplify/index.md +++ b/content/en/references/coverage/coverage_amplify/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="amplify" >}} ## Testing Details + {{< localstack_coverage_details service="amplify" >}} diff --git a/content/en/references/coverage/coverage_apigateway/index.md b/content/en/references/coverage/coverage_apigateway/index.md index e8a759da15..e9853b58b8 100644 --- a/content/en/references/coverage/coverage_apigateway/index.md +++ b/content/en/references/coverage/coverage_apigateway/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="apigateway" >}} ## Testing Details + {{< localstack_coverage_details service="apigateway" >}} diff --git a/content/en/references/coverage/coverage_apigatewaymanagementapi/index.md b/content/en/references/coverage/coverage_apigatewaymanagementapi/index.md index 08e63020d2..75671e74ca 100644 --- a/content/en/references/coverage/coverage_apigatewaymanagementapi/index.md +++ b/content/en/references/coverage/coverage_apigatewaymanagementapi/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="apigatewaymanagementapi" >}} ## Testing Details + {{< localstack_coverage_details service="apigatewaymanagementapi" >}} diff --git a/content/en/references/coverage/coverage_apigatewayv2/index.md b/content/en/references/coverage/coverage_apigatewayv2/index.md index 602bdda08f..da35a49e7c 100644 --- a/content/en/references/coverage/coverage_apigatewayv2/index.md +++ b/content/en/references/coverage/coverage_apigatewayv2/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="apigatewayv2" >}} ## Testing Details + {{< localstack_coverage_details service="apigatewayv2" >}} diff --git a/content/en/references/coverage/coverage_appconfig/index.md b/content/en/references/coverage/coverage_appconfig/index.md index 88b151cbf1..3bc202698e 100644 --- a/content/en/references/coverage/coverage_appconfig/index.md +++ b/content/en/references/coverage/coverage_appconfig/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="appconfig" >}} ## Testing Details + {{< localstack_coverage_details service="appconfig" >}} diff --git a/content/en/references/coverage/coverage_appconfigdata/index.md b/content/en/references/coverage/coverage_appconfigdata/index.md index cbe636ea14..4b3bb2e949 100644 --- a/content/en/references/coverage/coverage_appconfigdata/index.md +++ b/content/en/references/coverage/coverage_appconfigdata/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="appconfigdata" >}} ## Testing Details + {{< localstack_coverage_details service="appconfigdata" >}} diff --git a/content/en/references/coverage/coverage_application-autoscaling/index.md 
b/content/en/references/coverage/coverage_application-autoscaling/index.md index 9019113b22..097920d8db 100644 --- a/content/en/references/coverage/coverage_application-autoscaling/index.md +++ b/content/en/references/coverage/coverage_application-autoscaling/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="application-autoscaling" >}} ## Testing Details + {{< localstack_coverage_details service="application-autoscaling" >}} diff --git a/content/en/references/coverage/coverage_appsync/index.md b/content/en/references/coverage/coverage_appsync/index.md index 44395ecde8..4d8267c3d5 100644 --- a/content/en/references/coverage/coverage_appsync/index.md +++ b/content/en/references/coverage/coverage_appsync/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="appsync" >}} ## Testing Details + {{< localstack_coverage_details service="appsync" >}} diff --git a/content/en/references/coverage/coverage_athena/index.md b/content/en/references/coverage/coverage_athena/index.md index 879cc26e7b..f1b173a3be 100644 --- a/content/en/references/coverage/coverage_athena/index.md +++ b/content/en/references/coverage/coverage_athena/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="athena" >}} ## Testing Details + {{< localstack_coverage_details service="athena" >}} diff --git a/content/en/references/coverage/coverage_autoscaling/index.md b/content/en/references/coverage/coverage_autoscaling/index.md index 332510daf8..ac75ba3c12 100644 --- a/content/en/references/coverage/coverage_autoscaling/index.md +++ b/content/en/references/coverage/coverage_autoscaling/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="autoscaling" >}} ## Testing Details + {{< localstack_coverage_details service="autoscaling" >}} diff --git a/content/en/references/coverage/coverage_backup/index.md b/content/en/references/coverage/coverage_backup/index.md index a726d2a82c..3a1ff820ba 100644 --- a/content/en/references/coverage/coverage_backup/index.md +++ b/content/en/references/coverage/coverage_backup/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="backup" >}} ## Testing Details + {{< localstack_coverage_details service="backup" >}} diff --git a/content/en/references/coverage/coverage_batch/index.md b/content/en/references/coverage/coverage_batch/index.md index 54147dfee8..6721a03e95 100644 --- a/content/en/references/coverage/coverage_batch/index.md +++ b/content/en/references/coverage/coverage_batch/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="batch" >}} ## Testing Details + {{< localstack_coverage_details service="batch" >}} diff --git a/content/en/references/coverage/coverage_ce/index.md b/content/en/references/coverage/coverage_ce/index.md index 7903944c12..921013f34d 100644 --- a/content/en/references/coverage/coverage_ce/index.md +++ b/content/en/references/coverage/coverage_ce/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="ce" >}} ## Testing Details + {{< localstack_coverage_details service="ce" >}} diff --git a/content/en/references/coverage/coverage_cloudformation/index.md b/content/en/references/coverage/coverage_cloudformation/index.md index 180eeab28d..ee55e97da9 100644 --- 
a/content/en/references/coverage/coverage_cloudformation/index.md +++ b/content/en/references/coverage/coverage_cloudformation/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="cloudformation" >}} ## Testing Details + {{< localstack_coverage_details service="cloudformation" >}} diff --git a/content/en/references/coverage/coverage_cloudfront/index.md b/content/en/references/coverage/coverage_cloudfront/index.md index b5fb87d1b2..456f1094a1 100644 --- a/content/en/references/coverage/coverage_cloudfront/index.md +++ b/content/en/references/coverage/coverage_cloudfront/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="cloudfront" >}} ## Testing Details + {{< localstack_coverage_details service="cloudfront" >}} diff --git a/content/en/references/coverage/coverage_cloudtrail/index.md b/content/en/references/coverage/coverage_cloudtrail/index.md index 2dc5115b01..807ad54f1d 100644 --- a/content/en/references/coverage/coverage_cloudtrail/index.md +++ b/content/en/references/coverage/coverage_cloudtrail/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="cloudtrail" >}} ## Testing Details + {{< localstack_coverage_details service="cloudtrail" >}} diff --git a/content/en/references/coverage/coverage_cloudwatch/index.md b/content/en/references/coverage/coverage_cloudwatch/index.md index 7b8e976c40..f1245f890b 100644 --- a/content/en/references/coverage/coverage_cloudwatch/index.md +++ b/content/en/references/coverage/coverage_cloudwatch/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="cloudwatch" >}} ## Testing Details + {{< localstack_coverage_details service="cloudwatch" >}} diff --git a/content/en/references/coverage/coverage_codecommit/index.md b/content/en/references/coverage/coverage_codecommit/index.md index 9a3c846b06..5cfe58e9b5 100644 --- a/content/en/references/coverage/coverage_codecommit/index.md +++ b/content/en/references/coverage/coverage_codecommit/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="codecommit" >}} ## Testing Details + {{< localstack_coverage_details service="codecommit" >}} diff --git a/content/en/references/coverage/coverage_cognito-identity/index.md b/content/en/references/coverage/coverage_cognito-identity/index.md index b20a2cdc01..1f6adde57e 100644 --- a/content/en/references/coverage/coverage_cognito-identity/index.md +++ b/content/en/references/coverage/coverage_cognito-identity/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="cognito-identity" >}} ## Testing Details + {{< localstack_coverage_details service="cognito-identity" >}} diff --git a/content/en/references/coverage/coverage_cognito-idp/index.md b/content/en/references/coverage/coverage_cognito-idp/index.md index 811f648998..8f433d967d 100644 --- a/content/en/references/coverage/coverage_cognito-idp/index.md +++ b/content/en/references/coverage/coverage_cognito-idp/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="cognito-idp" >}} ## Testing Details + {{< localstack_coverage_details service="cognito-idp" >}} diff --git a/content/en/references/coverage/coverage_config/index.md b/content/en/references/coverage/coverage_config/index.md index 4257a2c520..6e2943f550 
100644 --- a/content/en/references/coverage/coverage_config/index.md +++ b/content/en/references/coverage/coverage_config/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="config" >}} ## Testing Details + {{< localstack_coverage_details service="config" >}} diff --git a/content/en/references/coverage/coverage_dms/index.md b/content/en/references/coverage/coverage_dms/index.md index 09fda7837b..21c66a99e2 100644 --- a/content/en/references/coverage/coverage_dms/index.md +++ b/content/en/references/coverage/coverage_dms/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="dms" >}} ## Testing Details + {{< localstack_coverage_details service="dms" >}} diff --git a/content/en/references/coverage/coverage_docdb/index.md b/content/en/references/coverage/coverage_docdb/index.md index 0a22d0ae54..2fae2b5405 100644 --- a/content/en/references/coverage/coverage_docdb/index.md +++ b/content/en/references/coverage/coverage_docdb/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="docdb" >}} ## Testing Details + {{< localstack_coverage_details service="docdb" >}} diff --git a/content/en/references/coverage/coverage_dynamodb/index.md b/content/en/references/coverage/coverage_dynamodb/index.md index ef7bbdd441..661e0419de 100644 --- a/content/en/references/coverage/coverage_dynamodb/index.md +++ b/content/en/references/coverage/coverage_dynamodb/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="dynamodb" >}} ## Testing Details + {{< localstack_coverage_details service="dynamodb" >}} diff --git a/content/en/references/coverage/coverage_dynamodbstreams/index.md b/content/en/references/coverage/coverage_dynamodbstreams/index.md index 4cb2adbd9d..c0a4dc6f5c 100644 --- a/content/en/references/coverage/coverage_dynamodbstreams/index.md +++ b/content/en/references/coverage/coverage_dynamodbstreams/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="dynamodbstreams" >}} ## Testing Details + {{< localstack_coverage_details service="dynamodbstreams" >}} diff --git a/content/en/references/coverage/coverage_ec2/index.md b/content/en/references/coverage/coverage_ec2/index.md index 8161e65db1..74cd22f8db 100644 --- a/content/en/references/coverage/coverage_ec2/index.md +++ b/content/en/references/coverage/coverage_ec2/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="ec2" >}} ## Testing Details + {{< localstack_coverage_details service="ec2" >}} diff --git a/content/en/references/coverage/coverage_ecr/index.md b/content/en/references/coverage/coverage_ecr/index.md index c304057805..dfc028d0ae 100644 --- a/content/en/references/coverage/coverage_ecr/index.md +++ b/content/en/references/coverage/coverage_ecr/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="ecr" >}} ## Testing Details + {{< localstack_coverage_details service="ecr" >}} diff --git a/content/en/references/coverage/coverage_ecs/index.md b/content/en/references/coverage/coverage_ecs/index.md index abbc197672..c49670afb6 100644 --- a/content/en/references/coverage/coverage_ecs/index.md +++ b/content/en/references/coverage/coverage_ecs/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< 
localstack_coverage_table service="ecs" >}} ## Testing Details + {{< localstack_coverage_details service="ecs" >}} diff --git a/content/en/references/coverage/coverage_efs/index.md b/content/en/references/coverage/coverage_efs/index.md index 36c54330dd..c6afc095f7 100644 --- a/content/en/references/coverage/coverage_efs/index.md +++ b/content/en/references/coverage/coverage_efs/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="efs" >}} ## Testing Details + {{< localstack_coverage_details service="efs" >}} diff --git a/content/en/references/coverage/coverage_eks/index.md b/content/en/references/coverage/coverage_eks/index.md index 07d2ddad47..74bc35218b 100644 --- a/content/en/references/coverage/coverage_eks/index.md +++ b/content/en/references/coverage/coverage_eks/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="eks" >}} ## Testing Details + {{< localstack_coverage_details service="eks" >}} diff --git a/content/en/references/coverage/coverage_elasticache/index.md b/content/en/references/coverage/coverage_elasticache/index.md index e99d6bfc84..29a8571182 100644 --- a/content/en/references/coverage/coverage_elasticache/index.md +++ b/content/en/references/coverage/coverage_elasticache/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="elasticache" >}} ## Testing Details + {{< localstack_coverage_details service="elasticache" >}} diff --git a/content/en/references/coverage/coverage_elasticbeanstalk/index.md b/content/en/references/coverage/coverage_elasticbeanstalk/index.md index 255696e9e6..e7e72880ee 100644 --- a/content/en/references/coverage/coverage_elasticbeanstalk/index.md +++ b/content/en/references/coverage/coverage_elasticbeanstalk/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="elasticbeanstalk" >}} ## Testing Details + {{< localstack_coverage_details service="elasticbeanstalk" >}} diff --git a/content/en/references/coverage/coverage_elastictranscoder/index.md b/content/en/references/coverage/coverage_elastictranscoder/index.md index 9340b111c6..711e6cf1b3 100644 --- a/content/en/references/coverage/coverage_elastictranscoder/index.md +++ b/content/en/references/coverage/coverage_elastictranscoder/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="elastictranscoder" >}} ## Testing Details + {{< localstack_coverage_details service="elastictranscoder" >}} diff --git a/content/en/references/coverage/coverage_elb/index.md b/content/en/references/coverage/coverage_elb/index.md index b0fc74eb27..1af341e56c 100644 --- a/content/en/references/coverage/coverage_elb/index.md +++ b/content/en/references/coverage/coverage_elb/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="elb" >}} ## Testing Details + {{< localstack_coverage_details service="elb" >}} diff --git a/content/en/references/coverage/coverage_elbv2/index.md b/content/en/references/coverage/coverage_elbv2/index.md index 815a45c6fa..47a6e737a5 100644 --- a/content/en/references/coverage/coverage_elbv2/index.md +++ b/content/en/references/coverage/coverage_elbv2/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="elbv2" >}} ## Testing Details + {{< localstack_coverage_details service="elbv2" 
>}} diff --git a/content/en/references/coverage/coverage_emr-serverless/index.md b/content/en/references/coverage/coverage_emr-serverless/index.md index 6ddb11f4f1..3d2bc4a4cf 100644 --- a/content/en/references/coverage/coverage_emr-serverless/index.md +++ b/content/en/references/coverage/coverage_emr-serverless/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="emr-serverless" >}} ## Testing Details + {{< localstack_coverage_details service="emr-serverless" >}} diff --git a/content/en/references/coverage/coverage_emr/index.md b/content/en/references/coverage/coverage_emr/index.md index d138d9a6dd..b0dca11d2e 100644 --- a/content/en/references/coverage/coverage_emr/index.md +++ b/content/en/references/coverage/coverage_emr/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="emr" >}} ## Testing Details + {{< localstack_coverage_details service="emr" >}} diff --git a/content/en/references/coverage/coverage_es/index.md b/content/en/references/coverage/coverage_es/index.md index e6340721ef..0ae98c114c 100644 --- a/content/en/references/coverage/coverage_es/index.md +++ b/content/en/references/coverage/coverage_es/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="es" >}} ## Testing Details + {{< localstack_coverage_details service="es" >}} diff --git a/content/en/references/coverage/coverage_events/index.md b/content/en/references/coverage/coverage_events/index.md index d94a12697e..9c20fd47d0 100644 --- a/content/en/references/coverage/coverage_events/index.md +++ b/content/en/references/coverage/coverage_events/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="events" >}} ## Testing Details + {{< localstack_coverage_details service="events" >}} diff --git a/content/en/references/coverage/coverage_firehose/index.md b/content/en/references/coverage/coverage_firehose/index.md index 0e2b98a595..1932efce2b 100644 --- a/content/en/references/coverage/coverage_firehose/index.md +++ b/content/en/references/coverage/coverage_firehose/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="firehose" >}} ## Testing Details + {{< localstack_coverage_details service="firehose" >}} diff --git a/content/en/references/coverage/coverage_fis/index.md b/content/en/references/coverage/coverage_fis/index.md index dcb5cbc753..7d5d019bf9 100644 --- a/content/en/references/coverage/coverage_fis/index.md +++ b/content/en/references/coverage/coverage_fis/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="fis" >}} ## Testing Details + {{< localstack_coverage_details service="fis" >}} diff --git a/content/en/references/coverage/coverage_glacier/index.md b/content/en/references/coverage/coverage_glacier/index.md index 413d95c959..81b32ccf38 100644 --- a/content/en/references/coverage/coverage_glacier/index.md +++ b/content/en/references/coverage/coverage_glacier/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="glacier" >}} ## Testing Details + {{< localstack_coverage_details service="glacier" >}} diff --git a/content/en/references/coverage/coverage_glue/index.md b/content/en/references/coverage/coverage_glue/index.md index af4663bffc..e964e1e86a 100644 --- 
a/content/en/references/coverage/coverage_glue/index.md +++ b/content/en/references/coverage/coverage_glue/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="glue" >}} ## Testing Details + {{< localstack_coverage_details service="glue" >}} diff --git a/content/en/references/coverage/coverage_iam/index.md b/content/en/references/coverage/coverage_iam/index.md index d6d655bc4a..28409996a0 100644 --- a/content/en/references/coverage/coverage_iam/index.md +++ b/content/en/references/coverage/coverage_iam/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="iam" >}} ## Testing Details + {{< localstack_coverage_details service="iam" >}} diff --git a/content/en/references/coverage/coverage_identitystore/index.md b/content/en/references/coverage/coverage_identitystore/index.md index 35555771d6..34eda20d33 100644 --- a/content/en/references/coverage/coverage_identitystore/index.md +++ b/content/en/references/coverage/coverage_identitystore/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="identitystore" >}} ## Testing Details + {{< localstack_coverage_details service="identitystore" >}} diff --git a/content/en/references/coverage/coverage_iot-data/index.md b/content/en/references/coverage/coverage_iot-data/index.md index 60328c560d..82f9db0acf 100644 --- a/content/en/references/coverage/coverage_iot-data/index.md +++ b/content/en/references/coverage/coverage_iot-data/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="iot-data" >}} ## Testing Details + {{< localstack_coverage_details service="iot-data" >}} diff --git a/content/en/references/coverage/coverage_iot/index.md b/content/en/references/coverage/coverage_iot/index.md index 3839604003..301fe1be4a 100644 --- a/content/en/references/coverage/coverage_iot/index.md +++ b/content/en/references/coverage/coverage_iot/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="iot" >}} ## Testing Details + {{< localstack_coverage_details service="iot" >}} diff --git a/content/en/references/coverage/coverage_iotanalytics/index.md b/content/en/references/coverage/coverage_iotanalytics/index.md index 2f00f0a8d5..8ccd2690a3 100644 --- a/content/en/references/coverage/coverage_iotanalytics/index.md +++ b/content/en/references/coverage/coverage_iotanalytics/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="iotanalytics" >}} ## Testing Details + {{< localstack_coverage_details service="iotanalytics" >}} diff --git a/content/en/references/coverage/coverage_iotwireless/index.md b/content/en/references/coverage/coverage_iotwireless/index.md index 19ffd8a6c8..8774fbf8e3 100644 --- a/content/en/references/coverage/coverage_iotwireless/index.md +++ b/content/en/references/coverage/coverage_iotwireless/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="iotwireless" >}} ## Testing Details + {{< localstack_coverage_details service="iotwireless" >}} diff --git a/content/en/references/coverage/coverage_kafka/index.md b/content/en/references/coverage/coverage_kafka/index.md index 393b97e69a..bed3e6e40b 100644 --- a/content/en/references/coverage/coverage_kafka/index.md +++ b/content/en/references/coverage/coverage_kafka/index.md @@ -7,7 +7,9 @@ 
hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="kafka" >}} ## Testing Details + {{< localstack_coverage_details service="kafka" >}} diff --git a/content/en/references/coverage/coverage_kinesis/index.md b/content/en/references/coverage/coverage_kinesis/index.md index a86a8c65a9..1f3ab9b445 100644 --- a/content/en/references/coverage/coverage_kinesis/index.md +++ b/content/en/references/coverage/coverage_kinesis/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="kinesis" >}} ## Testing Details + {{< localstack_coverage_details service="kinesis" >}} diff --git a/content/en/references/coverage/coverage_kinesisanalytics/index.md b/content/en/references/coverage/coverage_kinesisanalytics/index.md index e13f888f91..70f1fd618a 100644 --- a/content/en/references/coverage/coverage_kinesisanalytics/index.md +++ b/content/en/references/coverage/coverage_kinesisanalytics/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="kinesisanalytics" >}} ## Testing Details + {{< localstack_coverage_details service="kinesisanalytics" >}} diff --git a/content/en/references/coverage/coverage_kinesisanalyticsv2/index.md b/content/en/references/coverage/coverage_kinesisanalyticsv2/index.md index c0c07e60f0..bb6c5024dc 100644 --- a/content/en/references/coverage/coverage_kinesisanalyticsv2/index.md +++ b/content/en/references/coverage/coverage_kinesisanalyticsv2/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="kinesisanalyticsv2" >}} ## Testing Details + {{< localstack_coverage_details service="kinesisanalyticsv2" >}} diff --git a/content/en/references/coverage/coverage_kms/index.md b/content/en/references/coverage/coverage_kms/index.md index 33842d678f..0f7f880c45 100644 --- a/content/en/references/coverage/coverage_kms/index.md +++ b/content/en/references/coverage/coverage_kms/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="kms" >}} ## Testing Details + {{< localstack_coverage_details service="kms" >}} diff --git a/content/en/references/coverage/coverage_lakeformation/index.md b/content/en/references/coverage/coverage_lakeformation/index.md index 3311c8cdae..5e0afd4302 100644 --- a/content/en/references/coverage/coverage_lakeformation/index.md +++ b/content/en/references/coverage/coverage_lakeformation/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="lakeformation" >}} ## Testing Details + {{< localstack_coverage_details service="lakeformation" >}} diff --git a/content/en/references/coverage/coverage_lambda/index.md b/content/en/references/coverage/coverage_lambda/index.md index d41793a91d..9aaf6dc469 100644 --- a/content/en/references/coverage/coverage_lambda/index.md +++ b/content/en/references/coverage/coverage_lambda/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="lambda" >}} ## Testing Details + {{< localstack_coverage_details service="lambda" >}} diff --git a/content/en/references/coverage/coverage_logs/index.md b/content/en/references/coverage/coverage_logs/index.md index cc3a3041d0..db807ce18d 100644 --- a/content/en/references/coverage/coverage_logs/index.md +++ b/content/en/references/coverage/coverage_logs/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< 
localstack_coverage_table service="logs" >}} ## Testing Details + {{< localstack_coverage_details service="logs" >}} diff --git a/content/en/references/coverage/coverage_managedblockchain/index.md b/content/en/references/coverage/coverage_managedblockchain/index.md index e0ca9ae908..8580912ebd 100644 --- a/content/en/references/coverage/coverage_managedblockchain/index.md +++ b/content/en/references/coverage/coverage_managedblockchain/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="managedblockchain" >}} ## Testing Details + {{< localstack_coverage_details service="managedblockchain" >}} diff --git a/content/en/references/coverage/coverage_mediastore-data/index.md b/content/en/references/coverage/coverage_mediastore-data/index.md index dd64f34f51..bc091b3df1 100644 --- a/content/en/references/coverage/coverage_mediastore-data/index.md +++ b/content/en/references/coverage/coverage_mediastore-data/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="mediastore-data" >}} ## Testing Details + {{< localstack_coverage_details service="mediastore-data" >}} diff --git a/content/en/references/coverage/coverage_mediastore/index.md b/content/en/references/coverage/coverage_mediastore/index.md index 9b1d83f2cb..e68f12b4f8 100644 --- a/content/en/references/coverage/coverage_mediastore/index.md +++ b/content/en/references/coverage/coverage_mediastore/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="mediastore" >}} ## Testing Details + {{< localstack_coverage_details service="mediastore" >}} diff --git a/content/en/references/coverage/coverage_memorydb/index.md b/content/en/references/coverage/coverage_memorydb/index.md index 5bd9a49476..b0776a3c5c 100644 --- a/content/en/references/coverage/coverage_memorydb/index.md +++ b/content/en/references/coverage/coverage_memorydb/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="memorydb" >}} ## Testing Details + {{< localstack_coverage_details service="memorydb" >}} diff --git a/content/en/references/coverage/coverage_mq/index.md b/content/en/references/coverage/coverage_mq/index.md index 9772b21516..62ff8ecc47 100644 --- a/content/en/references/coverage/coverage_mq/index.md +++ b/content/en/references/coverage/coverage_mq/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="mq" >}} ## Testing Details + {{< localstack_coverage_details service="mq" >}} diff --git a/content/en/references/coverage/coverage_mwaa/index.md b/content/en/references/coverage/coverage_mwaa/index.md index b5b4c4178b..4d2015f842 100644 --- a/content/en/references/coverage/coverage_mwaa/index.md +++ b/content/en/references/coverage/coverage_mwaa/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="mwaa" >}} ## Testing Details + {{< localstack_coverage_details service="mwaa" >}} diff --git a/content/en/references/coverage/coverage_neptune/index.md b/content/en/references/coverage/coverage_neptune/index.md index 6873773635..e8d6fcea1f 100644 --- a/content/en/references/coverage/coverage_neptune/index.md +++ b/content/en/references/coverage/coverage_neptune/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="neptune" >}} ## Testing Details + {{< 
localstack_coverage_details service="neptune" >}} diff --git a/content/en/references/coverage/coverage_opensearch/index.md b/content/en/references/coverage/coverage_opensearch/index.md index a53b570c38..a33238b9ec 100644 --- a/content/en/references/coverage/coverage_opensearch/index.md +++ b/content/en/references/coverage/coverage_opensearch/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="opensearch" >}} ## Testing Details + {{< localstack_coverage_details service="opensearch" >}} diff --git a/content/en/references/coverage/coverage_organizations/index.md b/content/en/references/coverage/coverage_organizations/index.md index 9f43c0689b..5560de5706 100644 --- a/content/en/references/coverage/coverage_organizations/index.md +++ b/content/en/references/coverage/coverage_organizations/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="organizations" >}} ## Testing Details + {{< localstack_coverage_details service="organizations" >}} diff --git a/content/en/references/coverage/coverage_pinpoint/index.md b/content/en/references/coverage/coverage_pinpoint/index.md index 8e97f60974..6dd8aa569b 100644 --- a/content/en/references/coverage/coverage_pinpoint/index.md +++ b/content/en/references/coverage/coverage_pinpoint/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="pinpoint" >}} ## Testing Details + {{< localstack_coverage_details service="pinpoint" >}} diff --git a/content/en/references/coverage/coverage_pipes/index.md b/content/en/references/coverage/coverage_pipes/index.md index 0c283cc819..b3f56065f4 100644 --- a/content/en/references/coverage/coverage_pipes/index.md +++ b/content/en/references/coverage/coverage_pipes/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="pipes" >}} ## Testing Details + {{< localstack_coverage_details service="pipes" >}} diff --git a/content/en/references/coverage/coverage_qldb-session/index.md b/content/en/references/coverage/coverage_qldb-session/index.md index b935879e3e..ec09cbd8b6 100644 --- a/content/en/references/coverage/coverage_qldb-session/index.md +++ b/content/en/references/coverage/coverage_qldb-session/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="qldb-session" >}} ## Testing Details + {{< localstack_coverage_details service="qldb-session" >}} diff --git a/content/en/references/coverage/coverage_qldb/index.md b/content/en/references/coverage/coverage_qldb/index.md index 87a79395ae..6b1f76ebc0 100644 --- a/content/en/references/coverage/coverage_qldb/index.md +++ b/content/en/references/coverage/coverage_qldb/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="qldb" >}} ## Testing Details + {{< localstack_coverage_details service="qldb" >}} diff --git a/content/en/references/coverage/coverage_ram/index.md b/content/en/references/coverage/coverage_ram/index.md index 8a56988fcf..84a7fe99fc 100644 --- a/content/en/references/coverage/coverage_ram/index.md +++ b/content/en/references/coverage/coverage_ram/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="ram" >}} ## Testing Details + {{< localstack_coverage_details service="ram" >}} diff --git a/content/en/references/coverage/coverage_rds-data/index.md 
b/content/en/references/coverage/coverage_rds-data/index.md index 270559d429..daf36a7460 100644 --- a/content/en/references/coverage/coverage_rds-data/index.md +++ b/content/en/references/coverage/coverage_rds-data/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="rds-data" >}} ## Testing Details + {{< localstack_coverage_details service="rds-data" >}} diff --git a/content/en/references/coverage/coverage_rds/index.md b/content/en/references/coverage/coverage_rds/index.md index 37739b3cd7..bdcb729965 100644 --- a/content/en/references/coverage/coverage_rds/index.md +++ b/content/en/references/coverage/coverage_rds/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="rds" >}} ## Testing Details + {{< localstack_coverage_details service="rds" >}} diff --git a/content/en/references/coverage/coverage_redshift-data/index.md b/content/en/references/coverage/coverage_redshift-data/index.md index 4ed5733c43..bc73e337d0 100644 --- a/content/en/references/coverage/coverage_redshift-data/index.md +++ b/content/en/references/coverage/coverage_redshift-data/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="redshift-data" >}} ## Testing Details + {{< localstack_coverage_details service="redshift-data" >}} diff --git a/content/en/references/coverage/coverage_redshift/index.md b/content/en/references/coverage/coverage_redshift/index.md index 349816be74..59522b7734 100644 --- a/content/en/references/coverage/coverage_redshift/index.md +++ b/content/en/references/coverage/coverage_redshift/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="redshift" >}} ## Testing Details + {{< localstack_coverage_details service="redshift" >}} diff --git a/content/en/references/coverage/coverage_resource-groups/index.md b/content/en/references/coverage/coverage_resource-groups/index.md index 0b89c6ca4a..a6cabefdd3 100644 --- a/content/en/references/coverage/coverage_resource-groups/index.md +++ b/content/en/references/coverage/coverage_resource-groups/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="resource-groups" >}} ## Testing Details + {{< localstack_coverage_details service="resource-groups" >}} diff --git a/content/en/references/coverage/coverage_resourcegroupstaggingapi/index.md b/content/en/references/coverage/coverage_resourcegroupstaggingapi/index.md index 1a6163ec04..06c9f070e8 100644 --- a/content/en/references/coverage/coverage_resourcegroupstaggingapi/index.md +++ b/content/en/references/coverage/coverage_resourcegroupstaggingapi/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="resourcegroupstaggingapi" >}} ## Testing Details + {{< localstack_coverage_details service="resourcegroupstaggingapi" >}} diff --git a/content/en/references/coverage/coverage_route53/index.md b/content/en/references/coverage/coverage_route53/index.md index 2a0663e61d..1d4010cd60 100644 --- a/content/en/references/coverage/coverage_route53/index.md +++ b/content/en/references/coverage/coverage_route53/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="route53" >}} ## Testing Details + {{< localstack_coverage_details service="route53" >}} diff --git 
a/content/en/references/coverage/coverage_route53resolver/index.md b/content/en/references/coverage/coverage_route53resolver/index.md index 709fcb4727..45b34447db 100644 --- a/content/en/references/coverage/coverage_route53resolver/index.md +++ b/content/en/references/coverage/coverage_route53resolver/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="route53resolver" >}} ## Testing Details + {{< localstack_coverage_details service="route53resolver" >}} diff --git a/content/en/references/coverage/coverage_s3/index.md b/content/en/references/coverage/coverage_s3/index.md index 16e7457dc1..1d6c974c96 100644 --- a/content/en/references/coverage/coverage_s3/index.md +++ b/content/en/references/coverage/coverage_s3/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="s3" >}} ## Testing Details + {{< localstack_coverage_details service="s3" >}} diff --git a/content/en/references/coverage/coverage_s3control/index.md b/content/en/references/coverage/coverage_s3control/index.md index 3e37f409f8..32ed32fe3b 100644 --- a/content/en/references/coverage/coverage_s3control/index.md +++ b/content/en/references/coverage/coverage_s3control/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="s3control" >}} ## Testing Details + {{< localstack_coverage_details service="s3control" >}} diff --git a/content/en/references/coverage/coverage_sagemaker-runtime/index.md b/content/en/references/coverage/coverage_sagemaker-runtime/index.md index 476f902a17..6ca547c1d7 100644 --- a/content/en/references/coverage/coverage_sagemaker-runtime/index.md +++ b/content/en/references/coverage/coverage_sagemaker-runtime/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="sagemaker-runtime" >}} ## Testing Details + {{< localstack_coverage_details service="sagemaker-runtime" >}} diff --git a/content/en/references/coverage/coverage_sagemaker/index.md b/content/en/references/coverage/coverage_sagemaker/index.md index be4674140f..05e00bce92 100644 --- a/content/en/references/coverage/coverage_sagemaker/index.md +++ b/content/en/references/coverage/coverage_sagemaker/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="sagemaker" >}} ## Testing Details + {{< localstack_coverage_details service="sagemaker" >}} diff --git a/content/en/references/coverage/coverage_scheduler/index.md b/content/en/references/coverage/coverage_scheduler/index.md index 8c5135bfa9..f9447c96b2 100644 --- a/content/en/references/coverage/coverage_scheduler/index.md +++ b/content/en/references/coverage/coverage_scheduler/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="scheduler" >}} ## Testing Details + {{< localstack_coverage_details service="scheduler" >}} diff --git a/content/en/references/coverage/coverage_secretsmanager/index.md b/content/en/references/coverage/coverage_secretsmanager/index.md index 3c7f9e2545..a7b080e0fc 100644 --- a/content/en/references/coverage/coverage_secretsmanager/index.md +++ b/content/en/references/coverage/coverage_secretsmanager/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="secretsmanager" >}} ## Testing Details + {{< localstack_coverage_details service="secretsmanager" >}} diff --git 
a/content/en/references/coverage/coverage_serverlessrepo/index.md b/content/en/references/coverage/coverage_serverlessrepo/index.md index ca5d577332..695a29470b 100644 --- a/content/en/references/coverage/coverage_serverlessrepo/index.md +++ b/content/en/references/coverage/coverage_serverlessrepo/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="serverlessrepo" >}} ## Testing Details + {{< localstack_coverage_details service="serverlessrepo" >}} diff --git a/content/en/references/coverage/coverage_servicediscovery/index.md b/content/en/references/coverage/coverage_servicediscovery/index.md index 414c4c326f..abda81fd4f 100644 --- a/content/en/references/coverage/coverage_servicediscovery/index.md +++ b/content/en/references/coverage/coverage_servicediscovery/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="servicediscovery" >}} ## Testing Details + {{< localstack_coverage_details service="servicediscovery" >}} diff --git a/content/en/references/coverage/coverage_ses/index.md b/content/en/references/coverage/coverage_ses/index.md index 24d8701895..ce2d28510b 100644 --- a/content/en/references/coverage/coverage_ses/index.md +++ b/content/en/references/coverage/coverage_ses/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="ses" >}} ## Testing Details + {{< localstack_coverage_details service="ses" >}} diff --git a/content/en/references/coverage/coverage_sesv2/index.md b/content/en/references/coverage/coverage_sesv2/index.md index af779b9e3b..717fd83e46 100644 --- a/content/en/references/coverage/coverage_sesv2/index.md +++ b/content/en/references/coverage/coverage_sesv2/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="sesv2" >}} ## Testing Details + {{< localstack_coverage_details service="sesv2" >}} diff --git a/content/en/references/coverage/coverage_sns/index.md b/content/en/references/coverage/coverage_sns/index.md index 501c805986..59ff0ffa31 100644 --- a/content/en/references/coverage/coverage_sns/index.md +++ b/content/en/references/coverage/coverage_sns/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="sns" >}} ## Testing Details + {{< localstack_coverage_details service="sns" >}} diff --git a/content/en/references/coverage/coverage_sqs/index.md b/content/en/references/coverage/coverage_sqs/index.md index 87618fd860..e7c12a3eaf 100644 --- a/content/en/references/coverage/coverage_sqs/index.md +++ b/content/en/references/coverage/coverage_sqs/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="sqs" >}} ## Testing Details + {{< localstack_coverage_details service="sqs" >}} diff --git a/content/en/references/coverage/coverage_ssm/index.md b/content/en/references/coverage/coverage_ssm/index.md index cc71600c51..3213244dab 100644 --- a/content/en/references/coverage/coverage_ssm/index.md +++ b/content/en/references/coverage/coverage_ssm/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="ssm" >}} ## Testing Details + {{< localstack_coverage_details service="ssm" >}} diff --git a/content/en/references/coverage/coverage_sso-admin/index.md b/content/en/references/coverage/coverage_sso-admin/index.md index 5eac825dec..cf1311da03 100644 --- 
a/content/en/references/coverage/coverage_sso-admin/index.md +++ b/content/en/references/coverage/coverage_sso-admin/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="sso-admin" >}} ## Testing Details + {{< localstack_coverage_details service="sso-admin" >}} diff --git a/content/en/references/coverage/coverage_stepfunctions/index.md b/content/en/references/coverage/coverage_stepfunctions/index.md index a8ec76b871..1a61305e0c 100644 --- a/content/en/references/coverage/coverage_stepfunctions/index.md +++ b/content/en/references/coverage/coverage_stepfunctions/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="stepfunctions" >}} ## Testing Details + {{< localstack_coverage_details service="stepfunctions" >}} diff --git a/content/en/references/coverage/coverage_sts/index.md b/content/en/references/coverage/coverage_sts/index.md index ae0462616e..8aaadfe15d 100644 --- a/content/en/references/coverage/coverage_sts/index.md +++ b/content/en/references/coverage/coverage_sts/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="sts" >}} ## Testing Details + {{< localstack_coverage_details service="sts" >}} diff --git a/content/en/references/coverage/coverage_support/index.md b/content/en/references/coverage/coverage_support/index.md index 4e18f10f14..1a42cac65f 100644 --- a/content/en/references/coverage/coverage_support/index.md +++ b/content/en/references/coverage/coverage_support/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="support" >}} ## Testing Details + {{< localstack_coverage_details service="support" >}} diff --git a/content/en/references/coverage/coverage_swf/index.md b/content/en/references/coverage/coverage_swf/index.md index 94b256c3f9..31e84cf996 100644 --- a/content/en/references/coverage/coverage_swf/index.md +++ b/content/en/references/coverage/coverage_swf/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="swf" >}} ## Testing Details + {{< localstack_coverage_details service="swf" >}} diff --git a/content/en/references/coverage/coverage_textract/index.md b/content/en/references/coverage/coverage_textract/index.md index 7af74455b5..99296918e6 100644 --- a/content/en/references/coverage/coverage_textract/index.md +++ b/content/en/references/coverage/coverage_textract/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="textract" >}} ## Testing Details + {{< localstack_coverage_details service="textract" >}} diff --git a/content/en/references/coverage/coverage_timestream-query/index.md b/content/en/references/coverage/coverage_timestream-query/index.md index d6a020911b..e0b451397c 100644 --- a/content/en/references/coverage/coverage_timestream-query/index.md +++ b/content/en/references/coverage/coverage_timestream-query/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="timestream-query" >}} ## Testing Details + {{< localstack_coverage_details service="timestream-query" >}} diff --git a/content/en/references/coverage/coverage_timestream-write/index.md b/content/en/references/coverage/coverage_timestream-write/index.md index d0a8c90442..19e864ab70 100644 --- a/content/en/references/coverage/coverage_timestream-write/index.md +++ 
b/content/en/references/coverage/coverage_timestream-write/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="timestream-write" >}} ## Testing Details + {{< localstack_coverage_details service="timestream-write" >}} diff --git a/content/en/references/coverage/coverage_transcribe/index.md b/content/en/references/coverage/coverage_transcribe/index.md index 13bb4afcf4..1d4c24b408 100644 --- a/content/en/references/coverage/coverage_transcribe/index.md +++ b/content/en/references/coverage/coverage_transcribe/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="transcribe" >}} ## Testing Details + {{< localstack_coverage_details service="transcribe" >}} diff --git a/content/en/references/coverage/coverage_transfer/index.md b/content/en/references/coverage/coverage_transfer/index.md index 88ec35a7c7..be0cb21de9 100644 --- a/content/en/references/coverage/coverage_transfer/index.md +++ b/content/en/references/coverage/coverage_transfer/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="transfer" >}} ## Testing Details + {{< localstack_coverage_details service="transfer" >}} diff --git a/content/en/references/coverage/coverage_wafv2/index.md b/content/en/references/coverage/coverage_wafv2/index.md index 3758bf7075..b356f81660 100644 --- a/content/en/references/coverage/coverage_wafv2/index.md +++ b/content/en/references/coverage/coverage_wafv2/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="wafv2" >}} ## Testing Details + {{< localstack_coverage_details service="wafv2" >}} diff --git a/content/en/references/coverage/coverage_xray/index.md b/content/en/references/coverage/coverage_xray/index.md index 9335c19e86..0ea58574fa 100644 --- a/content/en/references/coverage/coverage_xray/index.md +++ b/content/en/references/coverage/coverage_xray/index.md @@ -7,7 +7,9 @@ hide_readingtime: true --- ## Coverage Overview + {{< localstack_coverage_table service="xray" >}} ## Testing Details + {{< localstack_coverage_details service="xray" >}} diff --git a/content/en/references/cross-account-access.md b/content/en/references/cross-account-access.md index e033aac7e9..d7457d8b5d 100644 --- a/content/en/references/cross-account-access.md +++ b/content/en/references/cross-account-access.md @@ -18,40 +18,35 @@ Please report any issues on our [GitHub issue tracker](https://github.com/locals Cross-account/cross-region access happens when a client attempts to access a resource in another account or region than what it is configured with: {{< command >}} -# # Create a queue in one account and region -# - $ AWS_ACCESS_KEY_ID=111111111111 awslocal sqs create-queue \ --queue-name my-queue \ --region ap-south-1 + { "QueueUrl": "http://sqs.ap-south-1.localhost.localstack.cloud:443/111111111111/my-queue" } + -# # Set some attributes -# - $ AWS_ACCESS_KEY_ID=111111111111 awslocal sqs set-queue-attributes \ --attributes VisibilityTimeout=60 \ --queue-url http://sqs.ap-south-1.localhost.localstack.cloud:443/111111111111/my-queue \ - --region ap-south-1 - -# -# Retrieve the queue attribute from another account and region. -# The required information for LocalStack to locate the queue is available in the queue URL. 
-# + --region ap-south-1 +# Retrieve the queue attribute from another account and region +# The required information for LocalStack to locate the queue is available in the queue URL $ AWS_ACCESS_KEY_ID=222222222222 awslocal sqs get-queue-attributes \ --attribute-names VisibilityTimeout \ --region eu-central-1 \ --queue-url http://sqs.ap-south-1.localhost.localstack.cloud:443/111111111111/my-queue + { "Attributes": { "VisibilityTimeout": "60" } } + {{< /command >}} ## Cross-Account diff --git a/content/en/references/custom-tls-certificates.md b/content/en/references/custom-tls-certificates.md index 9e007cd6c4..8be4d091f6 100644 --- a/content/en/references/custom-tls-certificates.md +++ b/content/en/references/custom-tls-certificates.md @@ -24,7 +24,7 @@ They all can be summarised as: 1. get your proxy's custom certificate into the system certificate store, and 2. configure [`requests`](https://pypi.python.org/pypi/requests) to use the custom certificate, -3. configure [`curl`](https://curl.se/) to use the custom certificate, and +3. configure [`curl`](https://curl.se/) to use the custom certificate, and 4. configure [`node.js`](https://nodejs.org/) to use the custom certificate. ## Creating a custom docker image @@ -53,12 +53,13 @@ $ docker build -t . {{< callout "tip" >}} Certificate files must end in `.crt` to be included in the system certificate store. -If your certificate file ends with `.pem`, you can rename it to end in `.crt`. +If your certificate file ends with `.pem`, you can rename it to end in `.crt`. {{< /callout >}} ### Starting LocalStack with the custom image -LocalStack now needs to be configured to use this custom image. The workflow is different depending on how you start localstack. +LocalStack now needs to be configured to use this custom image. +The workflow is different depending on how you start localstack. {{< tabpane lang="bash">}} {{< tab header="CLI" lang="bash" >}} @@ -77,7 +78,8 @@ services: ## Custom TLS certificates with init hooks -It is recommended to create a `boot` init hook. Create a directory on your local system that includes +It is recommended to create a `boot` init hook. +Create a directory on your local system that includes * the certificate you wish to copy, and * the following shell script: @@ -103,11 +105,14 @@ and follow the instructions fn the [init hooks documentation]({{< ref "init-hook ### Linux -On linux the custom certificate should be added to your `ca-certificates` bundle. For example on Debian based systems (as root): +On linux the custom certificate should be added to your `ca-certificates` bundle. +For example on Debian based systems (as root): {{< command >}} # cp /usr/local/share/ca-certificates + # update-ca-certificates + {{< / command >}} Then run LocalStack with the environment variables `REQUESTS_CA_BUNDLE`, `CURL_CA_BUNDLE`, and `NODE_EXTRA_CA_CERTS``: @@ -121,7 +126,8 @@ $ NODE_EXTRA_CA_CERTS=/etc/ssl/certs/ca-certificates.crt \ ### macOS -On macOS the custom certificate should be added to your keychain. See [this Apple support article](https://support.apple.com/en-gb/guide/keychain-access/kyca2431/mac) for more information. +On macOS the custom certificate should be added to your keychain. +See [this Apple support article](https://support.apple.com/en-gb/guide/keychain-access/kyca2431/mac) for more information. 
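From the command line, one possible way to do this (a sketch that assumes your proxy's CA certificate is saved as `my-proxy-ca.crt`) is the built-in `security` tool:

```bash
# add the proxy CA certificate to the system keychain and mark it as trusted
sudo security add-trusted-cert -d -r trustRoot \
  -k /Library/Keychains/System.keychain my-proxy-ca.crt
```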
Then run LocalStack with the environment variables `REQUESTS_CA_BUNDLE`, `CURL_CA_BUNDLE`, and `NODE_EXTRA_CA_CERTS``: @@ -134,4 +140,5 @@ $ NODE_EXTRA_CA_CERTS=/etc/ssl/certs/ca-certificates.crt \ ### Windows -Currently host mode does not work with Windows. If you are using WSL2 you should follow the [Linux]({{< ref "#linux" >}}) steps above. +Currently host mode does not work with Windows. +If you are using WSL2 you should follow the [Linux]({{< ref "#linux" >}}) steps above. diff --git a/content/en/references/docker-images.md b/content/en/references/docker-images.md index cb9d6ce36f..c904e0f95d 100644 --- a/content/en/references/docker-images.md +++ b/content/en/references/docker-images.md @@ -7,40 +7,59 @@ description: > Overview of LocalStack Docker images and their purpose --- -LocalStack functions as a local “mini-cloud” operating system that runs inside a Docker container. LocalStack has multiple components, which include process management, file system abstraction, event processing, schedulers, and more. Running inside a Docker container, LocalStack exposes external network ports for integrations, SDKs, or CLI interfaces to connect to LocalStack APIs. The LocalStack & LocalStack Pro Docker images have been downloaded over 130+ million times and provide a multi-arch build compatible with AMD/x86 and ARM-based CPU architectures. This section will cover the different Docker images available for LocalStack and how to use them. +LocalStack functions as a local “mini-cloud” operating system that runs inside a Docker container. +LocalStack has multiple components, which include process management, file system abstraction, event processing, schedulers, and more. +Running inside a Docker container, LocalStack exposes external network ports for integrations, SDKs, or CLI interfaces to connect to LocalStack APIs. +The LocalStack & LocalStack Pro Docker images have been downloaded over 130+ million times and provide a multi-arch build compatible with AMD/x86 and ARM-based CPU architectures. +This section will cover the different Docker images available for LocalStack and how to use them. ## LocalStack Community image -The LocalStack Community image (`localstack/localstack`) contains the community and open-source version of our [core cloud emulator](https://github.com/localstack/localstack). To use the LocalStack Community image, you can pull the image from Docker Hub: +The LocalStack Community image (`localstack/localstack`) contains the community and open-source version of our [core cloud emulator](https://github.com/localstack/localstack). +To use the LocalStack Community image, you can pull the image from Docker Hub: {{< command >}} $ docker pull localstack/localstack:latest {{< / command >}} -To use the LocalStack Community image, you don't need to sign-up for an account on [LocalStack Web Application](https://app.localstack.cloud). The Community image is free to use and does not require any API key to run. The Community image can be used to run [local AWS services](https://docs.localstack.cloud/user-guide/aws/) with [integrations](https://docs.localstack.cloud/user-guide/integrations/) on your local machine or in your [continuous integration pipelines](https://docs.localstack.cloud/user-guide/ci/). +To use the LocalStack Community image, you don't need to sign-up for an account on [LocalStack Web Application](https://app.localstack.cloud). +The Community image is free to use and does not require any API key to run. 
+The Community image can be used to run [local AWS services](https://docs.localstack.cloud/user-guide/aws/) with [integrations](https://docs.localstack.cloud/user-guide/integrations/) on your local machine or in your [continuous integration pipelines](https://docs.localstack.cloud/user-guide/ci/). -The Community image also covers a limited set of [LocalStack Tools](https://docs.localstack.cloud/user-guide/tools/) to make your life as a cloud developer easier. You can use [LocalStack Desktop](https://docs.localstack.cloud/user-guide/tools/localstack-desktop/) or [LocalStack Docker Extension](https://docs.localstack.cloud/user-guide/tools/localstack-docker-extension/) to use LocalStack with a graphical user interface. +The Community image also covers a limited set of [LocalStack Tools](https://docs.localstack.cloud/user-guide/tools/) to make your life as a cloud developer easier. +You can use [LocalStack Desktop](https://docs.localstack.cloud/user-guide/tools/localstack-desktop/) or [LocalStack Docker Extension](https://docs.localstack.cloud/user-guide/tools/localstack-docker-extension/) to use LocalStack with a graphical user interface. -You can use the Community image to start your LocalStack container using various [installation methods](https://docs.localstack.cloud/getting-started/installation/). While configuring to run LocalStack with Docker or Docker Compose, run the `localstack/localstack` image with the appropriate tag you have pulled (if not `latest`). +You can use the Community image to start your LocalStack container using various [installation methods](https://docs.localstack.cloud/getting-started/installation/). +While configuring to run LocalStack with Docker or Docker Compose, run the `localstack/localstack` image with the appropriate tag you have pulled (if not `latest`). ## LocalStack Pro image -LocalStack Pro contains various advanced extensions to the LocalStack base platform. With LocalStack Pro image, you can access all the emulated AWS cloud services running entirely on your local machine. To use the LocalStack Pro image, you can pull the image from Docker Hub: +LocalStack Pro contains various advanced extensions to the LocalStack base platform. +With LocalStack Pro image, you can access all the emulated AWS cloud services running entirely on your local machine. +To use the LocalStack Pro image, you can pull the image from Docker Hub: {{< command >}} $ docker pull localstack/localstack-pro:latest {{< / command >}} -To use the LocalStack Pro image, you must configure an environment variable named `LOCALSTACK_AUTH_TOKEN` to contain your auth token. The LocalStack Pro image will display a warning if you do not set an auth token (or if the license is invalid/expired) and will not activate the Pro features. LocalStack Pro gives you access to the complete set of LocalStack features, including the [LocalStack Web Application](https://app.localstack.cloud) and [dedicated customer support](https://docs.localstack.cloud/getting-started/help-and-support/#pro-support). +To use the LocalStack Pro image, you must configure an environment variable named `LOCALSTACK_AUTH_TOKEN` to contain your auth token. +The LocalStack Pro image will display a warning if you do not set an auth token (or if the license is invalid/expired) and will not activate the Pro features. 
+LocalStack Pro gives you access to the complete set of LocalStack features, including the [LocalStack Web Application](https://app.localstack.cloud) and [dedicated customer support](https://docs.localstack.cloud/getting-started/help-and-support/#pro-support). -You can use the Pro image to start your LocalStack container using various [installation methods](https://docs.localstack.cloud/getting-started/installation/). While configuring to run LocalStack with Docker or Docker Compose, run the `localstack/localstack-pro` image with the appropriate tag you have pulled (if not `latest`). +You can use the Pro image to start your LocalStack container using various [installation methods](https://docs.localstack.cloud/getting-started/installation/). +While configuring to run LocalStack with Docker or Docker Compose, run the `localstack/localstack-pro` image with the appropriate tag you have pulled (if not `latest`). {{< callout >}} -Earlier, we maintained `localstack/localstack-light` and `localstack/localstack-full` images. They have been deprecated and are removed with the LocalStack 2.0 release. The [BigData image](https://hub.docker.com/r/localstack/bigdata/tags), which started as a `bigdata_container` container, has also been deprecated in favor of a BigData Mono container which installs dependencies directly into the LocalStack (`localstack-main`) container. +Earlier, we maintained `localstack/localstack-light` and `localstack/localstack-full` images. +They have been deprecated and are removed with the LocalStack 2.0 release. +The [BigData image](https://hub.docker.com/r/localstack/bigdata/tags), which started as a `bigdata_container` container, has also been deprecated in favor of a BigData Mono container which installs dependencies directly into the LocalStack (`localstack-main`) container. {{< /callout >}} ## Image tags -We use tags for versions with significant features, enhancements, or bug fixes - following [semantic versioning](https://semver.org). To ensure that we move quickly and steadily, we run nightly builds, where all our updates are available on the `latest` tag of LocalStack's Docker image. We intend to announce more significant features and enhancements during major & minor releases. We occasionally create patch releases for minor bug fixes and enhancements, to ensure that we can deliver changes quickly while not breaking your existing workflows (in case you prefer not to use `latest`). +We use tags for versions with significant features, enhancements, or bug fixes - following [semantic versioning](https://semver.org). +To ensure that we move quickly and steadily, we run nightly builds, where all our updates are available on the `latest` tag of LocalStack's Docker image. +We intend to announce more significant features and enhancements during major & minor releases. +We occasionally create patch releases for minor bug fixes and enhancements, to ensure that we can deliver changes quickly while not breaking your existing workflows (in case you prefer not to use `latest`). To check out the various tags available for LocalStack, you can visit the [LocalStack Community](https://hub.docker.com/r/localstack/localstack/tags?page=1&ordering=last_updated) & [LocalStack Pro](https://hub.docker.com/r/localstack/localstack-pro/tags?page=1&ordering=last_updated) Docker Hub pages. 
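As an illustration of the Pro setup described above, a minimal invocation of the Pro image (a sketch that assumes `LOCALSTACK_AUTH_TOKEN` is already exported in your shell) could look like this:

```bash
# start LocalStack Pro, exposing the edge port and the external service port range;
# the Docker socket mount lets LocalStack spawn helper containers (e.g. for Lambda)
docker run --rm -it \
  -p 4566:4566 -p 4510-4559:4510-4559 \
  -e LOCALSTACK_AUTH_TOKEN=${LOCALSTACK_AUTH_TOKEN:?} \
  -v /var/run/docker.sock:/var/run/docker.sock \
  localstack/localstack-pro
```

If the token is missing or invalid, the container prints a warning and the Pro features stay disabled, as noted above.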
diff --git a/content/en/references/external-ports.md b/content/en/references/external-ports.md index 25d8ad3f55..3e14ca2db4 100644 --- a/content/en/references/external-ports.md +++ b/content/en/references/external-ports.md @@ -13,7 +13,7 @@ This documentation discusses two approaches to access these external services wi ## Proxy Functionality for External Services LocalStack offers a proxy functionality to access external services indirectly. -In this approach, LocalStack assigns local domains to the external services based on the individual service's configuration. +In this approach, LocalStack assigns local domains to the external services based on the individual service's configuration. For instance, if OpenSearch is configured to use the [`OPENSEARCH_ENDPOINT_STRATEGY=domain`]({{< ref "opensearch#endpoints" >}}) setting, a cluster can be reached using the domain name `...localhost.localstack.cloud`. Incoming messages to these domains are relayed to servers running on ports that do not require external accessibility. diff --git a/content/en/references/filesystem.md b/content/en/references/filesystem.md index 2265d9524c..dc1f1fce6b 100644 --- a/content/en/references/filesystem.md +++ b/content/en/references/filesystem.md @@ -45,8 +45,8 @@ LocalStack uses following directory layout when running within a container. - `/var/lib/localstack/tmp`: temporary data that is not expected to survive LocalStack runs (may be cleared when LocalStack starts or stops) - `/var/lib/localstack/cache`: temporary data that is expected to survive LocalStack runs (is not cleared when LocalStack starts or stops) - ### Configuration + - `/etc/localstack`: configuration directory - `/etc/localstack/init`: root directory for [initialization hooks]({{< ref `init-hooks` >}})
@@ -133,7 +131,6 @@ For example, you have created an OpenSearch cluster and are trying to access tha
-
}}" class="justify-content-between d-flex flex-column text-center"> diff --git a/content/en/references/network-troubleshooting/endpoint-url/_index.md b/content/en/references/network-troubleshooting/endpoint-url/_index.md index 65efa39605..b7092008f6 100644 --- a/content/en/references/network-troubleshooting/endpoint-url/_index.md +++ b/content/en/references/network-troubleshooting/endpoint-url/_index.md @@ -12,7 +12,10 @@ This documentation provides step-by-step guidance on how to access LocalStack se {{< figure src="../images/1.svg" width="400" >}} -Suppose you have LocalStack installed on your machine and want to access it using the AWS CLI. To connect, you must expose port 4566 from your LocalStack instance and connect to `localhost` or a domain name that points to `localhost`. While the LocalStack CLI does this automatically, when running the Docker container directly or with docker compose, you must configure it manually. Check out the [getting started documentation]({{< ref "getting-started/installation" >}}) for more information. +Suppose you have LocalStack installed on your machine and want to access it using the AWS CLI. +To connect, you must expose port 4566 from your LocalStack instance and connect to `localhost` or a domain name that points to `localhost`. +While the LocalStack CLI does this automatically, when running the Docker container directly or with docker compose, you must configure it manually. +Check out the [getting started documentation]({{< ref "getting-started/installation" >}}) for more information. {{< callout "tip" >}} If you bind a domain name to `localhost`, ensure that you are not subject to [DNS rebind protection]({{< ref "dns-server#dns-rebind-protection" >}}). @@ -24,7 +27,7 @@ You can also use the `GATEWAY_LISTEN` [configuration variable]({{< ref "referenc {{< figure src="../images/4.svg" width="400" >}} -Suppose your code is running inside an ECS container that LocalStack has created. +Suppose your code is running inside an ECS container that LocalStack has created. The LocalStack instance is available at the domain `localhost.localstack.cloud`. All subdomains of `localhost.localstack.cloud` also resolve to the LocalStack instance, e.g. API Gateway default URLs. @@ -55,7 +58,7 @@ aws --endpoint-url http://localstack-main:4566 s3api list-buckets {{}} services: localstack: - # ... 
other configuration here + # other configuration here environment: MAIN_DOCKER_NETWORK=ls networks: @@ -71,7 +74,6 @@ networks: {{}} - ## From your container {{< figure src="../images/7.svg" width="400" >}} @@ -95,7 +97,7 @@ localstack wait # get the ip address of the LocalStack container docker inspect localstack-main | \ - jq -r '.[0].NetworkSettings.Networks | to_entries | .[].value.IPAddress' + jq -r '.[0].NetworkSettings.Networks | to_entries | .[].value.IPAddress' # prints 172.27.0.2 # run your application container @@ -108,7 +110,7 @@ docker run --rm -it --network ls --name localstack-main localstack # get the ip address of the LocalStack container docker inspect localstack-main | \ - jq -r '.[0].NetworkSettings.Networks | to_entries | .[].value.IPAddress' + jq -r '.[0].NetworkSettings.Networks | to_entries | .[].value.IPAddress' # prints 172.27.0.2 # run your application container @@ -123,7 +125,7 @@ services: image: localstack/localstack ports: # Now only required if you need to access LocalStack from the host - - "127.0.0.1:4566:4566" + - "127.0.0.1:4566:4566" # Now only required if you need to access LocalStack from the host - "127.0.0.1:4510-4559:4510-4559" environment: @@ -155,7 +157,6 @@ networks: {{< / tab >}} {{% / tabpane %}} -
For LocalStack versions before 2.3.0 To facilitate access to LocalStack from within the container, it's recommended to start LocalStack in a user-defined network and set the MAIN_DOCKER_NETWORK environment variable to the network's name. @@ -184,11 +185,11 @@ docker run --rm it --network my-network {{}} services: localstack: - # ... other configuration here + # other configuration here networks: - ls your_container: - # ... other configuration here + # other configuration here networks: - ls networks: @@ -225,10 +226,10 @@ docker run --rm -it -p 4566:4566 localstack {{}} services: localstack: - # ... other configuration here + # other configuration here ports: - "4566:4566" - # ... other ports + # other ports {{}} {{}} diff --git a/content/en/references/network-troubleshooting/transparent-endpoint-injection/_index.md b/content/en/references/network-troubleshooting/transparent-endpoint-injection/_index.md index 2899dcb813..69b4c35af0 100644 --- a/content/en/references/network-troubleshooting/transparent-endpoint-injection/_index.md +++ b/content/en/references/network-troubleshooting/transparent-endpoint-injection/_index.md @@ -7,7 +7,8 @@ tags: - networking --- -Suppose you're attempting to access LocalStack, but you're relying on transparent endpoint injection to redirect AWS (`*.amazonaws.com`) requests. In such cases, there are different approaches you can take depending on your setup. +Suppose you're attempting to access LocalStack, but you're relying on transparent endpoint injection to redirect AWS (`*.amazonaws.com`) requests. +In such cases, there are different approaches you can take depending on your setup. ## From your host diff --git a/content/en/references/podman.md b/content/en/references/podman.md index 2d3d465bfc..4ae9b1e53e 100644 --- a/content/en/references/podman.md +++ b/content/en/references/podman.md @@ -13,7 +13,9 @@ Podman support is still experimental, and the following docs give you an overvie From the Podman docs: -> Podman is a daemonless, open source, Linux native tool designed to make it easy to find, run, build, share and deploy applications using Open Containers Initiative (OCI) Containers and Container Images. Podman provides a command line interface (CLI) familiar to anyone who has used the Docker Container Engine. Most users can simply alias Docker to Podman (`alias docker=podman`) without any problems. +> Podman is a daemonless, open source, Linux native tool designed to make it easy to find, run, build, share and deploy applications using Open Containers Initiative (OCI) Containers and Container Images. +> Podman provides a command line interface (CLI) familiar to anyone who has used the Docker Container Engine. +> Most users can simply alias Docker to Podman (`alias docker=podman`) without any problems. ## Options @@ -24,7 +26,9 @@ To run `localstack`, simply aliasing `alias docker=podman` is not enough, for th Here are several options on running LocalStack using podman: ### podman-docker -The package `podman-docker` emulates the Docker CLI using podman. It creates the following links: + +The package `podman-docker` emulates the Docker CLI using podman. 
+It creates the following links: - `/usr/bin/docker -> /usr/bin/podman` - `/var/run/docker.sock -> /run/podman/podman.sock` @@ -34,7 +38,9 @@ This package is available for some distros: - https://packages.debian.org/sid/podman-docker ### Rootfull Podman with podman-docker + The simplest option is to run `localstack` using `podman` by having `podman-docker` and running `localstack start` as root + ```sh # you have to start the podman socket first sudo systemctl start podman @@ -44,6 +50,7 @@ sudo sh -c 'DEBUG=1 localstack start' ``` ### Rootfull Podman without podman-docker + ```sh # you still have to start the podman socket first sudo systemctl start podman @@ -53,6 +60,7 @@ sudo sh -c 'DEBUG=1 DOCKER_CMD=podman DOCKER_HOST=unix://run/podman/podman.sock ``` ### Rootless Podman + You have to prepare your environment first: - https://wiki.archlinux.org/title/Podman#Rootless_Podman - https://github.com/containers/podman/blob/main/docs/tutorials/rootless_tutorial.md @@ -67,12 +75,15 @@ DEBUG=1 DOCKER_CMD="podman" DOCKER_SOCK=$XDG_RUNTIME_DIR/podman/podman.sock DOCK ``` If you have problems with [subuid and subgid](https://wiki.archlinux.org/title/Podman#Set_subuid_and_subgid), you could try to use [overlay.ignore_chown_errors option](https://www.redhat.com/sysadmin/controlling-access-rootless-podman-users) + ```sh DEBUG=1 DOCKER_CMD="podman --storage-opt overlay.ignore_chown_errors=true" DOCKER_SOCK=$XDG_RUNTIME_DIR/podman/podman.sock DOCKER_HOST=unix://$XDG_RUNTIME_DIR/podman/podman.sock localstack start ``` + ### Podman on Windows -You can run Podman on Windows using [WSLv2](https://learn.microsoft.com/en-us/windows/wsl/about#what-is-wsl-2). In the guide, we use a Docker Compose setup to run LocalStack. +You can run Podman on Windows using [WSLv2](https://learn.microsoft.com/en-us/windows/wsl/about#what-is-wsl-2). +In the guide, we use a Docker Compose setup to run LocalStack. Initialize and start Podman: @@ -81,13 +92,15 @@ $ podman machine init $ podman machine start {{< / command >}} -At this stage, Podman operates in rootless mode, where exposing port 443 on Windows is not possible. To enable this, switch Podman to rootful mode using the following command: +At this stage, Podman operates in rootless mode, where exposing port 443 on Windows is not possible. +To enable this, switch Podman to rootful mode using the following command: {{< command >}} podman machine set --rootful {{< / command >}} -For the Docker Compose setup, use the following configuration. When running in rootless mode, ensure to comment out the HTTPS gateway port, as it is unable to bind to privileged ports below 1024. +For the Docker Compose setup, use the following configuration. +When running in rootless mode, ensure to comment out the HTTPS gateway port, as it is unable to bind to privileged ports below 1024. ```yaml version: "3.8" diff --git a/content/en/references/usage-tracking.md b/content/en/references/usage-tracking.md index ad72f10ec2..e525294463 100644 --- a/content/en/references/usage-tracking.md +++ b/content/en/references/usage-tracking.md @@ -9,11 +9,14 @@ aliases: ## Overview -For license activations, we track the timestamp and the licensing credentials. We need to do this to make CI credits work. It is tracked regardless of whether the user disables event tracking since we collect this in the backend, not the client. +For license activations, we track the timestamp and the licensing credentials. +We need to do this to make CI credits work. 
+It is tracked regardless of whether the user disables event tracking since we collect this in the backend, not the client. ## LocalStack usage statistics -For Pro users, most of the information is collected to populate the [Stack Insights](https://docs.localstack.cloud/user-guide/web-application/stack-insights) dashboard. Collecting basic anonymized usage of AWS services helps us better direct engineering efforts to services that are used the most or cause the most issues. +For Pro users, most of the information is collected to populate the [Stack Insights](https://docs.localstack.cloud/user-guide/web-application/stack-insights) dashboard. +Collecting basic anonymized usage of AWS services helps us better direct engineering efforts to services that are used the most or cause the most issues. ### Session information @@ -50,7 +53,8 @@ The AWS API call metadata includes: - The service being called (like `s3` or `lambda`) - The operation being called (like `PutObject`, `CreateQueue`, `DeleteQueue`) - The HTTP status code of the response -- If it is a 400 error, we collect the error type and message. If it is a 500 error (internal LocalStack error), and `DEBUG=1` is enabled, we may also collect the stack trace to help us identify LocalStack bugs +- If it is a 400 error, we collect the error type and message. + If it is a 500 error (internal LocalStack error), and `DEBUG=1` is enabled, we may also collect the stack trace to help us identify LocalStack bugs - Whether the call originated from inside LocalStack - The region user made the call to - The dummy account ID user made the request @@ -83,7 +87,8 @@ For the community image, we only track service, operation, status code, and how ### CLI invocations -We collect an anonymized event if a CLI command was invoked, but do not collect any of the parameter values. This event is not connected to the session or the auth token. +We collect an anonymized event if a CLI command was invoked, but do not collect any of the parameter values. +This event is not connected to the session or the auth token. Here is an example of a CLI invocation event: @@ -113,7 +118,8 @@ We collect the usage of particular features in an anonymized and aggregated way. - Specific LocalStack configuration values - Content or file names of files being uploaded to S3 -- More generally, we don't collect any parameters of AWS API Calls. We do not track S3 bucket names, Lambda function names, EC2 configurations, or anything similar +- More generally, we don't collect any parameters of AWS API Calls. + We do not track S3 bucket names, Lambda function names, EC2 configurations, or anything similar - Any sensitive information about the request (like credentials and URL parameters) ## Configuration diff --git a/content/en/tutorials/_index.md b/content/en/tutorials/_index.md index 0cc7469c5a..272fdfa0d2 100644 --- a/content/en/tutorials/_index.md +++ b/content/en/tutorials/_index.md @@ -15,4 +15,4 @@ type: tutorials --- -
\ No newline at end of file +
diff --git a/content/en/tutorials/cloud-pods-collaborative-debugging/index.md b/content/en/tutorials/cloud-pods-collaborative-debugging/index.md index 2904101a6a..75ea166835 100644 --- a/content/en/tutorials/cloud-pods-collaborative-debugging/index.md +++ b/content/en/tutorials/cloud-pods-collaborative-debugging/index.md @@ -33,8 +33,10 @@ By replicating environments, teams can share the exact conditions under which a For developing AWS applications locally, the tool of choice is LocalStack, which can sustain a full-blown comprehensive stack. However, when issues appear, and engineers need a second opinion from a colleague, recreating the environment from scratch can leave -details slipping through the cracks. This is where Cloud Pods come in, to encapsulate the state of the LocalStack instance and allow for seamless -collaboration. While databases have snapshots, similarly, LocalStack uses Cloud Pods for reproducing state and data. +details slipping through the cracks. +This is where Cloud Pods come in, to encapsulate the state of the LocalStack instance and allow for seamless +collaboration. +While databases have snapshots, similarly, LocalStack uses Cloud Pods for reproducing state and data. In this tutorial, we will explore a common situation where a basic IAM misconfiguration causes unnecessary delays in finding the right solution. We will also discuss the best practices to prevent this and review some options for configuring Cloud Pod storage. @@ -50,27 +52,32 @@ The full sample application can be found [on GitHub](https://github.com/localsta - Basic knowledge of AWS services (API Gateway, Lambda, DynamoDB, IAM) - Basic understanding of Terraform for provisioning AWS resources -In this demo scenario, a new colleague, Bob, joins the company, clones the application repository, and starts working on the Lambda code. He will add the necessary -resources in the Terraform configuration file and some IAM policies that the functions need in order to access the database. -He is following good practice rules, where the resource has only the necessary permissions. However, Bob encounters an error despite this. +In this demo scenario, a new colleague, Bob, joins the company, clones the application repository, and starts working on the Lambda code. +He will add the necessary +resources in the Terraform configuration file and some IAM policies that the functions need in order to access the database. +He is following good practice rules, where the resource has only the necessary permissions. +However, Bob encounters an error despite this. ### Architecture Overview The stack consists of an API Gateway that exposes endpoints and integrates with two Lambda functions responsible for adding and fetching -products from a DynamoDB database. IAM policies are enforced to ensure compliance with the +products from a DynamoDB database. +IAM policies are enforced to ensure compliance with the **[principle of least privilege](https://en.wikipedia.org/wiki/Principle_of_least_privilege)**, and the logs will be sent to the CloudWatch service. ### Note -This demo application is suitable for AWS and behaves the same as on LocalStack. You can try this out by running the Terraform configuration file against the AWS platform. +This demo application is suitable for AWS and behaves the same as on LocalStack. +You can try this out by running the Terraform configuration file against the AWS platform. 
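For reference, trying the stack against real AWS amounts to running the same configuration with the stock Terraform CLI instead of the `tflocal` wrapper used later in this tutorial; a minimal sketch, assuming AWS credentials and a default region are already configured in your shell:

```sh
# Deploy the unmodified configuration to real AWS (not LocalStack)
cd terraform
terraform init
terraform plan
terraform apply --auto-approve

# Tear the stack down again afterwards to avoid unexpected AWS charges
terraform destroy --auto-approve
```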
![Application Diagram](cloud-pod-collab.png) ### Starting LocalStack -In the root directory, there is a `docker-compose.yml` file that will spin up version 3.3.0 of LocalStack, with an -important configuration flag, `ENFORCE_IAM=1`, which will facilitate IAM policy evaluation and enforcement. For this -example, a `LOCALSTACK_AUTH_TOKEN` is needed, which you can find in the LocalStack web app on the +In the root directory, there is a `docker-compose.yml` file that will spin up version 3.3.0 of LocalStack, with an +important configuration flag, `ENFORCE_IAM=1`, which will facilitate IAM policy evaluation and enforcement. +For this +example, a `LOCALSTACK_AUTH_TOKEN` is needed, which you can find in the LocalStack web app on the [Getting Started](https://app.localstack.cloud/getting-started) page. {{< command >}} @@ -81,7 +88,8 @@ $ docker compose up ### The Terraform Configuration File The entire Terraform configuration file for setting up the application stack is available in the same repository at -https://github.com/localstack-samples/cloud-pods-collaboration-demo/blob/main/terraform/main.tf. To deploy all the resources on LocalStack, +https://github.com/localstack-samples/cloud-pods-collaboration-demo/blob/main/terraform/main.tf. +To deploy all the resources on LocalStack, navigate to the project's root folder and use the following commands: {{< command >}} @@ -91,15 +99,16 @@ $ tflocal plan $ tflocal apply --auto-approve {{}} -`tflocal` is a small wrapper script to run Terraform against LocalStack. The endpoints for all services are configured to point to the +`tflocal` is a small wrapper script to run Terraform against LocalStack. +The endpoints for all services are configured to point to the LocalStack API, which allows you to deploy your unmodified Terraform scripts against LocalStack. - **`init`**: This command initializes the Terraform working directory, installs any necessary plugins, and sets up the backend. - **`plan`**: Creates an execution plan, which allows you to review the actions Terraform will take to change your infrastructure. -- **`apply`**: Finally, the **`apply`** command applies the changes required to reach the desired state of the configuration. +- **`apply`**: Finally, the **`apply`** command applies the changes required to reach the desired state of the configuration. If **`-auto-approve`** is used, it bypasses the interactive approval step normally required. -As mentioned previously, there is something missing from this configuration, and that is the **`GetItem`** operation permission +As mentioned previously, there is something missing from this configuration, and that is the **`GetItem`** operation permission for one of the Lambda functions: ```java @@ -129,7 +138,8 @@ Bob has mistakenly used `dynamodb:Scan` and `dynamodb:Query`, but missed adding ### Reproducing the issue locally -Let’s test out the current state of the application. The Terraform configuration file outputs the REST API ID of the API Gateway. +Let’s test out the current state of the application. +The Terraform configuration file outputs the REST API ID of the API Gateway. We can capture that value and use it further to invoke the **`add-product`** Lambda: {{< command >}} @@ -150,7 +160,8 @@ $ curl --location "http://$rest_api_id.execute-api.localhost.localstack.cloud:45 --data '{ "id": "34534", "name": "EcoFriendly Water Bottle", - "description": "A durable, eco-friendly water bottle designed to keep your drinks cold for up to 24 hours and hot for up to 12 hours. 
Made from high-quality, food-grade stainless steel, it'\''s perfect for your daily hydration needs.", + "description": "A durable, eco-friendly water bottle designed to keep your drinks cold for up to 24 hours and hot for up to 12 hou +s. Made from high-quality, food-grade stainless steel, it'\''s perfect for your daily hydration needs.", "price": "29.99" }' @@ -159,7 +170,8 @@ $ curl --location "http://$rest_api_id.execute-api.localhost.localstack.cloud:45 --data '{ "id": "82736", "name": "Sustainable Hydration Flask", - "description": "This sustainable hydration flask is engineered to maintain your beverages at the ideal temperature—cold for 24 hours and hot for 12 hours. Constructed with premium, food-grade stainless steel, it offers an environmentally friendly solution to stay hydrated throughout the day.", + "description": "This sustainable hydration flask is engineered to maintain your beverages at the ideal temperature—cold for 24 hours and hot for 12 hours. +Constructed with premium, food-grade stainless steel, it offers an environmentally friendly solution to stay hydrated throughout the day.", "price": "31.50" }' {{}} @@ -175,9 +187,10 @@ Internal server error⏎ {{}} - -An `Internal server error⏎` does not give out too much information. Bob does not know for sure what could be -causing this. The Lambda code and the configurations look fine to him. +An `Internal server error⏎` does not give out too much information. +Bob does not know for sure what could be +causing this. +The Lambda code and the configurations look fine to him. ## Using Cloud Pods for collaborative debugging @@ -197,7 +210,7 @@ Services: sts,iam,apigateway,dynamodb,lambda,s3,cloudwatch,logs LocalStack provides a remote storage backend that can be used to store the state of your application and share it with your team members. -The Cloud Pods CLI is included in the LocalStack CLI installation, so there’s no need for additional plugins to begin using it. +The Cloud Pods CLI is included in the LocalStack CLI installation, so there’s no need for additional plugins to begin using it. The `LOCALSTACK_AUTH_TOKEN` needs to be set as an environment variable. Additionally, there are other commands for managing Cloud Pods included in the CLI: @@ -222,14 +235,13 @@ Commands: {{}} - ### Pulling and Loading the Cloud Pod The workflow between Alice and Bob is incredibly easy: ![Bob and Alice Collab](bob-alice-cloud-pod-collab.png) -Now, in a fresh LocalStack instance, Alice can immediately load the Cloud Pod, because she's part of the +Now, in a fresh LocalStack instance, Alice can immediately load the Cloud Pod, because she's part of the same organization: {{< command >}} @@ -241,35 +253,44 @@ Cloud Pod cloud-pod-product-app successfully loaded ### Debugging and Resolving the Issue -Not only can Alice easily reproduce the bug now, but she also has access to the state and data of the services +Not only can Alice easily reproduce the bug now, but she also has access to the state and data of the services involved, meaning that the Lambda logs are still in the CloudWatch log groups. ![CloudWatch Logs](cloudwatch-logs.png) -By spotting the error message, there’s an instant starting point for checking the source of the problem. The error message displayed in the logs is very specific: +By spotting the error message, there’s an instant starting point for checking the source of the problem. 
+The error message displayed in the logs is very specific: `"Error: User: arn:aws:sts::000000000000:assumed-role/productRole/get-product is not authorized to perform: dynamodb:GetItem on resource: arn:aws:dynamodb:us-east-1:000000000000:table/Products because no identity-based policy allows the dynamodb:GetItem action (Service: DynamoDb, Status Code: 400, Request ID: d50e9dad-a01a-4860-8c21-e844a930ba7d)"` ### Identifying the Misconfiguration -The error points to a permissions issue related to accessing DynamoDB. The action **`dynamodb:GetItem`** is -not authorized for the role, preventing the retrieval of a product by its ID. This kind of error was not foreseen as one -of the exceptions to be handled in the application. IAM policies are not always easy and straightforward, so it's a well known fact that +The error points to a permissions issue related to accessing DynamoDB. +The action **`dynamodb:GetItem`** is +not authorized for the role, preventing the retrieval of a product by its ID. +This kind of error was not foreseen as one +of the exceptions to be handled in the application. +IAM policies are not always easy and straightforward, so it's a well known fact that these configurations are prone to mistakes. -To confirm the finding, Alice now has the exact same environment to reproduces the error in. There are no machine specific configurations and -no other manual changes. This leads to the next step in troubleshooting: **inspecting the Terraform configuration file** responsible +To confirm the finding, Alice now has the exact same environment to reproduces the error in. +There are no machine specific configurations and +no other manual changes. +This leads to the next step in troubleshooting: **inspecting the Terraform configuration file** responsible for defining the permissions attached to the Lambda role for interacting with DynamoDB. ### Fixing the Terraform Configuration Upon review, Alice discovers that the Terraform configuration does not include the necessary permission **`dynamodb:GetItem`** in the -policy attached to the Lambda role. This oversight explains the error message. The Terraform configuration file acts as a +policy attached to the Lambda role. +This oversight explains the error message. +The Terraform configuration file acts as a blueprint for AWS resource permissions, and any missing action can lead to errors related to authorization. -This scenario underscores the importance of thorough review and testing of IAM roles and policies when working with AWS resources. -It's easy to overlook a single action in a policy, but as we've seen, such an omission can significantly impact application -functionality. By carefully checking the Terraform configuration files and ensuring that all necessary permissions are included, +This scenario underscores the importance of thorough review and testing of IAM roles and policies when working with AWS resources. +It's easy to overlook a single action in a policy, but as we've seen, such an omission can significantly impact application +functionality. +By carefully checking the Terraform configuration files and ensuring that all necessary permissions are included, developers can avoid similar issues and ensure a smoother, error-free interaction with AWS services. 
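As a sanity check before (or after) editing the Terraform code, the policy that is actually attached in LocalStack can be read back with `awslocal`; a rough sketch, where the policy name filter and version are illustrative — list the policies first to find the real ARN:

```sh
# Find the ARN of the DynamoDB policy created by Terraform (name pattern is illustrative)
awslocal iam list-policies \
  --query 'Policies[?contains(PolicyName, `dynamodb`)].[PolicyName,Arn]' --output table

# Dump the policy document and confirm whether dynamodb:GetItem is listed
awslocal iam get-policy-version \
  --policy-arn arn:aws:iam::000000000000:policy/<policy-name> \
  --version-id v1
```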
The action list should now look like this: @@ -293,19 +314,22 @@ resource "aws_iam_policy" "lambda_dynamodb_policy" { }, ] }) -} +} {{}} -To double-check, Alice creates the stack on AWS, and observes that the issue is the same, related to policy +To double-check, Alice creates the stack on AWS, and observes that the issue is the same, related to policy misconfiguration: ![AWS CloudWatch Logs](aws-cloudwatch-logs.png) ### Impact on the team -Alice has updated the infrastructure and deployed a new version of the Cloud Pod with the necessary fixes. Bob will -access the updated infrastructure and proceed with his tasks. Meanwhile, Carol is developing integration tests for the -CI pipeline. She will use the stable version of the infrastructure to ensure that the workflows function effectively from +Alice has updated the infrastructure and deployed a new version of the Cloud Pod with the necessary fixes. +Bob will +access the updated infrastructure and proceed with his tasks. +Meanwhile, Carol is developing integration tests for the +CI pipeline. +She will use the stable version of the infrastructure to ensure that the workflows function effectively from start to finish. ![Carol writes tests](carol-bob-alice-cloud-pod-collab.png) @@ -320,12 +344,14 @@ The Cloud Pods command-line interface enables users to manage these remotes with ## Conclusion -Cloud Pods play a crucial role in team collaboration, significantly speeding up development processes. The multiple and -versatile options for remote storage can support different business requirements for companies that prefer using the -environments they control. Cloud Pods are not just for teamwork; they also excel in other areas, such as creating +Cloud Pods play a crucial role in team collaboration, significantly speeding up development processes. +The multiple and +versatile options for remote storage can support different business requirements for companies that prefer using the +environments they control. +Cloud Pods are not just for teamwork; they also excel in other areas, such as creating resources in Continuous Integration (CI) for ultra-fast testing pipelines. ## Additional resources - [Cloud Pods documentation](https://docs.localstack.cloud/user-guide/state-management/cloud-pods/) -- [Terraform for AWS](https://developer.hashicorp.com/terraform/tutorials/aws-get-started) \ No newline at end of file +- [Terraform for AWS](https://developer.hashicorp.com/terraform/tutorials/aws-get-started) diff --git a/content/en/tutorials/ecs-ecr-container-app/index.md b/content/en/tutorials/ecs-ecr-container-app/index.md index 0db2965367..b138b23b79 100644 --- a/content/en/tutorials/ecs-ecr-container-app/index.md +++ b/content/en/tutorials/ecs-ecr-container-app/index.md @@ -24,22 +24,28 @@ pro: true leadimage: "ecs-ecr-container-app-featured-image.png" --- -[Amazon Elastic Container Service (ECS)](https://aws.amazon.com/ecs/) is a fully-managed container orchestration service that simplifies the deployment, management, and scaling of Docker containers on AWS. With support for two [launch types](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html), EC2 and Fargate, ECS allows you to run containers on your cluster of EC2 instances or have AWS manage your underlying infrastructure with Fargate. The Fargate launch type provides a serverless-like experience for running containers, allowing you to focus on your applications instead of infrastructure. 
+[Amazon Elastic Container Service (ECS)](https://aws.amazon.com/ecs/) is a fully-managed container orchestration service that simplifies the deployment, management, and scaling of Docker containers on AWS. +With support for two [launch types](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html), EC2 and Fargate, ECS allows you to run containers on your cluster of EC2 instances or have AWS manage your underlying infrastructure with Fargate. +The Fargate launch type provides a serverless-like experience for running containers, allowing you to focus on your applications instead of infrastructure. -[Amazon Elastic Container Registry (ECR)](https://aws.amazon.com/ecr/) is a fully-managed service that allows you to store, manage, and deploy Docker container images. It is tightly integrated with other AWS services such as ECS, EKS, and Lambda, enabling you to quickly deploy your container images to these services. With ECR, you can version, tag, and manage your container images’ lifecycles independently of your applications, making it easy to maintain and deploy your containers. +[Amazon Elastic Container Registry (ECR)](https://aws.amazon.com/ecr/) is a fully-managed service that allows you to store, manage, and deploy Docker container images. +It is tightly integrated with other AWS services such as ECS, EKS, and Lambda, enabling you to quickly deploy your container images to these services. +With ECR, you can version, tag, and manage your container images’ lifecycles independently of your applications, making it easy to maintain and deploy your containers. -ECS tasks can pull container images from ECR repositories and are customizable using task definitions to specify settings such as CPU and memory limits, environment variables, and networking configurations. [LocalStack Pro](https://localstack.cloud/) allows creating ECR registries, repositories, and ECS clusters and tasks on your local machine. This tutorial will showcase using LocalStack to set up an NGINX web server to serve a static website using CloudFormation templates in a local AWS environment. +ECS tasks can pull container images from ECR repositories and are customizable using task definitions to specify settings such as CPU and memory limits, environment variables, and networking configurations. [LocalStack Pro](https://localstack.cloud/) allows creating ECR registries, repositories, and ECS clusters and tasks on your local machine. +This tutorial will showcase using LocalStack to set up an NGINX web server to serve a static website using CloudFormation templates in a local AWS environment. ## Prerequisites -- [LocalStack Pro](https://localstack.cloud/pricing/) -- [awslocal]({{< ref "aws-cli#localstack-aws-cli-awslocal" >}}) -- [Docker](https://docker.io/) -- [`cURL`](https://curl.se/download.html) +- [LocalStack Pro](https://localstack.cloud/pricing/) +- [awslocal]({{< ref "aws-cli#localstack-aws-cli-awslocal" >}}) +- [Docker](https://docker.io/) +- [`cURL`](https://curl.se/download.html) ## Creating the Docker image -To start setting up an NGINX web server on an ECS cluster, we need to create a Docker image that can be pushed to an ECR repository. We'll begin by creating a `Dockerfile` that defines the configuration for our NGINX web server. +To start setting up an NGINX web server on an ECS cluster, we need to create a Docker image that can be pushed to an ECR repository. +We'll begin by creating a `Dockerfile` that defines the configuration for our NGINX web server. 
```dockerfile FROM nginx @@ -47,19 +53,23 @@ FROM nginx ENV foo=bar ``` -The `Dockerfile` uses the official `nginx` image from Docker Hub, which allows us to serve the default index page. Before building our Docker image, we need to start LocalStack and create an ECR repository to push our Docker image. To start LocalStack with the `LOCALSTACK_AUTH_TOKEN` environment variable, run the following command: +The `Dockerfile` uses the official `nginx` image from Docker Hub, which allows us to serve the default index page. +Before building our Docker image, we need to start LocalStack and create an ECR repository to push our Docker image. +To start LocalStack with the `LOCALSTACK_AUTH_TOKEN` environment variable, run the following command: {{< command >}} $ LOCALSTACK_AUTH_TOKEN= localstack start -d {{< / command >}} -Next, we will create an ECR repository to push our Docker image. We will use the `awslocal` CLI to create the repository. +Next, we will create an ECR repository to push our Docker image. +We will use the `awslocal` CLI to create the repository. {{< command >}} $ awslocal ecr create-repository --repository-name {{< / command >}} -Replace `` with your desired repository name. The output of this command will contain the `repositoryUri` value that we'll need in the next step: +Replace `` with your desired repository name. +The output of this command will contain the `repositoryUri` value that we'll need in the next step: ```json { @@ -86,19 +96,23 @@ Copy the `repositoryUri` value from the output and replace `` in $ docker build -t . {{< / command >}} -This command will build the Docker image for our NGINX web server. After the build is complete, we'll push the Docker image to the ECR repository we created earlier using the following command: +This command will build the Docker image for our NGINX web server. +After the build is complete, we'll push the Docker image to the ECR repository we created earlier using the following command: {{< command >}} $ docker push {{< / command >}} -After a few seconds, the Docker image will be pushed to the local ECR repository. We can now create an ECS cluster and deploy our NGINX web server. +After a few seconds, the Docker image will be pushed to the local ECR repository. +We can now create an ECS cluster and deploy our NGINX web server. ## Creating the local ECS infrastructure -LocalStack enables the deployment of ECS task definitions, services, and tasks, allowing us to deploy our ECR containers via the ECS Fargate launch type, which uses the local Docker engine to deploy containers locally. To create the necessary ECS infrastructure on our local machine before deploying our NGINX web server, we will use a CloudFormation template. +LocalStack enables the deployment of ECS task definitions, services, and tasks, allowing us to deploy our ECR containers via the ECS Fargate launch type, which uses the local Docker engine to deploy containers locally. +To create the necessary ECS infrastructure on our local machine before deploying our NGINX web server, we will use a CloudFormation template. -You can create a new file named `ecs.infra.yml` inside a new `templates` directory, using a [publicly available CloudFormation template as a starting point](https://github.com/awslabs/aws-cloudformation-templates/blob/master/aws/services/ECS/FargateLaunchType/clusters/public-vpc.yml). To begin, we'll add the `Mappings` section and configure the subnet mask values, which define the range of internal IP addresses that can be assigned. 
+You can create a new file named `ecs.infra.yml` inside a new `templates` directory, using a [publicly available CloudFormation template as a starting point](https://github.com/awslabs/aws-cloudformation-templates/blob/master/aws/services/ECS/FargateLaunchType/clusters/public-vpc.yml). +To begin, we'll add the `Mappings` section and configure the subnet mask values, which define the range of internal IP addresses that can be assigned. ```yaml AWSTemplateFormatVersion: '2010-09-09' @@ -299,9 +313,11 @@ Resources: Resource: '*' ``` -So far, we have set up the VPC where the containers will be networked and created networking resources for the public subnets. We have also added a security group for the container running in Fargate and an IAM role that authorizes ECS to manage resources in the VPC. +So far, we have set up the VPC where the containers will be networked and created networking resources for the public subnets. +We have also added a security group for the container running in Fargate and an IAM role that authorizes ECS to manage resources in the VPC. -Next, we can configure the outputs generated by the CloudFormation template. These outputs are values generated during the creation of the CloudFormation stack and can be used by other resources or scripts in your application. +Next, we can configure the outputs generated by the CloudFormation template. +These outputs are values generated during the creation of the CloudFormation stack and can be used by other resources or scripts in your application. To export the values as CloudFormation outputs, we can add the following to the end of our `ecs.infra.yml` file: @@ -360,17 +376,21 @@ To deploy the CloudFormation template we created earlier, use the following comm $ awslocal cloudformation create-stack --stack-name --template-body file://templates/ecs.infra.yml {{< /command >}} -Make sure to replace `` with a name of your choice. Wait until the stack status changes to `CREATE_COMPLETE` by running the following command: +Make sure to replace `` with a name of your choice. +Wait until the stack status changes to `CREATE_COMPLETE` by running the following command: {{< command >}} $ awslocal cloudformation wait stack-create-complete --stack-name {{< /command >}} -You can also check your deployed stack on the LocalStack Web Application by navigating to the [CloudFormation resource browser](https://app.localstack.cloud/resources/cloudformation/stacks). With the ECS infrastructure now in place, we can proceed to deploy our NGINX web server. +You can also check your deployed stack on the LocalStack Web Application by navigating to the [CloudFormation resource browser](https://app.localstack.cloud/resources/cloudformation/stacks). +With the ECS infrastructure now in place, we can proceed to deploy our NGINX web server. ## Deploying the ECS service -To deploy the ECS service, we'll use another CloudFormation template. You can create a new file named `ecs.sample.yml` in the `templates` directory, based on the [publicly available CloudFormation template](https://github.com/awslabs/aws-cloudformation-templates/blob/master/aws/services/ECS/FargateLaunchType/services/public-service.yml). This template will deploy the ECS service on AWS Fargate and expose it via a public load balancer. +To deploy the ECS service, we'll use another CloudFormation template. 
+You can create a new file named `ecs.sample.yml` in the `templates` directory, based on the [publicly available CloudFormation template](https://github.com/awslabs/aws-cloudformation-templates/blob/master/aws/services/ECS/FargateLaunchType/services/public-service.yml). +This template will deploy the ECS service on AWS Fargate and expose it via a public load balancer. Before we proceed, let's declare the parameters for the CloudFormation template: @@ -532,41 +552,50 @@ Next, let's deploy the CloudFormation template by running the following command: $ awslocal cloudformation create-stack --stack-name --template-body file://templates/ecs.sample.yml --parameters ParameterKey=ImageUrl,ParameterValue= {{< /command >}} -Replace `` with a name of your choice and `` with the URI of the Docker image that you want to deploy. Wait for the stack to be created by running the following command: +Replace `` with a name of your choice and `` with the URI of the Docker image that you want to deploy. +Wait for the stack to be created by running the following command: {{< command >}} $ awslocal cloudformation wait stack-create-complete --stack-name {{< /command >}} -Now that the ECS service has been deployed successfully, let's access the application endpoint. First, let's list all the ECS clusters we have deployed in our local environment by running the following command to retrieve the cluster ARN: +Now that the ECS service has been deployed successfully, let's access the application endpoint. +First, let's list all the ECS clusters we have deployed in our local environment by running the following command to retrieve the cluster ARN: {{< command >}} $ awslocal ecs list-clusters | jq -r '.clusterArns[0]' {{< /command >}} -Save the output of the above command as `CLUSTER_ARN`, as we will use it to list the tasks running in the cluster. Next, run the following command to list the task ARN: +Save the output of the above command as `CLUSTER_ARN`, as we will use it to list the tasks running in the cluster. +Next, run the following command to list the task ARN: {{< command >}} $ awslocal ecs list-tasks --cluster | jq -r '.taskArns[0]' {{< /command >}} -Save the task ARN as `TASK_ARN`. Let us now list the port number on which the application is running. Run the following command: +Save the task ARN as `TASK_ARN`. +Let us now list the port number on which the application is running. +Run the following command: {{< command >}} $ awslocal ecs describe-tasks --cluster --tasks | jq -r '.tasks[0].containers[0].networkBindings[0].hostPort' {{< /command >}} -Earlier, we configured the application to run on port `45139`, in our `HostPort` parameter. Let us now access the application endpoint. Run the following command to get the public IP address of the host: +Earlier, we configured the application to run on port `45139`, in our `HostPort` parameter. +Let us now access the application endpoint. +Run the following command to get the public IP address of the host: {{< command >}} $ curl localhost:45139 {{< /command >}} -Alternatively, in the address bar of your web browser, you can navigate to [`localhost:45139`](https://localhost:45139/). You should see the default index page of the NGINX web server. +Alternatively, in the address bar of your web browser, you can navigate to [`localhost:45139`](https://localhost:45139/). +You should see the default index page of the NGINX web server. ## Conclusion -In this tutorial, we have demonstrated how to deploy a containerized service locally using Amazon ECS, ECR, and LocalStack. 
We have also shown how you can use CloudFormation templates with the awslocal CLI to deploy your local AWS infrastructure. +In this tutorial, we have demonstrated how to deploy a containerized service locally using Amazon ECS, ECR, and LocalStack. +We have also shown how you can use CloudFormation templates with the awslocal CLI to deploy your local AWS infrastructure. With LocalStack, you can easily mount code from your host filesystem into the ECS container, allowing for a quicker debugging loop that doesn't require rebuilding and redeploying the task's Docker image for each change. diff --git a/content/en/tutorials/elb-load-balancing/index.md b/content/en/tutorials/elb-load-balancing/index.md index ea002dfd5a..f00d2357ac 100644 --- a/content/en/tutorials/elb-load-balancing/index.md +++ b/content/en/tutorials/elb-load-balancing/index.md @@ -24,13 +24,22 @@ pro: true leadimage: "elb-load-balancing-featured-image.png" --- -[Elastic Load Balancer (ELB)](https://aws.amazon.com/elasticloadbalancing/) is a service that distributes incoming application traffic across multiple targets, such as EC2 instances, containers, IP addresses, and Lambda functions. ELBs can be physical hardware or virtual software components. They accept incoming traffic and distribute it across multiple targets in one or more Availability Zones. Using ELB, you can quickly scale your load balancer to accommodate changes in traffic over time, ensuring optimal performance for your application and workloads running on the AWS infrastructure. +[Elastic Load Balancer (ELB)](https://aws.amazon.com/elasticloadbalancing/) is a service that distributes incoming application traffic across multiple targets, such as EC2 instances, containers, IP addresses, and Lambda functions. +ELBs can be physical hardware or virtual software components. +They accept incoming traffic and distribute it across multiple targets in one or more Availability Zones. +Using ELB, you can quickly scale your load balancer to accommodate changes in traffic over time, ensuring optimal performance for your application and workloads running on the AWS infrastructure. ELB provides three types of load balancers: [Application Load Balancer](https://docs.aws.amazon.com/elasticloadbalancing/latest/application/introduction.html), [Network Load Balancer](https://docs.aws.amazon.com/elasticloadbalancing/latest/network/introduction.html), [Classic Load Balancer](https://docs.aws.amazon.com/elasticloadbalancing/latest/gateway/introduction.html), and [Application Load Balancer](https://docs.aws.amazon.com/elasticloadbalancing/latest/application/introduction.html). -In this tutorial we focus on the Application Load Balancer (ALB), which operates at the Application layer of the OSI model and is specifically designed for load balancing HTTP and HTTPS traffic for web applications. ALB works at the request level, allowing advanced load-balancing features for HTTP and HTTPS requests. It also enables you to register Lambda functions as targets. You can configure a listener rule that forwards requests to a target group for your Lambda function, triggering its execution to process the request. +In this tutorial we focus on the Application Load Balancer (ALB), which operates at the Application layer of the OSI model and is specifically designed for load balancing HTTP and HTTPS traffic for web applications. +ALB works at the request level, allowing advanced load-balancing features for HTTP and HTTPS requests. +It also enables you to register Lambda functions as targets. 
+You can configure a listener rule that forwards requests to a target group for your Lambda function, triggering its execution to process the request. -[LocalStack Pro](https://localstack.cloud) extends support for ELB Application Load Balancers and the configuration of target groups, including Lambda functions. This tutorial will guide you through setting up an ELB Application Load Balancer to configure Node.js Lambda functions as targets. We will utilize the [Serverless framework](http://serverless.com/) along with the [`serverless-localstack` plugin](https://www.serverless.com/plugins/serverless-localstack) to simplify the setup. Additionally, we will demonstrate how to set up ELB endpoints to efficiently forward requests to the target group associated with your Lambda functions. +[LocalStack Pro](https://localstack.cloud) extends support for ELB Application Load Balancers and the configuration of target groups, including Lambda functions. +This tutorial will guide you through setting up an ELB Application Load Balancer to configure Node.js Lambda functions as targets. +We will utilize the [Serverless framework](http://serverless.com/) along with the [`serverless-localstack` plugin](https://www.serverless.com/plugins/serverless-localstack) to simplify the setup. +Additionally, we will demonstrate how to set up ELB endpoints to efficiently forward requests to the target group associated with your Lambda functions. ## Prerequisites @@ -42,13 +51,16 @@ In this tutorial we focus on the Application Load Balancer (ALB), which operates ## Setup a Serverless project -Serverless is an open-source framework that enables you to build, package, and deploy serverless applications seamlessly across various cloud providers and platforms. With the Serverless framework, you can easily set up your serverless development environment, define your applications as functions and events, and deploy your entire infrastructure to the cloud using a single command. To start using the Serverless framework, install the Serverless framework globally by executing the following command using `npm`: +Serverless is an open-source framework that enables you to build, package, and deploy serverless applications seamlessly across various cloud providers and platforms. +With the Serverless framework, you can easily set up your serverless development environment, define your applications as functions and events, and deploy your entire infrastructure to the cloud using a single command. +To start using the Serverless framework, install the Serverless framework globally by executing the following command using `npm`: {{< command >}} $ npm install -g serverless {{< / command >}} -The above command installs the Serverless framework globally on your machine. After the installation is complete, you can verify it by running the following command: +The above command installs the Serverless framework globally on your machine. +After the installation is complete, you can verify it by running the following command: {{< command >}} $ serverless --version @@ -58,21 +70,27 @@ Plugin: 6.2.2 SDK: 4.3.2 {{< / command >}} -This command displays the version numbers of the Serverless framework's core, plugins, and SDK you installed. Now, let's proceed with creating a new Serverless project using the `serverless` command: +This command displays the version numbers of the Serverless framework's core, plugins, and SDK you installed. 
+Now, let's proceed with creating a new Serverless project using the `serverless` command: {{< command >}} $ serverless create --template aws-nodejs --path serverless-elb {{< / command >}} -In this example, we use the `aws-nodejs` template to create our Serverless project. This template includes a simple Node.js Lambda function that returns a message when invoked. It also generates a `serverless.yml` file that contains the project's configuration. +In this example, we use the `aws-nodejs` template to create our Serverless project. +This template includes a simple Node.js Lambda function that returns a message when invoked. +It also generates a `serverless.yml` file that contains the project's configuration. -The `serverless.yml` file is where you configure your project. It includes information such as the service name, the provider (AWS in this case), the functions, and example events that trigger those functions. If you prefer to set up your project using a different template, refer to the [Serverless templates documentation](https://www.serverless.com/framework/docs/providers/aws/cli-reference/create/) for more options. +The `serverless.yml` file is where you configure your project. +It includes information such as the service name, the provider (AWS in this case), the functions, and example events that trigger those functions. +If you prefer to set up your project using a different template, refer to the [Serverless templates documentation](https://www.serverless.com/framework/docs/providers/aws/cli-reference/create/) for more options. Now that we have created our Serverless project, we can proceed to configure it to use LocalStack. ## Configure Serverless project to use LocalStack -To configure your Serverless project to use LocalStack, you need to install the `serverless-localstack` plugin. Before that, let's initialize the project and install some dependencies: +To configure your Serverless project to use LocalStack, you need to install the `serverless-localstack` plugin. +Before that, let's initialize the project and install some dependencies: {{< command >}} $ npm init -y @@ -81,9 +99,11 @@ $ npm install -D serverless serverless-localstack serverless-deployment-bucket In the above commands, we use `npm init -y` to initialize a new Node.js project with default settings and then install the necessary dependencies, including `serverless`, `serverless-localstack`, and `serverless-deployment-bucket`, as dev dependencies. -The `serverless-localstack` plugin enables your Serverless project to redirect AWS API calls to LocalStack, while the `serverless-deployment-bucket` plugin creates a deployment bucket in LocalStack. This bucket is responsible for storing the deployment artifacts and ensuring that old deployment buckets are properly cleaned up after each deployment. +The `serverless-localstack` plugin enables your Serverless project to redirect AWS API calls to LocalStack, while the `serverless-deployment-bucket` plugin creates a deployment bucket in LocalStack. +This bucket is responsible for storing the deployment artifacts and ensuring that old deployment buckets are properly cleaned up after each deployment. -We have a `serverless.yml` file in the directory to define our Serverless project's configuration, which includes information such as the service name, the provider (AWS in this case), the functions, and example events that trigger those functions. 
To set up the plugins we installed earlier, you need to add the following properties to your `serverless.yml` file: +We have a `serverless.yml` file in the directory to define our Serverless project's configuration, which includes information such as the service name, the provider (AWS in this case), the functions, and example events that trigger those functions. +To set up the plugins we installed earlier, you need to add the following properties to your `serverless.yml` file: ```yaml service: serverless-elb @@ -111,7 +131,9 @@ custom: To configure Serverless to use the LocalStack plugin specifically for the `local` stage and ensure that your Serverless project only deploys to LocalStack instead of the real AWS Cloud, you need to set the `--stage` flag when using the `serverless deploy` command and specify the flag variable as `local`. -Configure a `deploy` script in your `package.json` file to simplify the deployment process. It lets you run the `serverless deploy` command directly over your local infrastructure. Update your `package.json` file to include the following: +Configure a `deploy` script in your `package.json` file to simplify the deployment process. +It lets you run the `serverless deploy` command directly over your local infrastructure. +Update your `package.json` file to include the following: ```json { @@ -143,7 +165,8 @@ This will execute the `serverless deploy --stage local` command, deploying your ## Create Lambda functions & ELB Application Load Balancers -Now, let's create two Lambda functions named `hello1` and `hello2` that will run on the Node.js 12.x runtime. Open the `handler.js` file and replace the existing code with the following: +Now, let's create two Lambda functions named `hello1` and `hello2` that will run on the Node.js 12.x runtime. +Open the `handler.js` file and replace the existing code with the following: ```js 'use strict'; @@ -175,7 +198,11 @@ module.exports.hello2 = async (event) => { }; ``` -We have defined the `hello1` and `hello2` Lambda functions in the updated code. Each function receives an event parameter and logs it to the console. The function then returns a response with a status code of 200 and a plain text body containing the respective `"Hello"` message. It's important to note that the `isBase64Encoded` property is not required for plain text responses. It is typically used when you need to include binary content in the response body and want to indicate that the content is Base64 encoded. +We have defined the `hello1` and `hello2` Lambda functions in the updated code. +Each function receives an event parameter and logs it to the console. +The function then returns a response with a status code of 200 and a plain text body containing the respective `"Hello"` message. +It's important to note that the `isBase64Encoded` property is not required for plain text responses. +It is typically used when you need to include binary content in the response body and want to indicate that the content is Base64 encoded. Let us now configure the `serverless.yml` file to create an Application Load Balancer (ALB) and attach the Lambda functions to it. @@ -216,9 +243,13 @@ custom: - local ``` -In the above configuration, we specify the service name (`serverless-elb` in this case) and set the provider to AWS with the Node.js 12.x runtime. We include the necessary plugins, `serverless-localstack` and `serverless-deployment-bucket`, for LocalStack support and deployment bucket management. 
Next, we define the `hello1` and `hello2` functions with their respective handlers and event triggers. In this example, both functions are triggered by HTTP GET requests to the `/hello1` and `/hello2` paths. +In the above configuration, we specify the service name (`serverless-elb` in this case) and set the provider to AWS with the Node.js 12.x runtime. +We include the necessary plugins, `serverless-localstack` and `serverless-deployment-bucket`, for LocalStack support and deployment bucket management. +Next, we define the `hello1` and `hello2` functions with their respective handlers and event triggers. +In this example, both functions are triggered by HTTP GET requests to the `/hello1` and `/hello2` paths. -Lastly, let's create a VPC, a subnet, an Application Load Balancer, and an HTTP listener on the load balancer that redirects traffic to the target group. To do this, add the following resources to your `serverless.yml` file: +Lastly, let's create a VPC, a subnet, an Application Load Balancer, and an HTTP listener on the load balancer that redirects traffic to the target group. +To do this, add the following resources to your `serverless.yml` file: ```yaml ... @@ -257,23 +288,28 @@ resources: CidrBlock: 12.2.1.0/24 ``` -With these resource definitions, you have completed the configuration of your Serverless project. Now you can create your local AWS infrastructure on LocalStack and deploy your Application Load Balancers with the two Lambda functions as targets. +With these resource definitions, you have completed the configuration of your Serverless project. +Now you can create your local AWS infrastructure on LocalStack and deploy your Application Load Balancers with the two Lambda functions as targets. ## Creating the infrastructure on LocalStack -Now that we have completed the initial setup let's run LocalStack's AWS emulation on our local machine. Start LocalStack by running the following command: +Now that we have completed the initial setup let's run LocalStack's AWS emulation on our local machine. +Start LocalStack by running the following command: {{< command >}} $ LOCALSTACK_AUTH_TOKEN= localstack start -d {{< / command >}} -This command launches LocalStack in the background, enabling you to use the AWS services locally. Now, let's deploy our Serverless project and verify the resources created in LocalStack. Run the following command: +This command launches LocalStack in the background, enabling you to use the AWS services locally. +Now, let's deploy our Serverless project and verify the resources created in LocalStack. +Run the following command: {{< command >}} $ npm run deploy {{< / command >}} -This command deploys your Serverless project using the "local" stage. The output will resemble the following: +This command deploys your Serverless project using the "local" stage. +The output will resemble the following: ```bash > serverless-elb@1.0.0 deploy @@ -293,7 +329,9 @@ functions: hello2: test-elb-load-balancing-local-hello2 (157 kB) ``` -This output confirms the successful deployment of your Serverless service to the `local` stage in LocalStack. It also displays information about the deployed Lambda functions (`hello1` and `hello2`). You can run the following command to verify that the functions and the load balancers have been deployed: +This output confirms the successful deployment of your Serverless service to the `local` stage in LocalStack. +It also displays information about the deployed Lambda functions (`hello1` and `hello2`). 
+You can run the following command to verify that the functions and the load balancers have been deployed: {{< command >}} $ awslocal lambda list-functions @@ -334,13 +372,13 @@ $ awslocal elbv2 describe-load-balancers } {{< / command >}} - The ALB endpoints for the two Lambda functions, hello1 and hello2, are accessible at the following URLs: - [`http://lb-test-1.elb.localhost.localstack.cloud:4566/hello1`](http://lb-test-1.elb.localhost.localstack.cloud:4566/hello1) - [`http://lb-test-1.elb.localhost.localstack.cloud:4566/hello2`](http://lb-test-1.elb.localhost.localstack.cloud:4566/hello2) -To test these endpoints, you can use the curl command along with the jq tool for better formatting. Run the following commands: +To test these endpoints, you can use the curl command along with the jq tool for better formatting. +Run the following commands: {{< command >}} $ curl http://lb-test-1.elb.localhost.localstack.cloud:4566/hello1 | jq @@ -349,10 +387,15 @@ $ curl http://lb-test-1.elb.localhost.localstack.cloud:4566/hello2 | jq "Hello 2" {{< / command >}} -Both commands send an HTTP GET request to the endpoints and uses `jq` to format the response. The expected outputs are `Hello 1` & `Hello 2`, representing the Lambda functions' response. +Both commands send an HTTP GET request to the endpoints and uses `jq` to format the response. +The expected outputs are `Hello 1` & `Hello 2`, representing the Lambda functions' response. ## Conclusion -In this tutorial, we have learned how to create an Application Load Balancer (ALB) with two Lambda functions as targets using LocalStack. We have also explored creating, configuring, and deploying a Serverless project with LocalStack. This enables developers to develop and test Cloud and Serverless applications locally conveniently. +In this tutorial, we have learned how to create an Application Load Balancer (ALB) with two Lambda functions as targets using LocalStack. +We have also explored creating, configuring, and deploying a Serverless project with LocalStack. +This enables developers to develop and test Cloud and Serverless applications locally conveniently. -LocalStack offers integrations with various popular tools such as Terraform, Pulumi, Serverless Application Model (SAM), and more. For more information about LocalStack integrations, you can refer to our [Integration documentation]({{< ref "user-guide/integrations">}}). To further explore and experiment with the concepts covered in this tutorial, you can access the code and resources on our [LocalStack Pro samples over GitHub](https://github.com/localstack/localstack-pro-samples/tree/master/elb-load-balancing) along with a `Makefile` for step-by-step execution. +LocalStack offers integrations with various popular tools such as Terraform, Pulumi, Serverless Application Model (SAM), and more. +For more information about LocalStack integrations, you can refer to our [Integration documentation]({{< ref "user-guide/integrations">}}). +To further explore and experiment with the concepts covered in this tutorial, you can access the code and resources on our [LocalStack Pro samples over GitHub](https://github.com/localstack/localstack-pro-samples/tree/master/elb-load-balancing) along with a `Makefile` for step-by-step execution. 
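To experiment with that sample locally, a minimal sketch of getting started looks like this (the available targets are defined in the sample's own `Makefile`, so inspect it rather than assuming specific target names):

```sh
# Grab the Pro samples repository and switch to the ELB sample
git clone https://github.com/localstack/localstack-pro-samples.git
cd localstack-pro-samples/elb-load-balancing

# See which Makefile targets the sample provides before running anything
cat Makefile
```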
diff --git a/content/en/tutorials/ephemeral-application-previews/index.md b/content/en/tutorials/ephemeral-application-previews/index.md index 5765a410e7..f3c582d924 100644 --- a/content/en/tutorials/ephemeral-application-previews/index.md +++ b/content/en/tutorials/ephemeral-application-previews/index.md @@ -21,11 +21,17 @@ leadimage: "ephemeral-application-previews-banner.png" ## Introduction -LocalStack's core cloud emulator allows you set up your cloud infrastructure on your local machine. You can access databases, queues, and other managed services without needing to connect to a remote cloud provider. This speeds up your Software Development Life Cycle (SDLC) by making development and testing more efficient. Despite this, you still need a staging environment to do final acceptance tests before deploying your application to production. +LocalStack's core cloud emulator allows you set up your cloud infrastructure on your local machine. +You can access databases, queues, and other managed services without needing to connect to a remote cloud provider. +This speeds up your Software Development Life Cycle (SDLC) by making development and testing more efficient. +Despite this, you still need a staging environment to do final acceptance tests before deploying your application to production. -In many cases, staging environments are costly and deploying changes to them takes a lot of time. Also, teams can only use one staging environment at a time, which makes it difficult to test changes quickly. +In many cases, staging environments are costly and deploying changes to them takes a lot of time. +Also, teams can only use one staging environment at a time, which makes it difficult to test changes quickly. -With LocalStack's Ephemeral Instances, you can create short-lived, self-contained deployments of LocalStack in the cloud. These Ephemeral Instances let you deploy your application on a remote LocalStack container, creating an Application Preview. This allows you to run end-to-end tests, preview features, and collaborate within your team or across teams asynchronously. +With LocalStack's Ephemeral Instances, you can create short-lived, self-contained deployments of LocalStack in the cloud. +These Ephemeral Instances let you deploy your application on a remote LocalStack container, creating an Application Preview. +This allows you to run end-to-end tests, preview features, and collaborate within your team or across teams asynchronously. This tutorial will show you how to use LocalStack's Ephemeral Instance feature to generate an Application Preview automatically for every new Pull Request (PR) using a GitHub Action workflow. @@ -36,19 +42,21 @@ This tutorial will show you how to use LocalStack's Ephemeral Instance feature t ## Tutorial: Setting up Application Previews for your cloud application -This tutorial uses a [public LocalStack sample](https://github.com/localstack-samples/sample-notes-app-dynamodb-lambda-apigateway) to showcase a simple note-taking application using the modular AWS SDK for JavaScript. The example application deploys several AWS resources including DynamoDB, Lambda, API Gateway, S3, Cognito, and CloudFront, functioning as follows: +This tutorial uses a [public LocalStack sample](https://github.com/localstack-samples/sample-notes-app-dynamodb-lambda-apigateway) to showcase a simple note-taking application using the modular AWS SDK for JavaScript. 
+The example application deploys several AWS resources including DynamoDB, Lambda, API Gateway, S3, Cognito, and CloudFront, functioning as follows: -- Five Lambda functions handle basic CRUD functionality around note entities. -- The frontend is built with React and served via Cloudfront and an S3 bucket. -- DynamoDB is used as a persistence layer to store the notes. -- API Gateway exposes the Lambda functions through HTTP APIs. -- A Cognito User Pool is used for Authentication and Authorization. +- Five Lambda functions handle basic CRUD functionality around note entities. +- The frontend is built with React and served via Cloudfront and an S3 bucket. +- DynamoDB is used as a persistence layer to store the notes. +- API Gateway exposes the Lambda functions through HTTP APIs. +- A Cognito User Pool is used for Authentication and Authorization. This tutorial guides you through setting up a GitHub Action workflow to create an Application Preview of the sample application by deploying it on an ephemeral instance. ### Create the GitHub Action workflow -GitHub Actions serves as a continuous integration and continuous delivery (CI/CD) platform, automating software development workflows directly from GitHub. It allows customization of actions and automation throughout the software development lifecycle. +GitHub Actions serves as a continuous integration and continuous delivery (CI/CD) platform, automating software development workflows directly from GitHub. +It allows customization of actions and automation throughout the software development lifecycle. In this tutorial, you'll implement a workflow that: @@ -56,13 +64,15 @@ In this tutorial, you'll implement a workflow that: - Installs necessary dependencies. - Deploys the application on a ephemeral LocalStack Instance using a GitHub Action Runner to generate a sharable application preview. -To begin, fork the [LocalStack sample repository](https://github.com/localstack-samples/sample-notes-app-dynamodb-lambda-apigateway) on GitHub. If you're using GitHub's `gh` CLI, fork and clone the repository with this command: +To begin, fork the [LocalStack sample repository](https://github.com/localstack-samples/sample-notes-app-dynamodb-lambda-apigateway) on GitHub. +If you're using GitHub's `gh` CLI, fork and clone the repository with this command: -```bash +```bash gh repo fork https://github.com/localstack-samples/sample-notes-app-dynamodb-lambda-apigateway ``` -After forking and cloning, navigate to the `.github/workflows` directory in your forked repository and open the `preview.yml` file. This file will contain the GitHub Action workflow configuration. +After forking and cloning, navigate to the `.github/workflows` directory in your forked repository and open the `preview.yml` file. +This file will contain the GitHub Action workflow configuration. Now you're set to create your GitHub Action workflow, which will deploy your cloud application on an ephemeral instance using LocalStack. @@ -70,13 +80,13 @@ Now you're set to create your GitHub Action workflow, which will deploy your clo To achieve the goal, you can utilize a few prebuilt Actions: -- [`actions/checkout`](https://github.com/actions/checkout): Checkout the application code with Git. -- [`setup-localstack/ephemeral/startup`](https://github.com/localstack/setup-localstack): Configure the workflow to generate the application preview. 
-- [`LocalStack/setup-localstack/finish`](https://github.com/localstack/setup-localstack): Add a comment to the PR, which includes a URL to the application preview. +- [`actions/checkout`](https://github.com/actions/checkout): Checkout the application code with Git. +- [`setup-localstack/ephemeral/startup`](https://github.com/localstack/setup-localstack): Configure the workflow to generate the application preview. +- [`LocalStack/setup-localstack/finish`](https://github.com/localstack/setup-localstack): Add a comment to the PR, which includes a URL to the application preview. You will find the following content to the `preview.yml` file that you opened earlier: -```yaml +```yaml name: Create PR Preview on: @@ -88,7 +98,7 @@ This configuration ensures that every time a pull request is raised, the action A new job named `preview` specifies the GitHub-hosted runner to execute our workflow steps, while checking out the code: -```yaml +```yaml jobs: preview: permissions: write-all @@ -103,13 +113,13 @@ jobs: To deploy the application preview, you can utilize the `LocalStack/setup-localstack/ephemeral/startup` action, which requires the following parameters: -- `github-token`: Automatically configured on the GitHub Action runner. -- `localstack-api-key`: Configuration of a LocalStack CI key (`LOCALSTACK_API_KEY`) to activate licensed features in LocalStack. -- `preview-cmd`: The set of commands necessary to deploy the application, including its infrastructure, on LocalStack. +- `github-token`: Automatically configured on the GitHub Action runner. +- `localstack-api-key`: Configuration of a LocalStack CI key (`LOCALSTACK_API_KEY`) to activate licensed features in LocalStack. +- `preview-cmd`: The set of commands necessary to deploy the application, including its infrastructure, on LocalStack. The following step sets up the dependencies and deploys the application preview on an ephemeral LocalStack instance: -```yaml +```yaml - name: Deploy Preview uses: LocalStack/setup-localstack/ephemeral/startup@v0.2.2 with: @@ -131,14 +141,15 @@ The following step sets up the dependencies and deploys the application preview In the provided workflow: -- Dependencies such as `awslocal`, AWS CDK library, and the `cdklocal` wrapper are installed. -- `Makefile` targets are employed to build the application, bootstrap the CDK stack, and deploy it. -- Additionally, the frontend application is built and deployed on an S3 bucket served via a CloudFront distribution. -- The application preview URL is provided by querying the CloudFront distribution ID using `awslocal`. +- Dependencies such as `awslocal`, AWS CDK library, and the `cdklocal` wrapper are installed. +- `Makefile` targets are employed to build the application, bootstrap the CDK stack, and deploy it. +- Additionally, the frontend application is built and deployed on an S3 bucket served via a CloudFront distribution. +- The application preview URL is provided by querying the CloudFront distribution ID using `awslocal`. -To complete the process, the last step attaches the application preview URL to the Pull Request (PR) as a comment. This allows for quick access to the deployed URL for validating features or enhancements pushed to your application. +To complete the process, the last step attaches the application preview URL to the Pull Request (PR) as a comment. +This allows for quick access to the deployed URL for validating features or enhancements pushed to your application. 
-```yaml +```yaml - name: Finalize PR comment uses: LocalStack/setup-localstack/finish@v0.2.2 with: @@ -149,24 +160,27 @@ To complete the process, the last step attaches the application preview URL to t ### Configure a CI key for GitHub Actions -Before triggering your workflow, set up a continuous integration (CI) key for LocalStack. LocalStack requires a CI Key for usage in CI or similar automated environments to activate licensed features. +Before triggering your workflow, set up a continuous integration (CI) key for LocalStack. +LocalStack requires a CI Key for usage in CI or similar automated environments to activate licensed features. Follow these steps to add your LocalStack CI key to your forked GitHub repository: -- Navigate to the [LocalStack Web Application](https://app.localstack.cloud/) and access the [CI Keys](https://app.localstack.cloud/workspace/ci-keys) page. -- Scroll down to the **Generate CI Key** card, where you can provide a name, and click **Generate CI Key** to receive a new key. -- In your [GitHub repository secrets](https://docs.github.com/en/actions/security-guides/using-secrets-in-github-actions), set the **Name** as `LOCALSTACK_API_KEY` and the **Secret** as the CI Key. +- Navigate to the [LocalStack Web Application](https://app.localstack.cloud/) and access the [CI Keys](https://app.localstack.cloud/workspace/ci-keys) page. +- Scroll down to the **Generate CI Key** card, where you can provide a name, and click **Generate CI Key** to receive a new key. +- In your [GitHub repository secrets](https://docs.github.com/en/actions/security-guides/using-secrets-in-github-actions), set the **Name** as `LOCALSTACK_API_KEY` and the **Secret** as the CI Key. Now, you can commit and push your workflow to your forked GitHub repository. ### Run the GitHub Action workflow -Now that the GitHub Action Workflow is set up, each pull request in your cloud application will undergo building, deployment, and packaging as an application preview running within an ephemeral instance. The workflow will automatically update the application preview whenever new commits are pushed to the pull request. +Now that the GitHub Action Workflow is set up, each pull request in your cloud application will undergo building, deployment, and packaging as an application preview running within an ephemeral instance. +The workflow will automatically update the application preview whenever new commits are pushed to the pull request. PR preview comment for every pull request
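
If you want to exercise the workflow right away, one option is to open a pull request against your fork from the command line.
The following is an illustrative sketch using the `gh` CLI; the branch name and commit message are placeholders:

```bash
# Create a branch with an empty commit and open a PR to trigger the preview workflow
git checkout -b preview-test
git commit --allow-empty -m "Trigger application preview"
git push -u origin preview-test
gh pr create --fill
```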

-In case your deployment encounters issues and fails on LocalStack, you can troubleshoot by incorporating additional steps to generate a diagnostics report. After downloading, you can visualize logs and environment variables using a tool like [`diapretty`](https://github.com/silv-io/diapretty): +In case your deployment encounters issues and fails on LocalStack, you can troubleshoot by incorporating additional steps to generate a diagnostics report. +After downloading, you can visualize logs and environment variables using a tool like [`diapretty`](https://github.com/silv-io/diapretty): ```yaml - name: Generate a Diagnostic Report @@ -183,7 +197,8 @@ In case your deployment encounters issues and fails on LocalStack, you can troub ## Conclusion -In this tutorial, you've learned how to utilize LocalStack's Ephemeral Instances to generate application previews for your cloud applications. You can explore additional use cases with Ephemeral Instances, including: +In this tutorial, you've learned how to utilize LocalStack's Ephemeral Instances to generate application previews for your cloud applications. +You can explore additional use cases with Ephemeral Instances, including: - Injecting a pre-defined Cloud Pod into an ephemeral instance to rapidly spin up infrastructure. - Running your automated end-to-end (E2E) test suite to conduct thorough testing before deploying to production. diff --git a/content/en/tutorials/fault-injection-service-experiments/index.md b/content/en/tutorials/fault-injection-service-experiments/index.md index 19b2541e7b..e53c89b207 100644 --- a/content/en/tutorials/fault-injection-service-experiments/index.md +++ b/content/en/tutorials/fault-injection-service-experiments/index.md @@ -28,14 +28,20 @@ leadimage: "fis-experiments.png" ## Introduction -Fault Injection Simulator (FIS) is a service designed for conducting controlled chaos engineering tests on AWS infrastructure. Its purpose is to uncover vulnerabilities and improve system robustness. FIS offers a means to deliberately introduce failures and observe their impacts, helping developers to better equip their systems against actual outages. To read about the FIS service, refer to the dedicated [FIS documentation](https://docs.localstack.cloud/user-guide/aws/fis/). +Fault Injection Simulator (FIS) is a service designed for conducting controlled chaos engineering tests on AWS infrastructure. +Its purpose is to uncover vulnerabilities and improve system robustness. +FIS offers a means to deliberately introduce failures and observe their impacts, helping developers to better equip their systems against actual outages. +To read about the FIS service, refer to the dedicated [FIS documentation](https://docs.localstack.cloud/user-guide/aws/fis/). ## Getting started This tutorial is designed for users new to the Fault Injection Simulator and assumes basic knowledge of the AWS CLI and our -[`awslocal`](https://github.com/localstack/awscli-local) wrapper script. In this example, we will use the FIS to create controlled outages in a DynamoDB database. The aim is to test the software's behavior and error handling capabilities. +[`awslocal`](https://github.com/localstack/awscli-local) wrapper script. +In this example, we will use the FIS to create controlled outages in a DynamoDB database. +The aim is to test the software's behavior and error handling capabilities. 
-For this particular example, we'll be using a [sample application repository](https://github.com/localstack-samples/samples-chaos-engineering/tree/main/FIS-experiments). Clone the repository, and follow the instructions below to get started. +For this particular example, we'll be using a [sample application repository](https://github.com/localstack-samples/samples-chaos-engineering/tree/main/FIS-experiments). +Clone the repository, and follow the instructions below to get started. ### Prerequisites @@ -45,7 +51,9 @@ The general prerequisites for this guide are: - [AWS CLI]({{}}) with the [`awslocal` wrapper]({{}}) - [Docker](https://docs.docker.com/get-docker/) and [Docker Compose](https://docs.docker.com/compose/install/) -Start LocalStack by using the `docker-compose.yml` file from the repository. Ensure to set your Auth Token as an environment variable during this process. The cloud resources will be automatically created upon the LocalStack start. +Start LocalStack by using the `docker-compose.yml` file from the repository. +Ensure to set your Auth Token as an environment variable during this process. +The cloud resources will be automatically created upon the LocalStack start. {{< command >}} $ LOCALSTACK_AUTH_TOKEN= @@ -60,7 +68,9 @@ The following diagram shows the architecture that this application builds and de ### Creating an experiment template -Before starting any FIS experiments, it's important to verify that our application is functioning correctly. Start by creating an entity and saving it. To do this, use `cURL` to call the API Gateway endpoint for the POST method: +Before starting any FIS experiments, it's important to verify that our application is functioning correctly. +Start by creating an entity and saving it. +To do this, use `cURL` to call the API Gateway endpoint for the POST method: {{< command >}} $ curl --location 'http://12345.execute-api.localhost.localstack.cloud:4566/dev/productApi' \ @@ -69,14 +79,16 @@ $ curl --location 'http://12345.execute-api.localhost.localstack.cloud:4566/dev/ "id": "prod-2004", "name": "Ultimate Gadget", "price": "49.99", - "description": "The Ultimate Gadget is the perfect tool for tech enthusiasts looking for the next level in gadgetry. Compact, powerful, and loaded with features." + "description": "The Ultimate Gadget is the perfect tool for tech enthusiasts looking for the next level in gadgetry. +Compact, powerful, and loaded with features." }' Product added/updated successfully. {{< /command >}} -You can use the file named `experiment-ddb.json` that contains the FIS experiment configuration. This file will be used in the upcoming call to the [`CreateExperimentTemplate`](https://docs.aws.amazon.com/fis/latest/APIReference/API_CreateExperimentTemplate.html) API within the FIS resource. +You can use the file named `experiment-ddb.json` that contains the FIS experiment configuration. +This file will be used in the upcoming call to the [`CreateExperimentTemplate`](https://docs.aws.amazon.com/fis/latest/APIReference/API_CreateExperimentTemplate.html) API within the FIS resource. ```bash $ cat experiment-ddb.json @@ -101,7 +113,8 @@ $ cat experiment-ddb.json } ``` -This template is designed to target all APIs of the DynamoDB resource. While it's possible to specify particular operations like `PutItem` or `GetItem`, the objective here is to entirely disconnect the database. +This template is designed to target all APIs of the DynamoDB resource. 
+While it's possible to specify particular operations like `PutItem` or `GetItem`, the objective here is to entirely disconnect the database. As a result, this configuration will cause all API calls to fail with a 100% failure rate, each resulting in an HTTP 500 status code and a `DynamoDbException`. @@ -132,12 +145,13 @@ $ awslocal fis create-experiment-template --cli-input-json file://experiment-ddb "creationTime": 1699308754.415716, "lastUpdateTime": 1699308754.415716, "roleArn": "arn:aws:iam:000000000000:role/ExperimentRole" - } + } } {{}} -Take note of the `id` field in the response. This is the ID of the experiment template that will be used in the next step. +Take note of the `id` field in the response. +This is the ID of the experiment template that will be used in the next step. ### Starting the experiment @@ -182,7 +196,9 @@ Replace the `` placeholder with the ID of the experiment ### Simulating an outage -Once the experiment starts, the database becomes inaccessible. This means users cannot retrieve or add new products, resulting in the API Gateway returning an Internal Server Error. Downtime and data loss are critical issues to avoid in enterprise applications. +Once the experiment starts, the database becomes inaccessible. +This means users cannot retrieve or add new products, resulting in the API Gateway returning an Internal Server Error. +Downtime and data loss are critical issues to avoid in enterprise applications. Fortunately, encountering this issue early in the development phase allows developers to implement effective error handling and develop mechanisms to prevent data loss during a database outage. @@ -192,7 +208,9 @@ It's important to note that this approach is not limited to DynamoDB; outages ca {{< figure src="fis-experiment-2.png" width="800">}} -A possible solution involves setting up an SNS topic, an SQS queue, and a Lambda function. The Lambda function will be responsible for retrieving queued items and attempting to re-execute the `PutItem` operation on the database. If DynamoDB remains unavailable, the item will be placed back in the queue for a later retry. +A possible solution involves setting up an SNS topic, an SQS queue, and a Lambda function. +The Lambda function will be responsible for retrieving queued items and attempting to re-execute the `PutItem` operation on the database. +If DynamoDB remains unavailable, the item will be placed back in the queue for a later retry. {{< command >}} $ curl --location 'http://12345.execute-api.localhost.localstack.cloud:4566/dev/productApi' \ @@ -201,10 +219,12 @@ $ curl --location 'http://12345.execute-api.localhost.localstack.cloud:4566/dev/ "id": "prod-1003", "name": "Super Widget", "price": "29.99", - "description": "A versatile widget that can be used for a variety of purposes. Durable, reliable, and affordable." + "description": "A versatile widget that can be used for a variety of purposes. +Durable, reliable, and affordable." }' - -A DynamoDB error occurred. Message sent to queue. + +A DynamoDB error occurred. +Message sent to queue. {{< /command >}} @@ -263,7 +283,8 @@ $ awslocal fis stop-experiment --id Replace the `` placeholder with the ID of the experiment that was created in the previous step. -The experiment has been terminated, allowing the Product that initially failed to reach the database to finally be stored successfully. This can be confirmed by scanning the database. 
+The experiment has been terminated, allowing the Product that initially failed to reach the database to finally be stored successfully. +This can be confirmed by scanning the database. {{< command >}} $ awslocal dynamodb scan --table-name Products @@ -275,7 +296,8 @@ $ awslocal dynamodb scan --table-name Products "S": "Super Widget" }, "description": { - "S": "A versatile widget that can be used for a variety of purposes. Durable, reliable, and affordable." + "S": "A versatile widget that can be used for a variety of purposes. +Durable, reliable, and affordable." }, "id": { "S": "prod-1003" @@ -289,7 +311,8 @@ $ awslocal dynamodb scan --table-name Products "S": "Ultimate Gadget" }, "description": { - "S": "The Ultimate Gadget is the perfect tool for tech enthusiasts looking for the next level in gadgetry. Compact, powerful, and loaded with features." + "S": "The Ultimate Gadget is the perfect tool for tech enthusiasts looking for the next level in gadgetry. +Compact, powerful, and loaded with features." }, "id": { "S": "prod-2004" @@ -329,6 +352,7 @@ The LocalStack FIS service can also introduce latency using the following experi "roleArn": "arn:aws:iam:000000000000:role/ExperimentRole" } ``` + Save this template as `latency-experiment.json` and use it to create an experiment definition through the FIS service: {{< command >}} @@ -371,10 +395,10 @@ $ curl --location 'http://12345.execute-api.localhost.localstack.cloud:4566/dev/ "id": "prod-1088", "name": "Super Widget", "price": "29.99", - "description": "A versatile widget that can be used for a variety of purposes. Durable, reliable, and affordable." + "description": "A versatile widget that can be used for a variety of purposes. +Durable, reliable, and affordable." }' An error occurred (InternalError) when calling the GetResources operation (reached max retries: 4): Failing as per Fault Injection Simulator configuration {{< /command >}} - diff --git a/content/en/tutorials/gitlab_ci_testcontainers/index.md b/content/en/tutorials/gitlab_ci_testcontainers/index.md index a6c3cea14f..aeb69274bd 100644 --- a/content/en/tutorials/gitlab_ci_testcontainers/index.md +++ b/content/en/tutorials/gitlab_ci_testcontainers/index.md @@ -30,25 +30,29 @@ leadimage: "ls-gitlab-testcontainers.png" Testcontainers is an open-source framework that provides lightweight APIs for bootstrapping local development and test dependencies with real services wrapped in Docker containers. Running tests with Testcontainers and LocalStack is crucial for AWS-powered applications because it ensures each test runs in a clean, -isolated environment, providing consistency across all development and CI machines. LocalStack avoids AWS costs by emulating +isolated environment, providing consistency across all development and CI machines. +LocalStack avoids AWS costs by emulating services locally, preventing exceeding AWS free tier limits, and eliminates reliance on potentially unstable external AWS services. This allows for the simulation of difficult-to-reproduce scenarios, edge cases, and enables testing of the -entire application stack in an integrated manner. Testing with LocalStack and Testcontainers also integrates +entire application stack in an integrated manner. +Testing with LocalStack and Testcontainers also integrates seamlessly with CI/CD pipelines like GitLab CI or GitHub Actions, allowing developers to run automated tests without requiring AWS credentials or services. 
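
To make this concrete before walking through the setup, here is a minimal, illustrative JUnit 5 sketch of the pattern rather than the sample project's actual test class.
The LocalStack image tag and the bucket name are assumptions made for the example.

```java
import org.junit.jupiter.api.Test;
import org.testcontainers.containers.localstack.LocalStackContainer;
import org.testcontainers.containers.localstack.LocalStackContainer.Service;
import org.testcontainers.junit.jupiter.Container;
import org.testcontainers.junit.jupiter.Testcontainers;
import org.testcontainers.utility.DockerImageName;
import software.amazon.awssdk.auth.credentials.AwsBasicCredentials;
import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider;
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.s3.S3Client;

@Testcontainers
class LocalStackS3SmokeTest {

    // A throwaway LocalStack container that lives for the duration of this test class.
    @Container
    static LocalStackContainer localstack =
            new LocalStackContainer(DockerImageName.parse("localstack/localstack:3.4"))
                    .withServices(Service.S3);

    @Test
    void canTalkToEmulatedS3() {
        // Point an AWS SDK v2 client at the emulated endpoint instead of real AWS.
        S3Client s3 = S3Client.builder()
                .endpointOverride(localstack.getEndpoint())
                .credentialsProvider(StaticCredentialsProvider.create(
                        AwsBasicCredentials.create(localstack.getAccessKey(), localstack.getSecretKey())))
                .region(Region.of(localstack.getRegion()))
                .build();

        // Create a bucket in the emulator; real tests would exercise application code against it.
        s3.createBucket(b -> b.bucket("coffee-descriptions"));
    }
}
```

The test class used in this tutorial follows the same idea, but it uses the Pro image with a `LOCALSTACK_AUTH_TOKEN` and copies the Lambda JAR plus an init script into the container, as described later on.
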
## Prerequisites For this tutorial, you will need: -- [LocalStack Pro](https://docs.localstack.cloud/getting-started/auth-token/) to emulate the AWS services. If you don't have a subscription yet, you can just get a trial license for free. +- [LocalStack Pro](https://docs.localstack.cloud/getting-started/auth-token/) to emulate the AWS services. + If you don't have a subscription yet, you can just get a trial license for free. - [Docker](https://docker.io/) - [A GitLab account](https://gitlab.com/) ## GitLab overview GitLab is striving to be a complete tool for DevOps practices, offering not just source code management and continuous integration, but also features for -monitoring, security, planning, deploying and more. By having your code and CI on the same platform, workflows are simplified and collaboration is enhanced. -While Jenkins is still a very prominent CI/CD tool in the industry, it is up to the user to figure out where to host it and focuses +monitoring, security, planning, deploying and more. +By having your code and CI on the same platform, workflows are simplified and collaboration is enhanced. +While Jenkins is still a very prominent CI/CD tool in the industry, it is up to the user to figure out where to host it and focuses solely on CI/CD features. ## GitLab architecture @@ -57,11 +61,13 @@ solely on CI/CD features.
-As users, we only interact directly with a GitLab instance which is responsible for hosting the application code and all the needed configurations, including the -ones for pipelines. The instance is then in charge of running the pipelines and assigning runners to execute the defined jobs. +As users, we only interact directly with a GitLab instance which is responsible for hosting the application code and all the needed configurations, including the +ones for pipelines. +The instance is then in charge of running the pipelines and assigning runners to execute the defined jobs. -When running CI pipelines, you can choose to use [**GitLab-hosted runners**](https://docs.gitlab.com/ee/ci/runners/index.html), or provision and register -[**self-managed runners**](https://docs.gitlab.com/runner/install/docker.html). This tutorial will cover both. +When running CI pipelines, you can choose to use [**GitLab-hosted runners**](https://docs.gitlab.com/ee/ci/runners/index.html), or provision and register +[**self-managed runners**](https://docs.gitlab.com/runner/install/docker.html). +This tutorial will cover both. ### Runners hosted by GitLab @@ -69,31 +75,34 @@ The GitLab documentation highlights some key aspects about the provided runners: - They can run on Linux, Windows (beta) and MacOS (beta). - They are enabled by default for all projects, with no configuration required. -- Each job is executed by a newly provisioned VM. +- Each job is executed by a newly provisioned VM. - Job runs have `sudo` access without a password. -- VMs are isolated between job executions. +- VMs are isolated between job executions. - Their storage is shared by the operating system, the image with pre-installed software, and a copy of your cloned repository, meaning that the remaining disk space for jobs will be reduced. -- The runners are configured to run in privileged mode to support Docker in Docker to build images natively or +- The runners are configured to run in privileged mode to support Docker in Docker to build images natively or run multiple containers within each job. ### Self-hosted runners -Essentially, the architecture does not change, except the runners will be executing the jobs on a local machine. For developing locally, +Essentially, the architecture does not change, except the runners will be executing the jobs on a local machine. +For developing locally, this approach is very convenient and there are several benefits: -- **Customization**: you can configure the runners to suit your specific needs and environment. -- **Performance**: improved performance and faster builds by leveraging your own hardware. -- **Security**: enhanced control over your data and build environment, reducing exposure to external threats. +- **Customization**: you can configure the runners to suit your specific needs and environment. +- **Performance**: improved performance and faster builds by leveraging your own hardware. +- **Security**: enhanced control over your data and build environment, reducing exposure to external threats. - **Resource Management**: better management and allocation of resources to meet your project's demands. - **Cost Efficiency**: depending on your alternatives, you can avoid usage fees associated with cloud-hosted runners. - ## Application Overview -Our sample backend application stores information about different types of coffee in files, with descriptions stored in an S3 bucket. 
It utilizes two -Lambda functions to create/update and retrieve these descriptions, all accessible through an API Gateway. While we won't delve -into the details of creating these AWS resources, we'll use AWS CLI to initialize them during container startup using init hooks. You can +Our sample backend application stores information about different types of coffee in files, with descriptions stored in an S3 bucket. +It utilizes two +Lambda functions to create/update and retrieve these descriptions, all accessible through an API Gateway. +While we won't delve +into the details of creating these AWS resources, we'll use AWS CLI to initialize them during container startup using init hooks. +You can find the whole setup in the [init-resources.sh](https://gitlab.com/tinyg210/coffee-backend-localstack/-/blob/main/src/test/resources/init-resources.sh?ref_type=heads) file. The following diagram visually explains the simple workflows that we want to check in our automated test in CI, using Testcontainers. We'll need to make sure that the files are correctly created and named, that the validations and exceptions happen as expected. @@ -107,7 +116,7 @@ We'll need to make sure that the files are correctly created and named, that the To follow along, make changes to the code or run your own pipelines, you may fork the repository from the [coffee-backend-localstack sample](https://gitlab.com/tinyg210/coffee-backend-localstack).
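
After forking it, a typical way to get a local copy is to clone your fork; replace `<your-username>` with your own GitLab namespace:

```bash
git clone https://gitlab.com/<your-username>/coffee-backend-localstack.git
cd coffee-backend-localstack
```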
-The application is developed, built and tested locally, the next step is to establish a quality gate in the pipeline, to make sure nothing breaks. +The application is developed, built and tested locally, the next step is to establish a quality gate in the pipeline, to make sure nothing breaks. The basis for the container used for testing looks like this: @@ -140,7 +149,8 @@ Here's a breakdown of what's important: - The image used for the test LocalStack instance is set to the latest Pro version (at the time of writing). - In order to use the Pro image, a `LOCALSTACK_AUTH_TOKEN` variable needs to be set and read from the environment. - There are two files copied to the container before startup: the JAR file for the Lambda functions and the script for provisioning -all the necessary AWS resources. Both files are copied with read/write/execute permissions. +all the necessary AWS resources. + Both files are copied with read/write/execute permissions. - `DEBUG=1` enables a more verbose logging of LocalStack. - `LAMBDA_DOCKER_FLAGS` sets specific Testcontainers labels to the Lambda containers, as a solution to be correctly managed by Ryuk. Since the compute containers are created by LocalStack and not the Testcontainers framework, they do not receive the necessary tags. @@ -149,11 +159,13 @@ Since the compute containers are created by LocalStack and not the Testcontainer {{< alert title="Sidenote" >}} -Ryuk is a component of Testcontainers that helps manage and clean up Docker resources created during testing. Specifically, Ryuk +Ryuk is a component of Testcontainers that helps manage and clean up Docker resources created during testing. +Specifically, Ryuk ensures that any Docker containers, networks, volumes, and other resources are properly removed when they are no longer needed. This prevents resource leaks and ensures that the testing environment remains clean and consistent between test runs. -When Testcontainers starts, it typically launches a Ryuk container in the background. This container continuously monitors +When Testcontainers starts, it typically launches a Ryuk container in the background. +This container continuously monitors the Docker resources created by Testcontainers and removes them once the test execution is complete or if they are no longer in use. {{< /alert >}} @@ -163,10 +175,14 @@ For this tutorial you don't really need to dive into the specifics of the tests, ### Setting up the pipeline configuration The `.gitlab-ci.yml` file is a configuration file for defining GitLab CI/CD pipelines, which automate the process of building, testing, -and deploying applications. It specifies stages (such as build, test, and deploy) and the jobs within each stage, detailing the commands -to be executed. Jobs can define dependencies, artifacts, and environment variables. Pipelines are triggered by events like code pushes, +and deploying applications. +It specifies stages (such as build, test, and deploy) and the jobs within each stage, detailing the commands +to be executed. +Jobs can define dependencies, artifacts, and environment variables. +Pipelines are triggered by events like code pushes, merge requests, or schedules, and they are executed by runners. -This file enables automated, consistent, and repeatable workflows for software development and deployment. In this example we will focus on +This file enables automated, consistent, and repeatable workflows for software development and deployment. +In this example we will focus on just the building and testing parts. 
Let's break down the `.gitlab-ci.yml` for this project: @@ -223,17 +239,20 @@ test_job: ``` - `image: ubuntu:latest` - This specifies the base Docker image used for all jobs in the pipeline. `ubuntu:latest` is a popular and -easy choice because it's a well-known, stable, and widely-supported Linux distribution. It ensures a consistent environment across -all pipeline stages. Each job can define its own image (for example `maven` or `docker` images), but in this case a generic image with the +easy choice because it's a well-known, stable, and widely-supported Linux distribution. + It ensures a consistent environment across +all pipeline stages. + Each job can define its own image (for example `maven` or `docker` images), but in this case a generic image with the necessary dependencies (curl, Java, maven, docker) installed covers the needs for both stages. - `before_script` - these commands are run before any job script in the pipeline, on top of the Ubuntu image. - The two stages are defined at the top: `build` and `test`. -- `cache` - caches the Maven dependencies to speed up subsequent pipeline runs. +- `cache` - caches the Maven dependencies to speed up subsequent pipeline runs. - `.m2/repository` - this is the default location where Maven stores its local repository of dependencies. - The `script` section - specifies the scripts that run for each job. - `artifacts` - specifies the build artifacts (e.g., JAR files) to be preserved and passed to the next stages (the `target` folder). - The build job runs only on the `main` branch. -- `docker:26.1.2-dind` - specifies the service necessary to use Docker-in-Docker to run Docker commands inside the pipeline job. This is +- `docker:26.1.2-dind` - specifies the service necessary to use Docker-in-Docker to run Docker commands inside the pipeline job. + This is useful for integration testing with Docker containers. - Variables: - `DOCKER_HOST: tcp://docker:2375` - sets the Docker host to communicate with the Docker daemon inside the dind service. @@ -243,26 +262,34 @@ useful for integration testing with Docker containers. ### Executors -We mentioned in the beginning that each job runs in a newly provisioned VM. You can also notice that the pipeline configuration mentions -a docker image, which is a template that contains instructions for creating a container. This might look confusing, but a runner is responsible -for the execution of one job. This runner is installed on a machine and implements -a certain [executor](https://docs.gitlab.com/runner/executors/). The executor determines the environment in which the job runs. By -default, the GitLab-managed runners use a Docker Machine executor. Some other available executor options are: SSH, Shell, Parallels, +We mentioned in the beginning that each job runs in a newly provisioned VM. +You can also notice that the pipeline configuration mentions +a docker image, which is a template that contains instructions for creating a container. +This might look confusing, but a runner is responsible +for the execution of one job. +This runner is installed on a machine and implements +a certain [executor](https://docs.gitlab.com/runner/executors/). +The executor determines the environment in which the job runs. +By +default, the GitLab-managed runners use a Docker Machine executor. +Some other available executor options are: SSH, Shell, Parallels, VirtualBox, Docker, Docker Autoscaler, Kubernetes. 
Sometimes visualizing the components of a pipeline can be tricky, so let's simplify this into a diagram: {{< figure src="gitlab-ci-diagram.png" width="80%" height="auto">}} -Basically, the `service` is an additional container that starts at the same time as the one running the `test_job`. The job container has +Basically, the `service` is an additional container that starts at the same time as the one running the `test_job`. +The job container has a Docker client, and it communicates with the Docker daemon, running in the service container, in order to spin up more containers, in this case for the Lambda functions. -Don't forget to add your `LOCALSTACK_AUTH_TOKEN` as a masked variable in your CI/CD settings. +Don't forget to add your `LOCALSTACK_AUTH_TOKEN` as a masked variable in your CI/CD settings. ```vue Settings -> CI/CD -> Expand the Variables section -> Add variable ``` + {{< figure src="ci-variable.png" width="80%" height="auto">}} In the web interface, under the Jobs section, you can see the jobs that ran, and you can also filter them based on their status. @@ -271,11 +298,13 @@ In the web interface, under the Jobs section, you can see the jobs that ran, and ## CI Pipeline Using Self-hosted Runners -There are some cases when you want to run your pipelines locally and GitLab can provide that functionality. -If you're new to the GitLab ecosystem, you need to be careful in configuring this setup, because it's easy to overlook an important field which +There are some cases when you want to run your pipelines locally and GitLab can provide that functionality. +If you're new to the GitLab ecosystem, you need to be careful in configuring this setup, because it's easy to overlook an important field which can hinder your job runs. -Let's get started by using the web interface. In your GitLab project, in the left-hand side panel, follow the path: +Let's get started by using the web interface. +In your GitLab project, in the left-hand side panel, follow the path: + ```vue Settings -> CI/CD -> Expand the Runners section -> Project runners -> New project runner ``` @@ -292,15 +321,22 @@ This dashboard may suffer changes and improvements over time, but the attributes {{< figure src="create-runner-2.png" width="80%" height="auto">}} -After selecting the Linux machine you're done with defining the runner. Now you need a place to execute this runner, which will be your local -computer. Notice the token in the first step command and save it for later. Runner authentication tokens have the prefix `glrt-`. +After selecting the Linux machine you're done with defining the runner. +Now you need a place to execute this runner, which will be your local +computer. +Notice the token in the first step command and save it for later. +Runner authentication tokens have the prefix `glrt-`. -For simplicity, we'll use a GitLab Runner Docker image. The GitLab Runner Docker images are designed as wrappers around the standard -`gitlab-runner` command, like if GitLab Runner was installed directly on the host. You can read more about it in the [GitLab documentation](https://docs.gitlab.com/runner/install/docker.html). +For simplicity, we'll use a GitLab Runner Docker image. +The GitLab Runner Docker images are designed as wrappers around the standard +`gitlab-runner` command, like if GitLab Runner was installed directly on the host. +You can read more about it in the [GitLab documentation](https://docs.gitlab.com/runner/install/docker.html). -Make sure you have Docker installed. 
To verify your setup you can run the `docker info` command. +Make sure you have Docker installed. +To verify your setup you can run the `docker info` command. -Now, you need to create a volume on the disk that holds the configuration for the runner. You can have different volumes that can be +Now, you need to create a volume on the disk that holds the configuration for the runner. +You can have different volumes that can be used for different runners. {{}} @@ -352,14 +388,16 @@ Configuration loaded builds=0 max_builds=1 ``` Let's look at the `config.toml` file and make the final adjustment before successfully running the pipeline. -For running a job that does not require any additional containers to be created, you can stop here. However, since -we need to run Docker commands in our CI/CD jobs, we must configure GitLab Runner to support those commands. +For running a job that does not require any additional containers to be created, you can stop here. +However, since +we need to run Docker commands in our CI/CD jobs, we must configure GitLab Runner to support those commands. This method requires `privileged` mode. -Let's use the current running container to do that. Run the following: +Let's use the current running container to do that. +Run the following: ```commandline -$ docker exec -it gitlab-runner bin/bash +docker exec -it gitlab-runner bin/bash ``` Inside the container, let's run: @@ -384,7 +422,8 @@ $ apt update && apt install nano $ nano config.toml {{}} -The `privileged` field needs to be changed to `true`. Now the configurations should look like this: +The `privileged` field needs to be changed to `true`. +Now the configurations should look like this: ```toml connection_max_age = "15m0s" @@ -419,13 +458,16 @@ shutdown_timeout = 0 network_mtu = 0 ``` -`[CTRL] + [X]` to save and exit the file. The runner is ready to use. You can now run your pipeline by pushing changes to your project +`[CTRL] + [X]` to save and exit the file. +The runner is ready to use. +You can now run your pipeline by pushing changes to your project or from the dashboard, by going to `Build -> Pipelines` and using the `Run pipeline` button. ## Conclusion In this tutorial, we've covered setting up a CI pipeline with GitLab runners and configuring a local Docker container to run the pipeline -using a self-configured GitLab runner. Overall, the GitLab platform is an intricate system that can be used for highly complex projects to serve -a multitude of purposes. With the steps learnt in this article, you can efficiently run end-to-end tests for your application using Testcontainers +using a self-configured GitLab runner. +Overall, the GitLab platform is an intricate system that can be used for highly complex projects to serve +a multitude of purposes. +With the steps learnt in this article, you can efficiently run end-to-end tests for your application using Testcontainers and LocalStack. - diff --git a/content/en/tutorials/iam-policy-stream/index.md b/content/en/tutorials/iam-policy-stream/index.md index 0c6c97dc83..6a7bba1f4c 100644 --- a/content/en/tutorials/iam-policy-stream/index.md +++ b/content/en/tutorials/iam-policy-stream/index.md @@ -20,32 +20,45 @@ platform: ## Introduction -When you're developing cloud and serverless applications, you need to grant access to various AWS resources like S3 buckets and RDS databases. To handle this, you create IAM roles and assign permissions through policies. 
However, configuring these policies can be challenging, especially if you want to ensure minimal access of all principals to your resources. +When you're developing cloud and serverless applications, you need to grant access to various AWS resources like S3 buckets and RDS databases. +To handle this, you create IAM roles and assign permissions through policies. +However, configuring these policies can be challenging, especially if you want to ensure minimal access of all principals to your resources. -[LocalStack IAM Policy Stream](https://app.localstack.cloud/policy-stream) automates the generation of IAM policies for your AWS API requests on your local machine. This stream helps you identify the necessary permissions for your cloud application and allows you to detect logical errors, such as unexpected actions in your policies. +[LocalStack IAM Policy Stream](https://app.localstack.cloud/policy-stream) automates the generation of IAM policies for your AWS API requests on your local machine. +This stream helps you identify the necessary permissions for your cloud application and allows you to detect logical errors, such as unexpected actions in your policies. -This tutorial will guide you through setting up IAM Policy Stream for a locally running AWS application. We'll use a basic example involving an S3 bucket, an SQS queue, and a bucket notification configuration. You'll generate the policy for the bucket notification configuration and insert it into the SQS queue. +This tutorial will guide you through setting up IAM Policy Stream for a locally running AWS application. +We'll use a basic example involving an S3 bucket, an SQS queue, and a bucket notification configuration. +You'll generate the policy for the bucket notification configuration and insert it into the SQS queue. ## Why use IAM Policy Stream? -LocalStack enables you to create and enforce local IAM roles and policies using the [`ENFORCE_IAM` feature](https://docs.localstack.cloud/user-guide/security-testing/iam-enforcement/). However, users often struggle to figure out the necessary permissions for different actions. It's important to find a balance, avoiding giving too many permissions while making sure the right ones are granted. +LocalStack enables you to create and enforce local IAM roles and policies using the [`ENFORCE_IAM` feature](https://docs.localstack.cloud/user-guide/security-testing/iam-enforcement/). +However, users often struggle to figure out the necessary permissions for different actions. +It's important to find a balance, avoiding giving too many permissions while making sure the right ones are granted. -This challenge becomes more complex when dealing with AWS services that make requests not directly visible to users. For instance, if an SNS topic sends a message to an SQS queue and the underlying call fails, there might be no clear error message, causing confusion, especially for those less familiar with the services. +This challenge becomes more complex when dealing with AWS services that make requests not directly visible to users. +For instance, if an SNS topic sends a message to an SQS queue and the underlying call fails, there might be no clear error message, causing confusion, especially for those less familiar with the services. -IAM Policy Stream simplifies this by automatically generating the needed policies and showing them to users. This makes it easier to integrate with resources, roles, and users, streamlining the development process. 
Additionally, it serves as a useful learning tool, helping users understand the permissions linked to various AWS calls and improving the onboarding experience for newcomers to AWS. +IAM Policy Stream simplifies this by automatically generating the needed policies and showing them to users. +This makes it easier to integrate with resources, roles, and users, streamlining the development process. +Additionally, it serves as a useful learning tool, helping users understand the permissions linked to various AWS calls and improving the onboarding experience for newcomers to AWS. ## Prerequisites -- [LocalStack CLI](https://docs.localstack.cloud/getting-started/installation/#localstack-cli) with [`LOCALSTACK_AUTH_TOKEN`](https://docs.localstack.cloud/getting-started/auth-token/) -- [Docker](https://docs.docker.com/get-docker/) -- [Terraform](https://developer.hashicorp.com/terraform/install) & [`tflocal` wrapper](https://github.com/localstack/terraform-local) -- [AWS](https://docs.aws.amazon.com/cli/v1/userguide/cli-chap-install.html) CLI with [`awslocal` wrapper](https://github.com/localstack/awscli-local) -- [LocalStack Web Application account](https://app.localstack.cloud/sign-up) -- [`jq`](https://jqlang.github.io/jq/download/) +- [LocalStack CLI](https://docs.localstack.cloud/getting-started/installation/#localstack-cli) with [`LOCALSTACK_AUTH_TOKEN`](https://docs.localstack.cloud/getting-started/auth-token/) +- [Docker](https://docs.docker.com/get-docker/) +- [Terraform](https://developer.hashicorp.com/terraform/install) & [`tflocal` wrapper](https://github.com/localstack/terraform-local) +- [AWS](https://docs.aws.amazon.com/cli/v1/userguide/cli-chap-install.html) CLI with [`awslocal` wrapper](https://github.com/localstack/awscli-local) +- [LocalStack Web Application account](https://app.localstack.cloud/sign-up) +- [`jq`](https://jqlang.github.io/jq/download/) ## Tutorial: Configure an S3 bucket for event notifications using SQS -In this tutorial, you will configure a LocalStack S3 bucket to send event notifications to an SQS queue. You will then use IAM Policy Stream to generate the necessary IAM policy for the SQS queue. You will use Terraform to create the resources and the AWS CLI to interact with them. With LocalStack's IAM enforcement enabled, you can thoroughly test your policy and ensure that the development setup mirrors the production environment. +In this tutorial, you will configure a LocalStack S3 bucket to send event notifications to an SQS queue. +You will then use IAM Policy Stream to generate the necessary IAM policy for the SQS queue. +You will use Terraform to create the resources and the AWS CLI to interact with them. +With LocalStack's IAM enforcement enabled, you can thoroughly test your policy and ensure that the development setup mirrors the production environment. ### Start your LocalStack container @@ -57,12 +70,13 @@ $ DEBUG=1 IAM_SOFT_MODE=1 localstack start In the above command: -- `DEBUG=1` turns on detailed logging to check API calls and IAM violations. -- `IAM_SOFT_MODE=1` lets you test IAM enforcement by logging violations without stopping the API calls. +- `DEBUG=1` turns on detailed logging to check API calls and IAM violations. +- `IAM_SOFT_MODE=1` lets you test IAM enforcement by logging violations without stopping the API calls. ### Create the Terraform configuration -Create a new file called `main.tf` for the Terraform setup of an S3 bucket and an SQS queue. 
Start by using the `aws_sqs_queue` resource to create an SQS queue named `s3-event-notification-queue`. +Create a new file called `main.tf` for the Terraform setup of an S3 bucket and an SQS queue. +Start by using the `aws_sqs_queue` resource to create an SQS queue named `s3-event-notification-queue`. ```hcl resource "aws_sqs_queue" "queue" { @@ -93,14 +107,18 @@ resource "aws_s3_bucket_notification" "bucket_notification" { ### Deploy the Terraform configuration -You can use `tflocal` to deploy your Terraform configuration within the LocalStack environment. Run the following commands to initialize and apply the Terraform configuration: +You can use `tflocal` to deploy your Terraform configuration within the LocalStack environment. +Run the following commands to initialize and apply the Terraform configuration: {{< command >}} $ tflocal init $ tflocal apply {{< /command >}} -You will be prompted to confirm the changes. Type `yes` to continue. Since LocalStack is used, no real AWS resources are created. LocalStack will emulate ephemeral development resources that will be removed automatically once you stop the LocalStack container. +You will be prompted to confirm the changes. +Type `yes` to continue. +Since LocalStack is used, no real AWS resources are created. +LocalStack will emulate ephemeral development resources that will be removed automatically once you stop the LocalStack container. After applying the Terraform configuration, the output will appear similar to this: @@ -118,12 +136,14 @@ Apply complete! Resources: 3 added, 0 changed, 0 destroyed. ### Start the IAM Policy Stream -Access the [LocalStack Web Application](https://app.localstack.cloud/) and go to the [IAM Policy Stream dashboard](https://app.localstack.cloud/policy-stream). This feature enables you to directly examine the generated policies, displaying the precise permissions required for each API call. +Access the [LocalStack Web Application](https://app.localstack.cloud/) and go to the [IAM Policy Stream dashboard](https://app.localstack.cloud/policy-stream). +This feature enables you to directly examine the generated policies, displaying the precise permissions required for each API call. IAM Policy Stream dashboard
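
At this point you can optionally double-check that the Terraform-created bucket and queue exist:

```bash
awslocal s3api list-buckets --query 'Buckets[].Name'
awslocal sqs list-queues
```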

-You'll observe the Stream active status icon, indicating that making any local AWS API request will trigger the generation of an IAM Policy. Now, let's proceed to upload a file to the S3 bucket to trigger the event notification and generate the IAM policy. +You'll observe the Stream active status icon, indicating that making any local AWS API request will trigger the generation of an IAM Policy. +Now, let's proceed to upload a file to the S3 bucket to trigger the event notification and generate the IAM policy. ### Trigger the event notification @@ -134,7 +154,8 @@ $ echo "Hello, LocalStack" > some-log-file.log $ awslocal s3 cp some-log-file.log s3://s3-event-notification-bucket/ {{< /command >}} -Uploading a file will activate an event notification, sending a message to the SQS queue. However, since the SQS queue lacks the necessary permissions, an IAM violation will appear in the [IAM Policy Stream dashboard](https://app.localstack.cloud/policy-stream). +Uploading a file will activate an event notification, sending a message to the SQS queue. +However, since the SQS queue lacks the necessary permissions, an IAM violation will appear in the [IAM Policy Stream dashboard](https://app.localstack.cloud/policy-stream). IAM Policy Stream showcasing an IAM violation

@@ -151,12 +172,15 @@ You can also navigate to the LocalStack logs and observe the IAM violation messa ### Generate the IAM policy -Go to the IAM Policy Stream dashboard and review the API calls such as `PutObject`, `SendMessage`, and `ReceiveMessage`. Notice that the `SendMessage` call was denied due to an IAM violation. Click on the **SQS.SendMessage** action to see the suggested IAM policy. +Go to the IAM Policy Stream dashboard and review the API calls such as `PutObject`, `SendMessage`, and `ReceiveMessage`. +Notice that the `SendMessage` call was denied due to an IAM violation. +Click on the **SQS.SendMessage** action to see the suggested IAM policy. IAM Policy Stream showcasing the required SQS policy
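
The suggested policy for this scenario generally follows the standard shape that allows the S3 service principal to send messages to the queue.
The sketch below is illustrative; the exact statement shown in the dashboard may differ:

```json
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Principal": { "Service": "s3.amazonaws.com" },
      "Action": "sqs:SendMessage",
      "Resource": "arn:aws:sqs:us-east-1:000000000000:s3-event-notification-queue",
      "Condition": {
        "ArnEquals": { "aws:SourceArn": "arn:aws:s3:::s3-event-notification-bucket" }
      }
    }
  ]
}
```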

-LocalStack automatically recommends a resource-based policy for the SQS queue `arn:aws:sqs:us-east-1:000000000000:s3-event-notification-queue`. Copy this policy and incorporate it into your Terraform configuration under the `aws_sqs_queue` resource by adding the `policy` attribute:
+LocalStack automatically recommends a resource-based policy for the SQS queue `arn:aws:sqs:us-east-1:000000000000:s3-event-notification-queue`.
+Copy this policy and incorporate it into your Terraform configuration under the `aws_sqs_queue` resource by adding the `policy` attribute:
```hcl
resource "aws_sqs_queue" "queue" {
@@ -194,7 +218,8 @@ Now, re-apply the Terraform configuration to update the SQS queue with the new p
$ tflocal apply
{{< /command >}}
-Next, trigger the event notification again by uploading a file to the S3 bucket. You can confirm that the S3 bucket is correctly set up for event notifications through the SQS queue by checking if the message is received in the SQS queue:
+Next, trigger the event notification again by uploading a file to the S3 bucket.
+You can confirm that the S3 bucket is correctly set up for event notifications through the SQS queue by checking if the message is received in the SQS queue:
{{< command >}}
$ awslocal sqs receive-message \
@@ -223,20 +248,29 @@ You can now check the IAM Policy Stream dashboard to confirm that there are no v
### Generate a comprehensive policy
-In scenarios where there are many AWS services, and every AWS API request generates a policy it might be cumbersome to analyze every policy. In such cases, you can generate one comprehensive policy for all your AWS resources together.
+In scenarios where there are many AWS services, and every AWS API request generates a policy, it might be cumbersome to analyze every policy.
+In such cases, you can generate one comprehensive policy for all your AWS resources together.
-You can navigate to the **Summary Policy** tab on the IAM Policy Stream dashboard. This concatenates the policy per principle which the policy should be attached to. For the example above, you would be able to see the **Identity Policy** for the root user which has all the actions and resources inside one single policy file for the operations we performed.
+You can navigate to the **Summary Policy** tab on the IAM Policy Stream dashboard.
+This concatenates the policy per principal which the policy should be attached to.
+For the example above, you would be able to see the **Identity Policy** for the root user which has all the actions and resources inside one single policy file for the operations we performed.
Required identity based policy
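
As a rough illustration, an identity policy covering just the data-plane calls above would have a shape like the following.
The actual summary also includes the control-plane calls Terraform made while provisioning, so the real output is longer:

```json
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": ["s3:PutObject"],
      "Resource": "arn:aws:s3:::s3-event-notification-bucket/*"
    },
    {
      "Effect": "Allow",
      "Action": ["sqs:ReceiveMessage"],
      "Resource": "arn:aws:sqs:us-east-1:000000000000:s3-event-notification-queue"
    }
  ]
}
```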

-On the other hand, you have the **Resource Policy** for the SQS queue, where you can see the permission necessary for the subscription. For larger AWS applications, you would be able to find multiple roles and multiple resource-based policies depending on your scenario.
+On the other hand, you have the **Resource Policy** for the SQS queue, where you can see the permission necessary for the subscription.
+For larger AWS applications, you will typically find multiple roles and multiple resource-based policies, depending on your scenario.

Required resource based policy

## Conclusion -IAM Policy Stream streamlines your development process by minimizing the manual creation of policies and confirming the necessity of granted permissions. However, it is advisable to manually confirm that your policy aligns with your intended actions. Your code may unintentionally make requests, and LocalStack considers all requests made during policy generation as valid. +IAM Policy Stream streamlines your development process by minimizing the manual creation of policies and confirming the necessity of granted permissions. +However, it is advisable to manually confirm that your policy aligns with your intended actions. +Your code may unintentionally make requests, and LocalStack considers all requests made during policy generation as valid. -A practical scenario is automating tests, such as integration or end-to-end testing, against your application using LocalStack. This setup allows LocalStack to automatically generate policies with the required permissions. However, it's important to note that these generated policies may not cover all possible requests, as only the requests made during testing are included. You can then review and customize the policies to meet your needs, ensuring that overly permissive policies don't find their way into production environments. +A practical scenario is automating tests, such as integration or end-to-end testing, against your application using LocalStack. +This setup allows LocalStack to automatically generate policies with the required permissions. +However, it's important to note that these generated policies may not cover all possible requests, as only the requests made during testing are included. +You can then review and customize the policies to meet your needs, ensuring that overly permissive policies don't find their way into production environments. diff --git a/content/en/tutorials/java-notification-app/index.md b/content/en/tutorials/java-notification-app/index.md index aadace22ee..b8f0f854eb 100644 --- a/content/en/tutorials/java-notification-app/index.md +++ b/content/en/tutorials/java-notification-app/index.md @@ -29,11 +29,18 @@ leadimage: "java-notification-app-featured-image.png" --- Java is a popular platform for cloud applications that use Amazon Web Services. -With the AWS Java SDK, Java developers can build applications that work with various AWS services, like Simple Email Service (SES), Simple Queue Service (SQS), Simple Notification Service (SNS), and more. Simple Email Service (SES) is a cloud-based email-sending service that enables developers to integrate email functionality into their applications running on AWS. SES allows developers to work without an on-prem Simple Mail Transfer Protocol (SMTP) system and send bulk emails to many recipients. +With the AWS Java SDK, Java developers can build applications that work with various AWS services, like Simple Email Service (SES), Simple Queue Service (SQS), Simple Notification Service (SNS), and more. +Simple Email Service (SES) is a cloud-based email-sending service that enables developers to integrate email functionality into their applications running on AWS. +SES allows developers to work without an on-prem Simple Mail Transfer Protocol (SMTP) system and send bulk emails to many recipients. -[LocalStack Pro](https://app.localstack.cloud/) supports SES along with a simple user interface to inspect email accounts and sent messages. LocalStack also supports sending SES messages through an actual SMTP email server. We will use SQS and SNS to process the emails. 
We would further employ a CloudFormation stack to configure the infrastructure and configure SNS & SQS subscriptions. AWS Java SDK would be employed to receive these SQS messages and to send these messages through SES further. +[LocalStack Pro](https://app.localstack.cloud/) supports SES along with a simple user interface to inspect email accounts and sent messages. +LocalStack also supports sending SES messages through an actual SMTP email server. +We will use SQS and SNS to process the emails. +We would further employ a CloudFormation stack to configure the infrastructure and configure SNS & SQS subscriptions. +AWS Java SDK would be employed to receive these SQS messages and to send these messages through SES further. -In this tutorial, we will build a Java Spring Boot application that uses locally emulated AWS infrastructure on LocalStack provisioned by CloudFormation, and that uses the Java AWS SDK to send SES, SQS, and SNS messages. We will further use [MailHog](https://github.com/mailhog/MailHog), a local SMTP server, to inspect the emails sent through SES via an intuitive user interface. +In this tutorial, we will build a Java Spring Boot application that uses locally emulated AWS infrastructure on LocalStack provisioned by CloudFormation, and that uses the Java AWS SDK to send SES, SQS, and SNS messages. +We will further use [MailHog](https://github.com/mailhog/MailHog), a local SMTP server, to inspect the emails sent through SES via an intuitive user interface. ## Prerequisites @@ -48,7 +55,10 @@ For this tutorial, you will need: ## Project setup -To get started, we will set up our Spring Boot project by implementing a single module named `example` that will house our application code. The module will contain the code required to set up our AWS configuration, notification service, and message application. We will have another directory called `resources` that will house our CloudFormation stack required to set up an SNS topic and an SQS queue. The project directory would look like this: +To get started, we will set up our Spring Boot project by implementing a single module named `example` that will house our application code. +The module will contain the code required to set up our AWS configuration, notification service, and message application. +We will have another directory called `resources` that will house our CloudFormation stack required to set up an SNS topic and an SQS queue. +The project directory would look like this: ```bash ├── pom.xml @@ -145,11 +155,15 @@ In our root POM configuration, we will add the following dependencies: ``` -In the above POM file, we have added the AWS Java SDK dependencies for SES, SNS, SQS, and CloudFormation. We have also added the Spring Boot dependencies for our application. We can move on to the next step with the initial setup complete. +In the above POM file, we have added the AWS Java SDK dependencies for SES, SNS, SQS, and CloudFormation. +We have also added the Spring Boot dependencies for our application. +We can move on to the next step with the initial setup complete. ## Setting up AWS configuration -To get started, we will setup the AWS configuration, to be defined in `AwsConfiguration.java`, required for our Spring Boot application. We will create a configuration class to use the Spring Bean annotation to create two beans: `SesClient` and a `SqsClient`, to connect to the SES and SQS clients respectively. 
We will then create a bean to retrieve the `queueUrl` for the `email-notification-queue`: +To get started, we will setup the AWS configuration, to be defined in `AwsConfiguration.java`, required for our Spring Boot application. +We will create a configuration class to use the Spring Bean annotation to create two beans: `SesClient` and a `SqsClient`, to connect to the SES and SQS clients respectively. +We will then create a bean to retrieve the `queueUrl` for the `email-notification-queue`: ```java package com.example; @@ -203,7 +217,8 @@ public class AwsConfiguration { } ``` -In the above code, we have used the `@Autowired` annotation to autowrire the dependencies that are required for the application (`SqsClient` `SesClient`, and `notificationQueueUrl` in this case). Now that we have got the URL of the queue created in the previous step, we can move on to the next step. +In the above code, we have used the `@Autowired` annotation to autowrire the dependencies that are required for the application (`SqsClient` `SesClient`, and `notificationQueueUrl` in this case). +Now that we have got the URL of the queue created in the previous step, we can move on to the next step. {{< callout "note" >}} You can also use the pre-defined clients from the [localstack-utils](https://mvnrepository.com/artifact/cloud.localstack/localstack-utils) Maven project, as an alternative to creating the AWS SDK clients with endpoint overrides manually. @@ -211,7 +226,8 @@ You can also use the pre-defined clients from the [localstack-utils](https://mvn ## Creating a Notification Service -To get started with creating a Notification Service, we would need to create a `Notification` class to define the structure of the notification that we would be sending to the SQS queue. We will create a `Notification` class in the `Notification.java` file: +To get started with creating a Notification Service, we would need to create a `Notification` class to define the structure of the notification that we would be sending to the SQS queue. +We will create a `Notification` class in the `Notification.java` file: ```java package com.example; @@ -247,7 +263,9 @@ public class Notification { } ``` -In the above code, we have defined three instance variables: `address`, `subject`, and `body`. We have also defined the getters and setters for the instance variables. Let's now create a `@Component` class to listen to a queue, receive and transform the notifications into emails, and send the emails transactionally: +In the above code, we have defined three instance variables: `address`, `subject`, and `body`. +We have also defined the getters and setters for the instance variables. +Let's now create a `@Component` class to listen to a queue, receive and transform the notifications into emails, and send the emails transactionally: ```java package com.example; @@ -480,7 +498,9 @@ You can now build the application using the following command: $ mvn clean install {{< / command >}} -If the build is successful, you will notice a `BUILD SUCCESS` message. Now that we have the application ready, let us setup the infrastructure using CloudFormation. Create a new file in ``src/main/resources` called `email-infra.yml` and add the following content: +If the build is successful, you will notice a `BUILD SUCCESS` message. +Now that we have the application ready, let us setup the infrastructure using CloudFormation. 
+Create a new file in ``src/main/resources` called `email-infra.yml` and add the following content: ```yaml AWSTemplateFormatVersion: 2010-09-09 @@ -502,11 +522,13 @@ Resources: TopicArn: !GetAtt EmailTopic.TopicArn ``` -In the above code, we have created a queue called `email-notification-queue` and a topic called `email-notifications`. We have also created a subscription between the queue and the topic, allowing any message published to the topic to be sent to the queue. +In the above code, we have created a queue called `email-notification-queue` and a topic called `email-notifications`. +We have also created a subscription between the queue and the topic, allowing any message published to the topic to be sent to the queue. ## Creating the infrastructure -Now that the initial coding is done, we can give it a try. Let's start LocalStack using a custom `docker-compose` setup, which includes MailHog to capture the emails sent by SES: +Now that the initial coding is done, we can give it a try. +Let's start LocalStack using a custom `docker-compose` setup, which includes MailHog to capture the emails sent by SES: ```yaml version: "3.8" @@ -534,7 +556,8 @@ services: - "8025:8025" ``` -The above `docker-compose` file will start LocalStack and pull the MailHog image to start the SMTP server (if it doesn't exist yet!) on port `8025`. You can start LocalStack using the following command: +The above `docker-compose` file will start LocalStack and pull the MailHog image to start the SMTP server (if it doesn't exist yet!) on port `8025`. +You can start LocalStack using the following command: {{< command >}} $ LOCALSTACK_AUTH_TOKEN= docker-compose up -d @@ -548,7 +571,8 @@ $ awslocal cloudformation deploy \ --stack-name email-infra {{< / command >}} -With our infrastructure ready, we can now start the Spring Boot application. We will set dummy AWS access credentials as environment variables in the command: +With our infrastructure ready, we can now start the Spring Boot application. +We will set dummy AWS access credentials as environment variables in the command: {{< command >}} $ AWS_ACCESS_KEY_ID=test AWS_SECRET_ACCESS_KEY=test mvn spring-boot:run @@ -570,7 +594,8 @@ $ awslocal sns publish \ --message '{"subject":"hello", "address": "alice@example.com", "body": "hello world"}' {{< / command >}} -In the above command, we have published a message to the topic `email-notifications` with a generic message body. The output of the command should look like this: +In the above command, we have published a message to the topic `email-notifications` with a generic message body. +The output of the command should look like this: ```json { @@ -648,4 +673,5 @@ In this tutorial, we have demonstrated, how you can: - Use CloudFormation to provision infrastructure for SNS & SQS subscriptions on LocalStack - Use the AWS Java SDK and Spring Boot to build an application that sends SQS and SES messages. -Using [LocalStack Pro](https://app.localstack.cloud), you can use our Web user interface to view the email messages sent by SES. The code for this tutorial can be found in our [LocalStack Pro samples over GitHub](https://github.com/localstack/localstack-pro-samples/tree/master/java-notification-app). +Using [LocalStack Pro](https://app.localstack.cloud), you can use our Web user interface to view the email messages sent by SES. +The code for this tutorial can be found in our [LocalStack Pro samples over GitHub](https://github.com/localstack/localstack-pro-samples/tree/master/java-notification-app). 
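+
+As an alternative to the web interfaces, you can also query MailHog's HTTP API to inspect the captured emails; the example below assumes the default `8025` port mapping from the `docker-compose` file above:
+
+{{< command >}}
+$ curl -s http://localhost:8025/api/v2/messages
+{{< /command >}}
+
+The response is a JSON document listing the captured messages, including their senders, recipients, and bodies.
+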
diff --git a/content/en/tutorials/lambda-ecr-container-images/index.md b/content/en/tutorials/lambda-ecr-container-images/index.md index ebb5e1c863..66a1f2da59 100644 --- a/content/en/tutorials/lambda-ecr-container-images/index.md +++ b/content/en/tutorials/lambda-ecr-container-images/index.md @@ -23,11 +23,19 @@ pro: true leadimage: "lambda-ecr-container-images-featured-image.png" --- -[Lambda](https://aws.amazon.com/lambda/) is a powerful serverless compute system that enables you to break down your application into smaller, independent functions. These functions can be deployed as individual units within the AWS ecosystem. Lambda offers seamless integration with various AWS services and supports multiple programming languages for different runtime environments. To deploy Lambda functions programmatically, you have two options: [uploading a ZIP file containing your code and dependencies](https://docs.aws.amazon.com/lambda/latest/dg/configuration-function-zip.html) or [packaging your code in a container image](https://docs.aws.amazon.com/lambda/latest/dg/gettingstarted-images.html) and deploying it through Elastic Container Registry (ECR). +[Lambda](https://aws.amazon.com/lambda/) is a powerful serverless compute system that enables you to break down your application into smaller, independent functions. +These functions can be deployed as individual units within the AWS ecosystem. +Lambda offers seamless integration with various AWS services and supports multiple programming languages for different runtime environments. +To deploy Lambda functions programmatically, you have two options: [uploading a ZIP file containing your code and dependencies](https://docs.aws.amazon.com/lambda/latest/dg/configuration-function-zip.html) or [packaging your code in a container image](https://docs.aws.amazon.com/lambda/latest/dg/gettingstarted-images.html) and deploying it through Elastic Container Registry (ECR). -[ECR](https://aws.amazon.com/ecr/) is an AWS-managed registry that facilitates the storage and distribution of containerized software. With ECR, you can effectively manage your image lifecycles, versioning, and tagging, separate from your application. It seamlessly integrates with other AWS services like ECS, EKS, and Lambda, enabling you to deploy your container images effortlessly. Creating container images for your Lambda functions involves using Docker and implementing the Lambda Runtime API according to the Open Container Initiative (OCI) specifications. +[ECR](https://aws.amazon.com/ecr/) is an AWS-managed registry that facilitates the storage and distribution of containerized software. +With ECR, you can effectively manage your image lifecycles, versioning, and tagging, separate from your application. +It seamlessly integrates with other AWS services like ECS, EKS, and Lambda, enabling you to deploy your container images effortlessly. +Creating container images for your Lambda functions involves using Docker and implementing the Lambda Runtime API according to the Open Container Initiative (OCI) specifications. -[LocalStack Pro](https://localstack.cloud) extends support for Lambda functions using container images through ECR. It enables you to deploy your Lambda functions locally using LocalStack. In this tutorial, we will explore creating a Lambda function using a container image and deploying it locally with the help of LocalStack. +[LocalStack Pro](https://localstack.cloud) extends support for Lambda functions using container images through ECR. 
+It enables you to deploy your Lambda functions locally using LocalStack. +In this tutorial, we will explore creating a Lambda function using a container image and deploying it locally with the help of LocalStack. ## Prerequisites @@ -41,14 +49,16 @@ Before diving into this tutorial, make sure you have the following prerequisites ## Creating a Lambda function -To package and deploy a Lambda function as a container image, we'll create a Lambda function containing our code and a Dockerfile. Create a new directory for your lambda function and navigate to it: +To package and deploy a Lambda function as a container image, we'll create a Lambda function containing our code and a Dockerfile. +Create a new directory for your lambda function and navigate to it: {{< command >}} $ mkdir -p lambda-container-image $ cd lambda-container-image {{< / command >}} -Initialize the directory by creating two files: `handler.py` and `Dockerfile`. Use the following commands to create the files: +Initialize the directory by creating two files: `handler.py` and `Dockerfile`. +Use the following commands to create the files: {{< command >}} $ touch handler.py Dockerfile @@ -61,13 +71,18 @@ def handler(event, context): print('Hello from LocalStack Lambda container image!') ``` -In the code above, the `handler` function is executed by the Lambda service whenever a trigger event occurs. It serves as the entry point for the Lambda function within the runtime environment and accepts `event` and `context` as parameters, providing information about the event and invocation properties, respectively. +In the code above, the `handler` function is executed by the Lambda service whenever a trigger event occurs. +It serves as the entry point for the Lambda function within the runtime environment and accepts `event` and `context` as parameters, providing information about the event and invocation properties, respectively. -Following these steps, you have created the foundation for your Lambda function and defined its behaviour using Python code. In the following sections, we will package this code and its dependencies into a container image using the `Dockerfile`. +Following these steps, you have created the foundation for your Lambda function and defined its behaviour using Python code. +In the following sections, we will package this code and its dependencies into a container image using the `Dockerfile`. ## Building the image -To package our Lambda function as a container image, we must create a Dockerfile containing the necessary instructions for building the image. Open the Dockerfile and add the following content. This Dockerfile uses the `python:3.8` base image provided by AWS for Lambda and copies the `handler.py` file into the image. It also specifies the function handler as `handler.handler` to ensure the Lambda runtime can locate it where the Lambda handler is available. +To package our Lambda function as a container image, we must create a Dockerfile containing the necessary instructions for building the image. +Open the Dockerfile and add the following content. +This Dockerfile uses the `python:3.8` base image provided by AWS for Lambda and copies the `handler.py` file into the image. +It also specifies the function handler as `handler.handler` to ensure the Lambda runtime can locate it where the Lambda handler is available. 
```Dockerfile FROM public.ecr.aws/lambda/python:3.8 @@ -78,7 +93,9 @@ CMD [ "handler.handler" ] ``` {{< callout "note">}} -If your Lambda function has additional dependencies, create a file named `requirements.txt` in the same directory as the Dockerfile. List the required libraries in this file. You can install these dependencies in the `Dockerfile` under the `${LAMBDA_TASK_ROOT}` directory. +If your Lambda function has additional dependencies, create a file named `requirements.txt` in the same directory as the Dockerfile. +List the required libraries in this file. +You can install these dependencies in the `Dockerfile` under the `${LAMBDA_TASK_ROOT}` directory. {{< /callout >}} With the Dockerfile prepared, you can now build the container image using the following command, to check if everything works as intended: @@ -87,17 +104,22 @@ With the Dockerfile prepared, you can now build the container image using the fo $ docker build . {{< / command >}} -By executing these steps, you have defined the Dockerfile that instructs Docker on how to build the container image for your Lambda function. The resulting image will contain your function code and any specified dependencies. +By executing these steps, you have defined the Dockerfile that instructs Docker on how to build the container image for your Lambda function. +The resulting image will contain your function code and any specified dependencies. ## Publishing the image to ECR -Now that the initial setup is complete let's explore how to leverage LocalStack's AWS emulation by pushing our image to ECR and deploying the Lambda container image. Start LocalStack by executing the following command. Make sure to replace `` with your actual auth token: +Now that the initial setup is complete let's explore how to leverage LocalStack's AWS emulation by pushing our image to ECR and deploying the Lambda container image. +Start LocalStack by executing the following command. +Make sure to replace `` with your actual auth token: {{< command >}} $ LOCALSTACK_AUTH_TOKEN= DEBUG=1 localstack start -d {{< / command >}} -Once the LocalStack container is running, we can create a new ECR repository to store our container image. Use the `awslocal` CLI to achieve this. Run the following command to create the repository, replacing `localstack-lambda-container-image` with the desired name for your repository: +Once the LocalStack container is running, we can create a new ECR repository to store our container image. +Use the `awslocal` CLI to achieve this. +Run the following command to create the repository, replacing `localstack-lambda-container-image` with the desired name for your repository: {{< command >}} $ awslocal ecr create-repository --repository-name localstack-lambda-container-image @@ -120,17 +142,20 @@ $ awslocal ecr create-repository --repository-name localstack-lambda-container-i {{< / command >}} {{< callout "note">}} -To further customize the ECR repository, you can pass additional flags to the `create-repository` command. For more details on the available options, refer to the [AWS CLI documentation](https://docs.aws.amazon.com/cli/latest/reference/ecr/create-repository.html). +To further customize the ECR repository, you can pass additional flags to the `create-repository` command. +For more details on the available options, refer to the [AWS CLI documentation](https://docs.aws.amazon.com/cli/latest/reference/ecr/create-repository.html). {{< /callout >}} -Next, build the image and push it to the ECR repository. 
Execute the following commands: +Next, build the image and push it to the ECR repository. +Execute the following commands: {{< command >}} $ docker build -t localhost:4510/localstack-lambda-container-image . $ docker push localhost:4510/localstack-lambda-container-image {{< / command >}} -In the above commands, we specify the `repositoryUri` as the image name to push the image to the ECR repository. After executing these commands, you can verify that the image is successfully pushed to the repository by using the `describe-images` command: +In the above commands, we specify the `repositoryUri` as the image name to push the image to the ECR repository. +After executing these commands, you can verify that the image is successfully pushed to the repository by using the `describe-images` command: {{< command >}} $ awslocal ecr describe-images --repository-name localstack-lambda-container-image @@ -152,14 +177,17 @@ $ awslocal ecr describe-images --repository-name localstack-lambda-container-ima } {{< / command >}} -By running this command, you can confirm that the image is now in the ECR repository. It ensures it is ready for deployment as a Lambda function using LocalStack's AWS emulation capabilities. +By running this command, you can confirm that the image is now in the ECR repository. +It ensures it is ready for deployment as a Lambda function using LocalStack's AWS emulation capabilities. ## Deploying the Lambda function -To deploy the container image as a Lambda function, we will create a new Lambda function using the `create-function` command. Run the following command to create the function: +To deploy the container image as a Lambda function, we will create a new Lambda function using the `create-function` command. +Run the following command to create the function: {{< callout "note">}} -Before creating the lambda function, please double check under which architecture you have built your image. If your image is built as arm64, you need to specify the lambda architecture when deploying or set `LAMBDA_IGNORE_ARCHTIECTURE=1` when starting LocalStack. +Before creating the lambda function, please double check under which architecture you have built your image. +If your image is built as arm64, you need to specify the lambda architecture when deploying or set `LAMBDA_IGNORE_ARCHTIECTURE=1` when starting LocalStack. More information can be found [in our documentation regarding ARM support.]({{< ref "arm64-support" >}}) {{< /callout >}} @@ -203,13 +231,18 @@ $ awslocal lambda create-function \ } {{< / command >}} -The command provided includes several flags to create the Lambda function. Here's an explanation of each flag: +The command provided includes several flags to create the Lambda function. +Here's an explanation of each flag: -- `ImageUri`: Specifies the image URI of the container image you pushed to the ECR repository (`localhost.localstack.cloud:4510/localstack-lambda-container-image` in this case. Use the return `repositoryUri` from the create-repository command). +- `ImageUri`: Specifies the image URI of the container image you pushed to the ECR repository (`localhost.localstack.cloud:4510/localstack-lambda-container-image` in this case. + Use the return `repositoryUri` from the create-repository command). - `package-type`: Sets the package type to Image to indicate that the Lambda function will be created using a container image. - `function-name`: Specifies the name of the Lambda function you want to create. -- `runtime`: Defines the runtime environment for the Lambda function. 
In this case, it's specified as provided, indicating that the container image will provide the runtime. -- `role`: Sets the IAM role ARN that the Lambda function should assume. In the example, a mock role ARN is used. For an actual role, please refer to the [IAM documentation]({{< ref "user-guide/aws/iam" >}}). +- `runtime`: Defines the runtime environment for the Lambda function. + In this case, it's specified as provided, indicating that the container image will provide the runtime. +- `role`: Sets the IAM role ARN that the Lambda function should assume. + In the example, a mock role ARN is used. + For an actual role, please refer to the [IAM documentation]({{< ref "user-guide/aws/iam" >}}). To invoke the Lambda function, you can use the `invoke` command: @@ -221,7 +254,9 @@ $ awslocal lambda invoke --function-name localstack-lambda-container-image /tmp/ } {{< / command >}} -The command above will execute the Lambda function locally within the LocalStack environment. The response will include the StatusCode and ExecutedVersion. You can find the logs of the Lambda invocation in the Lambda container output: +The command above will execute the Lambda function locally within the LocalStack environment. +The response will include the StatusCode and ExecutedVersion. +You can find the logs of the Lambda invocation in the Lambda container output: {{< command >}} Hello from LocalStack Lambda container image! @@ -229,6 +264,9 @@ Hello from LocalStack Lambda container image! ## Conclusion -In conclusion, the Lambda container image support enables you to use Docker to package your custom code and dependencies for Lambda functions. With the help of LocalStack, you can seamlessly package, deploy, and invoke Lambda functions locally. It empowers you to develop, debug, and test your Lambda functions with a wide range of AWS services. For more advanced usage patterns, you can explore features like [Lambda Hot Reloading]({{< ref "hot-reloading" >}}) and [Lambda Debugging]({{< ref "debugging" >}}). +In conclusion, the Lambda container image support enables you to use Docker to package your custom code and dependencies for Lambda functions. +With the help of LocalStack, you can seamlessly package, deploy, and invoke Lambda functions locally. +It empowers you to develop, debug, and test your Lambda functions with a wide range of AWS services. +For more advanced usage patterns, you can explore features like [Lambda Hot Reloading]({{< ref "hot-reloading" >}}) and [Lambda Debugging]({{< ref "debugging" >}}). To further explore and experiment with the concepts covered in this tutorial, you can access the code and accompanying `Makefile` on our [LocalStack Pro samples over GitHub](https://github.com/localstack/localstack-pro-samples/tree/master/lambda-container-image). 
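+
+As a final pointer, and following up on the dependency note earlier in this tutorial, a minimal sketch of a `Dockerfile` that bundles extra packages from a `requirements.txt` file could look like this (the install step shown is one common variant, not the only one):
+
+```Dockerfile
+FROM public.ecr.aws/lambda/python:3.8
+
+# Install the extra libraries listed in requirements.txt into the task root
+COPY requirements.txt .
+RUN pip install -r requirements.txt --target "${LAMBDA_TASK_ROOT}"
+
+# Copy the function code and point the runtime at the handler
+COPY handler.py ${LAMBDA_TASK_ROOT}
+CMD [ "handler.handler" ]
+```
+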
diff --git a/content/en/tutorials/replicate-aws-resources-localstack-extension/index.md b/content/en/tutorials/replicate-aws-resources-localstack-extension/index.md index eb94c593a3..aedbae5efe 100644 --- a/content/en/tutorials/replicate-aws-resources-localstack-extension/index.md +++ b/content/en/tutorials/replicate-aws-resources-localstack-extension/index.md @@ -34,20 +34,20 @@ In this tutorial, you will learn how to install the AWS Replicator extension and ## Prerequisites -- [LocalStack CLI](https://docs.localstack.cloud/getting-started/installation/#localstack-cli) with [`LOCALSTACK_AUTH_TOKEN`](https://docs.localstack.cloud/getting-started/auth-token/) -- [Docker](https://docs.localstack.cloud/getting-started/auth-token/) -- [AWS CLI](https://docs.aws.amazon.com/cli/v1/userguide/cli-chap-install.html) with [`awslocal` wrapper](https://github.com/localstack/awscli-local) -- [LocalStack Web Application account](https://app.localstack.cloud/sign-up) -- [AWS Account](https://aws.amazon.com/) with an [`AWS_ACCESS_KEY_ID` & `AWS_SECRET_ACCESS_KEY`](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html#Using_CreateAccessKey) +- [LocalStack CLI](https://docs.localstack.cloud/getting-started/installation/#localstack-cli) with [`LOCALSTACK_AUTH_TOKEN`](https://docs.localstack.cloud/getting-started/auth-token/) +- [Docker](https://docs.localstack.cloud/getting-started/auth-token/) +- [AWS CLI](https://docs.aws.amazon.com/cli/v1/userguide/cli-chap-install.html) with [`awslocal` wrapper](https://github.com/localstack/awscli-local) +- [LocalStack Web Application account](https://app.localstack.cloud/sign-up) +- [AWS Account](https://aws.amazon.com/) with an [`AWS_ACCESS_KEY_ID` & `AWS_SECRET_ACCESS_KEY`](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html#Using_CreateAccessKey) ## Install the AWS Replicator extension To install the AWS Replicator Extension, follow these steps: -1. Launch your LocalStack container using the `localstack` CLI, ensuring that `LOCALSTACK_AUTH_TOKEN` is available in the environment. -2. Visit the [Extensions library](https://app.localstack.cloud/extensions/library) page on the LocalStack Web Application. +1. Launch your LocalStack container using the `localstack` CLI, ensuring that `LOCALSTACK_AUTH_TOKEN` is available in the environment. +2. Visit the [Extensions library](https://app.localstack.cloud/extensions/library) page on the LocalStack Web Application. Extensions Library -3. Scroll down to find the **AWS replicator** card, then click on the **Install on Instance** button. +3. Scroll down to find the **AWS replicator** card, then click on the **Install on Instance** button. Installing AWS Replicator extension Once the installation is complete, you will notice that your LocalStack container has restarted with the AWS Replicator extension successfully installed. @@ -71,11 +71,12 @@ After verifying the successful installation, you can shut down the LocalStack co In this tutorial, you will set up a basic example consisting of: -- A Lambda function named `func1` that prints a simple statement when invoked. -- An SQS queue named `test-queue` where messages are sent. -- An event source mapping that triggers the Lambda function when a message is sent to the SQS queue. +- A Lambda function named `func1` that prints a simple statement when invoked. +- An SQS queue named `test-queue` where messages are sent. +- An event source mapping that triggers the Lambda function when a message is sent to the SQS queue. 
-The basic architecture for the scenario is outlined in the figure below. It shows the relationship between the resources deployed in the LocalStack container, the LocalStack AWS Proxy, and the remote AWS account. +The basic architecture for the scenario is outlined in the figure below. +It shows the relationship between the resources deployed in the LocalStack container, the LocalStack AWS Proxy, and the remote AWS account. AWS Replicator sample use case @@ -93,12 +94,12 @@ localstack start In the above command: -- The `EXTRA_CORS_ALLOWED_ORIGINS` variable allows the AWS Replicator extension's web interface to connect with the LocalStack container. -- The `DEBUG` variable enables verbose logging allowing you to see the printed statements from the Lambda function. +- The `EXTRA_CORS_ALLOWED_ORIGINS` variable allows the AWS Replicator extension's web interface to connect with the LocalStack container. +- The `DEBUG` variable enables verbose logging allowing you to see the printed statements from the Lambda function. Next, create a file named `testlambda.py` and add the following Python code to it: -```python +```python def handler(*args, **kwargs): print("Debug output from Lambda function") ``` @@ -117,7 +118,7 @@ $ awslocal lambda create-function \ Once the Lambda function is successfully created, you will see output similar to this: -```bash +```bash { "FunctionName": "func1", "FunctionArn": "arn:aws:lambda:us-east-1:000000000000:function:func1", @@ -139,13 +140,13 @@ $ awslocal sqs create-queue --queue-name test-queue The output will display the Queue URL: -```bash +```bash { "QueueUrl": "http://sqs.us-east-1.localhost.localstack.cloud:4566/000000000000/test-queue" } ``` -Additionally, you can create the remote SQS queue on the real AWS cloud to test invocation after starting the AWS Replicator extension. +Additionally, you can create the remote SQS queue on the real AWS cloud to test invocation after starting the AWS Replicator extension. Use the following command to set up the SQS queue on AWS: @@ -153,9 +154,10 @@ Use the following command to set up the SQS queue on AWS: $ aws sqs create-queue --queue-name test-queue {{< /command >}} -### Invoke the Lambda function +### Invoke the Lambda function -Before invoking, set up an event source mapping between the SQS queue and the Lambda function. Configure the queue for Lambda using the following command: +Before invoking, set up an event source mapping between the SQS queue and the Lambda function. +Configure the queue for Lambda using the following command: {{< command >}} $ awslocal lambda create-event-source-mapping \ @@ -166,7 +168,7 @@ $ awslocal lambda create-event-source-mapping \ The following output would be retrieved: -```bash +```bash { ... "MaximumBatchingWindowInSeconds": 0, @@ -187,7 +189,7 @@ awslocal sqs send-message \ Upon successful execution, you will receive a message ID and MD5 hash of the message body. -```bash +```bash { "MD5OfMessageBody": "99914b932bd37a50b983c5e7c90ae93b", "MessageId": "64e8297c-f0b2-4b68-a482-6cd3317f5096" @@ -206,28 +208,32 @@ In the LocalStack logs, you will see confirmation of the Lambda function invocat To run the AWS Replicator extension: -- Access [`https://aws-replicator.localhost.localstack.cloud:4566`](https://aws-replicator.localhost.localstack.cloud:4566/) via your web browser. +- Access [`https://aws-replicator.localhost.localstack.cloud:4566`](https://aws-replicator.localhost.localstack.cloud:4566/) via your web browser. 
AWS Replicator extension -- Provide your AWS Credentials: `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`, and optionally `AWS_SESSION_TOKEN`. -- Add a new YAML-based Proxy configuration to proxy requests for specific resources to AWS. For this scenario, configure it to proxy requests for the SQS queue created earlier. - ```yaml +- Provide your AWS Credentials: `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`, and optionally `AWS_SESSION_TOKEN`. +- Add a new YAML-based Proxy configuration to proxy requests for specific resources to AWS. + For this scenario, configure it to proxy requests for the SQS queue created earlier. + + ```yaml services: sqs: resources: - '.*:test-queue' ``` -- Save the configuration to enable the AWS Replicator extension. Once enabled, you will see the proxy status as **enabled**. + +- Save the configuration to enable the AWS Replicator extension. + Once enabled, you will see the proxy status as **enabled**. Enabled AWS Replicator extension To invoke the local Lambda function with the remote SQS queue: -- Navigate to your AWS Management Console and access **Simple Queue Service**. -- Select the **test-queue** queue. -- Send a message with a body (e.g., `Hello LocalStack`) by clicking **Send Message**. +- Navigate to your AWS Management Console and access **Simple Queue Service**. +- Select the **test-queue** queue. +- Send a message with a body (e.g., `Hello LocalStack`) by clicking **Send Message**. You will observe the local Lambda function being invoked once again, with corresponding debug messages visible in the logs. -```bash +```bash 2024-03-26T07:45:16.524 DEBUG --- [db58fad602e5] l.s.l.i.version_manager : [func1-ed938bb0-e1ee-41fb-a844-db58fad602e5] START RequestId: ed938bb0-e1ee-41fb-a844-db58fad602e5 Version: $LATEST 2024-03-26T07:45:16.524 DEBUG --- [db58fad602e5] l.s.l.i.version_manager : [func1-ed938bb0-e1ee-41fb-a844-db58fad602e5] Debug output from Lambda function 2024-03-26T07:45:16.524 DEBUG --- [db58fad602e5] l.s.l.i.version_manager : [func1-ed938bb0-e1ee-41fb-a844-db58fad602e5] END RequestId: ed938bb0-e1ee-41fb-a844-db58fad602e5 @@ -239,12 +245,12 @@ Upon completion, you can click **Disable** on the AWS Replicator extension web i Additionally, you can delete the remote SQS queue to avoid AWS billing for long-running resources. To remove local resources, stop the LocalStack container to clear the local Lambda function and SQS queue. -## Conclusion +## Conclusion In this tutorial, you've discovered how the AWS Replicator extension bridges the gap between local and remote cloud resources by mirroring resources from real AWS accounts into your LocalStack instance. You can explore additional use-cases with the AWS Replicator extension, such as: -- Developing a local Lambda function that interacts with a remote DynamoDB table -- Executing a local Athena SQL query in LocalStack, accessing files in a real S3 bucket on AWS -- Testing a local Terraform script with SSM parameters from a real AWS account -- And many more! +- Developing a local Lambda function that interacts with a remote DynamoDB table +- Executing a local Athena SQL query in LocalStack, accessing files in a real S3 bucket on AWS +- Testing a local Terraform script with SSM parameters from a real AWS account +- And many more! 
diff --git a/content/en/tutorials/reproducible-machine-learning-cloud-pods/index.md b/content/en/tutorials/reproducible-machine-learning-cloud-pods/index.md index 6191f1ed0d..6ea7b97197 100644 --- a/content/en/tutorials/reproducible-machine-learning-cloud-pods/index.md +++ b/content/en/tutorials/reproducible-machine-learning-cloud-pods/index.md @@ -24,11 +24,16 @@ pro: true leadimage: "reproducible-machine-learning-cloud-pods-featured-image.png" --- -[LocalStack Cloud Pods]({{< ref "user-guide/state-management/cloud-pods" >}}) enable you to create persistent state snapshots of your LocalStack instance, which can then be versioned, shared, and restored. It allows next-generation state management and team collaboration for your local cloud development environment, which you can utilize to create persistent shareable cloud sandboxes. Cloud Pods works directly with the [LocalStack CLI]({{< ref "getting-started/installation#localstack-cli" >}}) to save, merge, and restore snapshots of your LocalStack state. You can always tear down your LocalStack instance and restore it from a snapshot at any point in time. +[LocalStack Cloud Pods]({{< ref "user-guide/state-management/cloud-pods" >}}) enable you to create persistent state snapshots of your LocalStack instance, which can then be versioned, shared, and restored. +It allows next-generation state management and team collaboration for your local cloud development environment, which you can utilize to create persistent shareable cloud sandboxes. +Cloud Pods works directly with the [LocalStack CLI]({{< ref "getting-started/installation#localstack-cli" >}}) to save, merge, and restore snapshots of your LocalStack state. +You can always tear down your LocalStack instance and restore it from a snapshot at any point in time. -Cloud Pods is supported in [LocalStack Team](https://app.localstack.cloud/). With LocalStack Team, you can utilize the Cloud Pods CLI that allows you to inspect your Cloud Pods, version them using tags, and push them to the LocalStack platform for storage and collaboration. +Cloud Pods is supported in [LocalStack Team](https://app.localstack.cloud/). +With LocalStack Team, you can utilize the Cloud Pods CLI that allows you to inspect your Cloud Pods, version them using tags, and push them to the LocalStack platform for storage and collaboration. -In this tutorial, we will use [LocalStack Pro]({{< ref "getting-started/auth-token" >}}) to train a simple machine-learning model that recognizes handwritten digits on an image. We will rely on Cloud Pods to create a reproducible sample by using: +In this tutorial, we will use [LocalStack Pro]({{< ref "getting-started/auth-token" >}}) to train a simple machine-learning model that recognizes handwritten digits on an image. +We will rely on Cloud Pods to create a reproducible sample by using: - S3 to create a bucket to host our training data - Lambda to create a function to train and save the model to an S3 bucket @@ -47,11 +52,16 @@ For this tutorial, you will need the following: - [awslocal]({{< ref "aws-cli#localstack-aws-cli-awslocal" >}}) - [Optical recognition of handwritten digits dataset](https://archive.ics.uci.edu/ml/datasets/Optical+Recognition+of+Handwritten+Digits) -If you don't have a subscription to LocalStack Pro, you can request a trial license upon sign-up. For this tutorial to work, you must have the LocalStack CLI installed, which must be version 1.3 or higher. The Cloud Pods CLI is shipped with the LocalStack CLI, so you don't need to install it separately. 
+If you don't have a subscription to LocalStack Pro, you can request a trial license upon sign-up. +For this tutorial to work, you must have the LocalStack CLI installed, which must be version 1.3 or higher. +The Cloud Pods CLI is shipped with the LocalStack CLI, so you don't need to install it separately. ## Training the machine learning model -We will use the [Optical Recognition of Handwritten Digits Data Set](https://archive.ics.uci.edu/ml/datasets/Optical+Recognition+of+Handwritten+Digits) to train a simple machine-learning model to recognise handwritten texts. It contains images of individual digits, represented as arrays of pixel values, along with their corresponding labels, indicating the correct digit that each image represents. You can download the dataset from UCI's Machine Learning Repository (linked above) or from our [samples repository](https://github.com/localstack/localstack-pro-samples/tree/master/reproducible-ml). To train our model, we will upload our dataset on a local S3 bucket and use a Lambda function to train the model. +We will use the [Optical Recognition of Handwritten Digits Data Set](https://archive.ics.uci.edu/ml/datasets/Optical+Recognition+of+Handwritten+Digits) to train a simple machine-learning model to recognise handwritten texts. +It contains images of individual digits, represented as arrays of pixel values, along with their corresponding labels, indicating the correct digit that each image represents. +You can download the dataset from UCI's Machine Learning Repository (linked above) or from our [samples repository](https://github.com/localstack/localstack-pro-samples/tree/master/reproducible-ml). +To train our model, we will upload our dataset on a local S3 bucket and use a Lambda function to train the model. Create a new file named `train.py` and import the required libraries: @@ -66,7 +76,9 @@ from joblib import dump, load import io ``` -We will now create a separate function named `load_digits` to load the dataset from the S3 bucket and return it as a `Bunch` object. The `Bunch` object is a container object that allows us to access the dataset's attributes as dictionary keys. It is similar to a Python dictionary but provides attribute-style access and can be used to store the dataset and its attributes. +We will now create a separate function named `load_digits` to load the dataset from the S3 bucket and return it as a `Bunch` object. +The `Bunch` object is a container object that allows us to access the dataset's attributes as dictionary keys. +It is similar to a Python dictionary but provides attribute-style access and can be used to store the dataset and its attributes. ```python def load_digits(*, n_class=10, return_X_y=False, as_frame=False): @@ -106,7 +118,10 @@ def load_digits(*, n_class=10, return_X_y=False, as_frame=False): images=images) ``` -The above code uses the `boto3` library to download the data file from an S3 bucket. The file is then loaded into a NumPy array using the `numpy.loadtxt` function, and the target values (i.e. the labels corresponding to each image) are extracted from the last column of the array. The images are then reshaped into 2-dimensional arrays, and the function has been configured to return only a subset of the available classes by filtering the target values. Finally, the function returns an object containing the data, target values, and metadata. +The above code uses the `boto3` library to download the data file from an S3 bucket. 
+The file is then loaded into a NumPy array using the `numpy.loadtxt` function, and the target values (i.e. the labels corresponding to each image) are extracted from the last column of the array. +The images are then reshaped into 2-dimensional arrays, and the function has been configured to return only a subset of the available classes by filtering the target values. +Finally, the function returns an object containing the data, target values, and metadata. Let us now define a `handler` function that would be executed by the Lambda every time a trigger event occurs. In this case, we would like to use the above function to load the dataset and train a model using the [Support Vector Machine (SVM)](https://scikit-learn.org/stable/modules/svm.html) algorithm. @@ -143,13 +158,16 @@ def handler(event, context): s3_client.put_object(Body=f, Bucket="pods-test", Key="test-set.npy") ``` -First, we loaded the images and flattened them into 1-dimensional arrays. Then, we created a training and a test set using the `train_test_split` function from the `sklearn.model_selection` module. +First, we loaded the images and flattened them into 1-dimensional arrays. +Then, we created a training and a test set using the `train_test_split` function from the `sklearn.model_selection` module. -We trained an SVM classifier on the training set using the `fit` method. Finally, we uploaded the trained model, together with the test set, to an S3 bucket for later usage. +We trained an SVM classifier on the training set using the `fit` method. +Finally, we uploaded the trained model, together with the test set, to an S3 bucket for later usage. ## Perform predictions with the model -Now, we will create a new file called `infer.py` which will contain a second handler function. This function will be used to perform predictions on new data with the model we trained previously. +Now, we will create a new file called `infer.py` which will contain a second handler function. +This function will be used to perform predictions on new data with the model we trained previously. ```python def handler(event, context): @@ -167,17 +185,20 @@ def handler(event, context): print("--> prediction result:", predicted) ``` -To perform inference on the test set, we will download both the trained SVN model and the test set that we previously uploaded to the S3 bucket. Using these resources, we will predict the values of the digits in the test set. +To perform inference on the test set, we will download both the trained SVN model and the test set that we previously uploaded to the S3 bucket. +Using these resources, we will predict the values of the digits in the test set. ## Deploying the Lambda functions -Before creating our Lambda functions, let us start LocalStack to use emulated S3 and Lambda services to deploy and train our model. Let's start LocalStack: +Before creating our Lambda functions, let us start LocalStack to use emulated S3 and Lambda services to deploy and train our model. +Let's start LocalStack: {{< command >}} $ DEBUG=1 LOCALSTACK_AUTH_TOKEN= localstack start -d {{< / command >}} -We have specified `DEBUG=1` to get the printed LocalStack logs from our Lambda invocation in the console. We can now create an S3 bucket to upload our Lambda functions and the dataset: +We have specified `DEBUG=1` to get the printed LocalStack logs from our Lambda invocation in the console. 
+We can now create an S3 bucket to upload our Lambda functions and the dataset: {{< command >}} $ zip lambda.zip train.py @@ -187,7 +208,10 @@ $ awslocal s3 cp lambda.zip s3://reproducible-ml/lambda.zip $ awslocal s3 cp digits.csv.gz s3://reproducible-ml/digits.csv.gz {{< / command >}} -In the above commands, we first create two zip files for our Lambda functions: lambda.zip and infer.zip. These zip files contain the code for training the machine learning model and do predictions with it, respectively. Next, we create an S3 bucket called `reproducible-ml` and upload the zip files and the dataset to it. Finally, we use the `awslocal` CLI to create the two Lambda functions +In the above commands, we first create two zip files for our Lambda functions: lambda.zip and infer.zip. +These zip files contain the code for training the machine learning model and do predictions with it, respectively. +Next, we create an S3 bucket called `reproducible-ml` and upload the zip files and the dataset to it. +Finally, we use the `awslocal` CLI to create the two Lambda functions {{< command >}} $ awslocal lambda create-function --function-name ml-train \ @@ -207,7 +231,9 @@ $ awslocal lambda create-function --function-name ml-predict \ --layers arn:aws:lambda:us-east-1:446751924810:layer:python-3-8-scikit-learn-0-23-1:2 {{< / command >}} -For each function, we provide the function name, runtime (`python3.8`), handler function (`train.handler` and `infer.handler`, respectively), and the location of the `zip` files in the S3 bucket. We have also specified the `python-3-8-scikit-learn-0-23-1` layer to be used by the Lambda function. This layer includes the scikit-learn library and its dependencies. +For each function, we provide the function name, runtime (`python3.8`), handler function (`train.handler` and `infer.handler`, respectively), and the location of the `zip` files in the S3 bucket. +We have also specified the `python-3-8-scikit-learn-0-23-1` layer to be used by the Lambda function. +This layer includes the scikit-learn library and its dependencies. We can now invoke the first Lambda function using the `awslocal` CLI: @@ -215,7 +241,8 @@ We can now invoke the first Lambda function using the `awslocal` CLI: $ awslocal lambda invoke --function-name ml-train /tmp/test.tmp {{< / command >}} -The first Lambda function will train the model and upload it to the S3 bucket. Finally, we can invoke the second Lambda function to do predictions with the model. +The first Lambda function will train the model and upload it to the S3 bucket. +Finally, we can invoke the second Lambda function to do predictions with the model. {{< command >}} $ awslocal lambda invoke --function-name ml-predict /tmp/test.tmp @@ -235,7 +262,8 @@ null ## Creating a Cloud Pod -After deploying the Lambda functions, we can create a Cloud Pod to share our local infrastructure and instance state with other LocalStack users in the organization. To save the current state of our LocalStack instance, we can use the `save` command: +After deploying the Lambda functions, we can create a Cloud Pod to share our local infrastructure and instance state with other LocalStack users in the organization. +To save the current state of our LocalStack instance, we can use the `save` command: {{< command >}} $ localstack pod save reproducible-ml @@ -244,13 +272,15 @@ Cloud Pod reproducible-ml successfully created {{< / command >}} {{< callout "note" >}} -You can also export a Cloud Pod locally by specifying a file URI as an argument. 
To export on a local path, run the following command: +You can also export a Cloud Pod locally by specifying a file URI as an argument. +To export on a local path, run the following command: {{< command >}} $ localstack pod save file:/// {{< / command >}} -The output of the above command will be a `` zip file in the specified directory. We can restore it at any time with the `load` command. +The output of the above command will be a `` zip file in the specified directory. +We can restore it at any time with the `load` command. {{< /callout >}} To list available the Cloud Pods you can use the `list` command: @@ -272,34 +302,48 @@ You can also inspect the contents of a Cloud Pod using the `inspect` command: $ localstack pod inspect reproducible-ml {{< / command >}} -While you save a Cloud Pod, it is automatically published on the LocalStack platform and can be shared with other users in your organization. While saving an already existing Cloud Pod, we would create a new version, which is eventually uploaded to the LocalStack platform. +While you save a Cloud Pod, it is automatically published on the LocalStack platform and can be shared with other users in your organization. +While saving an already existing Cloud Pod, we would create a new version, which is eventually uploaded to the LocalStack platform. {{< callout "note" >}} -You can optionally set the visibility of a Cloud Pod to `private` or `public` using the `--visibility` flag. By default, the visibility of a Cloud Pod is set to `private`. To set a Cloud Pod to `public`, you can use the following command: +You can optionally set the visibility of a Cloud Pod to `private` or `public` using the `--visibility` flag. +By default, the visibility of a Cloud Pod is set to `private`. +To set a Cloud Pod to `public`, you can use the following command: {{< command >}} $ localstack pod save --name --visibility public {{< / command >}} The above command does not create a new version and requires a version already registered with the platform. {{< /callout >}} -You can also attach an optional message and a list of services to a Cloud Pod using the `--message` and `--services` flags. You can check all the Cloud Pods in your organization over the [LocalStack Web Application](https://app.localstack.cloud/pods). Now that we have created a Cloud Pod, we can ask one of our team members to start LocalStack and load the Cloud Pod using the `load` command. +You can also attach an optional message and a list of services to a Cloud Pod using the `--message` and `--services` flags. +You can check all the Cloud Pods in your organization over the [LocalStack Web Application](https://app.localstack.cloud/pods). +Now that we have created a Cloud Pod, we can ask one of our team members to start LocalStack and load the Cloud Pod using the `load` command. {{< command >}} $ localstack pod load reproducible-ml {{< / command >}} -The `load` command will retrieve the content of our Cloud Pod named `reproducible-ml` from the LocalStack platform and inject it into our running LocalStack instance. Upon successfully loading the Cloud Pod, the Lambda function can be invoked again, and the log output should be the same as before. +The `load` command will retrieve the content of our Cloud Pod named `reproducible-ml` from the LocalStack platform and inject it into our running LocalStack instance. +Upon successfully loading the Cloud Pod, the Lambda function can be invoked again, and the log output should be the same as before. 
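+
+For example, after loading the pod, your teammate can invoke the prediction function from earlier directly, without re-deploying or re-training anything:
+
+{{< command >}}
+$ awslocal lambda invoke --function-name ml-predict /tmp/test.tmp
+{{< /command >}}
+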
-LocalStack Cloud Pods also feature different merge strategies to merge the state of a Cloud Pod with the current LocalStack instance. You can use the `--merge` flag to specify the merge strategy. The available merge strategies are: +LocalStack Cloud Pods also feature different merge strategies to merge the state of a Cloud Pod with the current LocalStack instance. +You can use the `--merge` flag to specify the merge strategy. +The available merge strategies are: -- **Load with overwrite**: This is the default merge strategy. It will load the state of the Cloud Pod into the current LocalStack instance and overwrite the existing state. +- **Load with overwrite**: This is the default merge strategy. + It will load the state of the Cloud Pod into the current LocalStack instance and overwrite the existing state. - **Load with basic merge**: This merge strategy will load the state of the Cloud Pod into the current LocalStack instance and merge the existing state with the state of the Cloud Pod. -- **Load with deep merge**: This merge strategy will load the state of the Cloud Pod into the current LocalStack instance and merge the existing state with the state of the Cloud Pod. It will also merge the existing state with the state of the Cloud Pod recursively. +- **Load with deep merge**: This merge strategy will load the state of the Cloud Pod into the current LocalStack instance and merge the existing state with the state of the Cloud Pod. + It will also merge the existing state with the state of the Cloud Pod recursively. {{< figure src="cloud-pods-state-merge-mechanisms.png" width="80%" alt="State Merge mechanisms with LocalStack Cloud Pods">}} ## Conclusion -In conclusion, LocalStack Cloud Pods facilitate collaboration and debugging among team members by allowing the sharing of local cloud infrastructure and instance state. These Cloud Pods can be used to create reproducible environments for various purposes, including machine learning. By using Cloud Pods, teams can work together to create a reproducible environment for their application and share it with other team members. Additionally, Cloud Pods can be used to pre-seed continuous integration (CI) pipelines with the necessary instance state to bootstrap testing environments or to troubleshoot failures in the CI pipeline. +In conclusion, LocalStack Cloud Pods facilitate collaboration and debugging among team members by allowing the sharing of local cloud infrastructure and instance state. +These Cloud Pods can be used to create reproducible environments for various purposes, including machine learning. +By using Cloud Pods, teams can work together to create a reproducible environment for their application and share it with other team members. +Additionally, Cloud Pods can be used to pre-seed continuous integration (CI) pipelines with the necessary instance state to bootstrap testing environments or to troubleshoot failures in the CI pipeline. -For more information about LocalStack Cloud Pods, refer to the documentation provided. The code for this tutorial, including a Makefile to execute it step-by-step, is available in the [LocalStack Pro samples repository](https://github.com/localstack/localstack-pro-samples/tree/master/reproducible-ml) on GitHub. +For more information about LocalStack Cloud Pods, refer to the documentation provided. 
+The code for this tutorial, including a Makefile to execute it step-by-step, is available in the [LocalStack Pro samples repository](https://github.com/localstack/localstack-pro-samples/tree/master/reproducible-ml) on GitHub. diff --git a/content/en/tutorials/route53-failover-with-fis/index.md b/content/en/tutorials/route53-failover-with-fis/index.md index b678e2712e..a6b491d094 100644 --- a/content/en/tutorials/route53-failover-with-fis/index.md +++ b/content/en/tutorials/route53-failover-with-fis/index.md @@ -29,24 +29,30 @@ leadimage: "route-53-failover.png" ## Introduction -LocalStack allows you to integrate & test [Fault Injection Simulator (FIS)](https://docs.localstack.cloud/user-guide/aws/fis/) with [Route53](https://docs.localstack.cloud/user-guide/aws/route53/) to automatically divert users to -a healthy secondary zone if the primary region fails, ensuring system availability and responsiveness. Route53's health checks and +LocalStack allows you to integrate & test [Fault Injection Simulator (FIS)](https://docs.localstack.cloud/user-guide/aws/fis/) with [Route53](https://docs.localstack.cloud/user-guide/aws/route53/) to automatically divert users to +a healthy secondary zone if the primary region fails, ensuring system availability and responsiveness. +Route53's health checks and traffic redirection enhance architecture resilience and ensure service continuity during regional outages, crucial for uninterrupted user experiences. {{< callout "note">}} -Route53 Failover with FIS is currently available as part of the **LocalStack Enterprise** plan. If you'd like to try it out, +Route53 Failover with FIS is currently available as part of the **LocalStack Enterprise** plan. +If you'd like to try it out, please [contact us](https://www.localstack.cloud/demo) to request access. {{< /callout >}} ## Getting started -This tutorial is designed for users new to the Route53 and FIS services. In this example, there's an active-primary and -passive-standby configuration. Route53 routes traffic to the primary region, which processes product-related requests through -API Gateway and Lambda functions, with data stored in DynamoDB. If the primary region fails, Route53 redirects to the standby +This tutorial is designed for users new to the Route53 and FIS services. +In this example, there's an active-primary and +passive-standby configuration. +Route53 routes traffic to the primary region, which processes product-related requests through +API Gateway and Lambda functions, with data stored in DynamoDB. +If the primary region fails, Route53 redirects to the standby region, maintained in sync by a replication Lambda function. -For this particular example, we'll be using a [sample application repository](https://github.com/localstack-samples/samples-chaos-engineering/tree/main/route53-failover). Clone the repository, and follow the +For this particular example, we'll be using a [sample application repository](https://github.com/localstack-samples/samples-chaos-engineering/tree/main/route53-failover). +Clone the repository, and follow the instructions below to get started. ### Prerequisites @@ -59,7 +65,8 @@ The general prerequisites for this guide are: - [Python-3](https://www.python.org/downloads/) - `dig` -Start LocalStack by using the `docker-compose.yml` file from the repository. Ensure to set your Auth Token as an environment variable +Start LocalStack by using the `docker-compose.yml` file from the repository. +Ensure to set your Auth Token as an environment variable during this process. 
{{< command >}} @@ -75,17 +82,21 @@ The following diagram shows the architecture that this application builds and de ### Creating the resources -To begin, deploy the same services in both `us-west-1` and `us-east-1` regions. The resources specified in the `init-resources.sh` +To begin, deploy the same services in both `us-west-1` and `us-east-1` regions. +The resources specified in the `init-resources.sh` file will be created when the LocalStack container starts, using Initialization Hooks and the `awslocal` CLI tool. -The objective is to have a backup system in case of a regional outage in the primary availability zone (`us-west-1`). We'll focus +The objective is to have a backup system in case of a regional outage in the primary availability zone (`us-west-1`). +We'll focus on this region to examine the existing resilience mechanisms. {{< figure src="route53-failover-2.png" width="800">}} -- The primary API Gateway includes a health check endpoint that returns a 200 HTTP status code, serving as a basic check for its availability. -- Data synchronization across regions can be achieved with AWS-native tools like DynamoDB Streams and AWS Lambda. Here, any changes to the -primary table trigger a Lambda function, replicating these changes to a secondary table. This configuration is essential for high availability +- The primary API Gateway includes a health check endpoint that returns a 200 HTTP status code, serving as a basic check for its availability. +- Data synchronization across regions can be achieved with AWS-native tools like DynamoDB Streams and AWS Lambda. + Here, any changes to the +primary table trigger a Lambda function, replicating these changes to a secondary table. + This configuration is essential for high availability and disaster recovery. ### Configuring a Route53 hosted zone @@ -113,11 +124,12 @@ awslocal route53 create-health-check \ ) {{< /command >}} -This command creates a Route 53 health check for an HTTP endpoint (`12345.execute-api.localhost.localstack.cloud:4566/dev/healthcheck`) -with a 10-second request interval and captures the health check's ID. The caller reference identifier in AWS resource creation or updates +This command creates a Route 53 health check for an HTTP endpoint (`12345.execute-api.localhost.localstack.cloud:4566/dev/healthcheck`) +with a 10-second request interval and captures the health check's ID. +The caller reference identifier in AWS resource creation or updates prevents accidental duplication if requests are repeated. -To update DNS records in the specified Route53 hosted zone (`$HOSTED_ZONE_ID`), add two CNAME records: `12345.$HOSTED_ZONE_NAME` +To update DNS records in the specified Route53 hosted zone (`$HOSTED_ZONE_ID`), add two CNAME records: `12345.$HOSTED_ZONE_NAME` pointing to `12345.execute-api.localhost.localstack.cloud`, and `67890.$HOSTED_ZONE_NAME` pointing to `67890.execute-api.localhost.localstack.cloud`. Set a TTL (Time to Live) of 60 seconds for these records. @@ -152,9 +164,12 @@ $ awslocal route53 change-resource-record-sets \ }' {{< /command >}} -Finally, we'll update the DNS records in the Route53 hosted zone identified by **`$HOSTED_ZONE_ID`**. We're adding two CNAME records -for the subdomain `test.$HOSTED_ZONE_NAME`. The first record points to `12345.$HOSTED_ZONE_NAME` and is linked with the earlier created -health check, designated as the primary failover target. 
The second record points to `67890.$HOSTED_ZONE_NAME` and is set as the secondary +Finally, we'll update the DNS records in the Route53 hosted zone identified by **`$HOSTED_ZONE_ID`**. +We're adding two CNAME records +for the subdomain `test.$HOSTED_ZONE_NAME`. +The first record points to `12345.$HOSTED_ZONE_NAME` and is linked with the earlier created +health check, designated as the primary failover target. +The second record points to `67890.$HOSTED_ZONE_NAME` and is set as the secondary failover target. {{< command >}} @@ -196,27 +211,33 @@ $ awslocal route53 change-resource-record-sets \ {{< /command >}} This setup represents the basic failover configuration where traffic is redirected to different endpoints based on their health check -status. To confirm that the CNAME record for `test.hello-localstack.com` points to `12345.execute-api.localhost.localstack.cloud`, +status. +To confirm that the CNAME record for `test.hello-localstack.com` points to `12345.execute-api.localhost.localstack.cloud`, you can use the following `dig` command: {{< command >}} $ dig @localhost test.hello-localstack.com CNAME - + ..... ;; QUESTION SECTION: -;test.hello-localstack.com. IN CNAME +;test.hello-localstack.com. +IN CNAME ;; ANSWER SECTION: -test.hello-localstack.com. 300 IN CNAME 12345.execute-api.localhost.localstack.cloud. +test.hello-localstack.com. +300 IN CNAME 12345.execute-api.localhost.localstack.cloud. ..... {{< /command >}} ### Creating a controlled outage -Our setup is now complete and ready for testing. To mimic a regional outage in the `us-west-1` region, we'll conduct an experiment that -halts all service invocations in this region, including the health check function. Once the primary region becomes non-functional, -Route 53's health checks will fail. This failure will activate the failover policy, redirecting traffic to the corresponding services +Our setup is now complete and ready for testing. +To mimic a regional outage in the `us-west-1` region, we'll conduct an experiment that +halts all service invocations in this region, including the health check function. +Once the primary region becomes non-functional, +Route 53's health checks will fail. +This failure will activate the failover policy, redirecting traffic to the corresponding services in the secondary region, thus maintaining service continuity. {{< command >}} @@ -236,7 +257,7 @@ $ cat region-outage-experiment.json "stopConditions": [], "roleArn": "arn:aws:iam:000000000000:role/ExperimentRole" } - +
{{< /command >}} This Fault Injection Simulator (FIS) experiment template is set up to mimic a `Service Unavailable` (503 error) in the `us-west-1` region. @@ -276,18 +297,22 @@ $ awslocal fis start-experiment --experiment-template-id {{< /command >}} -Replace `` with the ID of the experiment template created in the previous step. When the experiment is active, -Route 53's health checks will detect the failure and redirect traffic to the standby region as per the failover setup. Confirm this redirection with: +Replace `` with the ID of the experiment template created in the previous step. +When the experiment is active, +Route 53's health checks will detect the failure and redirect traffic to the standby region as per the failover setup. +Confirm this redirection with: {{< command >}} $ dig @localhost test.hello-localstack.com CNAME - + ..... ;; QUESTION SECTION: -;test.hello-localstack.com. IN CNAME +;test.hello-localstack.com. +IN CNAME ;; ANSWER SECTION: -test.hello-localstack.com. 300 IN CNAME 67890.execute-api.localhost.localstack.cloud. +test.hello-localstack.com. +300 IN CNAME 67890.execute-api.localhost.localstack.cloud. ..... {{< /command >}} diff --git a/content/en/tutorials/s3-static-website-terraform/index.md b/content/en/tutorials/s3-static-website-terraform/index.md index be2a472577..459250aa21 100644 --- a/content/en/tutorials/s3-static-website-terraform/index.md +++ b/content/en/tutorials/s3-static-website-terraform/index.md @@ -22,13 +22,22 @@ pro: false leadimage: "s3-static-website-terraform-featured-image.png" --- -[AWS Simple Storage Service (S3)](https://aws.amazon.com/s3/) is a proprietary object storage solution that can store an unlimited number of objects for many use cases. S3 is a highly scalable, durable and reliable service that we can use for various use cases: hosting a static site, handling big data analytics, managing application logs, storing web assets and much more! +[AWS Simple Storage Service (S3)](https://aws.amazon.com/s3/) is a proprietary object storage solution that can store an unlimited number of objects for many use cases. +S3 is a highly scalable, durable and reliable service that we can use for various use cases: hosting a static site, handling big data analytics, managing application logs, storing web assets and much more! -With S3, you have unlimited storage with your data stored in buckets. A bucket refers to a directory, while an object is just another term for a file. Every object (file) stores the name of the file (key), the contents (value), a version ID and the associated metadata. You can also use S3 to host a static website, to serve static content. It might include HTML, CSS, JavaScript, images, and other assets that make up your website. +With S3, you have unlimited storage with your data stored in buckets. +A bucket refers to a directory, while an object is just another term for a file. +Every object (file) stores the name of the file (key), the contents (value), a version ID and the associated metadata. +You can also use S3 to host a static website, to serve static content. +It might include HTML, CSS, JavaScript, images, and other assets that make up your website. -LocalStack supports the S3 API, which means you can use the same API calls to interact with S3 in LocalStack as you would with AWS. Using LocalStack, you can create and manage S3 buckets and objects locally, use AWS SDKs and third-party integrations to work with S3, and test your applications without making any significant alterations. 
LocalStack also supports the creation of S3 buckets with static website hosting enabled. +LocalStack supports the S3 API, which means you can use the same API calls to interact with S3 in LocalStack as you would with AWS. +Using LocalStack, you can create and manage S3 buckets and objects locally, use AWS SDKs and third-party integrations to work with S3, and test your applications without making any significant alterations. +LocalStack also supports the creation of S3 buckets with static website hosting enabled. -In this tutorial, we will deploy a static website using an S3 bucket over a locally emulated AWS infrastructure on LocalStack. We will use Terraform to automate the creation & management of AWS resources by declaring them in the HashiCorp Configuration Language (HCL). We will also learn about `tflocal`, a CLI wrapper created by LocalStack, that allows you to run Terraform locally against LocalStack. +In this tutorial, we will deploy a static website using an S3 bucket over a locally emulated AWS infrastructure on LocalStack. +We will use Terraform to automate the creation & management of AWS resources by declaring them in the HashiCorp Configuration Language (HCL). +We will also learn about `tflocal`, a CLI wrapper created by LocalStack, that allows you to run Terraform locally against LocalStack. ## Prerequisites @@ -40,9 +49,13 @@ For this tutorial, you will need: ## Creating a static website -We will create a simple static website using plain HTML to get started. To create a static website deployed over S3, we need to create an index document and a custom error document. We will name our index document `index.html` and our error document `error.html`. Optionally, you can create a folder called `assets` to store images and other assets. +We will create a simple static website using plain HTML to get started. +To create a static website deployed over S3, we need to create an index document and a custom error document. +We will name our index document `index.html` and our error document `error.html`. +Optionally, you can create a folder called `assets` to store images and other assets. -Let's create a directory named `s3-static-website-localstack` where we'll store our static website files. If you don't have an `index.html` file, you can use the following code to create one: +Let's create a directory named `s3-static-website-localstack` where we'll store our static website files. +If you don't have an `index.html` file, you can use the following code to create one: ```html @@ -58,7 +71,9 @@ Let's create a directory named `s3-static-website-localstack` where we'll store ``` -S3 will serve this file when a user visits the root URL of your static website, serving as the default page. In a similar fashion, you can configure a custom error document that contains a user-friendly error message. Let's create a file named `error.html` and add the following code: +S3 will serve this file when a user visits the root URL of your static website, serving as the default page. +In a similar fashion, you can configure a custom error document that contains a user-friendly error message. +Let's create a file named `error.html` and add the following code: ```html @@ -73,17 +88,23 @@ S3 will serve this file when a user visits the root URL of your static website, ``` -S3 will return the above file content only for HTTP 4XX error codes. Some browsers might choose to display their custom error message if a user tries to access a resource that does not exist. 
In this case, browsers might ignore the above error document. With the initial setup complete, we can now move on to creating a static website using S3 via `awslocal`, LocalStack's wrapper for the AWS CLI. +S3 will return the above file content only for HTTP 4XX error codes. +Some browsers might choose to display their custom error message if a user tries to access a resource that does not exist. +In this case, browsers might ignore the above error document. +With the initial setup complete, we can now move on to creating a static website using S3 via `awslocal`, LocalStack's wrapper for the AWS CLI. ## Hosting a static website using S3 -To create a static website using S3, we need to create a bucket, enable static website hosting, and upload the files to the bucket. We will use the `awslocal` CLI for these operations. Navigate to the root directory of the project and create a bucket named `testwebsite` using LocalStack's S3 API: +To create a static website using S3, we need to create a bucket, enable static website hosting, and upload the files to the bucket. +We will use the `awslocal` CLI for these operations. +Navigate to the root directory of the project and create a bucket named `testwebsite` using LocalStack's S3 API: {{< command >}} $ awslocal s3api create-bucket --bucket testwebsite {{< / command >}} -With the bucket created, we can now attach a policy to it to allow public access and its contents. Let's create a file named `bucket_policy.json` in the root directory and add the following code: +With the bucket created, we can now attach a policy to it to allow public access and its contents. +Let's create a file named `bucket_policy.json` in the root directory and add the following code: ```json { @@ -123,13 +144,18 @@ If you are deploying a static website using S3 on real AWS cloud, your S3 websit - `http://.s3-website-.amazonaws.com` - `http://.s3-website..amazonaws.com` -In LocalStack, the S3 website endpoint follows the following format: `http://.s3-website.localhost.localstack.cloud:4566`. You can navigate to [`http://testwebsite.s3-website.localhost.localstack.cloud:4566/`](http://testwebsite.s3-website.localhost.localstack.cloud:4566/) to view your static website. +In LocalStack, the S3 website endpoint follows the following format: `http://.s3-website.localhost.localstack.cloud:4566`. +You can navigate to [`http://testwebsite.s3-website.localhost.localstack.cloud:4566/`](http://testwebsite.s3-website.localhost.localstack.cloud:4566/) to view your static website. ## Orchestrating infrastructure using Terraform -You can automate the above process by orchestrating your AWS infrastructure using Terraform. Terraform is an infrastructure as code (IaC) tool that allows you to create, manage, and version your infrastructure. Terraform uses a declarative configuration language called HashiCorp Configuration Language (HCL) to describe your infrastructure. +You can automate the above process by orchestrating your AWS infrastructure using Terraform. +Terraform is an infrastructure as code (IaC) tool that allows you to create, manage, and version your infrastructure. +Terraform uses a declarative configuration language called HashiCorp Configuration Language (HCL) to describe your infrastructure. -Before that, we would need to manually configure the local service endpoints and credentials for Terraform to integrate with LocalStack. 
We will use the [AWS Provider for Terraform](https://registry.terraform.io/providers/hashicorp/aws/latest/docs) to interact with the many resources supported by AWS in LocalStack. Create a new file named `provider.tf` and specify mock credentials for the AWS provider: +Before that, we would need to manually configure the local service endpoints and credentials for Terraform to integrate with LocalStack. +We will use the [AWS Provider for Terraform](https://registry.terraform.io/providers/hashicorp/aws/latest/docs) to interact with the many resources supported by AWS in LocalStack. +Create a new file named `provider.tf` and specify mock credentials for the AWS provider: ```hcl provider "aws" { @@ -139,7 +165,11 @@ provider "aws" { } ``` -We would also need to avoid issues with routing and authentication (as we do not need it). Therefore we need to supply some general parameters. Additionally, we have to point the individual services to LocalStack. We can do this by specifying the `endpoints` parameter for each service, that we intend to use. Our `provider.tf` file should look like this: +We would also need to avoid issues with routing and authentication (as we do not need it). +Therefore we need to supply some general parameters. +Additionally, we have to point the individual services to LocalStack. +We can do this by specifying the `endpoints` parameter for each service, that we intend to use. +Our `provider.tf` file should look like this: ```hcl provider "aws" { @@ -161,10 +191,14 @@ provider "aws" { ``` {{< callout "note" >}} -We use `localhost.localstack.cloud` as the recommended endpoint for the S3 to enable host-based bucket endpoints. Users can rely on the `localhost.localstack.cloud` domain to be publicly resolvable. We also publish an SSL certificate which is automatically used inside LocalStack to enable HTTPS endpoints with valid certificates. For most of the other services, it is fine to use `localhost:4566`. +We use `localhost.localstack.cloud` as the recommended endpoint for the S3 to enable host-based bucket endpoints. +Users can rely on the `localhost.localstack.cloud` domain to be publicly resolvable. +We also publish an SSL certificate which is automatically used inside LocalStack to enable HTTPS endpoints with valid certificates. +For most of the other services, it is fine to use `localhost:4566`. {{< /callout >}} -With the provider configured, we can now configure the variables for our S3 bucket. Create a new file named `variables.tf` and add the following code: +With the provider configured, we can now configure the variables for our S3 bucket. +Create a new file named `variables.tf` and add the following code: ```hcl variable "bucket_name" { @@ -179,7 +213,9 @@ variable "tags" { } ``` -We take a user input for the bucket name and tags. Next, we will define the output variables for our Terraform configuration. Create a new file named `outputs.tf` and add the following code: +We take a user input for the bucket name and tags. +Next, we will define the output variables for our Terraform configuration. +Create a new file named `outputs.tf` and add the following code: ```hcl output "arn" { @@ -202,7 +238,9 @@ output "website_endpoint" { } ``` -The output variables are the ARN, name, domain name, and website endpoint of the bucket. With all the configuration files in place, we can now create the S3 bucket. 
Create a new file named `main.tf` and create the S3 bucket using the following code: +The output variables are the ARN, name, domain name, and website endpoint of the bucket. +With all the configuration files in place, we can now create the S3 bucket. +Create a new file named `main.tf` and create the S3 bucket using the following code: ```hcl resource "aws_s3_bucket" "s3_bucket" { @@ -211,7 +249,8 @@ resource "aws_s3_bucket" "s3_bucket" { } ``` -To configure the static website hosting, we will use the `aws_s3_bucket_website_configuration` resource. Add the following code to the `main.tf` file: +To configure the static website hosting, we will use the `aws_s3_bucket_website_configuration` resource. +Add the following code to the `main.tf` file: ```hcl resource "aws_s3_bucket_website_configuration" "s3_bucket" { @@ -228,7 +267,8 @@ resource "aws_s3_bucket_website_configuration" "s3_bucket" { } ``` -To set the bucket policy, we will use the `aws_s3_bucket_policy` resource. Add the following code to the `main.tf` file: +To set the bucket policy, we will use the `aws_s3_bucket_policy` resource. +Add the following code to the `main.tf` file: ```hcl resource "aws_s3_bucket_acl" "s3_bucket" { @@ -257,7 +297,10 @@ resource "aws_s3_bucket_policy" "s3_bucket" { } ``` -In the above code, we are setting the ACL of the bucket to `public-read` and setting the bucket policy to allow public access to the bucket. Pick up an appropriate policy based on your use case. Let's use the `aws_s3_object` resource to upload the files to the bucket. Add the following code to the `main.tf` file: +In the above code, we are setting the ACL of the bucket to `public-read` and setting the bucket policy to allow public access to the bucket. +Pick up an appropriate policy based on your use case. +Let's use the `aws_s3_object` resource to upload the files to the bucket. +Add the following code to the `main.tf` file: ```hcl resource "aws_s3_object" "object_www" { @@ -272,7 +315,9 @@ resource "aws_s3_object" "object_www" { } ``` -The above code uploads all our html files to the bucket. We are also setting the ACL of the files to `public-read`. Optionally, if you have static assets like images, CSS, and JavaScript files, you can upload them to the bucket using the same `aws_s3_bucket_object` resource by adding the following code to the `main.tf` file: +The above code uploads all our html files to the bucket. +We are also setting the ACL of the files to `public-read`. +Optionally, if you have static assets like images, CSS, and JavaScript files, you can upload them to the bucket using the same `aws_s3_bucket_object` resource by adding the following code to the `main.tf` file: ```hcl resource "aws_s3_object" "object_assets" { @@ -286,7 +331,8 @@ resource "aws_s3_object" "object_assets" { } ``` -With all the configuration files in place, we can now initialize the Terraform configuration. Run the following command to initialize the Terraform configuration: +With all the configuration files in place, we can now initialize the Terraform configuration. +Run the following command to initialize the Terraform configuration: {{< command >}} $ terraform init @@ -296,19 +342,22 @@ Terraform has been successfully initialized! ... {{< / command >}} -We can create an execution plan based on our Terraform configuration for the AWS resources. Run the following command to create an execution plan: +We can create an execution plan based on our Terraform configuration for the AWS resources. 
+Run the following command to create an execution plan: {{< command >}} $ terraform plan {{< / command >}} -Finally, we can apply the Terraform configuration to create the AWS resources. Run the following command to apply the Terraform configuration: +Finally, we can apply the Terraform configuration to create the AWS resources. +Run the following command to apply the Terraform configuration: {{< command >}} $ terraform apply var.bucket_name - Name of the s3 bucket. Must be unique. + Name of the s3 bucket. +Must be unique. Enter a value: testbucket ... @@ -318,9 +367,16 @@ name = "testbucket" website_endpoint = "testbucket.s3-website-us-east-1.amazonaws.com" {{< / command >}} -In the above command, we specified `testbucket` as the bucket name. You can specify any bucket name since LocalStack is ephemeral, and stopping your LocalStack container will delete all the created resources. The above command output includes the ARN, name, domain name, and website endpoint of the bucket. You can see the `website_endpoint` configured to use AWS S3 Website Endpoint. You can now access the website using the bucket name in the following format: `http://.s3-website.localhost.localstack.cloud:4566`. Since the endpoint is configured to use `localhost.localstack.cloud`, no real AWS resources have been created. +In the above command, we specified `testbucket` as the bucket name. +You can specify any bucket name since LocalStack is ephemeral, and stopping your LocalStack container will delete all the created resources. +The above command output includes the ARN, name, domain name, and website endpoint of the bucket. +You can see the `website_endpoint` configured to use AWS S3 Website Endpoint. +You can now access the website using the bucket name in the following format: `http://.s3-website.localhost.localstack.cloud:4566`. +Since the endpoint is configured to use `localhost.localstack.cloud`, no real AWS resources have been created. -You can optionally use the `tflocal` CLI as a drop-in replacement for the official Terraform CLI. `tflocal` uses the Terraform Override mechanism to create a temporary `localstack_providers_override.tf` file, which is deleted after the infrastructure is created. It mitigates the need to create the `provider.tf` file manually. You can use `tflocal` to create the infrastructure by running the following commands: +You can optionally use the `tflocal` CLI as a drop-in replacement for the official Terraform CLI. `tflocal` uses the Terraform Override mechanism to create a temporary `localstack_providers_override.tf` file, which is deleted after the infrastructure is created. +It mitigates the need to create the `provider.tf` file manually. +You can use `tflocal` to create the infrastructure by running the following commands: {{< command >}} $ tflocal init @@ -330,7 +386,10 @@ $ tflocal apply ## Conclusion -In this tutorial, we have seen how to use LocalStack to create an S3 bucket and configure it to serve a static website. We have also seen how you can use Terraform to provision AWS infrastructure in an emulated local environment using LocalStack. You can use the [LocalStack App](https://app.localstack.cloud) to view the created buckets and files on the LocalStack Resource dashboard for S3 and upload more files or perform other operations on the bucket. Using LocalStack, you can perform various operations using emulated S3 buckets and other AWS services without creating any real AWS resources. 
+In this tutorial, we have seen how to use LocalStack to create an S3 bucket and configure it to serve a static website. +We have also seen how you can use Terraform to provision AWS infrastructure in an emulated local environment using LocalStack. +You can use the [LocalStack App](https://app.localstack.cloud) to view the created buckets and files on the LocalStack Resource dashboard for S3 and upload more files or perform other operations on the bucket. +Using LocalStack, you can perform various operations using emulated S3 buckets and other AWS services without creating any real AWS resources. The code for this tutorial can be found in our [LocalStack Terraform samples over GitHub](https://github.com/localstack/localstack-terraform-samples/tree/master/s3-static-website). Please make sure to adjust the paths for the html files in `main.tf`. diff --git a/content/en/tutorials/schema-evolution-glue-msk/index.md b/content/en/tutorials/schema-evolution-glue-msk/index.md index f78809570f..ef681f30eb 100644 --- a/content/en/tutorials/schema-evolution-glue-msk/index.md +++ b/content/en/tutorials/schema-evolution-glue-msk/index.md @@ -23,14 +23,19 @@ pro: true leadimage: "schema-evolution-glue-msk-featured-image.png" --- -[Apache Kafka](https://kafka.apache.org/) is an open-source distributed event store and stream-processing platform. It is used to capture data generated by producers and distribute it among its consumers. Kafka is known for its scalability, with reports of production environments scaling to [trillions of messages per day](https://engineering.linkedin.com/blog/2019/apache-kafka-trillion-messages). With [Amazon Managed Streaming for Apache Kafka (MSK)](https://aws.amazon.com/msk/), AWS provides a service to provision Apache Kafka clusters easily. +[Apache Kafka](https://kafka.apache.org/) is an open-source distributed event store and stream-processing platform. +It is used to capture data generated by producers and distribute it among its consumers. +Kafka is known for its scalability, with reports of production environments scaling to [trillions of messages per day](https://engineering.linkedin.com/blog/2019/apache-kafka-trillion-messages). +With [Amazon Managed Streaming for Apache Kafka (MSK)](https://aws.amazon.com/msk/), AWS provides a service to provision Apache Kafka clusters easily. [LocalStack Pro](https://app.localstack.cloud/) supports [Amazon Managed Streaming for Kafka (MSK)]({{< ref "user-guide/aws/msk" >}}), which enables you to spin up Kafka clusters on your local machine and test the integration of your applications with Amazon MSK. Kafka clusters are often used as the central messaging infrastructure in complex microservice environments. -However, the continuous and independent development of the individual microservices - the data producers and consumers - can make it hard to coordinate and evolve data schemas over time without introducing application failures due to incompatibilities. A common solution to this problem is to use a schema registry which provides for the validation of schema changes, preventing any unsafe changes and subsequent application failures. +However, the continuous and independent development of the individual microservices - the data producers and consumers - can make it hard to coordinate and evolve data schemas over time without introducing application failures due to incompatibilities. 
+A common solution to this problem is to use a schema registry which provides for the validation of schema changes, preventing any unsafe changes and subsequent application failures. -[AWS Glue Schema Registry](https://docs.aws.amazon.com/glue/latest/dg/schema-registry.html) can be used as such a schema registry, enabling you to validate and evolve streaming data using Apache Avro schemas. It can be easily integrated into Java applications for Apache Kafka with [AWS's official open-source serializers and deserializers](https://github.com/awslabs/aws-glue-schema-registry). +[AWS Glue Schema Registry](https://docs.aws.amazon.com/glue/latest/dg/schema-registry.html) can be used as such a schema registry, enabling you to validate and evolve streaming data using Apache Avro schemas. +It can be easily integrated into Java applications for Apache Kafka with [AWS's official open-source serializers and deserializers](https://github.com/awslabs/aws-glue-schema-registry).
The following chart shows the integration of producers and consumers with Amazon MSK and the AWS Glue Schema Registry: @@ -39,14 +44,18 @@ The following chart shows the integration of producers and consumers with Amazon {{< figure src="schema-evolution-glue-msk-flow.svg" width="100%" alt="Workflow: Glue Schema Registry with MSK">}}

-1. Before sending a record, the producer validates that the schema it is using to serialize its records is valid. We can configure the producer to register a new schema version if the schema is not yet registered. +1. Before sending a record, the producer validates that the schema it is using to serialize its records is valid. + We can configure the producer to register a new schema version if the schema is not yet registered. - When registering the new schema version, the schema registry validates if the schema is compatible. - - If the registry detects an incompatibility, the registration is rejected. This ensures that a producer fails early and cannot publish incompatible records in the first place. + - If the registry detects an incompatibility, the registration is rejected. + This ensures that a producer fails early and cannot publish incompatible records in the first place. 2. Once the schema is valid, the producer serializes and compresses the record and sends it to the Kafka cluster. 3. The consumer reads the serialized and compressed record. 4. The consumer requests the schema from the schema registry (if it is not already cached) and uses the schema to decompress and deserialize the record. -[AWS Glue Schema Registry](https://docs.localstack.cloud/user-guide/aws/glue/) is supported by LocalStack Pro as well, ultimately allowing you to test the evolution of your data streaming application completely on your local machine. It allows you develop and test your application's data schema evolution locally. The code for this tutorial (including a script to execute it step-by-step) can be found in our [LocalStack Pro samples over GitHub](https://github.com/localstack/localstack-pro-samples/tree/master/glue-msk-schema-registry). +[AWS Glue Schema Registry](https://docs.localstack.cloud/user-guide/aws/glue/) is supported by LocalStack Pro as well, ultimately allowing you to test the evolution of your data streaming application completely on your local machine. +It allows you develop and test your application's data schema evolution locally. +The code for this tutorial (including a script to execute it step-by-step) can be found in our [LocalStack Pro samples over GitHub](https://github.com/localstack/localstack-pro-samples/tree/master/glue-msk-schema-registry). # Prerequisites @@ -60,7 +69,8 @@ For this tutorial you will need: ## Initial schema -At first, we will define our schema, set up our Java project, and generate the Java data classes using the schema. In our Apache Avro data schema we describe a request to ride a unicorn, including the necessary addresses, a fare, a duration, some preferences, and a customer record: +At first, we will define our schema, set up our Java project, and generate the Java data classes using the schema. +In our Apache Avro data schema we describe a request to ride a unicorn, including the necessary addresses, a fare, a duration, some preferences, and a customer record: {{< github repo="localstack/localstack-pro-samples" file="glue-msk-schema-registry/producer/src/main/resources/avro/unicorn_ride_request_v1.avsc" lang="json" ref="cd023a84a3b473984e9c34053d4feb7de8e038c1" >}} @@ -167,7 +177,8 @@ In our root pom, we configure the `producer` and the `consumer` module, some sha ``` -While the root pom is a bit lengthy, the `pom.xml` files of the two modules are quite simple. 
They only reference the root pom, activate the `avro-maven-plugin`, and define a main class (we'll go into detail on the actual Java code in [the section below](#implement-a-producer-and-consumer)). +While the root pom is a bit lengthy, the `pom.xml` files of the two modules are quite simple. +They only reference the root pom, activate the `avro-maven-plugin`, and define a main class (we'll go into detail on the actual Java code in [the section below](#implementing-a-producer-and-consumer)). Here is what the producer's `pom.xml` looks like: @@ -238,7 +249,8 @@ Now, all the boilerplate is done: ### The Producer -The next step is to implement our producer. The complete module can be found on our [samples repository (along with the rest of the code of this tutorial)](https://github.com/localstack/localstack-pro-samples/blob/cd023a84a3b473984e9c34053d4feb7de8e038c1/glue-msk-schema-registry/producer/). +The next step is to implement our producer. +The complete module can be found on our [samples repository (along with the rest of the code of this tutorial)](https://github.com/localstack/localstack-pro-samples/blob/cd023a84a3b473984e9c34053d4feb7de8e038c1/glue-msk-schema-registry/producer/). We create a new class called `Producer` in `producer/src/main/java/cloud/localstack/demos/gluemsk/producer/`. The `Producer` contains a `main` method which uses [`jcommander`](https://jcommander.org/) to create a simple CLI interface: @@ -478,7 +490,9 @@ public void startConsumer() { ``` ## Setting up the infrastructure -Now that the initial coding is done, we can give it a try. Let's start LocalStack: + +Now that the initial coding is done, we can give it a try. +Let's start LocalStack: ```bash LOCALSTACK_AUTH_TOKEN= localstack start -d @@ -499,9 +513,11 @@ $ awslocal kafka create-cluster \ } ``` -The `ClusterArn` is created dynamically and will be different for your run. Make sure to use your `ClusterArn` for the commands below. +The `ClusterArn` is created dynamically and will be different for your run. +Make sure to use your `ClusterArn` for the commands below. -It takes some time for the cluster to get up and running. We can monitor the state with `describe-cluster`: +It takes some time for the cluster to get up and running. +We can monitor the state with `describe-cluster`: ```bash $ awslocal kafka describe-cluster --cluster-arn "arn:aws:kafka:us-east-1:000000000000:cluster/unicorn-ride-cluster/f9b16124-baf3-459b-8507-ec6c605b7a0a-25" @@ -525,7 +541,8 @@ $ awslocal kafka describe-cluster --cluster-arn "arn:aws:kafka:us-east-1:0000000 } ``` -Once the `State` is `ACTIV`, the cluster is ready to be used. Now it's time to create our Glue Schema Registry: +Once the `State` is `ACTIV`, the cluster is ready to be used. +Now it's time to create our Glue Schema Registry: ```bash $ awslocal glue create-registry --registry-name unicorn-ride-request-registry @@ -560,13 +577,17 @@ $ awslocal glue create-schema \ } ``` -For the schema, we just defined the compatibility mode `BACKWARD`. This means that the consumers using the new schema can also read data produced with the last schema. For example, this would allow the deletion of fields, or the introduction of new optional fields. +For the schema, we just defined the compatibility mode `BACKWARD`. +This means that the consumers using the new schema can also read data produced with the last schema. +For example, this would allow the deletion of fields, or the introduction of new optional fields. 
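To exercise the compatibility check outside of the Kafka serializer, you can also register a schema version directly against the local registry with boto3. This is only a sketch: it assumes LocalStack's default edge endpoint on port 4566 and re-uses the registry, schema name, and v1 Avro file from this tutorial. Registering an identical definition simply resolves to the existing version, while an incompatible definition is rejected by the registry.

```python
import boto3

# Point the Glue client at the local LocalStack endpoint instead of real AWS.
glue = boto3.client(
    "glue",
    endpoint_url="http://localhost:4566",
    region_name="us-east-1",
    aws_access_key_id="test",
    aws_secret_access_key="test",
)

# Registering a schema version triggers the registry's BACKWARD compatibility check.
with open("producer/src/main/resources/avro/unicorn_ride_request_v1.avsc") as schema_file:
    response = glue.register_schema_version(
        SchemaId={
            "RegistryName": "unicorn-ride-request-registry",
            "SchemaName": "unicorn-ride-request-schema-avro",
        },
        SchemaDefinition=schema_file.read(),
    )

print(response["VersionNumber"], response["Status"])  # e.g. 1 AVAILABLE for a compatible (or identical) definition
```

This is roughly what the producer's auto-registration setting does later in this tutorial when it encounters an unregistered schema version.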
-You can find a thorough description of the different compatibility modes in the [AWS docs on Schema Versioning and Compatibility](https://docs.aws.amazon.com/glue/latest/dg/schema-registry.html#schema-registry-compatibility). The Glue Schema Registry will ensure that newly registered schemas fulfill the constraints defined by the compatibility mode. +You can find a thorough description of the different compatibility modes in the [AWS docs on Schema Versioning and Compatibility](https://docs.aws.amazon.com/glue/latest/dg/schema-registry.html#schema-registry-compatibility). +The Glue Schema Registry will ensure that newly registered schemas fulfill the constraints defined by the compatibility mode. ## Running the Producer and Consumer -Finally, everything is ready to start our `Producer` and `Consumer`. First, we need to get the bootstrap server address from the Kafka cluster: +Finally, everything is ready to start our `Producer` and `Consumer`. +First, we need to get the bootstrap server address from the Kafka cluster: ```bash $ awslocal kafka get-bootstrap-brokers --cluster-arn "arn:aws:kafka:us-east-1:000000000000:cluster/unicorn-ride-cluster/f9b16124-baf3-459b-8507-ec6c605b7a0a-25" @@ -575,9 +596,11 @@ $ awslocal kafka get-bootstrap-brokers --cluster-arn "arn:aws:kafka:us-east-1:0 } ``` -Like the `ClusterArn`, the `BootstrapBrokerString` is dynamic and can be different. Please make sure to use your bootstrap server address for the runs below. +Like the `ClusterArn`, the `BootstrapBrokerString` is dynamic and can be different. +Please make sure to use your bootstrap server address for the runs below. -Now let's start the `Producer`. By default, our producer will just send 100 records and shut down. +Now let's start the `Producer`. +By default, our producer will just send 100 records and shut down. ```bash # Compile the Java packages @@ -589,13 +612,16 @@ mvn -pl producer exec:java -Dexec.args="--bootstrap-servers localhost:4511" Once the producer is running, we can observe the different [steps of the producer as described in the intro](#integration-description): 1. Before sending a record, the producer validates that the schema, which it is using to serialize its records, is valid: + ```plaintext ... 
[GlueSchemaRegistrySerializerFactory][DEBUG] Returning Avro serializer instance from GlueSchemaRegistrySerializerFactory [AWSSchemaRegistryClient][DEBUG] Getting Schema Version Id for : schemaDefinition = {"type":"record","name":"UnicornRideRequest","namespace":"cloud.localstack.demos.gluemsk.schema","fields":[{"name":"request_id","type":"int","doc":"customer request id"},{"name":"pickup_address","type":"string","doc":"customer pickup address"},{"name":"destination_address","type":"string","doc":"customer destination address"},{"name":"ride_fare","type":"float","doc":"ride fare amount (USD)"},{"name":"ride_duration","type":"int","doc":"ride duration in minutes"},{"name":"preferred_unicorn_color","type":{"type":"enum","name":"UnicornPreferredColor","symbols":["WHITE","BLACK","RED","BLUE","GREY"]},"default":"WHITE"},{"name":"recommended_unicorn","type":{"type":"record","name":"RecommendedUnicorn","fields":[{"name":"unicorn_id","type":"int","doc":"recommended unicorn id"},{"name":"color","type":{"type":"enum","name":"unicorn_color","symbols":["WHITE","RED","BLUE"]}},{"name":"stars_rating","type":["null","int"],"doc":"unicorn star ratings based on customers feedback","default":null}]}},{"name":"customer","type":{"type":"record","name":"Customer","fields":[{"name":"customer_account_no","type":"int","doc":"customer account number"},{"name":"first_name","type":"string"},{"name":"middle_name","type":["null","string"],"default":null},{"name":"last_name","type":"string"},{"name":"email_addresses","type":["null",{"type":"array","items":"string"}]},{"name":"customer_address","type":"string","doc":"customer address"},{"name":"mode_of_payment","type":{"type":"enum","name":"ModeOfPayment","symbols":["CARD","CASH"]},"default":"CARD"},{"name":"customer_rating","type":["null","int"],"default":null}]}}]}, schemaName = unicorn-ride-request-schema-avro, dataFormat = AVRO ... ``` + 2. Once the schema is known to be valid, the producer serializes the record, compresses it, and sends it to the Kafka cluster. + ```plaintext ... [GlueSchemaRegistryKafkaSerializer][DEBUG] Schema Version Id received from the from schema registry: f95edc4b-778d-4f65-b23e-7de41c5b4e53 @@ -605,19 +631,25 @@ Once the producer is running, we can observe the different [steps of the produce [Producer][INFO ] {"request_id": 1, "pickup_address": "Melbourne, Victoria, Australia", "destination_address": "Sydney, NSW, Aus", "ride_fare": 1200.5, "ride_duration": 120, "preferred_unicorn_color": "WHITE", "recommended_unicorn": {"unicorn_id": 2, "color": "WHITE", "stars_rating": 5}, "customer": {"customer_account_no": 1001, "first_name": "Dummy", "middle_name": null, "last_name": "User", "email_addresses": ["demo@example.com"], "customer_address": "Flinders Street Station", "mode_of_payment": "CARD", "customer_rating": 5}} ... ``` - Now, the records sent by the producer are managed by the Kafka cluster and are waiting for a consumer to pick them up. We can start the consumer with the same bootstrap server address as the producer: + + Now, the records sent by the producer are managed by the Kafka cluster and are waiting for a consumer to pick them up. + We can start the consumer with the same bootstrap server address as the producer: + ```bash mvn -pl consumer exec:java -Dexec.args="--bootstrap-servers localhost:4511" ``` + In the logs of the consumer, we can now observe the [steps of the consumer as described in the intro](#integration-description): 3. 
The serialized and compressed record is read by the consumers: + ```plaintext [NetworkClient][DEBUG] [Consumer clientId=consumer-unicorn.riderequest.consumer-1, groupId=unicorn.riderequest.consumer] Sending FETCH request with header RequestHeader(apiKey=FETCH, apiVersion=12, clientId=consumer-unicorn.riderequest.consumer-1, correlationId=10) and timeout 30000 to node 0: FetchRequestData(clusterId=null, replicaId=-1, maxWaitMs=500, minBytes=1, maxBytes=52428800, isolationLevel=0, sessionId=0, sessionEpoch=0, topics=[FetchTopic(topic='unicorn-ride-request-topic', partitions=[FetchPartition(partition=0, currentLeaderEpoch=0, fetchOffset=900, lastFetchedEpoch=-1, logStartOffset=-1, partitionMaxBytes=1048576)])], forgottenTopicsData=[], rackId='') [NetworkClient][DEBUG] [Consumer clientId=consumer-unicorn.riderequest.consumer-1, groupId=unicorn.riderequest.consumer] Received FETCH response from node 0 for request with header RequestHeader(apiKey=FETCH, apiVersion=12, clientId=consumer-unicorn.riderequest.consumer-1, correlationId=10): FetchResponseData(throttleTimeMs=0, errorCode=0, sessionId=2057133662, responses=[FetchableTopicResponse(topic='unicorn-ride-request-topic', partitionResponses=[FetchablePartitionResponse(partition=0, errorCode=0, highWatermark=1000, lastStableOffset=1000, logStartOffset=0, divergingEpoch=EpochEndOffset(epoch=-1, endOffset=-1), currentLeader=LeaderIdAndEpoch(leaderId=-1, leaderEpoch=-1), snapshotId=SnapshotId(endOffset=-1, epoch=-1), abortedTransactions=null, preferredReadReplica=-1, recordSet=MemoryRecords(size=17141, buffer=java.nio.HeapByteBuffer[pos=0 lim=17141 cap=17144]))])]) ``` 4. The consumer requests the schema from the schema registry, and uses the schema to decompress and deserialize the record. + ```plaintext [request][DEBUG] Sending Request: DefaultSdkHttpFullRequest(httpMethod=POST, protocol=https, host=localhost.localstack.cloud, port=4566, encodedPath=/, headers=[amz-sdk-invocation-id, Content-Length, Content-Type, User-Agent, X-Amz-Target], queryParameters=[]) ... @@ -637,7 +669,8 @@ Once the producer is running, we can observe the different [steps of the produce ## Schema Evolution -In the course of this tutorial, we have implemented a Kafka producer and a consumer which integrate with the Glue Schema Registry. But the full potential of the Glue Schema Registry is unlocked when performing a schema evolution, i.e., when running producers and consumers with a new version of an already registered schema. +In the course of this tutorial, we have implemented a Kafka producer and a consumer which integrate with the Glue Schema Registry. +But the full potential of the Glue Schema Registry is unlocked when performing a schema evolution, i.e., when running producers and consumers with a new version of an already registered schema. Therefore, we will run a few more interesting scenarios to illustrate the benefits of the Schema Registry: @@ -648,7 +681,8 @@ Therefore, we will run a few more interesting scenarios to illustrate the benefi ### Producer registering a new schema version -In this step, we will create a new producer which uses a new, `BACKWARD` compatible schema version. The complete module can be found in our [samples repository (along with the rest of the code of this tutorial).](https://github.com/localstack/localstack-pro-samples/blob/cd023a84a3b473984e9c34053d4feb7de8e038c1/glue-msk-schema-registry/producer-2/) +In this step, we will create a new producer which uses a new, `BACKWARD` compatible schema version. 
+The complete module can be found in our [samples repository (along with the rest of the code of this tutorial).](https://github.com/localstack/localstack-pro-samples/blob/cd023a84a3b473984e9c34053d4feb7de8e038c1/glue-msk-schema-registry/producer-2/) The producer should register the new schema version automatically on its own. @@ -656,12 +690,15 @@ We create the new producer by executing the following steps: - Copy the `producer` directory and rename it to `producer-2`. - Set a new artifact ID in the `pom.xml` of the module: + ```xml ... producer-2 ... ``` + - Add the new module to the root `pom.xml`: + ```xml producer @@ -669,13 +706,15 @@ We create the new producer by executing the following steps: consumer ``` + - Create a new version of the schema: - Rename the schema to `unicorn_ride_request_v2.avsc`. - In the schema, remove the previously required field `customer`: + ```bash $ diff -u producer/src/main/resources/avro/unicorn_ride_request_v1.avsc producer-2/src/main/resources/avro/unicorn_ride_request_v2.avsc - --- producer/src/main/resources/avro/unicorn_ride_request_v1.avsc 2022-05-13 08:27:08.219354922 +0200 - +++ producer-2/src/main/resources/avro/unicorn_ride_request_v2.avsc 2022-05-13 08:27:08.219354922 +0200 + --- producer/src/main/resources/avro/unicorn_ride_request_v1.avsc 2022-05-13 08:27:08.219354922 +0200 + +++ producer-2/src/main/resources/avro/unicorn_ride_request_v2.avsc 2022-05-13 08:27:08.219354922 +0200 @@ -20,23 +20,6 @@ {"name": "stars_rating", "type": ["null", "int"], "default": null, "doc": "unicorn star ratings based on customers feedback"} ] @@ -701,13 +740,17 @@ We create the new producer by executing the following steps: ] } ``` + This change is `BACKWARD` compatible, because an updated consumer can read records for both - the current and the previous - records (new consumers don't need the customer data, they don't care if it's present or not). - Re-generate the Java classes for the schema: + ```bash mvn clean generate-sources ``` + - Once the classes have been generatated, the producer code needs to be adjusted (remove the usage of `setCustomer` in the producer's `getRecord`, since the method does not exist anymore). - Configure the producer to automatically register its schema version in case it's not yet registered by setting the additional property `AWSSchemaRegistryConstants.SCHEMA_AUTO_REGISTRATION_SETTING` to `true`: + ```java ... props.put(AWSSchemaRegistryConstants.COMPRESSION_TYPE, AWSSchemaRegistryConstants.COMPRESSION.ZLIB.name()); @@ -735,19 +778,23 @@ In the logs we can see that the producer registered a new schema version before ### Producer trying to register an incompatible schema version -In our next scenario we will create a new producer which wants to register a schema which is _not_ compatible to the schema in the registry. The producer will be rejected right when trying to register the new schema version, before even sending a record. +In our next scenario we will create a new producer which wants to register a schema which is _not_ compatible to the schema in the registry. +The producer will be rejected right when trying to register the new schema version, before even sending a record. 
The complete module can be found in our [samples repository (along with the rest of the code of this tutorial).](https://github.com/localstack/localstack-pro-samples/blob/cd023a84a3b473984e9c34053d4feb7de8e038c1/glue-msk-schema-registry/producer-3/) Similar to the previous scenario, we create a new producer by executing the following steps: - Copy the `producer-2` directory and rename it to `producer-3`. - Set a new artifact ID in the `pom.xml` of the module: + ```xml ... producer-3 ... ``` + - Add the new module to the root `pom.xml`: + ```xml producer @@ -756,13 +803,15 @@ Similar to the previous scenario, we create a new producer by executing the foll consumer ``` + - Create a new version of the schema: - Rename the schema to `unicorn_ride_request_v3.avsc`. - In the schema, add a new required field `unicorn_food`: + ```bash $ diff -u producer-2/src/main/resources/avro/unicorn_ride_request_v2.avsc producer-3/src/main/resources/avro/unicorn_ride_request_v3.avsc - --- producer-2/src/main/resources/avro/unicorn_ride_request_v2.avsc 2022-05-13 08:27:08.219354922 +0200 - +++ producer-3/src/main/resources/avro/unicorn_ride_request_v3.avsc 2022-05-13 08:27:08.219354922 +0200 + --- producer-2/src/main/resources/avro/unicorn_ride_request_v2.avsc 2022-05-13 08:27:08.219354922 +0200 + +++ producer-3/src/main/resources/avro/unicorn_ride_request_v3.avsc 2022-05-13 08:27:08.219354922 +0200 @@ -20,6 +20,17 @@ {"name": "stars_rating", "type": ["null", "int"], "default": null, "doc": "unicorn star ratings based on customers feedback"} ] @@ -782,12 +831,16 @@ Similar to the previous scenario, we create a new producer by executing the foll ] } ``` + This change is _not_ `BACKWARD` compatible, because an updated consumer cannot read records for both - the current and the previous - records (new consumers would expect the `unicorn_food`, which is not present in old records). - Re-generate the Java classes for the schema: + ```bash mvn clean generate-sources ``` + - Once the classes have been generated, the producer code needs to be adjusted (set the new required `unicorn_food` in the producer's `getRecord`): + ```java ... .setStarsRating(5).build()) @@ -820,9 +873,11 @@ com.amazonaws.services.schemaregistry.exception.AWSSchemaRegistryException: Regi ### Outdated consumers -We've seen how the producer's schema evolution works in the previous scenarios. Now, we'll take a closer look at our consumer. +We've seen how the producer's schema evolution works in the previous scenarios. +Now, we'll take a closer look at our consumer. -In our [first schema evolution scenario](#1-producer-registering-a-new-schema-version), the producer registered a new version of the schema and afterwards published records with that schema. The `BACKWARD` schema compatibility guarantees that updated consumers can read older records, i.e., the consumers need to be updated before the producers. +In our [first schema evolution scenario](#producer-registering-a-new-schema-version), the producer registered a new version of the schema and afterwards published records with that schema. +The `BACKWARD` schema compatibility guarantees that updated consumers can read older records, i.e., the consumers need to be updated before the producers. 
Therefore, our old consumer will fail in consuming these records, because it is not compatible with the new schema registered by the new producer yet: @@ -849,17 +904,21 @@ Caused by: org.apache.avro.AvroTypeException: Found cloud.localstack.demos.gluem ### Updated consumers -Finally, we will update our consumer such that it is compatible to the new version of the schema. The complete module can be found in our [samples repository (along with the rest of the code of this tutorial).](https://github.com/localstack/localstack-pro-samples/blob/cd023a84a3b473984e9c34053d4feb7de8e038c1/glue-msk-schema-registry/consumer-2/) +Finally, we will update our consumer such that it is compatible to the new version of the schema. +The complete module can be found in our [samples repository (along with the rest of the code of this tutorial).](https://github.com/localstack/localstack-pro-samples/blob/cd023a84a3b473984e9c34053d4feb7de8e038c1/glue-msk-schema-registry/consumer-2/) Similar to the previous producer scenarios, we create a new consumer by executing the following steps: - Copy the `consumer` directory and rename it to `consumer-2`. - Set a new artifact ID in the `pom.xml` of the module: + ```xml ... consumer-2 ... ``` + - Add the new module to the root `pom.xml`: + ```xml producer @@ -869,8 +928,10 @@ Similar to the previous producer scenarios, we create a new consumer by executin consumer-2 ``` + - Replace the `unicorn_ride_request_v1.avsc` with the new version used by `producer-2` (`unicorn_ride_request_v2.avsc`). - Re-generate the Java classes for the schema: + ```bash mvn clean generate-sources ``` @@ -883,6 +944,8 @@ mvn -pl consumer-2 exec:java -Dexec.args="--bootstrap-servers localhost:4511" ## Conclusion -Apache Kafka is used as the core messaging system in complex environments, with independent producers and consumers. The individual development of these components make it hard to coordinate and evolve data schemas over time. Using the AWS Glue Schema Registry can help you to prevent the usage of incompatible schemas. +Apache Kafka is used as the core messaging system in complex environments, with independent producers and consumers. +The individual development of these components make it hard to coordinate and evolve data schemas over time. +Using the AWS Glue Schema Registry can help you to prevent the usage of incompatible schemas. With LocalStack, emulating Amazon Managed Streaming for Kafka and AWS Glue Schema Registry, you can develop and test the next evolution of your data schema locally on your own machine. diff --git a/content/en/tutorials/simulating-outages-in-your-application-stack/index.md b/content/en/tutorials/simulating-outages-in-your-application-stack/index.md index e242c0d15c..50d1eea21f 100644 --- a/content/en/tutorials/simulating-outages-in-your-application-stack/index.md +++ b/content/en/tutorials/simulating-outages-in-your-application-stack/index.md @@ -26,17 +26,23 @@ leadimage: "outages.png" ## Introduction -[LocalStack Outages Extension](https://pypi.org/project/localstack-extension-outages/) can simulate outages for any AWS region or service. You can install and use the Outages Extension through [LocalStack Extension mechanism](https://docs.localstack.cloud/user-guide/extensions/) to test infrastructure resilience by intentionally causing service outages and observing the system's recovery in scenarios with incomplete infrastructure is an effective approach. 
This method evaluates the system's deployment mechanisms and its ability to handle and recover from infrastructure anomalies, a critical aspect of chaos engineering. +[LocalStack Outages Extension](https://pypi.org/project/localstack-extension-outages/) can simulate outages for any AWS region or service. +You can install and use the Outages Extension through [LocalStack Extension mechanism](https://docs.localstack.cloud/user-guide/extensions/) to test infrastructure resilience by intentionally causing service outages and observing the system's recovery in scenarios with incomplete infrastructure is an effective approach. +This method evaluates the system's deployment mechanisms and its ability to handle and recover from infrastructure anomalies, a critical aspect of chaos engineering. {{< callout "note">}} -Outages Extension is currently available as part of the **LocalStack Enterprise** plan. If you'd like to try it out, please [contact us](https://www.localstack.cloud/demo) to request access. +Outages Extension is currently available as part of the **LocalStack Enterprise** plan. +If you'd like to try it out, please [contact us](https://www.localstack.cloud/demo) to request access. {{< /callout >}} ## Getting started -This guide is designed for users who are new to Outages Extension. We'll simulate partial outages by interrupting specific services, such as halting an ECS instance creation or disrupting a database service. By closely watching Terraform's responses and the status of AWS resources, you'll learn how Terraform manages these disruptions. +This guide is designed for users who are new to Outages Extension. +We'll simulate partial outages by interrupting specific services, such as halting an ECS instance creation or disrupting a database service. +By closely watching Terraform's responses and the status of AWS resources, you'll learn how Terraform manages these disruptions. -For this particular example, we'll be using a Terraform configuration file from a [sample application repository](https://github.com/localstack-samples/samples-chaos-engineering/tree/main/extension-outages). Clone the repository, and follow the instructions below to get started. +For this particular example, we'll be using a Terraform configuration file from a [sample application repository](https://github.com/localstack-samples/samples-chaos-engineering/tree/main/extension-outages). +Clone the repository, and follow the instructions below to get started. ### Prerequisites @@ -47,7 +53,8 @@ The general prerequisites for this guide are: - [Docker](https://docs.docker.com/get-docker/) and [Docker Compose](https://docs.docker.com/compose/install/) - [Terraform](https://www.terraform.io/downloads.html) and [`tflocal` wrapper](https://docs.localstack.cloud/user-guide/integrations/terraform/#tflocal-wrapper-script). -Start LocalStack by using the `docker-compose.yml` file from the repository. Ensure to set your Auth Token as an environment variable during this process. +Start LocalStack by using the `docker-compose.yml` file from the repository. +Ensure to set your Auth Token as an environment variable during this process. {{< command >}} $ LOCALSTACK_AUTH_TOKEN= @@ -56,19 +63,23 @@ $ docker compose up ### Installing the extension -To install the LocalStack Outages Extension, first set up your LocalStack Auth Token in your environment. 
Once the token is configured, use the command below to install the extension: +To install the LocalStack Outages Extension, first set up your LocalStack Auth Token in your environment. +Once the token is configured, use the command below to install the extension: {{< command >}} $ localstack extensions install localstack-extension-outages {{< /command >}} -Alternatively, you can enable automatic installation of the extension by setting the environment variable `EXTENSION_AUTO_INSTALL=localstack-extension-outages` when you start the LocalStack container. This can be done by including it in your `docker` command line interface (CLI) or in your `docker-compose` configuration as an environment variable. +Alternatively, you can enable automatic installation of the extension by setting the environment variable `EXTENSION_AUTO_INSTALL=localstack-extension-outages` when you start the LocalStack container. +This can be done by including it in your `docker` command line interface (CLI) or in your `docker-compose` configuration as an environment variable. Follow our [Managing Extensions documentation](https://docs.localstack.cloud/user-guide/extensions/managing-extensions/) for more information on how to install & manage extensions. ### Running Terraform -To get started, initialize & apply the Terraform configuration using the `tflocal` CLI to create the local resources. The Terraform configuration file operates independently of the application, meaning the application won't be available during this phase. To deploy the entire stack, including the application, refer to the [sample repository](https://github.com/localstack-samples/sample-terraform-ecs-apigateway). +To get started, initialize & apply the Terraform configuration using the `tflocal` CLI to create the local resources. +The Terraform configuration file operates independently of the application, meaning the application won't be available during this phase. +To deploy the entire stack, including the application, refer to the [sample repository](https://github.com/localstack-samples/sample-terraform-ecs-apigateway). {{< command >}} $ tflocal init @@ -94,11 +105,13 @@ private_dns_namespace = "60bfac90" vpc_id = "vpc-f9d6b124" ``` -Next, you can update certain resources. This includes increasing the number of tasks in the `task_definition` for the ECS service from 3 to 5 and upgrading the `openapi` specification version used by API Gateway from 3.0.1 to 3.1.0. +Next, you can update certain resources. +This includes increasing the number of tasks in the `task_definition` for the ECS service from 3 to 5 and upgrading the `openapi` specification version used by API Gateway from 3.0.1 to 3.1.0. ### Simulating outages -After running the Terraform `plan` command to preview these changes, you can simulate an outage affecting the ECS and API Gateway V2 services before applying the changes. To do this, execute the following command: +After running the Terraform `plan` command to preview these changes, you can simulate an outage affecting the ECS and API Gateway V2 services before applying the changes. 
+To do this, execute the following command: {{< command >}} $ curl --location --request POST 'http://outages.localhost.localstack.cloud:4566/outages' \ @@ -115,7 +128,8 @@ $ curl --location --request POST 'http://outages.localhost.localstack.cloud:4566 ]' {{< /command >}} -In the LocalStack logs, you'll notice that during the periods between successful calls, the controlled outages are marked by a `ServiceUnavailableException` accompanied by a 503 HTTP status code. These exceptions specifically affect the targeted AWS APIs. +In the LocalStack logs, you'll notice that during the periods between successful calls, the controlled outages are marked by a `ServiceUnavailableException` accompanied by a 503 HTTP status code. +These exceptions specifically affect the targeted AWS APIs. ```bash 2023-11-09T21:53:31.801 INFO --- [ asgi_gw_9] localstack.request.aws : AWS ec2.GetTransitGatewayRouteTableAssociations => 200 @@ -188,6 +202,8 @@ $ curl --location --request DELETE 'http://outages.localhost.localstack.cloud:45 ### Conclusion -By closely watching Terraform's responses and the status of cloud resources, you'll learn how Terraform manages these disruptions. It's important to note how it attempts to retry operations, whether it rolls back changes or faces partial failures, and how it logs these incidents. +By closely watching Terraform's responses and the status of cloud resources, you'll learn how Terraform manages these disruptions. +It's important to note how it attempts to retry operations, whether it rolls back changes or faces partial failures, and how it logs these incidents. -This is crucial for understanding the resilience of your infrastructure provisioning against challenging conditions. It also aids in enhancing your IaC configurations, ensuring they are more robust and effective in handling faults and errors in real-life situations. +This is crucial for understanding the resilience of your infrastructure provisioning against challenging conditions. +It also aids in enhancing your IaC configurations, ensuring they are more robust and effective in handling faults and errors in real-life situations. diff --git a/content/en/user-guide/aws/account/index.md b/content/en/user-guide/aws/account/index.md index 38270e75af..392d814868 100644 --- a/content/en/user-guide/aws/account/index.md +++ b/content/en/user-guide/aws/account/index.md @@ -6,23 +6,30 @@ description: Get started with AWS Account Management on LocalStack ## Introduction -The Account service provides APIs to manage your AWS account. You can use the Account APIs to retrieve information about your account, manage your contact information and alternate contacts. Additionally, you can use the Account APIs to enable or disable a region for your account, and delete alternate contacts in your account. +The Account service provides APIs to manage your AWS account. +You can use the Account APIs to retrieve information about your account, manage your contact information and alternate contacts. +Additionally, you can use the Account APIs to enable or disable a region for your account, and delete alternate contacts in your account. -LocalStack supports Account via the Pro offering, allowing you to use the Account API to retrieve information about your account. The supported APIs are available on our [API coverage page](https://docs.localstack.cloud/references/coverage/coverage_account/), which provides information on the extent of Account's integration with LocalStack. 
+LocalStack supports Account via the Pro offering, allowing you to use the Account API to retrieve information about your account. +The supported APIs are available on our [API coverage page](https://docs.localstack.cloud/references/coverage/coverage_account/), which provides information on the extent of Account's integration with LocalStack. {{< callout >}} -LocalStack's Account provider is mock-only and does not support any real AWS account. The Account APIs are only intended to demonstrate how you can use and mock the AWS Account APIs in your local environment. It's important to note that LocalStack doesn't offer a programmatic interface to manage your AWS or your LocalStack account. +LocalStack's Account provider is mock-only and does not support any real AWS account. +The Account APIs are only intended to demonstrate how you can use and mock the AWS Account APIs in your local environment. +It's important to note that LocalStack doesn't offer a programmatic interface to manage your AWS or your LocalStack account. {{< /callout >}} ## Getting started -This guide is designed for users who are new to Account and assumes basic knowledge of the AWS CLI and our [`awslocal`](https://github.com/localstack/awscli-local) wrapper script. +This guide is designed for users who are new to Account and assumes basic knowledge of the AWS CLI and our [`awslocal`](https://github.com/localstack/awscli-local) wrapper script. -Start your LocalStack container using your preferred method. We will demonstrate how to put contact information, fetch account details, and attach an alternate contact to your account. +Start your LocalStack container using your preferred method. +We will demonstrate how to put contact information, fetch account details, and attach an alternate contact to your account. ### Put contact information -You can use the [`PutContactInformation`](https://docs.aws.amazon.com/accounts/latest/reference/API_PutContactInformation.html) API to add or update the contact information for your AWS account. Run the following command to add contact information to your account: +You can use the [`PutContactInformation`](https://docs.aws.amazon.com/accounts/latest/reference/API_PutContactInformation.html) API to add or update the contact information for your AWS account. +Run the following command to add contact information to your account: {{< command >}} $ awslocal account put-contact-information \ @@ -39,7 +46,8 @@ $ awslocal account put-contact-information \ ### Fetch account details -You can use the [`GetContactInformation`](https://docs.aws.amazon.com/accounts/latest/reference/API_GetContactInformation.html) API to retrieve the contact information for your AWS account. Run the following command to fetch the contact information for your account: +You can use the [`GetContactInformation`](https://docs.aws.amazon.com/accounts/latest/reference/API_GetContactInformation.html) API to retrieve the contact information for your AWS account. +Run the following command to fetch the contact information for your account: {{< command >}} $ awslocal account get-contact-information @@ -63,7 +71,8 @@ The command will return the contact information for your account: ### Attach alternate contact -You can attach an alternate contact using [`PutAlternateContact`](https://docs.aws.amazon.com/accounts/latest/reference/API_PutAlternateContact.html) API. 
Run the following command to attach an alternate contact to your account: +You can attach an alternate contact using the [`PutAlternateContact`](https://docs.aws.amazon.com/accounts/latest/reference/API_PutAlternateContact.html) API. +Run the following command to attach an alternate contact to your account: {{< command >}} $ awslocal account put-alternate-contact \ @@ -76,7 +85,8 @@ $ awslocal account put-alternate-contact \ ## Resource Browser -The LocalStack Web Application provides a Resource Browser for managing contact information & alternate accounts for the Account service. You can access the Resource Browser by opening the LocalStack Web Application in your browser, navigating to the Resources section, and then clicking on **Account** under the **Management & Governance** section. +The LocalStack Web Application provides a Resource Browser for managing contact information & alternate contacts for the Account service. +You can access the Resource Browser by opening the LocalStack Web Application in your browser, navigating to the Resources section, and then clicking on **Account** under the **Management & Governance** section. Account Resource Browser
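As a quick check after the `put-alternate-contact` step above, you can read the alternate contact back with the `GetAlternateContact` API, provided it is covered by your LocalStack version. The sketch below assumes a `BILLING` contact type; adjust it to the type you attached:

{{< command >}}
$ awslocal account get-alternate-contact --alternate-contact-type BILLING
{{< /command >}}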

diff --git a/content/en/user-guide/aws/acm/index.md b/content/en/user-guide/aws/acm/index.md index 6058a36d51..21975ea780 100644 --- a/content/en/user-guide/aws/acm/index.md +++ b/content/en/user-guide/aws/acm/index.md @@ -6,11 +6,14 @@ description: Get started with AWS Certificate Manager (ACM) on LocalStack ## Introduction -[AWS Certificate Manager (ACM)](https://aws.amazon.com/certificate-manager/) is a service that enables you to create and manage SSL/TLS certificates that can be used to secure your applications and resources in AWS. You can use ACM to provision and deploy public or private certificates trusted by browsers and other clients. +[AWS Certificate Manager (ACM)](https://aws.amazon.com/certificate-manager/) is a service that enables you to create and manage SSL/TLS certificates that can be used to secure your applications and resources in AWS. +You can use ACM to provision and deploy public or private certificates trusted by browsers and other clients. -ACM supports securing multiple domain names and subdomains and can create wildcard SSL certificates to protect an entire domain and its subdomains. You can also use ACM to import certificates from third-party certificate authorities or to generate private certificates for internal use. +ACM supports securing multiple domain names and subdomains and can create wildcard SSL certificates to protect an entire domain and its subdomains. +You can also use ACM to import certificates from third-party certificate authorities or to generate private certificates for internal use. -LocalStack allows you to use the ACM APIs to create, list, and delete certificates. The supported APIs are available on our [API coverage page](https://docs.localstack.cloud/references/coverage/coverage_acm/), which provides information on the extent of ACM's integration with LocalStack. +LocalStack allows you to use the ACM APIs to create, list, and delete certificates. +The supported APIs are available on our [API coverage page](https://docs.localstack.cloud/references/coverage/coverage_acm/), which provides information on the extent of ACM's integration with LocalStack. ## Getting started @@ -18,7 +21,9 @@ This guide is designed for users who are new to ACM and assumes basic knowledge ### Request a public certificate -Start your LocalStack container using your preferred method, then use the [RequestCertificate API](https://docs.aws.amazon.com/acm/latest/APIReference/API_RequestCertificate.html) to request a new public ACM certificate. Specify the domain name you want to request the certificate for, and any additional options you need. Here's an example command: +Start your LocalStack container using your preferred method, then use the [RequestCertificate API](https://docs.aws.amazon.com/acm/latest/APIReference/API_RequestCertificate.html) to request a new public ACM certificate. +Specify the domain name you want to request the certificate for, and any additional options you need. +Here's an example command: {{< command >}} $ awslocal acm request-certificate \ @@ -38,7 +43,9 @@ This command will return the Amazon Resource Name (ARN) of the new certificate, ### List the certificates -Use the [`ListCertificates` API](https://docs.aws.amazon.com/acm/latest/APIReference/API_ListCertificates.html) to list all the certificates. This command returns a list of the ARNs of all the certificates that have been requested or imported into ACM. 
Here's an example command: +Use the [`ListCertificates` API](https://docs.aws.amazon.com/acm/latest/APIReference/API_ListCertificates.html) to list all the certificates. +This command returns a list of the ARNs of all the certificates that have been requested or imported into ACM. +Here's an example command: {{< command >}} $ awslocal acm list-certificates --max-items 10 @@ -46,7 +53,9 @@ $ awslocal acm list-certificates --max-items 10 ### Describe the certificate -Use the [`DescribeCertificate` API](https://docs.aws.amazon.com/acm/latest/APIReference/API_DescribeCertificate.html) to view the details of a specific certificate. Provide the ARN of the certificate you want to view, and this command will return information about the certificate's status, domain name, and other attributes. Here's an example command: +Use the [`DescribeCertificate` API](https://docs.aws.amazon.com/acm/latest/APIReference/API_DescribeCertificate.html) to view the details of a specific certificate. +Provide the ARN of the certificate you want to view, and this command will return information about the certificate's status, domain name, and other attributes. +Here's an example command: {{< command >}} $ awslocal acm describe-certificate --certificate-arn arn:aws:acm::account:certificate/ @@ -54,7 +63,8 @@ $ awslocal acm describe-certificate --certificate-arn arn:aws:acm::accou ### Delete the certificate -Finally you can use the [`DeleteCertificate` API](https://docs.aws.amazon.com/acm/latest/APIReference/API_DeleteCertificate.html) to delete a certificate from ACM, by passing the ARN of the certificate you want to delete. Here's an example command: +Finally you can use the [`DeleteCertificate` API](https://docs.aws.amazon.com/acm/latest/APIReference/API_DeleteCertificate.html) to delete a certificate from ACM, by passing the ARN of the certificate you want to delete. +Here's an example command: {{< command >}} $ awslocal acm delete-certificate --certificate-arn arn:aws:acm::account:certificate/ @@ -62,7 +72,7 @@ $ awslocal acm delete-certificate --certificate-arn arn:aws:acm::account ## Resource Browser -The LocalStack Web Application provides a Resource Browser for managing ACM Certificates. +The LocalStack Web Application provides a Resource Browser for managing ACM Certificates. You can access the Resource Browser by opening the LocalStack Web Application in your browser, navigating to the **Resource Browser** section, and then clicking on **Certificate Manager** under the **Security Identity Compliance** section. ACM Resource Browser diff --git a/content/en/user-guide/aws/amplify/index.md b/content/en/user-guide/aws/amplify/index.md index d6dc0aed3e..78d02f1f25 100644 --- a/content/en/user-guide/aws/amplify/index.md +++ b/content/en/user-guide/aws/amplify/index.md @@ -9,17 +9,21 @@ persistence: supported ## Introduction -Amplify is a JavaScript-based development framework with libraries, UI components, and a standard CLI interface for building and deploying web and mobile applications. With Amplify, developers can build and host static websites, single-page applications, and full-stack serverless web applications using an abstraction layer over popular AWS services like DynamoDB, Cognito, AppSync, Lambda, S3, and more. +Amplify is a JavaScript-based development framework with libraries, UI components, and a standard CLI interface for building and deploying web and mobile applications. 
+With Amplify, developers can build and host static websites, single-page applications, and full-stack serverless web applications using an abstraction layer over popular AWS services like DynamoDB, Cognito, AppSync, Lambda, S3, and more. -LocalStack allows you to use the Amplify APIs to build and test their Amplify applications locally. The supported APIs are available on our [API coverage page](https://docs.localstack.cloud/references/coverage/coverage_amplify/), which provides information on the extent of Amplify's integration with LocalStack. +LocalStack allows you to use the Amplify APIs to build and test their Amplify applications locally. +The supported APIs are available on our [API coverage page](https://docs.localstack.cloud/references/coverage/coverage_amplify/), which provides information on the extent of Amplify's integration with LocalStack. {{< callout "note" >}} -The `amplifylocal` CLI and the Amplify JS library have been deprecated and are no longer supported. We recommend using the Amplify CLI with the Amplify LocalStack Plugin instead. +The `amplifylocal` CLI and the Amplify JS library have been deprecated and are no longer supported. +We recommend using the Amplify CLI with the Amplify LocalStack Plugin instead. {{< /callout >}} ## Amplify LocalStack Plugin -[Amplify LocalStack Plugin](https://github.com/localstack/amplify-localstack) allows the `amplify` CLI tool to create resources on your local machine instead of AWS. It achieves this by redirecting any requests to AWS to a LocalStack container running locally on your machine. +[Amplify LocalStack Plugin](https://github.com/localstack/amplify-localstack) allows the `amplify` CLI tool to create resources on your local machine instead of AWS. +It achieves this by redirecting any requests to AWS to a LocalStack container running locally on your machine. ### Installation @@ -34,15 +38,21 @@ $ amplify plugin add amplify-localstack You can configure the following environment variables to customize LocalStack's behaviour: -- `EDGE_PORT`: The port number under which the LocalStack edge service is accessible. The default value is `4566`. -- `LOCALSTACK_HOSTNAME`: It specifies the target host under which the LocalStack edge service is accessible. The default value is `localhost.localstack.cloud`. -- `LOCALSTACK_ENDPOINT`: It allows you to set a custom endpoint directly. If set, it overrides the values set for `EDGE_PORT` and `LOCALSTACK_HOSTNAME`. The default value is `https://localhost.localstack.cloud:4566`. +- `EDGE_PORT`: The port number under which the LocalStack edge service is accessible. + The default value is `4566`. +- `LOCALSTACK_HOSTNAME`: It specifies the target host under which the LocalStack edge service is accessible. + The default value is `localhost.localstack.cloud`. +- `LOCALSTACK_ENDPOINT`: It allows you to set a custom endpoint directly. + If set, it overrides the values set for `EDGE_PORT` and `LOCALSTACK_HOSTNAME`. + The default value is `https://localhost.localstack.cloud:4566`. ### Usage -After installing the plugin, you can deploy your resources to LocalStack using the `amplify init` or `amplify push` commands. The console will prompt you to select whether to deploy to LocalStack or AWS. +After installing the plugin, you can deploy your resources to LocalStack using the `amplify init` or `amplify push` commands. +The console will prompt you to select whether to deploy to LocalStack or AWS. 
-You can also add the parameter `--use-localstack true` to your commands to avoid being prompted and automatically use LocalStack. Here is an example: +You can also add the parameter `--use-localstack true` to your commands to avoid being prompted and automatically use LocalStack. +Here is an example: {{< command >}} $ amplify init --use-localstack true @@ -52,7 +62,8 @@ $ amplify push --use-localstack true ## Resource Browser -The LocalStack Web Application provides a Resource Browser for managing Amplify applications. You can access the Resource Browser by opening the LocalStack Web Application in your browser, navigating to the **Resource Browser** section, and then clicking on **Amplify** under the **Front-end Web & Mobile** section. +The LocalStack Web Application provides a Resource Browser for managing Amplify applications. +You can access the Resource Browser by opening the LocalStack Web Application in your browser, navigating to the **Resource Browser** section, and then clicking on **Amplify** under the **Front-end Web & Mobile** section. Amplify Resource Browser
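If your setup uses a non-default endpoint, the environment variables described above can be exported before running the plugin. A minimal sketch, assuming the default endpoint value and that the plugin is already installed:

{{< command >}}
$ export LOCALSTACK_ENDPOINT=https://localhost.localstack.cloud:4566
$ amplify init --use-localstack true
{{< /command >}}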

diff --git a/content/en/user-guide/aws/apigateway/index.md b/content/en/user-guide/aws/apigateway/index.md index 265b2a59d5..0b3609d051 100644 --- a/content/en/user-guide/aws/apigateway/index.md +++ b/content/en/user-guide/aws/apigateway/index.md @@ -9,9 +9,12 @@ persistence: supported ## Introduction -API Gateway is a managed service that enables developers to create, deploy, and manage APIs (Application Programming Interfaces). It allows easy creation of REST, HTTP, and WebSocket APIs to securely access data, business logic, or functionality from backend services like AWS Lambda functions or EC2 instances. API Gateway supports standard HTTP methods such as `GET`, `POST`, `PUT`, `PATCH`, and `DELETE` and integrates with various AWS services, including Lambda, Cognito, CloudWatch, and X-Ray. +API Gateway is a managed service that enables developers to create, deploy, and manage APIs (Application Programming Interfaces). +It allows easy creation of REST, HTTP, and WebSocket APIs to securely access data, business logic, or functionality from backend services like AWS Lambda functions or EC2 instances. +API Gateway supports standard HTTP methods such as `GET`, `POST`, `PUT`, `PATCH`, and `DELETE` and integrates with various AWS services, including Lambda, Cognito, CloudWatch, and X-Ray. -LocalStack supports API Gateway V1 in the Community image and API Gateway V2 in the Pro image. LocalStack allows you to use the API Gateway APIs to create, deploy, and manage APIs on your local machine to invoke those exposed API endpoints. +LocalStack supports API Gateway V1 in the Community image and API Gateway V2 in the Pro image. +LocalStack allows you to use the API Gateway APIs to create, deploy, and manage APIs on your local machine to invoke those exposed API endpoints. The supported APIs are available on the API coverage page for [API Gateway V1](https://docs.localstack.cloud/references/coverage/coverage_apigateway/) & [API Gateway V2](https://docs.localstack.cloud/references/coverage/coverage_apigatewayv2/), which provides information on the extent of API Gateway's integration with LocalStack. @@ -19,7 +22,9 @@ The supported APIs are available on the API coverage page for [API Gateway V1](h This guide is designed for users new to API Gateway and assumes basic knowledge of the AWS CLI and our [`awslocal`](https://github.com/localstack/awscli-local) wrapper script. -Start your LocalStack container using your preferred method. We will use the Lambda proxy integration to integrate an API method with a Lambda function. The Lambda function will be invoked with a `GET` request and return a response with a status code of `200` and a body containing the string `Hello from Lambda!`. +Start your LocalStack container using your preferred method. +We will use the Lambda proxy integration to integrate an API method with a Lambda function. +The Lambda function will be invoked with a `GET` request and return a response with a status code of `200` and a body containing the string `Hello from Lambda!`. ### Create a Lambda function @@ -42,7 +47,9 @@ module.exports = { } ``` -The above code defines a function named `apiHandler` that returns a response with a status code of `200` and a body containing the string `Hello from Lambda`. Zip the file and upload it to LocalStack using the `awslocal` CLI. Run the following command: +The above code defines a function named `apiHandler` that returns a response with a status code of `200` and a body containing the string `Hello from Lambda`. 
+Zip the file and upload it to LocalStack using the `awslocal` CLI. +Run the following command: {{< command >}} $ zip function.zip lambda.js @@ -59,13 +66,15 @@ This creates a new Lambda function named `apigw-lambda` with the code you specif ### Create a REST API -We will use the API Gateway's [`CreateRestApi`](https://docs.aws.amazon.com/apigateway/latest/api/API_CreateRestApi.html) API to create a new REST API. Here's an example command: +We will use the API Gateway's [`CreateRestApi`](https://docs.aws.amazon.com/apigateway/latest/api/API_CreateRestApi.html) API to create a new REST API. +Here's an example command: {{< command >}} $ awslocal apigateway create-rest-api --name 'API Gateway Lambda integration' {{< /command >}} -This creates a new REST API named `API Gateway Lambda integration`. The above command returns the following response: +This creates a new REST API named `API Gateway Lambda integration`. +The above command returns the following response: ```json { @@ -82,7 +91,8 @@ This creates a new REST API named `API Gateway Lambda integration`. The above co } ``` -Note the REST API ID returned in the response. You'll need this ID for the next step. +Note the REST API ID returned in the response. +You'll need this ID for the next step. ### Fetch the Resources @@ -105,11 +115,13 @@ The above command returns the following response: } ``` -Note the ID of the root resource returned in the response. You'll need this ID for the next step. +Note the ID of the root resource returned in the response. +You'll need this ID for the next step. ### Create a resource -Create a new resource for the API using the [`CreateResource`](https://docs.aws.amazon.com/apigateway/latest/api/API_CreateResource.html) API. Use the ID of the resource returned in the previous step as the parent ID: +Create a new resource for the API using the [`CreateResource`](https://docs.aws.amazon.com/apigateway/latest/api/API_CreateResource.html) API. +Use the ID of the resource returned in the previous step as the parent ID: {{< command >}} $ awslocal apigateway create-resource \ @@ -129,11 +141,13 @@ The above command returns the following response: } ``` -Note the ID of the root resource returned in the response. You'll need this Resource ID for the next step. +Note the ID of the root resource returned in the response. +You'll need this Resource ID for the next step. ### Add a method and integration -Add a `GET` method to the resource using the [`PutMethod`](https://docs.aws.amazon.com/apigateway/latest/api/API_PutMethod.html) API. Use the ID of the resource returned in the previous step as the Resource ID: +Add a `GET` method to the resource using the [`PutMethod`](https://docs.aws.amazon.com/apigateway/latest/api/API_PutMethod.html) API. +Use the ID of the resource returned in the previous step as the Resource ID: {{< command >}} awslocal apigateway put-method \ @@ -170,7 +184,8 @@ $ awslocal apigateway put-integration \ --passthrough-behavior WHEN_NO_MATCH {{< /command >}} -The above command integrates the `GET` method with the Lambda function created in the first step. We can now proceed with the deployment before invoking the API. +The above command integrates the `GET` method with the Lambda function created in the first step. +We can now proceed with the deployment before invoking the API. ### Create a deployment @@ -182,7 +197,8 @@ $ awslocal apigateway create-deployment \ --stage-name test {{< /command >}} -Your API is now ready to be invoked. 
You can use `cURL` or any HTTP REST client to invoke the API endpoint: +Your API is now ready to be invoked. +You can use `cURL` or any HTTP REST client to invoke the API endpoint: {{< command >}} $ curl -X GET http://localhost:4566/restapis//test/_user_request_/test @@ -220,7 +236,8 @@ functions: path: /my/path2 ``` -After you deploy the Lambda functions and API Gateway endpoints, you can access them using the LocalStack edge port (`4566` by default). There are two alternative URL formats to access these endpoints. +After you deploy the Lambda functions and API Gateway endpoints, you can access them using the LocalStack edge port (`4566` by default). +There are two alternative URL formats to access these endpoints. #### Recommended URL format @@ -236,7 +253,9 @@ Here's an example of how you would access the HTTP/REST API with an ID of `0v1p6 http://0v1p6q6.execute-api.localhost.localstack.cloud:4566/local/my/path2 ``` -Note that the local stage ID is added in this example. Adding the stage ID is required for API Gateway V1 APIs, but optional for API Gateway V2 APIs (in case they include the wildcard `$default` stage). For v2 APIs, the following URL should also work: +Note that the local stage ID is added in this example. +Adding the stage ID is required for API Gateway V1 APIs, but optional for API Gateway V2 APIs (in case they include the wildcard `$default` stage). +For v2 APIs, the following URL should also work: ```shell http://0v1p6q6.execute-api.localhost.localstack.cloud:4566/my/path1 @@ -260,7 +279,8 @@ This format is sometimes used in case of local DNS issues. ### WebSocket APIs -WebSocket APIs provide real-time communication channels between a client and a server. To use WebSockets in LocalStack, you can define a WebSocket route in your Serverless configuration: +WebSocket APIs provide real-time communication channels between a client and a server. +To use WebSockets in LocalStack, you can define a WebSocket route in your Serverless configuration: ```yaml ... @@ -274,7 +294,8 @@ functions: route: test-action ``` -Upon deployment of the Serverless project, LocalStack creates a new API Gateway V2 endpoint. To retrieve the list of APIs and verify the WebSocket endpoint, you can use the `awslocal` CLI: +Upon deployment of the Serverless project, LocalStack creates a new API Gateway V2 endpoint. +To retrieve the list of APIs and verify the WebSocket endpoint, you can use the `awslocal` CLI: {{< command >}} $ awslocal apigatewayv2 get-apis @@ -287,7 +308,8 @@ $ awslocal apigatewayv2 get-apis } {{< / command >}} -In the above example, the WebSocket endpoint is `ws://localhost:4510`. Assuming your Serverless project contains a simple Lambda `handler.js` like this: +In the above example, the WebSocket endpoint is `ws://localhost:4510`. +Assuming your Serverless project contains a simple Lambda `handler.js` like this: ```javascript module.exports.handler = function(event, context, callback) { @@ -297,7 +319,8 @@ module.exports.handler = function(event, context, callback) { You can send a message to the WebSocket at `ws://localhost:4510` and the same message will be returned as a response on the same WebSocket. -To push data from a backend service to the WebSocket connection, you can use the [Amazon API Gateway Management API](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/apigatewaymanagementapi/index.html). 
In LocalStack, use the following CLI command (replace `` with your WebSocket connection ID): +To push data from a backend service to the WebSocket connection, you can use the [Amazon API Gateway Management API](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/apigatewaymanagementapi/index.html). +In LocalStack, use the following CLI command (replace `` with your WebSocket connection ID): {{< command >}} $ awslocal apigatewaymanagementapi \ @@ -308,9 +331,11 @@ $ awslocal apigatewaymanagementapi \ ## Custom IDs for API Gateway resources via tags -You can assign custom IDs to API Gateway REST and HTTP APIs using the `_custom_id_` tag during resource creation. This can be useful to ensure a static endpoint URL for your API, simplifying testing and integration with other services. +You can assign custom IDs to API Gateway REST and HTTP APIs using the `_custom_id_` tag during resource creation. +This can be useful to ensure a static endpoint URL for your API, simplifying testing and integration with other services. -To assign a custom ID to an API Gateway REST API, use the `create-rest-api` command with the `tags={"_custom_id_":"myid123"}` parameter. The following example assigns the custom ID `"myid123"` to the API: +To assign a custom ID to an API Gateway REST API, use the `create-rest-api` command with the `tags={"_custom_id_":"myid123"}` parameter. +The following example assigns the custom ID `"myid123"` to the API: {{< command >}} $ awslocal apigateway create-rest-api --name my-api --tags '{"_custom_id_":"myid123"}' @@ -320,12 +345,12 @@ $ awslocal apigateway create-rest-api --name my-api --tags '{"_custom_id_":"myid } {{< / command >}} -You can also configure the protocol type, the possible values being `HTTP` and `WEBSOCKET`: +You can also configure the protocol type, the possible values being `HTTP` and `WEBSOCKET`: {{< command >}} $ awslocal apigatewayv2 create-api \ --name=my-api \ - --protocol-type=HTTP --tags="_custom_id_=my-api" + --protocol-type=HTTP --tags="_custom_id_=my-api" { "ApiEndpoint": "my-api.execute-api.localhost.localstack.cloud:4566", "ApiId": "my-api", @@ -338,12 +363,15 @@ $ awslocal apigatewayv2 create-api \ {{< / command >}} {{< callout >}} -Setting the API Gateway ID via `_custom_id_` works only on the creation of the resource, but not on update in LocalStack. Ensure that you set the `_custom_id_` tag on creation of the resource. +Setting the API Gateway ID via `_custom_id_` works only on the creation of the resource, but not on update in LocalStack. +Ensure that you set the `_custom_id_` tag on creation of the resource. {{< /callout >}} ## Custom Domain Names with API Gateway -You can use custom domain names with API Gateway V1 and V2 APIs. To route requests to a custom domain name for an API Gateway V2 API, include the `Host` header with the custom domain name in your request. For example, assuming that you have set up a custom domain name `test.example.com` to point to your LocalStack instance, you can send a request like this: +You can use custom domain names with API Gateway V1 and V2 APIs. +To route requests to a custom domain name for an API Gateway V2 API, include the `Host` header with the custom domain name in your request. 
+For example, assuming that you have set up a custom domain name `test.example.com` to point to your LocalStack instance, you can send a request like this: {{< command >}} $ curl -H 'Host: test.example.com' http://localhost:4566/test @@ -351,9 +379,11 @@ $ curl -H 'Host: test.example.com' http://localhost:4566/test ## API Gateway Resource Browser -The LocalStack Web Application provides a Resource Browser for managing API Gateway resources. You can access the Resource Browser by opening the LocalStack Web Application in your browser and navigating to the **Resources** section, then clicking on **API Gateway** under the **App Integration** section. +The LocalStack Web Application provides a Resource Browser for managing API Gateway resources. +You can access the Resource Browser by opening the LocalStack Web Application in your browser and navigating to the **Resources** section, then clicking on **API Gateway** under the **App Integration** section. -The Resource Browser displays [API Gateway V1](https://app.localstack.cloud/resources/gateway/v1) and [API Gateway V2](https://app.localstack.cloud/resources/gateway/v2) resources. You can click on individual resources to view their details. +The Resource Browser displays [API Gateway V1](https://app.localstack.cloud/resources/gateway/v1) and [API Gateway V2](https://app.localstack.cloud/resources/gateway/v2) resources. +You can click on individual resources to view their details. API Gateway Resource Browser diff --git a/content/en/user-guide/aws/appconfig/index.md b/content/en/user-guide/aws/appconfig/index.md index c9084d1c8d..2a4e49036c 100644 --- a/content/en/user-guide/aws/appconfig/index.md +++ b/content/en/user-guide/aws/appconfig/index.md @@ -5,19 +5,25 @@ description: Get started with AppConfig on LocalStack tags: ["Pro image"] --- -AppConfig is a service provided by Amazon Web Services (AWS) that simplifies the process of managing and deploying application configurations. AppConfig offers centralized management of configuration data and the ability to create, manage, and deploy configuration changes separately. It allows you to avoid deploying the service repeatedly for smaller changes, enables controlled deployments to applications and includes built-in validation checks & monitoring. +AppConfig is a service provided by Amazon Web Services (AWS) that simplifies the process of managing and deploying application configurations. +AppConfig offers centralized management of configuration data and the ability to create, manage, and deploy configuration changes separately. +It allows you to avoid deploying the service repeatedly for smaller changes, enables controlled deployments to applications and includes built-in validation checks & monitoring. -LocalStack allows you to use the AppConfig APIs in your local environment to define configurations for different environments and deploy them to your applications as needed. The supported APIs are available on our [API coverage page](https://docs.localstack.cloud/references/coverage/coverage_appconfig/), which provides information on the extent of AppConfig's integration with LocalStack. +LocalStack allows you to use the AppConfig APIs in your local environment to define configurations for different environments and deploy them to your applications as needed. +The supported APIs are available on our [API coverage page](https://docs.localstack.cloud/references/coverage/coverage_appconfig/), which provides information on the extent of AppConfig's integration with LocalStack. 
## Getting started This guide is designed for users new to AppConfig and assumes basic knowledge of the AWS CLI and our [`awslocal`](https://github.com/localstack/awscli-local) wrapper script. -Start your LocalStack container using your preferred method. We will demonstrate how to create an AppConfig application, environment, configuration profiles & feature flags, and deploy the configuration with the AWS CLI. +Start your LocalStack container using your preferred method. +We will demonstrate how to create an AppConfig application, environment, configuration profiles & feature flags, and deploy the configuration with the AWS CLI. ### Create an AppConfig application and environment -You can create an AppConfig application using the [`CreateApplication`](https://docs.aws.amazon.com/appconfig/latest/APIReference/API_CreateApplication.html) API. The application is a folder/directory that contains the configuration data for your specific application. The following command creates an application named `my-app`: +You can create an AppConfig application using the [`CreateApplication`](https://docs.aws.amazon.com/appconfig/latest/APIReference/API_CreateApplication.html) API. +The application is a folder/directory that contains the configuration data for your specific application. +The following command creates an application named `my-app`: {{< command >}} $ awslocal appconfig create-application \ @@ -35,7 +41,9 @@ The following output would be retrieved: } ``` -You can now create an AppConfig environment for your application using the [`CreateEnvironment`](https://docs.aws.amazon.com/appconfig/latest/APIReference/API_CreateEnvironment.html) API. An environment consists of the deployment group of your AppConfig applications. The following command creates an environment named `my-app-env`: +You can now create an AppConfig environment for your application using the [`CreateEnvironment`](https://docs.aws.amazon.com/appconfig/latest/APIReference/API_CreateEnvironment.html) API. +An environment consists of the deployment group of your AppConfig applications. +The following command creates an environment named `my-app-env`: {{< command >}} $ awslocal appconfig create-environment \ @@ -44,7 +52,8 @@ $ awslocal appconfig create-environment \ --description "My application environment" {{< /command >}} -Replace the `application-id` with the ID of the application you created in the previous step. The following output would be retrieved: +Replace the `application-id` with the ID of the application you created in the previous step. +The following output would be retrieved: ```bash { @@ -58,7 +67,9 @@ Replace the `application-id` with the ID of the application you created in the p ### Create configuration profiles and feature flags -You can create an AppConfig configuration profile using the [`CreateConfigurationProfile`](https://docs.aws.amazon.com/appconfig/latest/APIReference/API_CreateConfigurationProfile.html) API. A configuration profile contains for the configurations of your AppConfig applications. The following command creates a configuration profile named `my-app-config`: +You can create an AppConfig configuration profile using the [`CreateConfigurationProfile`](https://docs.aws.amazon.com/appconfig/latest/APIReference/API_CreateConfigurationProfile.html) API. +A configuration profile contains for the configurations of your AppConfig applications. 
+The following command creates a configuration profile named `my-app-config`: {{< command >}} $ awslocal appconfig create-configuration-profile \ @@ -80,7 +91,8 @@ The following output would be retrieved: } ``` -You can now create a JSON file to add your feature flag configuration data. Create a file named `feature-flag-config.json` with the following content: +You can now create a JSON file to add your feature flag configuration data. +Create a file named `feature-flag-config.json` with the following content: ```json { @@ -93,7 +105,8 @@ You can now create a JSON file to add your feature flag configuration data. Crea } ``` -You can now use the [`CreateHostedConfigurationVersion`](https://docs.aws.amazon.com/appconfig/latest/APIReference/API_CreateHostedConfigurationVersion.html) API to save your feature flag configuration data to AppConfig. The following command creates a hosted configuration version for the configuration profile you created in the previous step: +You can now use the [`CreateHostedConfigurationVersion`](https://docs.aws.amazon.com/appconfig/latest/APIReference/API_CreateHostedConfigurationVersion.html) API to save your feature flag configuration data to AppConfig. +The following command creates a hosted configuration version for the configuration profile you created in the previous step: {{< command >}} $ awslocal appconfig create-hosted-configuration-version \ @@ -117,7 +130,9 @@ The following output would be retrieved: ### Create an AppConfig deployment -You can now create an AppConfig deployment strategy using the [`CreateDeploymentStrategy`](https://docs.aws.amazon.com/appconfig/latest/APIReference/API_CreateDeploymentStrategy.html) API. A deployment strategy defines important criteria for rolling out your configuration to the target environment. The following command creates a deployment strategy named `my-app-deployment-strategy`: +You can now create an AppConfig deployment strategy using the [`CreateDeploymentStrategy`](https://docs.aws.amazon.com/appconfig/latest/APIReference/API_CreateDeploymentStrategy.html) API. +A deployment strategy defines important criteria for rolling out your configuration to the target environment. +The following command creates a deployment strategy named `my-app-deployment-strategy`: {{< command >}} $ awslocal appconfig create-deployment-strategy \ @@ -139,7 +154,8 @@ The following output would be retrieved: } ``` -You can now use the [`StartDeployment`](https://docs.aws.amazon.com/appconfig/latest/APIReference/API_StartDeployment.html) API to deploy the configuration. The following command deploys the configuration to the environment you created in the previous step: +You can now use the [`StartDeployment`](https://docs.aws.amazon.com/appconfig/latest/APIReference/API_StartDeployment.html) API to deploy the configuration. +The following command deploys the configuration to the environment you created in the previous step: {{< command >}} $ awslocal appconfig start-deployment \ @@ -183,7 +199,8 @@ The following output would be retrieved: ## Resource Browser -The LocalStack Web Application provides a Resource Browser for managing AppConfig applications. You can access the Resource Browser by opening the LocalStack Web Application in your browser, navigating to the **Resource Browser** section, and then clicking on **AppConfig** under the **Developer Tools** section. +The LocalStack Web Application provides a Resource Browser for managing AppConfig applications. 
+You can access the Resource Browser by opening the LocalStack Web Application in your browser, navigating to the **Resource Browser** section, and then clicking on **AppConfig** under the **Developer Tools** section. AppConfig Resource Browser
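To read the deployed feature flags back, AWS exposes the separate AppConfig Data APIs (`StartConfigurationSession` and `GetLatestConfiguration`); whether they work locally depends on the API coverage of your LocalStack version. A minimal sketch, with the identifiers as placeholders for the IDs returned by the earlier commands:

{{< command >}}
$ awslocal appconfigdata start-configuration-session \
  --application-identifier <application-id> \
  --environment-identifier <environment-id> \
  --configuration-profile-identifier <configuration-profile-id>
$ awslocal appconfigdata get-latest-configuration \
  --configuration-token <initial-configuration-token> \
  configuration-output.json
{{< /command >}}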

diff --git a/content/en/user-guide/aws/applicationautoscaling/index.md b/content/en/user-guide/aws/applicationautoscaling/index.md index 056f0da7ae..711b15f4b3 100644 --- a/content/en/user-guide/aws/applicationautoscaling/index.md +++ b/content/en/user-guide/aws/applicationautoscaling/index.md @@ -9,15 +9,20 @@ persistence: supported ## Introduction -Application Auto Scaling is a centralized solution for managing automatic scaling by defining scaling policies based on specific metrics. Based on CPU utilization or request rates, it automatically adjusts capacity in response to changes in workload. With Application Auto Scaling, you can configure automatic scaling for services such as DynamoDB, ECS, Lambda, ElastiCache, and more. Auto scaling uses CloudWatch under the hood to configure scalable targets which a service namespace, resource ID, and scalable dimension can uniquely identify. +Application Auto Scaling is a centralized solution for managing automatic scaling by defining scaling policies based on specific metrics. +Based on CPU utilization or request rates, it automatically adjusts capacity in response to changes in workload. +With Application Auto Scaling, you can configure automatic scaling for services such as DynamoDB, ECS, Lambda, ElastiCache, and more. +Auto scaling uses CloudWatch under the hood to configure scalable targets which a service namespace, resource ID, and scalable dimension can uniquely identify. -LocalStack allows you to use the Application Auto Scaling APIs in your local environment to scale different resources based on scaling policies and scheduled scaling. The supported APIs are available on our [API coverage page](https://docs.localstack.cloud/references/coverage/coverage_application-autoscaling/), which provides information on the extent of Application Auto Scaling's integration with LocalStack. +LocalStack allows you to use the Application Auto Scaling APIs in your local environment to scale different resources based on scaling policies and scheduled scaling. +The supported APIs are available on our [API coverage page](https://docs.localstack.cloud/references/coverage/coverage_application-autoscaling/), which provides information on the extent of Application Auto Scaling's integration with LocalStack. ## Getting Started This guide is designed for users new to Application Auto Scaling and assumes basic knowledge of the AWS CLI and our [`awslocal`](https://github.com/localstack/awscli-local) wrapper script. -Start your LocalStack container using your preferred method. We will demonstrate how you can configure auto scaling to handle a heavy workload for your Lambda function. +Start your LocalStack container using your preferred method. +We will demonstrate how you can configure auto scaling to handle a heavy workload for your Lambda function. ### Create a Lambda Function @@ -48,7 +53,9 @@ $ awslocal lambda create-function \ ### Create a version and alias for your Lambda function -Next, you can create a version for your Lambda function and publish an alias. We will use the [`PublishVersion`](https://docs.aws.amazon.com/cli/latest/reference/lambda/publish-version.html) and [`CreateAlias`](https://docs.aws.amazon.com/cli/latest/reference/lambda/create-alias.html) APIs for this. Run the following commands: +Next, you can create a version for your Lambda function and publish an alias. 
+We will use the [`PublishVersion`](https://docs.aws.amazon.com/cli/latest/reference/lambda/publish-version.html) and [`CreateAlias`](https://docs.aws.amazon.com/cli/latest/reference/lambda/create-alias.html) APIs for this. +Run the following commands: {{< command >}} $ awslocal lambda publish-version --function-name autoscaling-example @@ -61,7 +68,8 @@ $ awslocal lambda create-alias \ ### Register the Lambda function as a scalable target -To register the Lambda function as a scalable target, you can use the [`RegisterScalableTarget`](https://docs.aws.amazon.com/cli/latest/reference/application-autoscaling/register-scalable-target.html) API. We will specify the `--service-namespace` as `lambda`, `--scalable-dimension` as `lambda:function:ProvisionedConcurrency`, and `--resource-id` as `function:autoscaling-example:BLUE`. +To register the Lambda function as a scalable target, you can use the [`RegisterScalableTarget`](https://docs.aws.amazon.com/cli/latest/reference/application-autoscaling/register-scalable-target.html) API. +We will specify the `--service-namespace` as `lambda`, `--scalable-dimension` as `lambda:function:ProvisionedConcurrency`, and `--resource-id` as `function:autoscaling-example:BLUE`. Run the following command to register the scalable target: @@ -75,7 +83,8 @@ $ awslocal application-autoscaling register-scalable-target \ ### Setting up a scheduled action -You can create a scheduled action that scales out by specifying the `--schedule` parameter with a recurring schedule specified as a cron job. Run the following command to create a scheduled action using the [`PutScheduledAction`](https://docs.aws.amazon.com/cli/latest/reference/application-autoscaling/put-scheduled-action.html) API: +You can create a scheduled action that scales out by specifying the `--schedule` parameter with a recurring schedule specified as a cron job. +Run the following command to create a scheduled action using the [`PutScheduledAction`](https://docs.aws.amazon.com/cli/latest/reference/application-autoscaling/put-scheduled-action.html) API: {{< command >}} awslocal application-autoscaling put-scheduled-action \ @@ -83,7 +92,7 @@ awslocal application-autoscaling put-scheduled-action \ --scalable-dimension lambda:function:ProvisionedConcurrency \ --resource-id function:autoscaling-example:BLUE \ --scheduled-action-name lambda-action \ - --schedule "cron(*/2 * * * *)" \ + --schedule "cron(*/2 * * * *)" \ --scalable-target-action MinCapacity=1,MaxCapacity=5 {{< /command >}} @@ -96,7 +105,9 @@ $ awslocal application-autoscaling describe-scheduled-actions \ ### Setting up a target tracking scaling policy -You can now set up a target tracking scaling policy to scale based on current resource utilization. You can use the [`PutScalingPolicy`](https://docs.aws.amazon.com/cli/latest/reference/application-autoscaling/put-scaling-policy.html) API to create a target tracking scaling policy after ensuring that your predefined metric expects the target value. When metrics lack data due to minimal application load, Application Auto Scaling does not adjust capacity. +You can now set up a target tracking scaling policy to scale based on current resource utilization. +You can use the [`PutScalingPolicy`](https://docs.aws.amazon.com/cli/latest/reference/application-autoscaling/put-scaling-policy.html) API to create a target tracking scaling policy after ensuring that your predefined metric expects the target value.
+When metrics lack data due to minimal application load, Application Auto Scaling does not adjust capacity. Run the following command to create a target-tracking scaling policy: @@ -111,7 +122,8 @@ $ awslocal application-autoscaling put-scaling-policy \ ## Resource Browser -The LocalStack Web Application provides a Resource Browser for managing AppConfig applications. You can access the Resource Browser by opening the LocalStack Web Application in your browser, navigating to the **Resource Browser** section, and then clicking on **Application Auto Scaling** under the **App Integration** section. +The LocalStack Web Application provides a Resource Browser for managing Application Auto Scaling resources. +You can access the Resource Browser by opening the LocalStack Web Application in your browser, navigating to the **Resource Browser** section, and then clicking on **Application Auto Scaling** under the **App Integration** section. Application Auto Scaling Resource Browser
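For readers who want to see the full target-tracking call in one place, the sketch below shows one way the `put-scaling-policy` command from the section above could be filled in; the policy name, target value, and predefined metric type are illustrative assumptions rather than values taken from this page.

```bash
# Illustrative sketch only: attach a target-tracking policy to the scalable target
# registered earlier. Policy name, target value, and metric type are assumptions.
awslocal application-autoscaling put-scaling-policy \
  --service-namespace lambda \
  --scalable-dimension lambda:function:ProvisionedConcurrency \
  --resource-id function:autoscaling-example:BLUE \
  --policy-name example-target-tracking-policy \
  --policy-type TargetTrackingScaling \
  --target-tracking-scaling-policy-configuration '{
    "TargetValue": 0.7,
    "PredefinedMetricSpecification": {
      "PredefinedMetricType": "LambdaProvisionedConcurrencyUtilization"
    }
  }'
```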

@@ -124,15 +136,15 @@ The Resource Browser allows you to perform the following actions: The following service namespaces are currently supported: -- Elastic Container Service (ECS) -- Elastic MapReduce (EMR) -- Elastic Compute Cloud (EC2) -- AppStream -- Lambda -- DynamoDB -- RDS -- Sagemaker -- Kafka -- Cassandra -- Comprenhend -- Custom Resource +* Elastic Container Service (ECS) +* Elastic MapReduce (EMR) +* Elastic Compute Cloud (EC2) +* AppStream +* Lambda +* DynamoDB +* RDS +* Sagemaker +* Kafka +* Cassandra +* Comprehend +* Custom Resource diff --git a/content/en/user-guide/aws/appsync/index.md b/content/en/user-guide/aws/appsync/index.md index cdfd9607d5..2c8d941017 100644 --- a/content/en/user-guide/aws/appsync/index.md +++ b/content/en/user-guide/aws/appsync/index.md @@ -7,19 +7,23 @@ tags: ["Pro image"] ## Introduction -AppSync is a managed service provided by Amazon Web Services (AWS) that enables you to create serverless GraphQL APIs to query databases, microservices, and other APIs. AppSync allows you to define your data models and business logic using a declarative approach, and connect to various data sources, including other AWS services, relational databases, and custom data sources. +AppSync is a managed service provided by Amazon Web Services (AWS) that enables you to create serverless GraphQL APIs to query databases, microservices, and other APIs. +AppSync allows you to define your data models and business logic using a declarative approach, and connect to various data sources, including other AWS services, relational databases, and custom data sources. -LocalStack allows you to use the AppSync APIs in your local environment to connect your applications and services to data and events. The supported APIs are available on our [API coverage page](https://docs.localstack.cloud/references/coverage/coverage_appsync/), which provides information on the extent of AppSync's integration with LocalStack. +LocalStack allows you to use the AppSync APIs in your local environment to connect your applications and services to data and events. +The supported APIs are available on our [API coverage page](https://docs.localstack.cloud/references/coverage/coverage_appsync/), which provides information on the extent of AppSync's integration with LocalStack. ## Getting started This guide is designed for users new to AppSync and assumes basic knowledge of the AWS CLI and our [`awslocal`](https://github.com/localstack/awscli-local) wrapper script. -Start your LocalStack container using your preferred method. We will demonstrate how to create an AppSync API with a DynamoDB data source using the AWS CLI. +Start your LocalStack container using your preferred method. +We will demonstrate how to create an AppSync API with a DynamoDB data source using the AWS CLI. ### Create a DynamoDB table -You can create a DynamoDB table using the [`CreateTable`](https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_CreateTable.html) API.
+Execute the following command to create a table named `DynamoDBNotesTable` with a primary key named `NoteId`: {{< command >}} $ awslocal dynamodb create-table \ @@ -29,7 +33,8 @@ $ awslocal dynamodb create-table \ --billing-mode PAY_PER_REQUEST {{< /command >}} -After the table is created, you can use the [`ListTables`](https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_ListTables.html) API. Run the following command to list all tables in your running LocalStack container: +After the table is created, you can use the [`ListTables`](https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_ListTables.html) API. +Run the following command to list all tables in your running LocalStack container: {{< command >}} $ awslocal dynamodb list-tables @@ -47,7 +52,8 @@ The following output would be retrieved: ### Create a GraphQL API -You can create a GraphQL API using the [`CreateGraphqlApi`](https://docs.aws.amazon.com/appsync/latest/APIReference/API_CreateGraphqlApi.html) API. Execute the following command to create a GraphQL API named `NotesApi`: +You can create a GraphQL API using the [`CreateGraphqlApi`](https://docs.aws.amazon.com/appsync/latest/APIReference/API_CreateGraphqlApi.html) API. +Execute the following command to create a GraphQL API named `NotesApi`: {{< command >}} $ awslocal appsync create-graphql-api \ @@ -74,7 +80,8 @@ The following output would be retrieved: } ``` -You can now create an API key for your GraphQL API using the [`CreateApiKey`](https://docs.aws.amazon.com/appsync/latest/APIReference/API_CreateApiKey.html) API. Execute the following command to create an API key for your GraphQL API: +You can now create an API key for your GraphQL API using the [`CreateApiKey`](https://docs.aws.amazon.com/appsync/latest/APIReference/API_CreateApiKey.html) API. +Execute the following command to create an API key for your GraphQL API: {{< command >}} $ awslocal appsync create-api-key \ @@ -120,7 +127,8 @@ type Schema { } ``` -You can start the schema creation process using the [`StartSchemaCreation`](https://docs.aws.amazon.com/appsync/latest/APIReference/API_StartSchemaCreation.html) API. Execute the following command to start the schema creation process: +You can start the schema creation process using the [`StartSchemaCreation`](https://docs.aws.amazon.com/appsync/latest/APIReference/API_StartSchemaCreation.html) API. +Execute the following command to start the schema creation process: {{< command >}} $ awslocal appsync start-schema-creation \ @@ -138,7 +146,8 @@ The following output would be retrieved: ### Create a data source -You can create a data source using the [`CreateDataSource`](https://docs.aws.amazon.com/appsync/latest/APIReference/API_CreateDataSource.html) API. Execute the following command to create a data source named `DynamoDBNotesTable`: +You can create a data source using the [`CreateDataSource`](https://docs.aws.amazon.com/appsync/latest/APIReference/API_CreateDataSource.html) API. +Execute the following command to create a data source named `DynamoDBNotesTable`: {{< command >}} $ awslocal appsync create-data-source \ @@ -166,7 +175,9 @@ The following output would be retrieved: ### Create a resolver -You can create a resolver using the [`CreateResolver`](https://github.com/localstack/docs/pull/782) API. You can create a custom `request-mapping-template.vtl` and `response-mapping-template.vtl` file to use as a mapping template to use for requests and responses respectively. 
Execute the following command to create a VTL resolver attached to the `PaginatedNotes.notes` field: +You can create a resolver using the [`CreateResolver`](https://docs.aws.amazon.com/appsync/latest/APIReference/API_CreateResolver.html) API. +You can create custom `request-mapping-template.vtl` and `response-mapping-template.vtl` files to use as mapping templates for requests and responses, respectively. +Execute the following command to create a VTL resolver attached to the `PaginatedNotes.notes` field: {{< command >}} $ awslocal appsync create-resolver \ @@ -180,7 +191,8 @@ $ awslocal appsync create-resolver \ ## Custom GraphQL API IDs -You can employ a pre-defined ID during the creation of GraphQL APIs by utilizing the special tag `_custom_id_`. For example, the following command will create a GraphQL API with the ID `faceb00c`: +You can employ a pre-defined ID during the creation of GraphQL APIs by utilizing the special tag `_custom_id_`. +For example, the following command will create a GraphQL API with the ID `faceb00c`: {{< command >}} $ awslocal appsync create-graphql-api \ @@ -223,13 +235,16 @@ LocalStack supports the following data source types types and services: ## GraphQL resolvers -LocalStack's AppSync offers support for both unit and pipeline resolvers, as detailed in the [AWS resolvers documentation](https://docs.aws.amazon.com/appsync/latest/devguide/resolver-components.html). Unit resolvers consist of request and response mapping templates, facilitating the transformation of requests to and from data sources. +LocalStack's AppSync offers support for both unit and pipeline resolvers, as detailed in the [AWS resolvers documentation](https://docs.aws.amazon.com/appsync/latest/devguide/resolver-components.html). +Unit resolvers consist of request and response mapping templates, facilitating the transformation of requests to and from data sources. -Pipeline resolvers, on the other hand, invoke AppSync functions that wraps the AppSync data sources. Unit resolvers are written in the Velocity templating language (VTL), while pipeline resolvers can be written in either VTL or JavaScript. +Pipeline resolvers, on the other hand, invoke AppSync functions that wrap the AppSync data sources. +Unit resolvers are written in the Velocity templating language (VTL), while pipeline resolvers can be written in either VTL or JavaScript. ## Configuring GraphQL Endpoints -There are three configurable strategies that govern how GraphQL API endpoints are created. The strategy can be configured via the `GRAPHQL_ENDPOINT_STRATEGY` environment variable. +There are three configurable strategies that govern how GraphQL API endpoints are created. +The strategy can be configured via the `GRAPHQL_ENDPOINT_STRATEGY` environment variable. | Value | Format | Description | |----------|--------------------------------------------------------|-----------------------------------------------------------------------------------------------------| @@ -274,10 +289,10 @@ awslocal appsync evaluate-code \
{{< / command >}} - ## Resource Browser -The LocalStack Web Application provides a Resource Browser for managing AppSync APIs, Data Sources, Schema, Query, Types, Resolvers, Functions and API keys. You can access the Resource Browser by opening the LocalStack Web Application in your browser, navigating to the **Resources** section, and then clicking on **AppSync** under the **App Integration** section. +The LocalStack Web Application provides a Resource Browser for managing AppSync APIs, Data Sources, Schema, Query, Types, Resolvers, Functions and API keys. +You can access the Resource Browser by opening the LocalStack Web Application in your browser, navigating to the **Resources** section, and then clicking on **AppSync** under the **App Integration** section. AppSync Resource Browser @@ -285,14 +300,22 @@ The Resource Browser allows you to perform the following actions: - **Create API**: Create a new GraphQL API by clicking **Create API** and providing a name for the API, Authentication Type, and optional tags among other parameters. - **Edit API**: Click on the GraphQL API name and click **Edit API** to edit the GraphQL API, by updating the parameters before clicking **Submit**. -- **Create Data Source**: Click on the GraphQL API name and click **Data Source**. Click on **Create Data Source** to create a new data source for the GraphQL API, by providing a name for the data source, data source type, and Service Role ARN before clicking **Submit**. -- **Edit Data Source**: Click on the GraphQL API name and click **Data Source**. Click on the data source name and click **Edit Data Source** to edit the data source, by updating the parameters before clicking **Submit**. -- **Create Types**: Click on the GraphQL API name and click **Types**. Click on **Create Type** to create a type definition, in GraphQL Schema Definition Language (SDL) format, before clicking **Submit**. -- **Create API Key**: Click on the GraphQL API name and click **API Keys**. Click on **Create API Key** to create an API key for the GraphQL API, by providing a description for the API key and its expiration time before clicking **Submit**. -- **View and edit Schema**: Click on the GraphQL API name and click **Schema**. You can view the GraphQL schema, and edit the GraphQL schema, in GraphQL Schema Definition Language (SDL) format, before clicking **Update**. -- **Query**: Click on the GraphQL API name and click **Query**. You can query the GraphQL API by providing the GraphQL query and variables, including the operation and API key, before clicking **Execute**. -- **Attach Resolver**: Click on the GraphQL API name and click **Resolvers**. Click on **Attach Resolver** to attach a resolver to a field, by providing the field name, data source name, Request Mapping Template, Response Mapping Template, among other parameters, before clicking **Submit**. -- **Create Function**: Click on the GraphQL API name and click **Functions**. Click on **Create Function** to create a function, by providing a name for the function, data source name, and Function Version, Request Mapping Template, Response Mapping Template, among other parameters, before clicking **Submit**. +- **Create Data Source**: Click on the GraphQL API name and click **Data Source**. + Click on **Create Data Source** to create a new data source for the GraphQL API, by providing a name for the data source, data source type, and Service Role ARN before clicking **Submit**. +- **Edit Data Source**: Click on the GraphQL API name and click **Data Source**. 
+ Click on the data source name and click **Edit Data Source** to edit the data source, by updating the parameters before clicking **Submit**. +- **Create Types**: Click on the GraphQL API name and click **Types**. + Click on **Create Type** to create a type definition, in GraphQL Schema Definition Language (SDL) format, before clicking **Submit**. +- **Create API Key**: Click on the GraphQL API name and click **API Keys**. + Click on **Create API Key** to create an API key for the GraphQL API, by providing a description for the API key and its expiration time before clicking **Submit**. +- **View and edit Schema**: Click on the GraphQL API name and click **Schema**. + You can view the GraphQL schema, and edit the GraphQL schema, in GraphQL Schema Definition Language (SDL) format, before clicking **Update**. +- **Query**: Click on the GraphQL API name and click **Query**. + You can query the GraphQL API by providing the GraphQL query and variables, including the operation and API key, before clicking **Execute**. +- **Attach Resolver**: Click on the GraphQL API name and click **Resolvers**. + Click on **Attach Resolver** to attach a resolver to a field, by providing the field name, data source name, Request Mapping Template, Response Mapping Template, among other parameters, before clicking **Submit**. +- **Create Function**: Click on the GraphQL API name and click **Functions**. + Click on **Create Function** to create a function, by providing a name for the function, data source name, and Function Version, Request Mapping Template, Response Mapping Template, among other parameters, before clicking **Submit**. ## Examples diff --git a/content/en/user-guide/aws/athena/index.md b/content/en/user-guide/aws/athena/index.md index 0fc4a9ffa3..efc597bc67 100644 --- a/content/en/user-guide/aws/athena/index.md +++ b/content/en/user-guide/aws/athena/index.md @@ -91,7 +91,7 @@ Run the following command: {{< command >}} $ awslocal athena start-query-execution \ - --query-string "select * from tbl01;" --result-configuration "OutputLocation=s3://athena-bucket/output/" + --query-string "select * from tbl01;" --result-configuration "OutputLocation=s3://athena-bucket/output/" {{< / command >}} You can retrieve the execution details similarly using the [`GetQueryExecution`](https://docs.aws.amazon.com/athena/latest/APIReference/API_GetQueryExecution.html) API using the `QueryExecutionId` returned by the previous step. @@ -147,7 +147,8 @@ $ awslocal athena get-query-results --query-execution-id $queryId {{< / command >}} The query should yield a result similar to the output below: -``` + +```bash ... 
"Rows": [ { @@ -190,7 +191,7 @@ LOCATION 's3://mybucket/prefix/' TBLPROPERTIES ( 'table_type' = 'ICEBERG' ) Once the table has been created and data inserted into it, you can see the Iceberg metadata and data files being created in S3: -``` +```bash s3://mybucket/_tmp.prefix/ s3://mybucket/prefix/data/00000-0-user1_20230212221600_cd8f8cbd-4dcc-4c3f-96a2-f08d4104d6fb-job_local1695603329_0001-00001.parquet s3://mybucket/prefix/data/00000-0-user1_20230212221606_eef1fd88-8ff1-467a-a15b-7a24be7bc52b-job_local1976884152_0002-00001.parquet diff --git a/content/en/user-guide/aws/autoscaling/index.md b/content/en/user-guide/aws/autoscaling/index.md index 407eef439c..a92a7305d8 100644 --- a/content/en/user-guide/aws/autoscaling/index.md +++ b/content/en/user-guide/aws/autoscaling/index.md @@ -7,19 +7,23 @@ tags: ["Pro image"] ## Introduction -Auto Scaling helps you maintain application availability and allows you to automatically add or remove EC2 instances according to the demand. You can use Auto Scaling to ensure that you are running your desired number of instances. +Auto Scaling helps you maintain application availability and allows you to automatically add or remove EC2 instances according to the demand. +You can use Auto Scaling to ensure that you are running your desired number of instances. -LocalStack allows you to use the Auto Scaling APIs locally to create and manage Auto Scaling groups locally. The supported APIs are available on our [API coverage page](https://docs.localstack.cloud/references/coverage/coverage_autoscaling/), which provides information on the extent of Auto Scaling's integration with LocalStack. +LocalStack allows you to use the Auto Scaling APIs locally to create and manage Auto Scaling groups locally. +The supported APIs are available on our [API coverage page](https://docs.localstack.cloud/references/coverage/coverage_autoscaling/), which provides information on the extent of Auto Scaling's integration with LocalStack. ## Getting started This guide is designed for users new to Auto Scaling and assumes basic knowledge of the AWS CLI and our [`awslocal`](https://github.com/localstack/awscli-local) wrapper script. -Start your LocalStack container using your preferred method. We will demonstrate how you can create a launch template, an Auto Scaling group, and attach an instance to the Auto Scaling group using the AWS CLI. +Start your LocalStack container using your preferred method. +We will demonstrate how you can create a launch template, an Auto Scaling group, and attach an instance to the Auto Scaling group using the AWS CLI. ### Create a launch template -You can create a launch template that defines the launch configuration for the instances in the Auto Scaling group using the [`CreateLaunchTemplate`](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateLaunchTemplate.html) API. Run the following command to create a launch template: +You can create a launch template that defines the launch configuration for the instances in the Auto Scaling group using the [`CreateLaunchTemplate`](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateLaunchTemplate.html) API. +Run the following command to create a launch template: {{< command >}} $ awslocal ec2 create-launch-template \ @@ -46,13 +50,15 @@ The following output is displayed: ### Create an Auto Scaling group -Before creating an Auto Scaling group, you need to fetch the subnet ID. 
Run the following command to describe the subnets: +Before creating an Auto Scaling group, you need to fetch the subnet ID. +Run the following command to describe the subnets: {{< command >}} $ awslocal ec2 describe-subnets --output text --query Subnets[0].SubnetId {{< /command >}} -Copy the subnet ID from the output and use it to create the Auto Scaling group. Run the following command to create an Auto Scaling group using the [`CreateAutoScalingGroup`](https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_CreateAutoScalingGroup.html) API: +Copy the subnet ID from the output and use it to create the Auto Scaling group. +Run the following command to create an Auto Scaling group using the [`CreateAutoScalingGroup`](https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_CreateAutoScalingGroup.html) API: {{< command >}} $ awslocal autoscaling create-auto-scaling-group \ @@ -65,7 +71,8 @@ $ awslocal autoscaling create-auto-scaling-group \ ### Describe the Auto Scaling group -You can describe the Auto Scaling group using the [`DescribeAutoScalingGroups`](https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_DescribeAutoScalingGroups.html) API. Run the following command to describe the Auto Scaling group: +You can describe the Auto Scaling group using the [`DescribeAutoScalingGroups`](https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_DescribeAutoScalingGroups.html) API. +Run the following command to describe the Auto Scaling group: {{< command >}} $ awslocal autoscaling describe-auto-scaling-groups @@ -109,14 +116,16 @@ The following output is displayed: You can attach an instance to the Auto Scaling group using the [`AttachInstances`](https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_AttachInstances.html) API. -Before that, create an EC2 instance using the [`RunInstances`](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RunInstances.html) API. Run the following command to create an EC2 instance locally: +Before that, create an EC2 instance using the [`RunInstances`](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RunInstances.html) API. +Run the following command to create an EC2 instance locally: {{< command >}} $ awslocal ec2 run-instances \ --image-id ami-ff0fea8310f3 --count 1 {{< /command >}} -Fetch the instance ID from the output and use it to attach the instance to the Auto Scaling group. Run the following command to attach the instance to the Auto Scaling group: +Fetch the instance ID from the output and use it to attach the instance to the Auto Scaling group. +Run the following command to attach the instance to the Auto Scaling group: {{< command >}} $ awslocal autoscaling attach-instances \ @@ -128,4 +137,5 @@ Replace `i-0d678c4ecf6018dde` with the instance ID that you fetched from the out ## Current Limitations -LocalStack does not support the `docker`/`libvirt` [VM manager for EC2](https://docs.localstack.cloud/user-guide/aws/ec2/#vm-managers). It only works with the `mock` VM manager. +LocalStack does not support the `docker`/`libvirt` [VM manager for EC2](https://docs.localstack.cloud/user-guide/aws/ec2/#vm-managers). +It only works with the `mock` VM manager. diff --git a/content/en/user-guide/aws/backup/index.md b/content/en/user-guide/aws/backup/index.md index 4144ef0b8c..65539432be 100644 --- a/content/en/user-guide/aws/backup/index.md +++ b/content/en/user-guide/aws/backup/index.md @@ -9,19 +9,25 @@ persistence: supported ## Introduction -Backup is a centralized backup service provided by Amazon Web Services. 
It simplifies the process of backing up and restoring your data across various AWS services and resources. Backup supports a wide range of AWS resources, including Elastic Block Store (EBS) volumes, Relational Database Service (RDS) databases, DynamoDB tables, Elastic File System (EFS) file systems, and more. Backup enables you to set backup retention policies, allowing you to specify how long you want to retain your backup copies. +Backup is a centralized backup service provided by Amazon Web Services. +It simplifies the process of backing up and restoring your data across various AWS services and resources. +Backup supports a wide range of AWS resources, including Elastic Block Store (EBS) volumes, Relational Database Service (RDS) databases, DynamoDB tables, Elastic File System (EFS) file systems, and more. +Backup enables you to set backup retention policies, allowing you to specify how long you want to retain your backup copies. -LocalStack allows you to use the Backup APIs in your local environment to manage backup plans, create scheduled or on-demand backups of certain resource types. The supported APIs are available on our [API coverage page](https://docs.localstack.cloud/references/coverage/coverage_backup/), which provides information on the extent of Backup's integration with LocalStack. +LocalStack allows you to use the Backup APIs in your local environment to manage backup plans, create scheduled or on-demand backups of certain resource types. +The supported APIs are available on our [API coverage page](https://docs.localstack.cloud/references/coverage/coverage_backup/), which provides information on the extent of Backup's integration with LocalStack. ## Getting started This guide is designed for users new to Backup and assumes basic knowledge of the AWS CLI and our [`awslocal`](https://github.com/localstack/awscli-local) wrapper script. -Start your LocalStack container using your preferred method. We will demonstrate how to create a backup job and specify a set of resources to the backup plan name and backup rules with the AWS CLI. +Start your LocalStack container using your preferred method. +We will demonstrate how to create a backup job and specify a set of resources to the backup plan name and backup rules with the AWS CLI. ### Create a backup vault -You can create a backup vault which acts as a logical container where backups are stored using the [`CreateBackupVault`](https://docs.aws.amazon.com/aws-backup/latest/devguide/API_CreateBackupVault.html) API. Run the following command to create a backup vault named `my-vault`: +You can create a backup vault which acts as a logical container where backups are stored using the [`CreateBackupVault`](https://docs.aws.amazon.com/aws-backup/latest/devguide/API_CreateBackupVault.html) API. +Run the following command to create a backup vault named `my-vault`: {{< command >}} $ awslocal backup create-backup-vault \ @@ -40,7 +46,8 @@ The following output would be retrieved: ### Create a backup plan -You can create a backup plan which specifies the backup vault to store the backups in and the schedule for creating backups. You can specify the backup plan in a `backup-plan.json` file: +You can create a backup plan which specifies the backup vault to store the backups in and the schedule for creating backups. 
+You can specify the backup plan in a `backup-plan.json` file: ```json { @@ -64,7 +71,8 @@ You can create a backup plan which specifies the backup vault to store the backu } ``` -You can use the [`CreateBackupPlan`](https://docs.aws.amazon.com/aws-backup/latest/devguide/API_CreateBackupPlan.html) API to create a backup plan. Run the following command to create a backup plan: +You can use the [`CreateBackupPlan`](https://docs.aws.amazon.com/aws-backup/latest/devguide/API_CreateBackupPlan.html) API to create a backup plan. +Run the following command to create a backup plan: {{< command >}} $ awslocal backup create-backup-plan \ @@ -84,7 +92,8 @@ The following output would be retrieved: ### Create a backup selection -You can create a backup selection which specifies the resources to backup and the backup plan to associate with. You can specify the backup selection in a `backup-selection.json` file: +You can create a backup selection which specifies the resources to backup and the backup plan to associate with. +You can specify the backup selection in a `backup-selection.json` file: ```json { @@ -100,15 +109,17 @@ You can create a backup selection which specifies the resources to backup and th ``` -You can use the [`CreateBackupSelection`](https://docs.aws.amazon.com/aws-backup/latest/devguide/API_CreateBackupSelection.html) API to create a backup selection. Run the following command to create a backup selection: +You can use the [`CreateBackupSelection`](https://docs.aws.amazon.com/aws-backup/latest/devguide/API_CreateBackupSelection.html) API to create a backup selection. +Run the following command to create a backup selection: {{< command >}} $ awslocal backup create-backup-selection \ --backup-plan-id 9337aba3 \ - --backup-selection file://backup-plan-resources.json + --backup-selection file://backup-plan-resources.json {{< / command >}} -Replace the `--backup-plan-id` value with the `BackupPlanId` value from the output of the previous command. The following output would be retrieved: +Replace the `--backup-plan-id` value with the `BackupPlanId` value from the output of the previous command. +The following output would be retrieved: ```bash { @@ -120,7 +131,8 @@ Replace the `--backup-plan-id` value with the `BackupPlanId` value from the outp ## Resource Browser -The LocalStack Web Application provides a Resource Browser for managing backup plans and vaults. You can access the Resource Browser by opening the LocalStack Web Application in your browser, navigating to the **Resources** section, and then clicking on **Backup** under the **Storage** section. +The LocalStack Web Application provides a Resource Browser for managing backup plans and vaults. +You can access the Resource Browser by opening the LocalStack Web Application in your browser, navigating to the **Resources** section, and then clicking on **Backup** under the **Storage** section. Backup Resource Browser @@ -128,7 +140,11 @@ The Resource Browser allows you to perform the following actions: - **Create Backup Plan**: Create a backup plan by clicking the **Create** button in the **Backup Plans** tab and specifying the backup plan details, including the plan name, rules, backup setting, and more in the modal dialog. - **Create Backup Vault**: Create a backup vault by clicking the **Create** button in the **Backup Vault** tab and specifying the vault name, tags, and other parameters in the modal dialog. 
-- **Create Backup**: Create a backup by clicking the **Backup Vault** and then clicking the **Actions** button followed by clicking the **Create Backup** button in the modal dialog. Specify the backup name, backup vault, and other parameters in the modal dialog. -- **Assign Resources**: Click the backup plan and then click the **Actions** button followed by clicking the **Assign Resources** button in the modal dialog. Specify the backup plan ID and resources to assign in the modal dialog, and click **Submit** to assign the resources to the backup plan. -- **Delete Vault**: Delete a backup vault by clicking the **Backup Vault** or selecting multiple vaults. Click the **Actions** button followed by clicking the **Delete Vault** button or **Remove Selected** to delete an individual vault or multiple vaults respectively in the modal dialog. -- **Delete Backup Plan**: Delete a backup plan by clicking the **Backup Plan** or selecting multiple plans. Click the **Actions** button followed by clicking the **Delete Backup Plan** button or **Remove Selected** to delete an individual plan or multiple plans respectively in the modal dialog. +- **Create Backup**: Create a backup by clicking the **Backup Vault** and then clicking the **Actions** button followed by clicking the **Create Backup** button in the modal dialog. + Specify the backup name, backup vault, and other parameters in the modal dialog. +- **Assign Resources**: Click the backup plan and then click the **Actions** button followed by clicking the **Assign Resources** button in the modal dialog. + Specify the backup plan ID and resources to assign in the modal dialog, and click **Submit** to assign the resources to the backup plan. +- **Delete Vault**: Delete a backup vault by clicking the **Backup Vault** or selecting multiple vaults. + Click the **Actions** button followed by clicking the **Delete Vault** button or **Remove Selected** to delete an individual vault or multiple vaults respectively in the modal dialog. +- **Delete Backup Plan**: Delete a backup plan by clicking the **Backup Plan** or selecting multiple plans. + Click the **Actions** button followed by clicking the **Delete Backup Plan** button or **Remove Selected** to delete an individual plan or multiple plans respectively in the modal dialog. diff --git a/content/en/user-guide/aws/batch/index.md b/content/en/user-guide/aws/batch/index.md index e5b9db6f03..53d90e8086 100644 --- a/content/en/user-guide/aws/batch/index.md +++ b/content/en/user-guide/aws/batch/index.md @@ -7,9 +7,11 @@ tags: ["Pro image"] ## Introduction -Batch is a cloud-based service provided by Amazon Web Services (AWS) that simplifies the process of running batch computing workloads on the AWS cloud infrastructure. Batch allows you to efficiently process large volumes of data and run batch jobs without the need to manage and provision underlying compute resources. +Batch is a cloud-based service provided by Amazon Web Services (AWS) that simplifies the process of running batch computing workloads on the AWS cloud infrastructure. +Batch allows you to efficiently process large volumes of data and run batch jobs without the need to manage and provision underlying compute resources. -LocalStack allows you to use the Batch APIs to automate and scale computational tasks in your local environment while handling batch workloads. 
The supported APIs are available on our [API Coverage Page](https://docs.localstack.cloud/references/coverage/coverage_batch/), which provides information on the extent of Batch integration with LocalStack. +LocalStack allows you to use the Batch APIs to automate and scale computational tasks in your local environment while handling batch workloads. +The supported APIs are available on our [API Coverage Page](https://docs.localstack.cloud/references/coverage/coverage_batch/), which provides information on the extent of Batch integration with LocalStack. ## Getting started @@ -26,7 +28,9 @@ We will demonstrate how you create and run a Batch job by following these steps: ### Create a service role -You can create a role using the [`CreateRole`](https://docs.aws.amazon.com/cli/latest/reference/iam/create-role.html) API. For LocalStack, the service role simply needs to exist. However, when [enforcing IAM policies]({{< ref "user-guide/aws/iam#enforcing-iam-policies" >}}), it is necessary that the policy is valid. +You can create a role using the [`CreateRole`](https://docs.aws.amazon.com/cli/latest/reference/iam/create-role.html) API. +For LocalStack, the service role simply needs to exist. +However, when [enforcing IAM policies]({{< ref "user-guide/aws/iam#enforcing-iam-policies" >}}), it is necessary that the policy is valid. Run the following command to create a role with an empty policy document: @@ -53,7 +57,8 @@ You should see the following output: ### Create the compute environment -You can use the [`CreateComputeEnvironment`](https://docs.aws.amazon.com/cli/latest/reference/batch/create-compute-environment.html) API to create a compute environment. Run the following command using the role ARN above (`arn:aws:iam::000000000000:role/myrole`), to create the compute environment: +You can use the [`CreateComputeEnvironment`](https://docs.aws.amazon.com/cli/latest/reference/batch/create-compute-environment.html) API to create a compute environment. +Run the following command using the role ARN above (`arn:aws:iam::000000000000:role/myrole`), to create the compute environment: {{< command >}} $ awslocal batch create-compute-environment \ @@ -72,12 +77,14 @@ You should see the following output: ``` {{< callout >}} -While an unmanaged compute environment has been specified, there is no need to provision any compute resources for this setup to function. Your tasks will run independently in new Docker containers, alongside the LocalStack container. +While an unmanaged compute environment has been specified, there is no need to provision any compute resources for this setup to function. +Your tasks will run independently in new Docker containers, alongside the LocalStack container. {{< /callout >}} ### Create a job queue -You can fetch the ARN using the [`DescribeComputeEnvironments`](https://docs.aws.amazon.com/cli/latest/reference/batch/describe-compute-environments.html) API. Run the following command to fetch the ARN of the compute environment: +You can fetch the ARN using the [`DescribeComputeEnvironments`](https://docs.aws.amazon.com/cli/latest/reference/batch/describe-compute-environments.html) API. +Run the following command to fetch the ARN of the compute environment: {{< command >}} $ awslocal batch describe-compute-environments --compute-environments myenv @@ -101,7 +108,8 @@ You should see the following output: } ``` -You can use the ARN to create the job queue using [`CreateJobQueue`](https://docs.aws.amazon.com/cli/latest/reference/batch/create-job-queue.html) API. 
Run the following command to create the job queue: +You can use the ARN to create the job queue using [`CreateJobQueue`](https://docs.aws.amazon.com/cli/latest/reference/batch/create-job-queue.html) API. +Run the following command to create the job queue: {{< command >}} $ awslocal batch create-job-queue \ @@ -122,7 +130,9 @@ You should see the following output: ### Create a job definition -Now, you can define what occurs during a job run, or at least what transpires by default. In this example, you can execute the `busybox` container from DockerHub and initiate the command: `sleep 30` within it. It's important to note that you can override this command when submitting the job. +Now, you can define what occurs during a job run, or at least what transpires by default. +In this example, you can execute the `busybox` container from DockerHub and initiate the command: `sleep 30` within it. +It's important to note that you can override this command when submitting the job. Run the following command to create the job definition using the [`RegisterJobDefinition`](https://docs.aws.amazon.com/cli/latest/reference/batch/register-job-definition.html) API: @@ -145,7 +155,9 @@ You should see the following output: ### Submit a job to the job queue -You can now run a compute job. This command runs a job on the queue that you have set up previously, overriding the container command to run: `sh -c "sleep 5; pwd"`. This command simulates work being done in the container. +You can now run a compute job. +This command runs a job on the queue that you have set up previously, overriding the container command to run: `sh -c "sleep 5; pwd"`. +This command simulates work being done in the container. Run the following command to submit a job to the job queue using the [`SubmitJob`](https://docs.aws.amazon.com/cli/latest/reference/batch/submit-job.html) API: @@ -166,6 +178,7 @@ You should see the following output: } ``` -## Current Limitations +## Current Limitations -As mentioned in the example above, the creation of a compute environment does not entail the provisioning of EC2 or Fargate instances. Rather, it executes Batch jobs on the local Docker daemon, operating alongside LocalStack. +As mentioned in the example above, the creation of a compute environment does not entail the provisioning of EC2 or Fargate instances. +Rather, it executes Batch jobs on the local Docker daemon, operating alongside LocalStack. diff --git a/content/en/user-guide/aws/ce/index.md b/content/en/user-guide/aws/ce/index.md index 24ef2273aa..d19efe071c 100644 --- a/content/en/user-guide/aws/ce/index.md +++ b/content/en/user-guide/aws/ce/index.md @@ -8,19 +8,24 @@ tags: ["Pro image"] ## Introduction -Cost Explorer is a service provided by Amazon Web Services (AWS) that enables you to visualize, analyze, and manage your AWS spending and usage. Cost Explorer offers options to filter and group data by dimensions such as service, region, instance type, and more. With Cost Explorer, you can forecast costs, track budget progress, and set up alerts to receive notifications when spending exceeds predefined thresholds. +Cost Explorer is a service provided by Amazon Web Services (AWS) that enables you to visualize, analyze, and manage your AWS spending and usage. +Cost Explorer offers options to filter and group data by dimensions such as service, region, instance type, and more. +With Cost Explorer, you can forecast costs, track budget progress, and set up alerts to receive notifications when spending exceeds predefined thresholds. 
-LocalStack allows you to use the Cost Explorer APIs in your local environment to create and manage cost category definition, cost anomaly monitors & subscriptions. The supported APIs are available on our [API coverage page](https://docs.localstack.cloud/references/coverage/coverage_ce/), which provides information on the extent of Cost Explorer's integration with LocalStack. +LocalStack allows you to use the Cost Explorer APIs in your local environment to create and manage cost category definitions, cost anomaly monitors, and subscriptions. +The supported APIs are available on our [API coverage page](https://docs.localstack.cloud/references/coverage/coverage_ce/), which provides information on the extent of Cost Explorer's integration with LocalStack. ## Getting started This guide is designed for users new to Cost Explorer and assumes basic knowledge of the AWS CLI and our [`awslocal`](https://github.com/localstack/awscli-local) wrapper script. -Start your LocalStack container using your preferred method. We will demonstrate how to mock the Cost Explorer APIs with the AWS CLI. +Start your LocalStack container using your preferred method. +We will demonstrate how to mock the Cost Explorer APIs with the AWS CLI. ### Create a Cost Category definition -You can create a Cost Category definition using the [`CreateCostCategoryDefinition`](https://docs.aws.amazon.com/aws-cost-management/latest/APIReference/API_CreateCostCategoryDefinition.html)) API. The following example creates a Cost Category definition using an empty rule condition of type "REGULAR": +You can create a Cost Category definition using the [`CreateCostCategoryDefinition`](https://docs.aws.amazon.com/aws-cost-management/latest/APIReference/API_CreateCostCategoryDefinition.html) API. +The following example creates a Cost Category definition using an empty rule condition of type "REGULAR": {{< command >}} $ awslocal ce create-cost-category-definition --name test \ @@ -35,7 +40,8 @@ The following output would be retrieved: } ``` -You can describe the Cost Category definition using the [`DescribeCostCategoryDefinition`](https://docs.aws.amazon.com/aws-cost-management/latest/APIReference/API_DescribeCostCategoryDefinition.html) API. Run the following command: +You can describe the Cost Category definition using the [`DescribeCostCategoryDefinition`](https://docs.aws.amazon.com/aws-cost-management/latest/APIReference/API_DescribeCostCategoryDefinition.html) API. +Run the following command: {{< command >}} $ awslocal ce describe-cost-category-definition \ @@ -63,7 +69,8 @@ The following output would be retrieved: ### Create a cost anomaly subscription -You can add an alert subscription to a cost anomaly detection monitor to define subscribers using the [`CreateAnomalySubscription`](https://docs.aws.amazon.com/aws-cost-management/latest/APIReference/API_CreateAnomalySubscription.html) API. The following example creates a cost anomaly subscription: +You can add an alert subscription to a cost anomaly detection monitor to define subscribers using the [`CreateAnomalySubscription`](https://docs.aws.amazon.com/aws-cost-management/latest/APIReference/API_CreateAnomalySubscription.html) API.
+The following example creates a cost anomaly subscription: {{< command >}} $ awslocal ce create-anomaly-subscription --anomaly-subscription '{ @@ -84,7 +91,8 @@ The following output would be retrieved: } ``` -You can retrieve the cost anomaly subscriptions using the [`GetAnomalySubscriptions`](https://docs.aws.amazon.com/aws-cost-management/latest/APIReference/API_GetAnomalySubscriptions.html) API. Run the following command: +You can retrieve the cost anomaly subscriptions using the [`GetAnomalySubscriptions`](https://docs.aws.amazon.com/aws-cost-management/latest/APIReference/API_GetAnomalySubscriptions.html) API. +Run the following command: {{< command >}} $ awslocal ce get-anomaly-subscriptions @@ -110,7 +118,8 @@ The following output would be retrieved: ### Create a cost anomaly monitor -You can create a new cost anomaly detection subscription with the requested type and monitor specification using the [`CreateAnomalyMonitor`](https://docs.aws.amazon.com/aws-cost-management/latest/APIReference/API_CreateAnomalyMonitor.html) API. The following example creates a cost anomaly monitor: +You can create a new cost anomaly detection monitor with the requested type and monitor specification using the [`CreateAnomalyMonitor`](https://docs.aws.amazon.com/aws-cost-management/latest/APIReference/API_CreateAnomalyMonitor.html) API. +The following example creates a cost anomaly monitor: {{< command >}} $ awslocal ce create-anomaly-monitor --anomaly-monitor '{ @@ -127,7 +136,8 @@ The following output would be retrieved: } ``` -You can retrieve the cost anomaly monitors using the [`GetAnomalyMonitors`](https://docs.aws.amazon.com/aws-cost-management/latest/APIReference/API_GetAnomalyMonitors.html) API. Run the following command: +You can retrieve the cost anomaly monitors using the [`GetAnomalyMonitors`](https://docs.aws.amazon.com/aws-cost-management/latest/APIReference/API_GetAnomalyMonitors.html) API. +Run the following command: {{< command >}} $ awslocal ce get-anomaly-monitors @@ -149,7 +159,8 @@ The following output would be retrieved: ## Resource Browser -The LocalStack Web Application provides a Resource Browser for managing cost category definitions for the Cost Explorer service. You can access the Resource Browser by opening the LocalStack Web Application in your browser, navigating to the Resources section, and then clicking on **Cost Explorer** under the **Cloud Financial Management** section. +The LocalStack Web Application provides a Resource Browser for managing cost category definitions for the Cost Explorer service. +You can access the Resource Browser by opening the LocalStack Web Application in your browser, navigating to the Resources section, and then clicking on **Cost Explorer** under the **Cloud Financial Management** section. Cost Explorer Resource Browser

@@ -162,4 +173,5 @@ The Resource Browser allows you to perform the following actions: ## Current Limitations -LocalStack's Cost Explorer implementation cannot programmatically query your cost and usage data, or provide aggregated data such as total monthly costs or total daily usage. However, you can use the integrations to mock the Cost Explorer APIs and test your workflow locally. +LocalStack's Cost Explorer implementation cannot programmatically query your cost and usage data, or provide aggregated data such as total monthly costs or total daily usage. +However, you can use the integrations to mock the Cost Explorer APIs and test your workflow locally. diff --git a/content/en/user-guide/aws/cloudformation/index.md b/content/en/user-guide/aws/cloudformation/index.md index 2946a4c82b..e384ff90b1 100644 --- a/content/en/user-guide/aws/cloudformation/index.md +++ b/content/en/user-guide/aws/cloudformation/index.md @@ -7,24 +7,34 @@ persistence: supported with limitations --- {{< callout >}} -With LocalStack 3.5 we've improved how the internal engine orders resources for deployment and deletion of stacks. Specifically it now more accurately calculates dependencies between resources and doesn't try to deploy/delete resources which don't have their dependencies available yet. Should you encounter any issues, please report them on [GitHub](https://github.com/localstack/localstack/issues/new/choose). You can temporarily revert to the old behavior with `CFN_LEGACY_TEMPLATE_DEPLOYER=1`, but be aware that this is only a temporary option. +With LocalStack 3.5 we've improved how the internal engine orders resources for deployment and deletion of stacks. +Specifically it now more accurately calculates dependencies between resources and doesn't try to deploy/delete resources which don't have their dependencies available yet. +Should you encounter any issues, please report them on [GitHub](https://github.com/localstack/localstack/issues/new/choose). +You can temporarily revert to the old behavior with `CFN_LEGACY_TEMPLATE_DEPLOYER=1`, but be aware that this is only a temporary option. {{< /callout >}} ## Introduction -CloudFormation is a service provided by Amazon Web Services (AWS) that allows you to define and provision infrastructure as code. It enables you to create, update, and manage resources in a repeatable and automated manner using declarative templates. With CloudFormation, you can use JSON or YAML templates to define your desired infrastructure state. You can specify resources, their configurations, dependencies, and relationships in these templates. +CloudFormation is a service provided by Amazon Web Services (AWS) that allows you to define and provision infrastructure as code. +It enables you to create, update, and manage resources in a repeatable and automated manner using declarative templates. +With CloudFormation, you can use JSON or YAML templates to define your desired infrastructure state. +You can specify resources, their configurations, dependencies, and relationships in these templates. -LocalStack supports CloudFormation, allowing you to use the CloudFormation APIs in your local environment to declaratively define your architecture on the AWS, including resources such as S3 Buckets, Lambda Functions, and much more. The [API coverage page](https://docs.localstack.cloud/references/coverage/coverage_cloudformation/) and [feature coverage](#feature-coverage) provides information on the extent of CloudFormation's integration with LocalStack. 
+LocalStack supports CloudFormation, allowing you to use the CloudFormation APIs in your local environment to declaratively define your architecture on AWS, including resources such as S3 Buckets, Lambda Functions, and much more. +The [API coverage page](https://docs.localstack.cloud/references/coverage/coverage_cloudformation/) and [feature coverage](#feature-coverage) provide information on the extent of CloudFormation's integration with LocalStack. ## Getting started This guide is designed for users new to CloudFormation and assumes basic knowledge of the AWS CLI and our [`awslocal`](https://github.com/localstack/awscli-local) wrapper script. -Start your LocalStack container using your preferred method. We will demonstrate how to deploy a simple CloudFormation stack consisting of a single S3 Bucket with the AWS CLI. +Start your LocalStack container using your preferred method. +We will demonstrate how to deploy a simple CloudFormation stack consisting of a single S3 Bucket with the AWS CLI. ### Create a CloudFormation Stack -CloudFormation stack is a collection of AWS resources that you can create, update, or delete as a single unit. Stacks are defined using JSON or YAML templates. Use the following code snippet and save the content in either `cfn-quickstart-stack.yaml` or `cfn-quickstart-stack.json`, depending on your preferred format. +A CloudFormation stack is a collection of AWS resources that you can create, update, or delete as a single unit. +Stacks are defined using JSON or YAML templates. +Use the following code snippet and save the content in either `cfn-quickstart-stack.yaml` or `cfn-quickstart-stack.json`, depending on your preferred format. {{< tabpane >}} {{< tab header="YAML" lang="yaml" >}} @@ -48,10 +58,11 @@ Resources: {{< /tab >}} {{< /tabpane >}} - ### Deploy the CloudFormation Stack -You can deploy the CloudFormation stack using the AWS CLI with the [`deploy`](https://docs.aws.amazon.com/cli/latest/reference/cloudformation/deploy/index.html) command. The `deploy` command creates and updates CloudFormation stacks. Run the following command to deploy the stack: +You can deploy the CloudFormation stack using the AWS CLI with the [`deploy`](https://docs.aws.amazon.com/cli/latest/reference/cloudformation/deploy/index.html) command. +The `deploy` command creates and updates CloudFormation stacks. +Run the following command to deploy the stack: {{< command >}} $ awslocal cloudformation deploy \ @@ -59,7 +70,8 @@ $ awslocal cloudformation deploy \ --template-file "./cfn-quickstart-stack.yaml" {{< / command >}} -You can verify that the stack was created successfully by listing the S3 buckets in your LocalStack container using the [`ListBucket` API](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-buckets.html). Run the following command to list the buckets: +You can verify that the stack was created successfully by listing the S3 buckets in your LocalStack container using the [`ListBuckets` API](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-buckets.html). +Run the following command to list the buckets: {{< command >}} $ awslocal s3api list-buckets @@ -67,7 +79,8 @@ $ awslocal s3api list-buckets ### Delete the CloudFormation Stack -You can delete the CloudFormation stack using the [`delete-stack`](https://docs.aws.amazon.com/cli/latest/reference/cloudformation/delete-stack.html) command.
Run the following command to delete the stack along with all the resources created by the stack: +You can delete the CloudFormation stack using the [`delete-stack`](https://docs.aws.amazon.com/cli/latest/reference/cloudformation/delete-stack.html) command. +Run the following command to delete the stack along with all the resources created by the stack: {{< command >}} $ awslocal cloudformation delete-stack \ @@ -76,20 +89,24 @@ $ awslocal cloudformation delete-stack \ ## Local User-Interface -You can also utilize LocalStack's local CloudFormation user-interface to deploy and manage your CloudFormation stacks using public templates. You can access the user-interface at [`localhost:4566/_localstack/cloudformation/deploy`](http://localhost:4566/_localstack/cloudformation/deploy). +You can also utilize LocalStack's local CloudFormation user-interface to deploy and manage your CloudFormation stacks using public templates. +You can access the user-interface at [`localhost:4566/_localstack/cloudformation/deploy`](http://localhost:4566/_localstack/cloudformation/deploy). Local CloudFormation UI in LocalStack
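Before opening the page in a browser, you can optionally confirm from the terminal that the UI endpoint mentioned above is being served. This is only a quick sanity check and assumes LocalStack is already running on port 4566; an HTTP `200` indicates the page is reachable:

```bash
# Print only the HTTP status code returned by the local CloudFormation UI endpoint.
curl -s -o /dev/null -w "%{http_code}\n" http://localhost:4566/_localstack/cloudformation/deploy
```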

-You can utilize the CloudFormation user interface to provide an existing CloudFormation template URL, input the necessary parameters, and initiate the deployment directly from your browser. Let's proceed with an example template to deploy a CloudFormation stack. +You can utilize the CloudFormation user interface to provide an existing CloudFormation template URL, input the necessary parameters, and initiate the deployment directly from your browser. +Let's proceed with an example template to deploy a CloudFormation stack. To begin, employ the public CloudFormation template URL: [`s3.eu-central-1.amazonaws.com/cloudformation-templates-eu-central-1/DynamoDB_Secondary_Indexes.template`](https://s3.eu-central-1.amazonaws.com/cloudformation-templates-eu-central-1/DynamoDB_Secondary_Indexes.template). -Following this, download the template URL and extract the stack parameters (default values will be automatically applied). Upon submission, the stack deployment will be triggered, and a result message will be displayed. +Following this, download the template URL and extract the stack parameters (default values will be automatically applied). +Upon submission, the stack deployment will be triggered, and a result message will be displayed. ## Resource Browser -The LocalStack Web Application provides a Resource Browser for managing CloudFormation stacks to manage your AWS resources locally. You can access the Resource Browser by opening the LocalStack Web Application in your browser, navigating to the **Resources** section, and then clicking on **CloudFormation** under the **Management/Governance** section. +The LocalStack Web Application provides a Resource Browser for managing CloudFormation stacks to manage your AWS resources locally. +You can access the Resource Browser by opening the LocalStack Web Application in your browser, navigating to the **Resources** section, and then clicking on **CloudFormation** under the **Management/Governance** section. CloudFormation Resource Browser @@ -111,7 +128,9 @@ The following code snippets and sample applications provide practical examples o ## Feature coverage {{< callout "tip" >}} -We are continually enhancing our CloudFormation feature coverage by consistently introducing new resource types. Your feature requests assist us in determining the priority of resource additions. Feel free to contribute by [creating a new GitHub issue](https://github.com/localstack/localstack/issues/new?assignees=&labels=feature-request&template=feature-request.yml&title=feature+request%3A+%3Ctitle%3E). +We are continually enhancing our CloudFormation feature coverage by consistently introducing new resource types. +Your feature requests assist us in determining the priority of resource additions. +Feel free to contribute by [creating a new GitHub issue](https://github.com/localstack/localstack/issues/new?assignees=&labels=feature-request&template=feature-request.yml&title=feature+request%3A+%3Ctitle%3E). {{< /callout >}} ### Features @@ -134,7 +153,8 @@ We are continually enhancing our CloudFormation feature coverage by consistently | Intrinsic Functions | Partial | {{< callout >}} -Currently, support for `UPDATE` operations on resources is limited. Prefer stack re-creation over stack update at this time. +Currently, support for `UPDATE` operations on resources is limited. +Prefer stack re-creation over stack update at this time. 
{{< /callout >}} ### Intrinsic Functions @@ -274,7 +294,6 @@ When utilizing the Community image, any resources within the stack that are not | AWS::Timestream::Database | ✅ | ✅ | - | | AWS::Timestream::Table | ✅ | ✅ | - | - #### Pro image | Resource | Create | Delete | Update | diff --git a/content/en/user-guide/aws/cloudfront/index.md b/content/en/user-guide/aws/cloudfront/index.md index 6c60a218fd..641a9dfb6c 100644 --- a/content/en/user-guide/aws/cloudfront/index.md +++ b/content/en/user-guide/aws/cloudfront/index.md @@ -9,15 +9,20 @@ persistence: supported ## Introduction -CloudFront is a content delivery network (CDN) service provided by Amazon Web Services (AWS). CloudFront distributes its web content, videos, applications, and APIs with low latency and high data transfer speeds. CloudFront APIs allow you to configure distributions, customize cache behavior, secure content with access controls, and monitor the CDN's performance through real-time metrics. +CloudFront is a content delivery network (CDN) service provided by Amazon Web Services (AWS). +CloudFront distributes its web content, videos, applications, and APIs with low latency and high data transfer speeds. +CloudFront APIs allow you to configure distributions, customize cache behavior, secure content with access controls, and monitor the CDN's performance through real-time metrics. -LocalStack allows you to use the CloudFront APIs in your local environment to create local CloudFront distributions to transparently access your applications and file artifacts. The supported APIs are available on our [API coverage page](https://docs.localstack.cloud/references/coverage/coverage_cloudfront/), which provides information on the extent of CloudFront's integration with LocalStack. +LocalStack allows you to use the CloudFront APIs in your local environment to create local CloudFront distributions to transparently access your applications and file artifacts. +The supported APIs are available on our [API coverage page](https://docs.localstack.cloud/references/coverage/coverage_cloudfront/), which provides information on the extent of CloudFront's integration with LocalStack. ## Getting started -This guide is intended for users who wish to get more acquainted with CloudFront over LocalStack. It assumes you have basic knowledge of the AWS CLI (and our [`awslocal`](https://github.com/localstack/awscli-local) wrapper script). +This guide is intended for users who wish to get more acquainted with CloudFront over LocalStack. +It assumes you have basic knowledge of the AWS CLI (and our [`awslocal`](https://github.com/localstack/awscli-local) wrapper script). -Start your LocalStack container using your preferred method. We will demonstrate how you can create an S3 bucket, put a text file named `hello.txt` to the bucket, and then create a CloudFront distribution which makes the file accessible via a `https://abc123.cloudfront.net/hello.txt` proxy URL (where `abc123` is a placeholder for the real distribution ID). +Start your LocalStack container using your preferred method. +We will demonstrate how you can create an S3 bucket, put a text file named `hello.txt` to the bucket, and then create a CloudFront distribution which makes the file accessible via a `https://abc123.cloudfront.net/hello.txt` proxy URL (where `abc123` is a placeholder for the real distribution ID). 
To get started, create an S3 bucket using the `mb` command: @@ -32,7 +37,8 @@ $ echo 'Hello World' > /tmp/hello.txt $ awslocal s3 cp /tmp/hello.txt s3://abc123/hello.txt --acl public-read {{< / command >}} -After uploading the file to S3, you can create a CloudFront distribution using the [`CreateDistribution`](https://docs.aws.amazon.com/cloudfront/latest/APIReference/API_CreateDistribution.html) API call. Run the following command to create a distribution with the default settings: +After uploading the file to S3, you can create a CloudFront distribution using the [`CreateDistribution`](https://docs.aws.amazon.com/cloudfront/latest/APIReference/API_CreateDistribution.html) API call. +Run the following command to create a distribution with the default settings: {{< command >}} $ domain=$(awslocal cloudfront create-distribution \ @@ -45,15 +51,22 @@ If you wish to use CloudFront on system host, ensure your local DNS setup is cor Refer to the section on [System DNS configuration]({{< ref "dns-server#system-dns-configuration" >}}) for details. {{< /callout >}} -In the example provided above, be aware that the final command (`curl https://$domain/hello.txt`) might encounter a temporary failure accompanied by a warning message `Could not resolve host`. This can occur because different operating systems adopt diverse DNS caching strategies, causing a delay in the availability of the CloudFront distribution's DNS name (e.g., `abc123.cloudfront.net`) within the system. Typically, after a few retries, the command should succeed. It's worth noting that similar behavior can be observed in the actual AWS environment, where CloudFront DNS names may take up to 10-15 minutes to propagate across the network. +In the example provided above, be aware that the final command (`curl https://$domain/hello.txt`) might encounter a temporary failure accompanied by a warning message `Could not resolve host`. +This can occur because different operating systems adopt diverse DNS caching strategies, causing a delay in the availability of the CloudFront distribution's DNS name (e.g., `abc123.cloudfront.net`) within the system. +Typically, after a few retries, the command should succeed. +It's worth noting that similar behavior can be observed in the actual AWS environment, where CloudFront DNS names may take up to 10-15 minutes to propagate across the network. ## Using custom URLs LocalStack Pro supports using an alternate domain name, also referred to as a `CNAME` or custom domain name, to access your applications and file artifacts instead of relying on the domain name generated by CloudFront for your distribution. -To set up the custom domain name, you must configure it in your local DNS server. Once that is done, you can designate the desired domain name as an alias for the target distribution. To achieve this, you'll need to provide the `Aliases` field in the `--distribution-config` option when creating or updating a distribution. The format of this structure is similar to the one used in [AWS CloudFront options](https://docs.aws.amazon.com/cli/latest/reference/cloudfront/create-distribution.html#options). +To set up the custom domain name, you must configure it in your local DNS server. +Once that is done, you can designate the desired domain name as an alias for the target distribution. +To achieve this, you'll need to provide the `Aliases` field in the `--distribution-config` option when creating or updating a distribution. 
+The format of this structure is similar to the one used in [AWS CloudFront options](https://docs.aws.amazon.com/cli/latest/reference/cloudfront/create-distribution.html#options). -In the given example, two domains are specified as `Aliases` for a distribution. Please note that a complete configuration would entail additional values relevant to the distribution, which have been omitted here for brevity. +In the given example, two domains are specified as `Aliases` for a distribution. +Please note that a complete configuration would entail additional values relevant to the distribution, which have been omitted here for brevity. {{< command >}} --distribution-config {...'Aliases':'{'Quantity':2, 'Items': ['custom.domain.one', 'customDomain.two']}'...} @@ -61,7 +74,8 @@ In the given example, two domains are specified as `Aliases` for a distribution. ## Resource Browser -The LocalStack Web Application provides a Resource Browser for CloudFront, which allows you to view and manage your CloudFront distributions. You can access the Resource Browser by opening the LocalStack Web Application in your browser, navigating to the **Resource Browser** section, and then clicking on **CloudFront** under the **Analytics** section. +The LocalStack Web Application provides a Resource Browser for CloudFront, which allows you to view and manage your CloudFront distributions. +You can access the Resource Browser by opening the LocalStack Web Application in your browser, navigating to the **Resource Browser** section, and then clicking on **CloudFront** under the **Analytics** section. CloudFront Resource Browser
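The abbreviated `Aliases` snippet shown in the custom URLs section above omits the other fields that `CreateDistribution` requires. As a rough sketch of what a complete request could look like (the caller reference, the origin domain `abc123.s3.localhost.localstack.cloud`, and the cache-behavior values below are illustrative assumptions, not values prescribed by this guide):

```bash
# Sketch: write a minimal DistributionConfig with two custom aliases, then create the distribution.
cat > distribution-config.json <<'EOF'
{
  "CallerReference": "local-dist-with-aliases",
  "Comment": "Local distribution with custom domain aliases",
  "Enabled": true,
  "Aliases": { "Quantity": 2, "Items": ["custom.domain.one", "customDomain.two"] },
  "Origins": {
    "Quantity": 1,
    "Items": [
      {
        "Id": "s3-origin",
        "DomainName": "abc123.s3.localhost.localstack.cloud",
        "S3OriginConfig": { "OriginAccessIdentity": "" }
      }
    ]
  },
  "DefaultCacheBehavior": {
    "TargetOriginId": "s3-origin",
    "ViewerProtocolPolicy": "allow-all",
    "MinTTL": 0,
    "ForwardedValues": { "QueryString": false, "Cookies": { "Forward": "none" } }
  }
}
EOF

awslocal cloudfront create-distribution --distribution-config file://distribution-config.json
```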
diff --git a/content/en/user-guide/aws/cloudtrail/index.md b/content/en/user-guide/aws/cloudtrail/index.md index 4d221d927d..cfd9f0a754 100644 --- a/content/en/user-guide/aws/cloudtrail/index.md +++ b/content/en/user-guide/aws/cloudtrail/index.md @@ -9,19 +9,23 @@ persistence: supported ## Introduction -CloudTrail is a service provided by Amazon Web Services (AWS) that enables you to track and monitor all activities and events within your AWS environment. It records API calls and actions made on your AWS resources, offering an audit trail that helps you understand changes, diagnose issues, and maintain compliance. +CloudTrail is a service provided by Amazon Web Services (AWS) that enables you to track and monitor all activities and events within your AWS environment. +It records API calls and actions made on your AWS resources, offering an audit trail that helps you understand changes, diagnose issues, and maintain compliance. -LocalStack allows you to use the CloudTrail APIs in your local environment to create and manage Event history and trails. The supported APIs are available on our [API coverage page](https://docs.localstack.cloud/references/coverage/coverage_cloudtrail/), which provides information on the extent of CloudTrail's integration with LocalStack. +LocalStack allows you to use the CloudTrail APIs in your local environment to create and manage Event history and trails. +The supported APIs are available on our [API coverage page](https://docs.localstack.cloud/references/coverage/coverage_cloudtrail/), which provides information on the extent of CloudTrail's integration with LocalStack. ## Getting started This guide is designed for users new to CloudTrail and assumes basic knowledge of the AWS CLI and our [`awslocal`](https://github.com/localstack/awscli-local) wrapper script. -Start your LocalStack container using your preferred method. We will demonstrate how to enable S3 object logging to CloudTrail using AWS CLI. +Start your LocalStack container using your preferred method. +We will demonstrate how to enable S3 object logging to CloudTrail using AWS CLI. ### Create a bucket -Before you create a trail, you need to create an S3 bucket where CloudTrail can deliver the log data. You can use the [`mb`](https://docs.aws.amazon.com/cli/latest/reference/s3/mb.html) command to create a bucket: +Before you create a trail, you need to create an S3 bucket where CloudTrail can deliver the log data. +You can use the [`mb`](https://docs.aws.amazon.com/cli/latest/reference/s3/mb.html) command to create a bucket: {{< command >}} $ awslocal s3 mb s3://my-bucket @@ -29,7 +33,9 @@ $ awslocal s3 mb s3://my-bucket ### Create a trail -You can create a trail which would allow the delivery of events to the S3 bucket we created earlier. You can use the [`CreateTrail`](https://docs.aws.amazon.com/awscloudtrail/latest/APIReference/API_CreateTrail.html) API to create a trail. Run the following command to create a trail: +You can create a trail which would allow the delivery of events to the S3 bucket we created earlier. +You can use the [`CreateTrail`](https://docs.aws.amazon.com/awscloudtrail/latest/APIReference/API_CreateTrail.html) API to create a trail. +Run the following command to create a trail: {{< command >}} $ awslocal cloudtrail create-trail \ @@ -39,13 +45,18 @@ $ awslocal cloudtrail create-trail \ ### Enable logging and configure event selectors -You can now enable logging for your trail. 
You can use the [`StartLogging`](https://docs.aws.amazon.com/awscloudtrail/latest/APIReference/API_StartLogging.html) API to enable logging for your trail. Run the following command to enable logging: +You can now enable logging for your trail. +You can use the [`StartLogging`](https://docs.aws.amazon.com/awscloudtrail/latest/APIReference/API_StartLogging.html) API to enable logging for your trail. +Run the following command to enable logging: {{< command >}} $ awslocal cloudtrail start-logging --name MyTrail {{< /command >}} -You can further configure event selectors for the trail. In this example, we will configure the trail to log all S3 object level events. You can use the [`PutEventSelectors`](https://docs.aws.amazon.com/awscloudtrail/latest/APIReference/API_PutEventSelectors.html) API to configure event selectors for your trail. Run the following command to configure event selectors: +You can further configure event selectors for the trail. +In this example, we will configure the trail to log all S3 object level events. +You can use the [`PutEventSelectors`](https://docs.aws.amazon.com/awscloudtrail/latest/APIReference/API_PutEventSelectors.html) API to configure event selectors for your trail. +Run the following command to configure event selectors: {{< command >}} $ awslocal cloudtrail put-event-selectors \ @@ -53,7 +64,8 @@ $ awslocal cloudtrail put-event-selectors \ --event-selectors '[{"ReadWriteType": "All", "IncludeManagementEvents":true, "DataResources": [{"Type": "AWS::S3::Object", "Values": ["arn:aws:s3:::my-bucket/"]}]}]' {{< /command >}} -You can verify if your configuration is correct by using the [`GetEventSelectors`](https://docs.aws.amazon.com/awscloudtrail/latest/APIReference/API_GetEventSelectors.html) API. Run the following command to verify your configuration: +You can verify if your configuration is correct by using the [`GetEventSelectors`](https://docs.aws.amazon.com/awscloudtrail/latest/APIReference/API_GetEventSelectors.html) API. +Run the following command to verify your configuration: {{< command >}} $ awslocal cloudtrail get-event-selectors \ @@ -84,7 +96,8 @@ The following output would be retrieved: ### Test the configuration -You can now test the configuration by creating an object in the S3 bucket. You can use the [`cp`](https://docs.aws.amazon.com/cli/latest/reference/s3/cp.html) command to copy an object in the S3 bucket: +You can now test the configuration by creating an object in the S3 bucket. +You can use the [`cp`](https://docs.aws.amazon.com/cli/latest/reference/s3/cp.html) command to copy an object in the S3 bucket: {{< command >}} $ echo "hello world" > /tmp/hello-world @@ -92,7 +105,9 @@ $ awslocal s3 cp /tmp/hello-world s3://my-bucket/hello-world $ awslocal s3 ls s3://my-bucket {{< /command >}} -You can verify that the object was created in the S3 bucket. You can also verify that the object level event was logged by CloudTrail using the [`LookupEvents`](https://docs.aws.amazon.com/awscloudtrail/latest/APIReference/API_LookupEvents.html) API. Run the following command to verify the event: +You can verify that the object was created in the S3 bucket. +You can also verify that the object level event was logged by CloudTrail using the [`LookupEvents`](https://docs.aws.amazon.com/awscloudtrail/latest/APIReference/API_LookupEvents.html) API. 
+Run the following command to verify the event: {{< command >}} $ awslocal cloudtrail lookup-events \ @@ -104,19 +119,20 @@ The following output would be retrieved: ```json { - "Events": [{ - "EventId": "218785bf-3ec4-4bdd-a055-57eca773294f", - "EventName": "PutObject", - "ReadOnly": "false", + "Events": [{ + "EventId": "218785bf-3ec4-4bdd-a055-57eca773294f", + "EventName": "PutObject", + "ReadOnly": "false", ... - "CloudTrailEvent": "{\"eventVersion\": \"1.08\", ... {\"bucketName\": \"my-bucket\", \"key\": \"hello-world\"} ...}" - }] + "CloudTrailEvent": "{\"eventVersion\": \"1.08\", ... {\"bucketName\": \"my-bucket\", \"key\": \"hello-world\"} ...}" + }] } ``` ## Resource Browser -The LocalStack Web Application provides a Resource Browser for managing CloudTrail's Event History & Trails. You can access the Resource Browser by opening the LocalStack Web Application in your browser, navigating to the **Resources** section, and then clicking on **CloudTrail** under the **Management/Governance** section. +The LocalStack Web Application provides a Resource Browser for managing CloudTrail's Event History & Trails. +You can access the Resource Browser by opening the LocalStack Web Application in your browser, navigating to the **Resources** section, and then clicking on **CloudTrail** under the **Management/Governance** section. CloudTrail Resource Browser
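If you want to reset your environment after trying this out, the walkthrough's resources can be removed again. A possible cleanup sequence, assuming the `MyTrail` trail and `my-bucket` bucket names used above:

```bash
# Stop delivering events, remove the trail, then delete the bucket together with the delivered log objects.
awslocal cloudtrail stop-logging --name MyTrail
awslocal cloudtrail delete-trail --name MyTrail
awslocal s3 rb s3://my-bucket --force
```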
diff --git a/content/en/user-guide/aws/cloudwatch/index.md b/content/en/user-guide/aws/cloudwatch/index.md index 6f484a4e1f..c869728c0f 100644 --- a/content/en/user-guide/aws/cloudwatch/index.md +++ b/content/en/user-guide/aws/cloudwatch/index.md @@ -8,28 +8,35 @@ persistence: supported --- -CloudWatch is a comprehensive monitoring and observability service that Amazon Web Services (AWS) provides. It allows you to collect and track metrics, collect and monitor log files, and set alarms. CloudWatch provides valuable insights into your AWS resources, applications, and services, enabling you to troubleshoot issues, optimize performance, and make informed decisions. +CloudWatch is a comprehensive monitoring and observability service that Amazon Web Services (AWS) provides. +It allows you to collect and track metrics, collect and monitor log files, and set alarms. +CloudWatch provides valuable insights into your AWS resources, applications, and services, enabling you to troubleshoot issues, optimize performance, and make informed decisions. -LocalStack allows you to use CloudWatch APIs on your local machine to create and manage CloudWatch resources, such as custom metrics, alarms, and log groups, for local development and testing purposes. The supported APIs are available on our [API coverage page](https://docs.localstack.cloud/references/coverage/coverage_cloudwatch/), which provides information on the extent of CloudWatch's integration with LocalStack. +LocalStack allows you to use CloudWatch APIs on your local machine to create and manage CloudWatch resources, such as custom metrics, alarms, and log groups, for local development and testing purposes. +The supported APIs are available on our [API coverage page](https://docs.localstack.cloud/references/coverage/coverage_cloudwatch/), which provides information on the extent of CloudWatch's integration with LocalStack. {{< callout >}} -We have introduced an all-new LocalStack-native [CloudWatch provider](https://docs.localstack.cloud/user-guide/aws/cloudwatch/) is available behind a feature flag. You can activate it by configuring `PROVIDER_OVERRIDE_CLOUDWATCH=v2` in your LocalStack configuration. +We have introduced an all-new LocalStack-native [CloudWatch provider](https://docs.localstack.cloud/user-guide/aws/cloudwatch/) is available behind a feature flag. +You can activate it by configuring `PROVIDER_OVERRIDE_CLOUDWATCH=v2` in your LocalStack configuration. -We have migrated from storing data in Python objects within the Moto backend to a more robust system. Now, metrics are efficiently stored in SQLite, and alarm resources are managed using LocalStack stores. +We have migrated from storing data in Python objects within the Moto backend to a more robust system. +Now, metrics are efficiently stored in SQLite, and alarm resources are managed using LocalStack stores. -- Various enhancements have been made to attain greater feature parity with AWS. -- The provider is engineered to ensure thread safety, facilitating smooth concurrent operations. -- There’s a significant improvement in the integrity and durability of data. -- The new provider allows for more efficient data retrieval. +- Various enhancements have been made to attain greater feature parity with AWS. +- The provider is engineered to ensure thread safety, facilitating smooth concurrent operations. +- There’s a significant improvement in the integrity and durability of data. +- The new provider allows for more efficient data retrieval. 
{{< /callout >}} ## Getting started This guide is designed for users new to CloudWatch and assumes basic knowledge of the AWS CLI and our [`awslocal`](https://github.com/localstack/awscli-local) wrapper script. -Start your LocalStack container using your preferred method and deploy your Lambda functions that will generate some logs. You can get the name for your Lambda Functions using the [`ListFunctions`](https://docs.aws.amazon.com/lambda/latest/dg/API_ListFunctions.html) API. +Start your LocalStack container using your preferred method and deploy your Lambda functions that will generate some logs. +You can get the name for your Lambda Functions using the [`ListFunctions`](https://docs.aws.amazon.com/lambda/latest/dg/API_ListFunctions.html) API. -Fetch the Log Groups using the [`DescribeLogGroups`](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_DescribeLogGroups.html) API. Run the following command to get the Log Group name: +Fetch the Log Groups using the [`DescribeLogGroups`](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_DescribeLogGroups.html) API. +Run the following command to get the Log Group name: {{< command >}} $ awslocal logs describe-log-groups @@ -58,7 +65,8 @@ The output should look similar to the following: } ``` -Get the log streams for the Log Group using the [`DescribeLogStreams`](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_DescribeLogStreams.html) API. Run the following command to get the Log Stream name: +Get the log streams for the Log Group using the [`DescribeLogStreams`](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_DescribeLogStreams.html) API. +Run the following command to get the Log Stream name: {{< command >}} $ awslocal logs describe-log-streams \ @@ -84,7 +92,8 @@ The output should look similar to the following: } ``` -You can now fetch the log events using the [`GetLogEvents`](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_GetLogEvents.html) API. Run the following command to get the logs: +You can now fetch the log events using the [`GetLogEvents`](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_GetLogEvents.html) API. +Run the following command to get the logs: {{< command >}} $ awslocal logs get-log-events \ @@ -123,13 +132,16 @@ You can use [filters](https://docs.aws.amazon.com/cli/latest/reference/logs/filt ## Metric Alarms -Alarms in CloudWatch are crucial in monitoring specific data thresholds and automating actions based on those thresholds. To learn more about how alarms are evaluated in general, please refer to the [AWS documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarm-evaluation). +Alarms in CloudWatch are crucial in monitoring specific data thresholds and automating actions based on those thresholds. +To learn more about how alarms are evaluated in general, please refer to the [AWS documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarm-evaluation). -In LocalStack, you can use metric-alarm evaluation, explicitly utilizing the statistic and comparison-operator functionalities. These features enable you to define and evaluate alarms based on various statistical calculations and comparison operators. +In LocalStack, you can use metric-alarm evaluation, explicitly utilizing the statistic and comparison-operator functionalities. 
+These features enable you to define and evaluate alarms based on various statistical calculations and comparison operators. ### Metric Alarm Examples -Metric alarms in CloudWatch allow you to evaluate the state of a metric by analyzing its data points over a specified period. With metric alarms, you can create customized thresholds and define actions based on the metric's behavior. +Metric alarms in CloudWatch allow you to evaluate the state of a metric by analyzing its data points over a specified period. +With metric alarms, you can create customized thresholds and define actions based on the metric's behavior. To get started with creating an alarm in LocalStack using the `awslocal` integration, use the following command: @@ -160,13 +172,17 @@ $ awslocal cloudwatch put-metric-data \ --metric-data '[{"MetricName": "Orders", "Value": -1}]' {{< / command >}} -Within a few seconds, the alarm state should change to **ALARM**, and eventually, it will go back to **OK** as we configured it to treat missing data points as `not breaching`. This allows you to observe how the alarm behaves in response to the provided data. +Within a few seconds, the alarm state should change to **ALARM**, and eventually, it will go back to **OK** as we configured it to treat missing data points as `not breaching`. +This allows you to observe how the alarm behaves in response to the provided data. #### Metric Alarm with Action -When the state of an alarm changes, actions can be triggered accordingly. In LocalStack, you can configure `alarm-actions`, `ok-actions`, and `insufficient-data-actions` to specify the actions to be taken. Currently, only SNS Topics are supported as the target for these actions, and it's important to note that the topic must be created beforehand. +When the state of an alarm changes, actions can be triggered accordingly. +In LocalStack, you can configure `alarm-actions`, `ok-actions`, and `insufficient-data-actions` to specify the actions to be taken. +Currently, only SNS Topics are supported as the target for these actions, and it's important to note that the topic must be created beforehand. -Here's an example demonstrating how to set up an alarm that sends a message to the specified topic when entering the **ALARM** state. Make sure to replace `` with the valid ARN of an existing SNS topic. +Here's an example demonstrating how to set up an alarm that sends a message to the specified topic when entering the **ALARM** state. +Make sure to replace `` with the valid ARN of an existing SNS topic. {{< command >}} $ awslocal cloudwatch put-metric-alarm \ @@ -179,10 +195,11 @@ $ awslocal cloudwatch put-metric-alarm \ --period 300 \ --statistic Maximum \ --treat-missing notBreaching \ - --alarm-actions + --alarm-actions {{< / command >}} -By executing this command, you'll create an alarm named `my-alarm` that monitors the `Orders` metric in the `test` namespace. If the metric value exceeds the threshold of 50 (using the `GreaterThanThreshold` operator) during a single evaluation period of 300 seconds, the alarm will trigger the specified action on the provided SNS topic. +By executing this command, you'll create an alarm named `my-alarm` that monitors the `Orders` metric in the `test` namespace. +If the metric value exceeds the threshold of 50 (using the `GreaterThanThreshold` operator) during a single evaluation period of 300 seconds, the alarm will trigger the specified action on the provided SNS topic. 
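As a small sketch of how you could wire this up end to end locally (the topic and queue names below are arbitrary examples; on LocalStack the SQS subscription usually receives the notification without the extra queue access policy that real AWS would require):

```bash
# Create the SNS topic used as the alarm action and an SQS queue to observe the notification.
topic_arn=$(awslocal sns create-topic --name my-alarm-topic --query TopicArn --output text)
queue_url=$(awslocal sqs create-queue --queue-name my-alarm-queue --query QueueUrl --output text)
queue_arn=$(awslocal sqs get-queue-attributes --queue-url "$queue_url" \
  --attribute-names QueueArn --query Attributes.QueueArn --output text)
awslocal sns subscribe --topic-arn "$topic_arn" --protocol sqs --notification-endpoint "$queue_arn"

# Pass "$topic_arn" as the --alarm-actions value in the put-metric-alarm call above; once the
# alarm transitions to ALARM, the notification should become visible in the queue.
awslocal sqs receive-message --queue-url "$queue_url"
```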
{{< callout "warning" >}} Please be aware of the following known limitations in LocalStack: @@ -196,12 +213,13 @@ Please be aware of the following known limitations in LocalStack: LocalStack supports the following AWS services for integration with CloudWatch metrics: -- **SQS**: Supports `Approximate*` metrics, `NumberOfMessagesSent`, and other metrics triggered by events such as message received or sending. -- **Lambda**: Supports `Invocations` and `Errors` metrics. +- **SQS**: Supports `Approximate*` metrics, `NumberOfMessagesSent`, and other metrics triggered by events such as message received or sending. +- **Lambda**: Supports `Invocations` and `Errors` metrics. ## Resource Browser -The LocalStack Web Application provides a Resource Browser for managing CloudWatch logs. You can access the Resource Browser by opening the LocalStack Web Application in your browser and navigating to the Resources section, then clicking on [**CloudWatch Logs**](https://app.localstack.cloud/resources/cloudwatch/groups) and [**CloudWatch Metrics**](https://app.localstack.cloud/resources/monitoring) under the **Management/Governance** section. +The LocalStack Web Application provides a Resource Browser for managing CloudWatch logs. +You can access the Resource Browser by opening the LocalStack Web Application in your browser and navigating to the Resources section, then clicking on [**CloudWatch Logs**](https://app.localstack.cloud/resources/cloudwatch/groups) and [**CloudWatch Metrics**](https://app.localstack.cloud/resources/monitoring) under the **Management/Governance** section. The Resource Browser allows you to perform the following actions: @@ -209,14 +227,14 @@ The Resource Browser allows you to perform the following actions: CloudWatch Metrics Resource Browser -* **Create Log Group**: Create a new log group by specifying the `Log Group Name`, `KMS Key ID`, and `Tags`. -* **Put metric**: Create a new metric by specifying the `Namespace` and `Metric Data`. -* **Put Alarm**: Create a new alarm by specifying the `Alarm Name`, `Alarm Description`, `Actions Enabled`, `Metric Name`, `Namespace`, `Statistic`, `Comparison Operator`, `Threshold`, `Evaluation Periods`, `Period`, `Unit`, `Treat Missing Data`, `Tags`, and `Alarm Actions`. -* **Check the Resources**: View and manage existing log groups, metrics, and alarms and perform actions such as `Delete`, `View`, and `Edit`. +- **Create Log Group**: Create a new log group by specifying the `Log Group Name`, `KMS Key ID`, and `Tags`. +- **Put metric**: Create a new metric by specifying the `Namespace` and `Metric Data`. +- **Put Alarm**: Create a new alarm by specifying the `Alarm Name`, `Alarm Description`, `Actions Enabled`, `Metric Name`, `Namespace`, `Statistic`, `Comparison Operator`, `Threshold`, `Evaluation Periods`, `Period`, `Unit`, `Treat Missing Data`, `Tags`, and `Alarm Actions`. +- **Check the Resources**: View and manage existing log groups, metrics, and alarms and perform actions such as `Delete`, `View`, and `Edit`. ## Examples The following code snippets and sample applications provide practical examples of how to use CloudWatch in LocalStack for various use cases: -* [Creating Cloudwatch metric alarms](https://github.com/localstack/localstack-pro-samples/tree/master/cloudwatch-metrics-aws) to demonstrate a simple example for creating CloudWatch metric alarm based on the metrics of a failing Lambda function. 
-* [Event-driven architecture with Amazon SNS FIFO, DynamoDB, Lambda, and S3](https://github.com/localstack/event-driven-architecture-with-amazon-sns-fifo) to deploy a recruiting agency application with a job listings website and view the CloudWatch logs. +- [Creating Cloudwatch metric alarms](https://github.com/localstack/localstack-pro-samples/tree/master/cloudwatch-metrics-aws) to demonstrate a simple example for creating CloudWatch metric alarm based on the metrics of a failing Lambda function. +- [Event-driven architecture with Amazon SNS FIFO, DynamoDB, Lambda, and S3](https://github.com/localstack/event-driven-architecture-with-amazon-sns-fifo) to deploy a recruiting agency application with a job listings website and view the CloudWatch logs. diff --git a/content/en/user-guide/aws/codecommit/index.md b/content/en/user-guide/aws/codecommit/index.md index f1ed7518a0..87eae7e087 100644 --- a/content/en/user-guide/aws/codecommit/index.md +++ b/content/en/user-guide/aws/codecommit/index.md @@ -95,7 +95,8 @@ To git://localhost:4510/localstack-repo ## Resource Browser -The LocalStack Web Application provides a Resource Browser for managing CodeCommit repositories. You can access the Resource Browser by opening the LocalStack Web Application in your browser, navigating to the **Resource Browser** section, and then clicking on **CodeCommit** under the **Developer Tools** section. +The LocalStack Web Application provides a Resource Browser for managing CodeCommit repositories. +You can access the Resource Browser by opening the LocalStack Web Application in your browser, navigating to the **Resource Browser** section, and then clicking on **CodeCommit** under the **Developer Tools** section. CodeCommit Resource Browser diff --git a/content/en/user-guide/aws/cognito/index.md b/content/en/user-guide/aws/cognito/index.md index 40405f9633..383c95405d 100644 --- a/content/en/user-guide/aws/cognito/index.md +++ b/content/en/user-guide/aws/cognito/index.md @@ -9,19 +9,24 @@ persistence: supported ## Introduction -Cognito is a managed identity service provided by AWS that is used for securing user authentication, authorization, and managing user identities in web and mobile applications. Cognito enables developers to add user sign-up, sign-in, and access control functionalities to their applications. Cognito supports various authentication methods, including social identity providers, SAML-based identity providers, and custom authentication flows. +Cognito is a managed identity service provided by AWS that is used for securing user authentication, authorization, and managing user identities in web and mobile applications. +Cognito enables developers to add user sign-up, sign-in, and access control functionalities to their applications. +Cognito supports various authentication methods, including social identity providers, SAML-based identity providers, and custom authentication flows. -LocalStack allows you to use the Cognito APIs in your local environment to manage authentication and access control for your local application and resources. The supported APIs are available on our [Cognito Identity coverage page](https://docs.localstack.cloud/references/coverage/coverage_cognito-identity/) and [Cognito User Pools coverage page](https://docs.localstack.cloud/references/coverage/coverage_cognito-idp/), which provides information on the extent of Cognito's integration with LocalStack. 
+LocalStack allows you to use the Cognito APIs in your local environment to manage authentication and access control for your local application and resources. +The supported APIs are available on our [Cognito Identity coverage page](https://docs.localstack.cloud/references/coverage/coverage_cognito-identity/) and [Cognito User Pools coverage page](https://docs.localstack.cloud/references/coverage/coverage_cognito-idp/), which provides information on the extent of Cognito's integration with LocalStack. ## Getting started This guide is designed for users new to Cognito and assumes basic knowledge of the AWS CLI and our [`awslocal`](https://github.com/localstack/awscli-local) wrapper script. -Start your LocalStack container using your preferred method. We will demonstrate how you can create a Cognito user pool and client, and then sign up and authenticate a new user in the pool. +Start your LocalStack container using your preferred method. +We will demonstrate how you can create a Cognito user pool and client, and then sign up and authenticate a new user in the pool. ### Creating a User Pool -To create a user pool, you can use the [`CreateUserPool`](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_CreateUserPool.html) API call. The following command creates a user pool named `test`: +To create a user pool, you can use the [`CreateUserPool`](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_CreateUserPool.html) API call. +The following command creates a user pool named `test`: {{< command >}} $ awslocal cognito-idp create-user-pool --pool-name test @@ -59,7 +64,8 @@ You can see an output similar to the following: } ``` -You will need the user pool's `id` for further operations. Save it in a `pool_id` variable: +You will need the user pool's `id` for further operations. +Save it in a `pool_id` variable: {{< command >}} $ pool_id= @@ -73,7 +79,10 @@ $ pool_id=$(awslocal cognito-idp create-user-pool --pool-name test | jq -rc ".Us ### Adding a Client -You can proceed with adding a client to the pool we just created. You will require the ID of the newly created client for the subsequent steps. You can use the [`CreateUserPoolClient`](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_CreateUserPoolClient.html) for both client creation and extraction of the corresponding ID. Run the following command: +You can proceed with adding a client to the pool we just created. +You will require the ID of the newly created client for the subsequent steps. +You can use the [`CreateUserPoolClient`](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_CreateUserPoolClient.html) for both client creation and extraction of the corresponding ID. +Run the following command: {{< command >}} $ client_id=$(awslocal cognito-idp create-user-pool-client --user-pool-id $pool_id --client-name test-client | jq -rc ".UserPoolClient.ClientId") @@ -81,7 +90,8 @@ $ client_id=$(awslocal cognito-idp create-user-pool-client --user-pool-id $pool_ ### Using Predefined IDs for Pool Creation -When creating Cognito user or identity pools, you have the flexibility to utilize a predefined ID by setting the tag `_custom_id_`. This feature proves particularly useful during the testing of authentication flows, especially when dealing with scenarios involving frequent restarts of LocalStack and the recreation of resources. 
+When creating Cognito user or identity pools, you have the flexibility to utilize a predefined ID by setting the tag `_custom_id_`. +This feature proves particularly useful during the testing of authentication flows, especially when dealing with scenarios involving frequent restarts of LocalStack and the recreation of resources. Please note that a valid custom id must be in the format `_`. Run the following command to create a user pool with a predefined ID: @@ -107,10 +117,10 @@ $ awslocal cognito-idp create-user-pool-client --user-pool-id us-east-1_myid123 ... {{< /command >}} - ### Signing up and confirming a user -You can now use the [`SignUp`](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_SignUp.html) API to sign up a user. Run the following command: +You can now use the [`SignUp`](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_SignUp.html) API to sign up a user. +Run the following command: {{< command >}} $ awslocal cognito-idp sign-up \ @@ -129,14 +139,17 @@ You can see an output similar to the following: } ``` -Once the user is successfully created, a confirmation code will be generated. This code can be found in the LocalStack container logs (as shown below). Additionally, if you have [SMTP configured](#smtp-integration), the confirmation code can be optionally sent via email for enhanced convenience and user experience. +Once the user is successfully created, a confirmation code will be generated. +This code can be found in the LocalStack container logs (as shown below). +Additionally, if you have [SMTP configured]({{< ref "configuration#emails" >}}), the confirmation code can be optionally sent via email for enhanced convenience and user experience. ```bash INFO:localstack_ext.services.cognito.cognito_idp_api: Confirmation code for Cognito user example_user: 125796 DEBUG:localstack_ext.bootstrap.email_utils: Sending confirmation code via email to "your.email@address.com" ``` -You can confirm the user with the activation code, using the [`ConfirmSignUp`](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_ConfirmSignUp.html) API. Execute the following command: +You can confirm the user with the activation code, using the [`ConfirmSignUp`](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_ConfirmSignUp.html) API. +Execute the following command: {{< command >}} $ awslocal cognito-idp confirm-sign-up \ @@ -145,7 +158,8 @@ $ awslocal cognito-idp confirm-sign-up \ --confirmation-code {{< /command >}} -Since the above command does not provide a direct response, we need to verify the success of the request by checking the pool. Run the following command to use the [`ListUsers`](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_ListUsers.html) API to list the users in the pool: +Since the above command does not provide a direct response, we need to verify the success of the request by checking the pool. 
+Run the following command to use the [`ListUsers`](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_ListUsers.html) API to list the users in the pool: {{< command "hl_lines=21" >}} $ awslocal cognito-idp list-users --user-pool-id $pool_id @@ -176,7 +190,8 @@ $ awslocal cognito-idp list-users --user-pool-id $pool_id ## JWT Token Issuer and JSON Web Key Sets (JWKS) endpoints -When Cognito creates JWT tokens, they include an issuer (`iss`) attribute that specifies the endpoint of the corresponding user pool. Generally, the issuer endpoint follows this format, with `` being the ID of the Cognito user pool: +When Cognito creates JWT tokens, they include an issuer (`iss`) attribute that specifies the endpoint of the corresponding user pool. +Generally, the issuer endpoint follows this format, with `` being the ID of the Cognito user pool: ```bash http://localhost:4566/ @@ -206,7 +221,9 @@ $ curl http://localhost:4566/.well-known/jwks_uri Cognito offers a variety of lifecycle hooks called Cognito Lambda triggers, which allow you to react to different lifecycle events and customize the behavior of user signup, confirmation, migration, and more. -To illustrate, suppose you wish to define a _user migration_ Lambda trigger. In this case, you can start by creating a Lambda function, let's say named `"f1"`, responsible for performing the migration. Subsequently, you can define the corresponding `--lambda-config` when creating the user pool to link it with the Lambda function: +To illustrate, suppose you wish to define a _user migration_ Lambda trigger. +In this case, you can start by creating a Lambda function, let's say named `"f1"`, responsible for performing the migration. +Subsequently, you can define the corresponding `--lambda-config` when creating the user pool to link it with the Lambda function: {{< command >}} $ awslocal cognito-idp create-user-pool \ @@ -223,17 +240,20 @@ You can access the local [Cognito login form](https://docs.aws.amazon.com/cognit ```bash https://localhost.localstack.cloud/_aws/cognito-idp/login?response_type=code&client_id=&redirect_uri= ``` + Replace `` with the ID of your existing user pool client (for example, `example_user`), and `` with the redirect URI specific to your application (e.g., `http://example.com`). The login form should look similar to the screenshot below: {{< figure src="cognitoLogin.png" width="320" >}} -Upon successful login, the page will automatically redirect to the designated ``, with an appended path parameter `?code=`. For instance, the redirect URL might look like `http://example.com?code=test123`. +Upon successful login, the page will automatically redirect to the designated ``, with an appended path parameter `?code=`. +For instance, the redirect URL might look like `http://example.com?code=test123`. To obtain a token, you need to submit the received code using `grant_type=authorization_code` to LocalStack's implementation of the Cognito OAuth2 TOKEN Endpoint, which is documented [on the AWS Cognito Token endpoint page](https://docs.aws.amazon.com/cognito/latest/developerguide/token-endpoint.html). -Note that the value of the `redirect_uri` parameter in your token request must match the value provided during the login process. Ensuring this match is crucial for the proper functioning of the authentication flow. +Note that the value of the `redirect_uri` parameter in your token request must match the value provided during the login process. 
+Ensuring this match is crucial for the proper functioning of the authentication flow. ```sh % curl \ @@ -280,11 +300,14 @@ resources: ... ``` -After configuring the Serverless setup, you can deploy it using `serverless deploy --stage local`. The provided example includes a Lambda function called `http_request` that's linked to an API Gateway endpoint. +After configuring the Serverless setup, you can deploy it using `serverless deploy --stage local`. +The provided example includes a Lambda function called `http_request` that's linked to an API Gateway endpoint. -Once deployed, the `v1/request` API Gateway endpoint will be protected by the Cognito user pool named "`ExampleUserPool`". As a result, you can register users against the local pool using the same API calls as you would with AWS. +Once deployed, the `v1/request` API Gateway endpoint will be protected by the Cognito user pool named "`ExampleUserPool`". +As a result, you can register users against the local pool using the same API calls as you would with AWS. -To send requests to the secured API Gateway endpoint, you need to fetch identity credentials from the local Cognito API. These credentials can then be included as `Authentication` HTTP headers (where `test-1234567` represents the name of the access key ID generated by Cognito): +To send requests to the secured API Gateway endpoint, you need to fetch identity credentials from the local Cognito API. +These credentials can then be included as `Authentication` HTTP headers (where `test-1234567` represents the name of the access key ID generated by Cognito): ```bash Authentication: AWS4-HMAC-SHA256 Credential=test-1234567/20190821/us-east-1/cognito-idp/aws4_request ... @@ -292,7 +315,8 @@ Authentication: AWS4-HMAC-SHA256 Credential=test-1234567/20190821/us-east-1/cogn ## Resource Browser -The LocalStack Web Application provides a Resource Browser for managing Cognito User Pools, and more. You can access the Resource Browser by opening the LocalStack Web Application in your browser, navigating to the **Resources** section, and then clicking on **Cognito** under the **Security Identity Compliance** section. +The LocalStack Web Application provides a Resource Browser for managing Cognito User Pools, and more. +You can access the Resource Browser by opening the LocalStack Web Application in your browser, navigating to the **Resources** section, and then clicking on **Cognito** under the **Security Identity Compliance** section. Cognito Resource Browser @@ -315,4 +339,6 @@ The following code snippets and sample applications provide practical examples o ## Current Limitations -By default, LocalStack's Cognito does not send actual email messages. However, if you wish to enable this feature, you will need to provide an email address and configure the corresponding SMTP settings. The instructions on configuring the connection parameters of your SMTP server can be found in the [Configuration]({{< ref "configuration#emails" >}}) guide to allow your local Cognito environment to send email notifications. +By default, LocalStack's Cognito does not send actual email messages. +However, if you wish to enable this feature, you will need to provide an email address and configure the corresponding SMTP settings. +The instructions on configuring the connection parameters of your SMTP server can be found in the [Configuration]({{< ref "configuration#emails" >}}) guide to allow your local Cognito environment to send email notifications. 
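For completeness, here is a sketch of how those SMTP settings could be passed to the container. The `SMTP_*` variable names follow the Configuration guide's email section (verify them there), and the host, credentials, and auth token below are placeholders for your own setup:

```bash
# Start LocalStack Pro with SMTP settings so local Cognito can deliver confirmation emails.
docker run --rm -it -p 4566:4566 \
  -e LOCALSTACK_AUTH_TOKEN="$LOCALSTACK_AUTH_TOKEN" \
  -e SMTP_HOST=host.docker.internal:1025 \
  -e SMTP_USER=smtp-user \
  -e SMTP_PASS=smtp-password \
  -e SMTP_EMAIL=sender@example.com \
  localstack/localstack-pro
```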
diff --git a/content/en/user-guide/aws/dms/index.md b/content/en/user-guide/aws/dms/index.md index 62c56d28ba..4444b42fec 100644 --- a/content/en/user-guide/aws/dms/index.md +++ b/content/en/user-guide/aws/dms/index.md @@ -7,15 +7,15 @@ tags: ["Enterprise plan"] ## Introduction -AWS Database Migration Service provides migration solution from databases, data warehouses, and other type of data stores (e.g. S3, SAP). +AWS Database Migration Service provides migration solution from databases, data warehouses, and other type of data stores (e.g. S3, SAP). The migration can be homogeneous (source and target have the same type), but often times is heterogeneous as it supports migration from various sources to various targets (self-hosted and AWS services). -LocalStack only supports selected use cases for DMS at the moment. +LocalStack only supports selected use cases for DMS at the moment. The supported APIs are available on our [API coverage page](https://docs.localstack.cloud/references/coverage/coverage_dms/), which provides information on the extent of DMS integration with LocalStack. {{< callout "note">}} -DMS is in a preview state, supporting only [selected use cases](#supported-use-cases). -It is only available as part of the **LocalStack Enterprise** plan, and you need to set the env `ENABLE_DMS=1` in order to activate it. +DMS is in a preview state, supporting only [selected use cases](#supported-use-cases). +It is only available as part of the **LocalStack Enterprise** plan, and you need to set the env `ENABLE_DMS=1` in order to activate it. If you'd like to try it out, please [contact us](https://www.localstack.cloud/demo) to request access. {{< /callout >}} @@ -60,30 +60,30 @@ STARTING FULL LOAD FLOW ************ db endpoint: localhost:3306 - Cleaning tables - Creating tables - Inserting data + Cleaning tables + Creating tables + Inserting data - Added the following authors + Added the following authors [{'first_name': 'John', 'last_name': 'Doe'}] - Added the following accounts + Added the following accounts [{'account_balance': Decimal('1500.00'), 'name': 'Alice'}] - Added the following novels + Added the following novels [{'author_id': 1, 'title': 'The Great Adventure'}, {'author_id': 1, 'title': 'Journey to the Stars'}] ****Full Task 1**** - Starting Full load task 1 a% + Starting Full load task 1 a% Replication Task arn:aws:dms:us-east-1:000000000000:task:FQWFF7YIZ4VGQHBIXCLI9FJTUUS17NSECIM0UR7 status: starting Waiting for task status stopped task='arn:aws:dms:us-east-1:000000000000:task:FQWFF7YIZ4VGQHBIXCLI9FJTUUS17NSECIM0UR7' status='starting' task='arn:aws:dms:us-east-1:000000000000:task:FQWFF7YIZ4VGQHBIXCLI9FJTUUS17NSECIM0UR7' status='stopped' - Kinesis events + Kinesis events fetching Kinesis event Received: 6 events @@ -109,13 +109,12 @@ Received: 6 events ... 
```
-
## Supported Use Cases

DMS is in a preview state on LocalStack and only supports some selected use cases:

| Source | Target | Migration Types |
-| - | - | - |
+| - | - | - |
| MariaDB (external) | Kinesis | full-load, cdc |
| MySQL (external) | Kinesis | full-load, cdc |
| RDS MariaDB | Kinesis | full-load, cdc |
@@ -125,9 +124,9 @@ DMS is in a preview state on LocalStack and only supports some selected use case

The LocalStack Web Application provides a Resource Browser for managing:

-- [Replication Instances](https://app.localstack.cloud/inst/default/resources/dms/replication-instances)
-- [Endpoints](https://app.localstack.cloud/inst/default/resources/dms/endpoints)
-- [Replication Tasks](https://app.localstack.cloud/inst/default/resources/dms/replication-tasks)
+* [Replication Instances](https://app.localstack.cloud/inst/default/resources/dms/replication-instances)
+* [Endpoints](https://app.localstack.cloud/inst/default/resources/dms/endpoints)
+* [Replication Tasks](https://app.localstack.cloud/inst/default/resources/dms/replication-tasks)

You can access the Resource Browser by opening the LocalStack Web Application in your browser, navigating to the **Resources** section, and then clicking on **Database Migration Service** under the **Migration and transfer** section.

@@ -138,25 +137,26 @@ The Resource Browser supports CRD (Create, Read, Delete) operations on DMS resou

### Replication Instances

-- **Create Replication Instance**: To create a new replication instance, click the **Create Replication Instance** button and enter details such as the Replication Instance Identifier and Replication Instance class.
-- **View Replication Instance**: To view details of a replication instance, click on its ARN.
-- **Delete Replication Instance**: To delete a replication instance, select it, go to **Actions**, and choose **Remove Selected**.
+* **Create Replication Instance**: To create a new replication instance, click the **Create Replication Instance** button and enter details such as the Replication Instance Identifier and Replication Instance class.
+* **View Replication Instance**: To view details of a replication instance, click on its ARN.
+* **Delete Replication Instance**: To delete a replication instance, select it, go to **Actions**, and choose **Remove Selected**.

### Endpoints

-- **Create Endpoint**: To create a new endpoint, click on the **Create Endpoint** button and fill in necessary details such as the Endpoint Identifier, Endpoint Type, and Engine Name.
-- **View Endpoint**: To see the details of an endpoint, click on its ARN. You can further click **Connections** and test a conenction by specifying the Replication Instance ARN.
-- **Delete Endpoint**: To remove an endpoint, select it, navigate to **Actions**, and click **Remove Selected**.
+* **Create Endpoint**: To create a new endpoint, click on the **Create Endpoint** button and fill in necessary details such as the Endpoint Identifier, Endpoint Type, and Engine Name.
+* **View Endpoint**: To see the details of an endpoint, click on its ARN.
+  You can further click **Connections** and test a connection by specifying the Replication Instance ARN.
+* **Delete Endpoint**: To remove an endpoint, select it, navigate to **Actions**, and click **Remove Selected**.

### Replication Tasks

-- **Create Replication Task**: To create a new replication task, press the **Create Replication Task** button and specify the Task Identifier, Source Endpoint Identifier, and Target Endpoint Identifier, among other settings.
-- **View Replication Task**: To review a replication task, click on the task identifier. -- **Delete Replication Task**: To delete a replication task, choose the task, click on **Actions**, and select **Remove Selected**. +* **Create Replication Task**: To create a new replication task, press the **Create Replication Task** button and specify the Task Identifier, Source Endpoint Identifier, and Target Endpoint Identifier, among other settings. +* **View Replication Task**: To review a replication task, click on the task identifier. +* **Delete Replication Task**: To delete a replication task, choose the task, click on **Actions**, and select **Remove Selected**. ## Current Limitations -For RDS MariaDB and RDS MySQL it is not yet possible to set custom db-parameters. +For RDS MariaDB and RDS MySQL, it is not yet possible to set custom db-parameters. In order to make those databases work with `cdc` migration for DMS, some default db-parameters are changed upon start if the `ENABLE_DMS=1` flag is set: ```sh @@ -167,29 +167,27 @@ server_id=1 log_bin=mysqld-bin ``` - ### Enum Values for CDC data events To support Enum values for CDC data events, you need to enable the database setting `BINLOG_ROW_METADATA=FULL` ### Migration Type -A replication task on LocalStack does currently only support `full-load` (migrate existing data) or `cdc` (replicate data changes only). +A replication task on LocalStack currently only supports `full-load` (migrate existing data) or `cdc` (replicate data changes only). On AWS there is also a combination for those, which is not yet implemented on LocalStack. ### ReplicationTaskSettings The `ReplicationTaskSettings` for a [replication task](https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Tasks.CustomizingTasks.TaskSettings.html) only considers `BeforeImageSettings`, `FullLoadSettings.CommitRate` and `FullLoadSettings.TargetTablePrepMode` - ### Other Limitations -- [DMS Serverless](https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Serverless.html) is not yet supported -- [Data Validation](https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Validating.html#CHAP_Validating.TaskStatistics) is not supported -- [Reload](https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Tasks.ReloadTables.html) of tables is not supported -- [Task Logs](https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Monitoring.html#CHAP_Monitoring.ManagingLogs), specifically CloudWatch, and CloudTrail are not supported (table statistics are supported) -- [Time Travel](https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Tasks.CustomizingTasks.TaskSettings.TimeTravel.html) is not supported -- [Target Metadata Settings](https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Tasks.CustomizingTasks.TaskSettings.TargetMetadata.html): `ParallelLoadThreads` is not supported -- [Transformation](https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Tasks.CustomizingTasks.TableMapping.SelectionTransformation.Transformations.html): `"rule-type": "transformation"` is not supported -- [AWS DMS Schema Conversion Tool](https://docs.aws.amazon.com/dms/latest/userguide/CHAP_SchemaConversion.html) is not supported -- [AWS DMS Fleet Advisor](https://docs.aws.amazon.com/dms/latest/userguide/CHAP_FleetAdvisor.html) is not supported +* [DMS Serverless](https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Serverless.html) is not yet supported +* [Data Validation](https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Validating.html#CHAP_Validating.TaskStatistics) is not supported +* 
[Reload](https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Tasks.ReloadTables.html) of tables is not supported +* [Task Logs](https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Monitoring.html#CHAP_Monitoring.ManagingLogs), specifically CloudWatch, and CloudTrail are not supported (table statistics are supported) +* [Time Travel](https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Tasks.CustomizingTasks.TaskSettings.TimeTravel.html) is not supported +* [Target Metadata Settings](https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Tasks.CustomizingTasks.TaskSettings.TargetMetadata.html): `ParallelLoadThreads` is not supported +* [Transformation](https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Tasks.CustomizingTasks.TableMapping.SelectionTransformation.Transformations.html): `"rule-type": "transformation"` is not supported +* [AWS DMS Schema Conversion Tool](https://docs.aws.amazon.com/dms/latest/userguide/CHAP_SchemaConversion.html) is not supported +* [AWS DMS Fleet Advisor](https://docs.aws.amazon.com/dms/latest/userguide/CHAP_FleetAdvisor.html) is not supported diff --git a/content/en/user-guide/aws/docdb/index.md b/content/en/user-guide/aws/docdb/index.md index 3c3f1d763a..0f94a4ae74 100644 --- a/content/en/user-guide/aws/docdb/index.md +++ b/content/en/user-guide/aws/docdb/index.md @@ -8,9 +8,11 @@ description: > ## Introduction -DocumentDB is a fully managed, non-relational database service that supports MongoDB workloads. DocumentDB is compatible with MongoDB, meaning you can use the same MongoDB drivers, applications, and tools to run, manage, and scale workloads on DocumentDB without having to worry about managing the underlying infrastructure. +DocumentDB is a fully managed, non-relational database service that supports MongoDB workloads. +DocumentDB is compatible with MongoDB, meaning you can use the same MongoDB drivers, applications, and tools to run, manage, and scale workloads on DocumentDB without having to worry about managing the underlying infrastructure. -LocalStack allows you to use the DocumentDB APIs to create and manage DocumentDB clusters and instances. The supported APIs are available on our [API coverage page](https://docs.localstack.cloud/references/coverage/coverage_docdb/), which provides information on the extent of DocumentDB's integration with LocalStack. +LocalStack allows you to use the DocumentDB APIs to create and manage DocumentDB clusters and instances. +The supported APIs are available on our [API coverage page](https://docs.localstack.cloud/references/coverage/coverage_docdb/), which provides information on the extent of DocumentDB's integration with LocalStack. ## Getting started @@ -49,13 +51,16 @@ If we break down the previous command, we can identify: - `docdb`: The command related to Amazon DocumentDB for the `AWS CLI`. - `create-db-cluster`: The command to create an Amazon DocumentDB cluster. - `--db-cluster-identifier test-docdb-cluster`: Specifies the unique identifier for the DocumentDB - cluster. In this case, it is set to `test-docdb-cluster`. You can customize this identifier to a + cluster. + In this case, it is set to `test-docdb-cluster`. + You can customize this identifier to a name of your choice. -- `--engine docdb`: Specifies the database engine. Here, it is set to `docdb`, indicating the use of +- `--engine docdb`: Specifies the database engine. + Here, it is set to `docdb`, indicating the use of Amazon DocumentDB. 
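For reference, a minimal sketch of the cluster-creation command that the flag breakdown above refers to, assembled from the flag values named in the list (the command shown earlier in the guide may differ slightly):

```bash
# Sketch: create the local DocumentDB cluster referenced by the flag breakdown above.
# The identifier and engine are the values named in the list; adjust them as needed.
awslocal docdb create-db-cluster \
  --db-cluster-identifier test-docdb-cluster \
  --engine docdb
```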
Notice in the `DBClusterMembers` field of the cluster description that there are no other databases -created. +created. As we did not specify a `MasterUsername` or `MasterUserPassword` for the creation of the database, the mongo-db will not set any credentials when starting the docker container. To create a new database, we can use the `create-db-instance` command, like in this example: @@ -63,6 +68,7 @@ To create a new database, we can use the `create-db-instance` command, like in t $ awslocal docdb create-db-instance --db-instance-identifier test-company \ --db-instance-class db.r5.large --engine docdb --db-cluster-identifier test-docdb-cluster {{< /command >}} + ```yaml { "DBInstance": { @@ -99,8 +105,10 @@ Some noticeable fields: - `--db-instance-identifier test-company`: Represents the unique identifier of the newly created database. - `--db-instance-class db.r5.large`: Is the type or class of the Amazon DocumentDB - instance. It determines the compute and memory capacity allocated to the instance. `db.r5.large` refers to a specific instance type in - the R5 family. Although the flag is required for database creation, LocalStack will only mock the `DBInstanceClass` attribute. + instance. + It determines the compute and memory capacity allocated to the instance. `db.r5.large` refers to a specific instance type in + the R5 family. + Although the flag is required for database creation, LocalStack will only mock the `DBInstanceClass` attribute. You can find out more about instance classes in the [AWS documentation](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.DBInstanceClass.html) @@ -168,7 +176,8 @@ test> {{< /command >}} -This command will default to accessing the `test` database that was created with the cluster. Notice the port, `39045`, +This command will default to accessing the `test` database that was created with the cluster. +Notice the port, `39045`, which is the cluster port that appears in the aforementioned description. To work with a specific database, the command is: @@ -213,10 +222,11 @@ the [MongoDB documentation](https://www.mongodb.com/docs/). {{< callout >}} You need to set `DOCDB_PROXY_CONTAINER=1` when starting LocalStack to be able to use the returned `Endpoint`, which will be correctly resolved automatically. -The flag `DOCDB_PROXY_CONTAINER=1` changes the default behavior and the container will be started as proxied container. Meaning a port from the [pre-defined port]({{< ref "/references/external-ports" >}}) range will be chosen, and when using lambda, you can use `localhost.localstack.cloud` to connect to the instance. +The flag `DOCDB_PROXY_CONTAINER=1` changes the default behavior so that the container is started as a proxied container. +This means a port from the [pre-defined port]({{< ref "/references/external-ports" >}}) range will be chosen, and when using Lambda, you can use `localhost.localstack.cloud` to connect to the instance. {{< /callout >}} -In this sample we will use a Node.js lambda function to connect to a DocumentDB. +In this sample, we will use a Node.js Lambda function to connect to a DocumentDB cluster. For the mongo-db connection we will use the `mongodb` lib. Please note, that this sample is only for demo purpose, e.g., we will set the credentials as environment variables to the lambda function. @@ -298,7 +308,7 @@ exports.handler = async (event) => { }; {{< /command >}} -Now, you can zip the entire. +Now, you can zip the entire directory. Make sure you are inside `resources` directory and run: {{< command >}} $ zip -r function.zip .
@@ -321,20 +331,19 @@ $ awslocal lambda invoke --function-name MyNodeLambda outfile {{< /command >}} The `outfile` contains the returned value, e.g.: + ```yaml {"statusCode":200,"body":"{\"_id\":\"6560a21ca7771a02ef128c72\",\"key\":\"value\"}"} ```` #### Use Secret To Connect to DocDB -The best-practise for accessing databases is by using secrets. +The best practice for accessing databases is to use secrets. Secrets follow a [well-defined pattern](https://docs.aws.amazon.com/secretsmanager/latest/userguide/create_database_secret.html). For the lambda function, you can pass the secret arn as `SECRET_NAME`. In the lambda, you can then retrieve the secret details like this: -[This sample is a snippet from the scenario test https://github.com/localstack/localstack-ext/blob/master/tests/aws/scenario/rds_neptune_docdb/test_rds_neptune_docdb.py]: # - {{< command >}} const AWS = require('aws-sdk'); const { MongoClient } = require('mongodb'); @@ -387,7 +396,8 @@ exports.handler = async (event) => { ## Resource Browser -The LocalStack Web Application provides a Resource Browser for managing DocumentDB instances and clusters. You can access the Resource Browser by opening the LocalStack Web Application in your browser, navigating to the **Resources** section, and then clicking on **DocumentDB** under the **Database** section. +The LocalStack Web Application provides a Resource Browser for managing DocumentDB instances and clusters. +You can access the Resource Browser by opening the LocalStack Web Application in your browser, navigating to the **Resources** section, and then clicking on **DocumentDB** under the **Database** section. DocumentDB Resource Browser
@@ -403,8 +413,11 @@ The Resource Browser allows you to perform the following actions: ## Current Limitations -Under the hood, LocalStack starts a MongoDB server, to handle DocumentDB storage, in a separate Docker container and adds port-mapping so that it can be accessed from `localhost`. When defining a port to access the container, an available port on the host machine will be selected, that means there is no pre-defined port range by default. +Under the hood, LocalStack starts a MongoDB server to handle DocumentDB storage in a separate Docker container and adds a port mapping so that it can be accessed from `localhost`. +When defining a port to access the container, an available port on the host machine will be selected, which means there is no pre-defined port range by default. -Because LocalStack utilizes a MongoDB container to provide DocumentDB storage, LocalStack may not have exact feature parity with Amazon DocumentDB. The database engine may support additional features that DocumentDB does not and vice versa. +Because LocalStack utilizes a MongoDB container to provide DocumentDB storage, LocalStack may not have exact feature parity with Amazon DocumentDB. +The database engine may support additional features that DocumentDB does not and vice versa. -DocumentDB currently uses the default configuration of the latest [MongoDB Docker image](https://hub.docker.com/_/mongo). When the `MasterUsername` and `MasterUserPassword` are set for the creation for the DocumentDB cluster or instance, the container will be started with the corresponding ENVs `MONGO_INITDB_ROOT_USERNAME` respectively `MONGO_INITDB_ROOT_PASSWORD`. +DocumentDB currently uses the default configuration of the latest [MongoDB Docker image](https://hub.docker.com/_/mongo). +When the `MasterUsername` and `MasterUserPassword` are set during the creation of the DocumentDB cluster or instance, the container will be started with the corresponding environment variables `MONGO_INITDB_ROOT_USERNAME` and `MONGO_INITDB_ROOT_PASSWORD`, respectively. diff --git a/content/en/user-guide/aws/dynamodb/index.md b/content/en/user-guide/aws/dynamodb/index.md index 12a7ed9bc0..5b4e98a858 100644 --- a/content/en/user-guide/aws/dynamodb/index.md +++ b/content/en/user-guide/aws/dynamodb/index.md @@ -149,7 +149,9 @@ The following output would be retrieved: ``` {{< callout >}} -You can run DynamoDB in memory, which can greatly improve the performance of your database operations. However, this also means that the data will not be possible to persist on disk and will be lost even though persistence is enabled in LocalStack. To enable this feature, you need to set the environment variable `DYNAMODB_IN_MEMORY=1` while starting LocalStack. +You can run DynamoDB in memory, which can greatly improve the performance of your database operations. +However, this also means that the data cannot be persisted on disk and will be lost even if persistence is enabled in LocalStack. +To enable this feature, you need to set the environment variable `DYNAMODB_IN_MEMORY=1` while starting LocalStack.
{{< /callout >}} ### Time To Live @@ -192,15 +194,15 @@ You can add your query in the editor and click on the **Execute** button to exec The following code snippets and sample applications provide practical examples of how to use IAM in LocalStack for various use cases: -- [Serverless Container-based APIs with Amazon ECS & API Gateway](https://github.com/localstack/serverless-api-ecs-apigateway-sample) -- [Full-Stack application with AWS Lambda, DynamoDB & S3 for shipment validation](https://github.com/localstack/shipment-list-demo) -- [Step-up Authentication using Amazon Cognito](https://github.com/localstack/step-up-auth-sample) -- [Serverless microservices with Amazon API Gateway, DynamoDB, SQS, and Lambda](https://github.com/localstack/microservices-apigateway-lambda-dynamodb-sqs-sample) -- [Event-driven architecture with Amazon SNS FIFO, DynamoDB, Lambda, and S3](https://github.com/localstack/event-driven-architecture-with-amazon-sns-fifo) -- [Note-Taking application using AWS SDK for JavaScript](https://github.com/localstack/aws-sdk-js-notes-app) -- [AppSync GraphQL APIs for DynamoDB and RDS Aurora PostgreSQL](https://github.com/localstack/appsync-graphql-api-sample) -- [Loan Broker application with AWS Step Functions, DynamoDB, Lambda, SQS, and SNS](https://github.com/localstack/loan-broker-stepfunctions-lambda-app) -- [Messaging Processing application with SQS, DynamoDB, and Fargate](https://github.com/localstack/sqs-fargate-ddb-cdk-go) +- [Serverless Container-based APIs with Amazon ECS & API Gateway](https://github.com/localstack/serverless-api-ecs-apigateway-sample) +- [Full-Stack application with AWS Lambda, DynamoDB & S3 for shipment validation](https://github.com/localstack/shipment-list-demo) +- [Step-up Authentication using Amazon Cognito](https://github.com/localstack/step-up-auth-sample) +- [Serverless microservices with Amazon API Gateway, DynamoDB, SQS, and Lambda](https://github.com/localstack/microservices-apigateway-lambda-dynamodb-sqs-sample) +- [Event-driven architecture with Amazon SNS FIFO, DynamoDB, Lambda, and S3](https://github.com/localstack/event-driven-architecture-with-amazon-sns-fifo) +- [Note-Taking application using AWS SDK for JavaScript](https://github.com/localstack/aws-sdk-js-notes-app) +- [AppSync GraphQL APIs for DynamoDB and RDS Aurora PostgreSQL](https://github.com/localstack/appsync-graphql-api-sample) +- [Loan Broker application with AWS Step Functions, DynamoDB, Lambda, SQS, and SNS](https://github.com/localstack/loan-broker-stepfunctions-lambda-app) +- [Messaging Processing application with SQS, DynamoDB, and Fargate](https://github.com/localstack/sqs-fargate-ddb-cdk-go) ## Current Limitations @@ -211,7 +213,7 @@ LocalStack provides support for global tables (Version 2019), which are tables t However, legacy global tables (Version 2017) are not supported by LocalStack. Operations such as `CreateGlobalTable`, `UpdateGlobalTable`, and `DescribeGlobalTable` will not replicate globally. -### Replication +### Replication - Removing the original table region from the replication set while retaining the replicas is currently not feasible. Deleting the original table will result in the removal of all replicas as well. 
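A minimal sketch of how the `DYNAMODB_IN_MEMORY=1` flag mentioned in the callout above is typically used, assuming the standard `localstack` CLI and the `awslocal` wrapper:

```bash
# Sketch: start LocalStack with DynamoDB kept entirely in memory.
# Operations are faster, but table data is not written to disk and is lost on
# restart, even when persistence is enabled.
DYNAMODB_IN_MEMORY=1 localstack start -d

# Tables are created and queried exactly as before; only the storage backend changes.
awslocal dynamodb create-table \
  --table-name demo \
  --attribute-definitions AttributeName=id,AttributeType=S \
  --key-schema AttributeName=id,KeyType=HASH \
  --billing-mode PAY_PER_REQUEST
```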
diff --git a/content/en/user-guide/aws/dynamodbstreams/index.md b/content/en/user-guide/aws/dynamodbstreams/index.md index decf39ddde..fef6bcc32f 100644 --- a/content/en/user-guide/aws/dynamodbstreams/index.md +++ b/content/en/user-guide/aws/dynamodbstreams/index.md @@ -6,15 +6,19 @@ description: Get started with DynamoDB Streams on LocalStack ## Introduction -DynamoDB Streams captures data modification events in a DynamoDB table. The stream records are written to a DynamoDB stream, which is an ordered flow of information about changes to items in a table. DynamoDB Streams records data in near-real time, enabling you to develop workflows that process these streams and respond based on their contents. +DynamoDB Streams captures data modification events in a DynamoDB table. +The stream records are written to a DynamoDB stream, which is an ordered flow of information about changes to items in a table. +DynamoDB Streams records data in near-real time, enabling you to develop workflows that process these streams and respond based on their contents. -LocalStack supports DynamoDB Streams, allowing you to create and manage streams in a local environment. The supported APIs are available on our [DynamoDB Streams coverage page](https://docs.localstack.cloud/references/coverage/coverage_dynamodbstreams/), which provides information on the extent of DynamoDB Streams integration with LocalStack. +LocalStack supports DynamoDB Streams, allowing you to create and manage streams in a local environment. +The supported APIs are available on our [DynamoDB Streams coverage page](https://docs.localstack.cloud/references/coverage/coverage_dynamodbstreams/), which provides information on the extent of DynamoDB Streams integration with LocalStack. ## Getting started This guide is designed for users new to DynamoDB Streams and assumes basic knowledge of the AWS CLI and our [`awslocal`](https://github.com/localstack/awscli-local) wrapper script. -Start your LocalStack container using your preferred method. We will demonstrate the following process using LocalStack: +Start your LocalStack container using your preferred method. +We will demonstrate the following process using LocalStack: - A user adds an entry to a DynamoDB table. - A new stream record is generated in DynamoDB Streams when an entry is added. @@ -23,7 +27,8 @@ Start your LocalStack container using your preferred method. We will demonstrate ### Create a DynamoDB table -You can create a DynamoDB table named `BarkTable` using the [`CreateTable`](https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_CreateTable.html) API. Run the following command to create the table: +You can create a DynamoDB table named `BarkTable` using the [`CreateTable`](https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_CreateTable.html) API. +Run the following command to create the table: {{< command >}} $ awslocal dynamodb create-table \ @@ -34,7 +39,8 @@ $ awslocal dynamodb create-table \ --stream-specification StreamEnabled=true,StreamViewType=NEW_AND_OLD_IMAGES {{< /command >}} -The `BarkTable` has a stream enabled which you can trigger by associating a Lambda function with the stream. You can notice that in the `LatestStreamArn` field of the response: +The `BarkTable` has a stream enabled which you can trigger by associating a Lambda function with the stream. +You can notice that in the `LatestStreamArn` field of the response: ```bash ... 
@@ -44,7 +50,8 @@ The `BarkTable` has a stream enabled which you can trigger by associating a Lamb ### Create a Lambda function -You can now create a Lambda function (`publishNewBark`) to process stream records from `BarkTable`. Create a new file named `index.js` with the following code: +You can now create a Lambda function (`publishNewBark`) to process stream records from `BarkTable`. +Create a new file named `index.js` with the following code: ```javascript 'use strict'; @@ -69,7 +76,8 @@ exports.handler = (event, context, callback) => { }; ``` -You can now create a Lambda function using the [`CreateFunction`](https://docs.aws.amazon.com/lambda/latest/dg/API_CreateFunction.html) API. Run the following command to create the Lambda function: +You can now create a Lambda function using the [`CreateFunction`](https://docs.aws.amazon.com/lambda/latest/dg/API_CreateFunction.html) API. +Run the following command to create the Lambda function: {{< command >}} $ zip index.zip index.js @@ -85,7 +93,8 @@ $ awslocal lambda create-function \ ### Invoke the Lambda function -To test the Lambda function, you can invoke it using the [`Invoke`](https://docs.aws.amazon.com/lambda/latest/dg/API_Invoke.html) API. Create a new file named `payload.json` with the following content: +To test the Lambda function, you can invoke it using the [`Invoke`](https://docs.aws.amazon.com/lambda/latest/dg/API_Invoke.html) API. +Create a new file named `payload.json` with the following content: ```json { @@ -144,13 +153,16 @@ In the `output.txt` file, you should see the following output: ### Add event source mapping -To add the DynamoDB stream as an event source for the Lambda function, you need the stream ARN. You can get the stream ARN using the [`DescribeTable`](https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_DescribeTable.html) API. Run the following command to get the stream ARN: +To add the DynamoDB stream as an event source for the Lambda function, you need the stream ARN. +You can get the stream ARN using the [`DescribeTable`](https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_DescribeTable.html) API. +Run the following command to get the stream ARN: {{< command >}} awslocal dynamodb describe-table --table-name BarkTable {{< /command >}} -You can now create an event source mapping using the [`CreateEventSourceMapping`](https://docs.aws.amazon.com/lambda/latest/dg/API_CreateEventSourceMapping.html) API. Run the following command to create the event source mapping: +You can now create an event source mapping using the [`CreateEventSourceMapping`](https://docs.aws.amazon.com/lambda/latest/dg/API_CreateEventSourceMapping.html) API. +Run the following command to create the event source mapping: {{< command >}} awslocal lambda create-event-source-mapping \ @@ -160,7 +172,8 @@ awslocal lambda create-event-source-mapping \ --starting-position TRIM_HORIZON {{< /command >}} -Make sure to replace the `event-source` value with the stream ARN you obtained from the previous command. You should see the following output: +Make sure to replace the `event-source` value with the stream ARN you obtained from the previous command. +You should see the following output: ```bash { @@ -173,7 +186,8 @@ Make sure to replace the `event-source` value with the stream ARN you obtained f } ``` -You can now test the event source mapping by adding an item to the `BarkTable` table using the [`PutItem`](https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_PutItem.html) API. 
Run the following command to add an item to the table: +You can now test the event source mapping by adding an item to the `BarkTable` table using the [`PutItem`](https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_PutItem.html) API. +Run the following command to add an item to the table: {{< command >}} $ awslocal dynamodb put-item \ @@ -185,7 +199,8 @@ You can find Lambda function being triggered in the LocalStack logs. ### Inspect the stream -You can list the streams using the [`ListStreams`](https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_ListStreams.html) API. Run the following command to list the streams: +You can list the streams using the [`ListStreams`](https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_ListStreams.html) API. +Run the following command to list the streams: {{< command >}} awslocal dynamodbstreams list-streams @@ -205,7 +220,8 @@ The following output shows the list of streams: } ``` -You can also describe the stream using the [`DescribeStream`](https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_DescribeStream.html) API. Run the following command to describe the stream: +You can also describe the stream using the [`DescribeStream`](https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_DescribeStream.html) API. +Run the following command to describe the stream: {{< command >}} $ awslocal dynamodbstreams describe-stream --stream-arn arn:aws:dynamodb:us-east-1:000000000000:table/BarkTable/stream/2024-07-12T06:18:37.101 diff --git a/content/en/user-guide/aws/ec2/index.md b/content/en/user-guide/aws/ec2/index.md index e6f56cbe75..e5d23ff54a 100644 --- a/content/en/user-guide/aws/ec2/index.md +++ b/content/en/user-guide/aws/ec2/index.md @@ -172,7 +172,6 @@ $ ssh -p 12862 -i key.pem root@127.0.0.1 If the `ssh` command throws an error like "Identity file not accessible" or "bad permissions", then please make sure that the key file has a restrictive `0400` permission as illustrated [here]({{< relref "ec2#create-a-key-pair" >}}). {{< /callout >}} - ## VM Managers LocalStack EC2 supports multiple methods to simulate the EC2 service. @@ -182,7 +181,6 @@ For advanced setups, LocalStack Pro comes with emulation capability for certain The underlying method for this can be controlled using the [`EC2_VM_MANAGER`]({{< ref "configuration#ec2" >}}) configuration option. You may choose between plain mocked resources, containerized or virtualized. - ## Mock VM Manager With the Mock VM manager, all resources are stored as in-memory representation. @@ -193,7 +191,6 @@ To use this VM manager in LocalStack Pro, set [`EC2_VM_MANAGER`]({{< ref "config This serves as the fallback manager if an operation is not implemented in other VM managers. - ## Docker VM Manager LocalStack Pro supports the Docker VM manager which uses the [Docker Engine](https://docs.docker.com/engine/) to emulate EC2 instances. @@ -268,7 +265,7 @@ The execution log is generated at `/var/log/cloud-init-output.log` in the contai ### Networking {{< callout "note" >}} -Network access to EC2 instance is not possible on macOS. +Network access to EC2 instance is not possible on macOS. This is because Docker Desktop on macOS does not expose the bridge network to the host system. See [Docker Desktop Known Limitations](https://docs.docker.com/desktop/networking/#known-limitations). 
{{< /callout >}} @@ -308,7 +305,6 @@ The port mapping details are provided in the logs during the instance initializa 2022-12-20T19:43:44.544 INFO Instance i-1d6327abf04e31be6 port mappings (container -> host): {'8080/tcp': 51747, '22/tcp': 55705} ``` - ### Elastic Block Store A common use case is to attach an EBS block device to an EC2 instance, which can then be used to create a custom filesystem for additional storage. @@ -323,6 +319,7 @@ First, we create a user data script `init.sh` which creates an ext3 file system {{< command >}} $ cat > init.sh <}} - ### Instance Metadata Service The Docker VM manager supports the [Instance Metadata Service](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html) which provides information about the running instance. @@ -378,14 +374,12 @@ If you would like support for more metadata categories, please make a feature re IMDS IPv6 endpoint is currently not supported. {{< /callout >}} - ### Configuration You can use the [`EC2_DOCKER_FLAGS`]({{< ref "configuration#ec2" >}}) LocalStack configuration variable to pass supplementary flags to Docker during the initiation of containerized instances. This allows for fine-tuned behaviours, for example, running containers in privileged mode using `--privileged` or specifying an alternate CPU platform with `--platform`. Keep in mind that this will apply to all instances that are launched in the LocalStack session. - ### Operations The following table explains the emulated action for various API operations. @@ -401,8 +395,6 @@ Any operation not listed below will use the mock VM manager. | `StartInstances` | Resumes the Docker containers that back instances | | `TerminateInstances` | Stops the Docker containers that back instances | - - ## Libvirt VM Manager {{< callout "note" >}} @@ -498,11 +490,13 @@ You may need run the following command to make sure the image is registered with {{< command >}} $ virsh pool-refresh default + Pool default refreshed - + $ virsh vol-list --pool default Name Path -------------------------------------------------------------------------------------------------------- + ami-1234abcd /var/lib/libvirt/images/ami-1234abcd {{< /command >}} @@ -513,7 +507,6 @@ These AMIs will also have the resource tag `ec2_vm_manager:libvirt`. awslocal ec2 describe-images --filters Name=tag:ec2_vm_manager,Values=libvirt {{< /command >}} - ### Instances Virtualised instances can be launched with `RunInstances` operation and specifying a compatible AMI. @@ -545,13 +538,11 @@ You can then use a compatible VNC client (e.g. [TigerVNC](https://tigervnc.org/) Tiger VNC
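A minimal sketch of launching a virtualised instance from such an AMI, assuming the `ami-1234abcd` ID reported by `virsh vol-list` above (substitute the AMI ID from your own environment):

```bash
# Sketch: launch an instance backed by the Libvirt AMI registered above.
awslocal ec2 run-instances \
  --image-id ami-1234abcd \
  --count 1

# Libvirt-backed AMIs carry the ec2_vm_manager:libvirt resource tag and can be
# listed separately from mocked AMIs.
awslocal ec2 describe-images \
  --filters Name=tag:ec2_vm_manager,Values=libvirt
```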

- ### Networking Currently all instances are behind a NAT network. Instances can access the internet but are inaccessible from the host machine. - ### Elastic Block Stores LocalStack clones the AMI into an EBS volume when the instance is initialised. @@ -559,13 +550,10 @@ LocalStack does not resize the instance root volume, instead it inherits the pro Currently it is not possible to attach additional EBS volumes to instances. - - ### Instance Metadata Service The Libvirt VM manager does not support the Instance Metadata Service endpoints. - ### Operations The following table explains the emulated action for various API operations. @@ -581,7 +569,6 @@ Any operation not listed below will use the mock VM manager. | `TerminateInstances` | Stops and undefines a Libvirt domain | | `CreateVolume` | Creates a sparse Libvirt volume | - ## Resource Browser The LocalStack Web Application provides a Resource Browser for managing EC2 instances. diff --git a/content/en/user-guide/aws/ecr/index.md b/content/en/user-guide/aws/ecr/index.md index 610306d350..5c03d1b581 100644 --- a/content/en/user-guide/aws/ecr/index.md +++ b/content/en/user-guide/aws/ecr/index.md @@ -20,7 +20,8 @@ The supported APIs are available on our [API coverage page](https://docs.localst This guide is designed for users new to Elastic Container Registry and assumes basic knowledge of the AWS CLI and our [`awslocal`](https://github.com/localstack/awscli-local) wrapper script. -Start your LocalStack container using your preferred method. We will demonstrate how to build and push a Docker image to a local ECR repository. +Start your LocalStack container using your preferred method. +We will demonstrate how to build and push a Docker image to a local ECR repository. ### Create a Docker image diff --git a/content/en/user-guide/aws/ecs/index.md b/content/en/user-guide/aws/ecs/index.md index 413022e4cb..2efd58e70b 100644 --- a/content/en/user-guide/aws/ecs/index.md +++ b/content/en/user-guide/aws/ecs/index.md @@ -149,8 +149,8 @@ This task definition creates a CloudWatch Logs log group and log stream for the ### Launch a service Finally we launch an ECS service using the task definition above. -This will create a number of containers in replica mode meaning they are distributed over the nodes of the cluster, or in the case of Fargate, over availability zones within the region of the cluster. To create a service, execute the following command: - +This will create a number of containers in replica mode meaning they are distributed over the nodes of the cluster, or in the case of Fargate, over availability zones within the region of the cluster. +To create a service, execute the following command: {{< command >}} $ awslocal ecs create-service --service-name myservice --cluster mycluster --task-definition myfamily --desired-count 1 @@ -204,7 +204,7 @@ $ awslocal ecs create-service --service-name myservice --cluster mycluster --tas You should see a new docker container has been created, using the `ubuntu:latest` image, and running the infinite loop command: -``` +```bash $ docker ps CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES 5dfeb9376391 ubuntu "sh -c 'while true; …" 3 minutes ago Up 3 minutes ls-ecs-mycluster-75f0515e-0364-4ee5-9828-19026140c91a-0-a1afaa9d @@ -263,7 +263,8 @@ Or if you are working with a single container, you can set `ECS_DOCKER_FLAGS="-p ## Mounting local directories for ECS tasks -In some cases, it can be useful to mount code from the host filesystem into the ECS container. 
For example, to enable a quick debugging loop where you can test changes without having to build and redeploy the task's Docker image each time - similar to the [Lambda Hot Reloading]({{< ref "hot-reloading" >}}) feature in LocalStack. +In some cases, it can be useful to mount code from the host filesystem into the ECS container. +For example, to enable a quick debugging loop where you can test changes without having to build and redeploy the task's Docker image each time - similar to the [Lambda Hot Reloading]({{< ref "hot-reloading" >}}) feature in LocalStack. In order to leverage code mounting, we can use the ECS bind mounts feature, which is covered in the [AWS Bind mounts documentation](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/bind-mounts.html). @@ -341,10 +342,10 @@ services: Alternatively, you can download the image from the private registry before using it or employ an [Initialization Hook](https://docs.localstack.cloud/references/init-hooks/) to install the Docker client and use these credentials to download the image. - ## Running ECS on Kubernetes -LocalStack Enterprise image allows you to run ECS tasks on Kubernetes. The tasks are added to ELB load balancer target groups. +LocalStack Enterprise image allows you to run ECS tasks on Kubernetes. +The tasks are added to ELB load balancer target groups. You can do so by setting the `ECS_TASK_EXECUTOR` environment variable to `kubernetes` in the LocalStack container. In this guide, you will learn how to run ECS tasks on Kubernetes by using [`k3d](https://k3d.io/), a lightweight Kubernetes distribution. @@ -360,7 +361,8 @@ $ k3d cluster create ls-cluster -p "4566:$NODE_PORT" --wait --timeout 5m ### Install LocalStack in the cluster -You can now install LocalStack in the Kubernetes cluster by using LocalStack's Helm chart. The following command installs LocalStack with the `kubernetes` executor for ECS and sets the `LOCALSTACK_AUTH_TOKEN` environment variable: +You can now install LocalStack in the Kubernetes cluster by using LocalStack's Helm chart. +The following command installs LocalStack with the `kubernetes` executor for ECS and sets the `LOCALSTACK_AUTH_TOKEN` environment variable: {{< command >}} $ helm upgrade --install localstack localstack/localstack \ diff --git a/content/en/user-guide/aws/efs/index.md b/content/en/user-guide/aws/efs/index.md index 3e81c1a28a..a359fd577c 100644 --- a/content/en/user-guide/aws/efs/index.md +++ b/content/en/user-guide/aws/efs/index.md @@ -8,19 +8,24 @@ tags: ["Pro image"] ## Introduction -Elastic File System (EFS) is a fully managed file storage service provided by Amazon Web Services (AWS). EFS offers scalable and shared file storage that can be accessed by multiple EC2 instances and on-premises servers simultaneously. EFS utilizes the Network File System protocol to allow it to be used as a data source for various applications and workloads. +Elastic File System (EFS) is a fully managed file storage service provided by Amazon Web Services (AWS). +EFS offers scalable and shared file storage that can be accessed by multiple EC2 instances and on-premises servers simultaneously. +EFS utilizes the Network File System protocol to allow it to be used as a data source for various applications and workloads. -LocalStack allows you to use the EFS APIs in your local environment to create local file systems, lifecycle configurations, and file system policies. 
The supported APIs are available on our [API coverage page](https://docs.localstack.cloud/references/coverage/coverage_efs/), which provides information on the extent of EFS's integration with LocalStack. +LocalStack allows you to use the EFS APIs in your local environment to create local file systems, lifecycle configurations, and file system policies. +The supported APIs are available on our [API coverage page](https://docs.localstack.cloud/references/coverage/coverage_efs/), which provides information on the extent of EFS's integration with LocalStack. ## Getting started This guide is designed for users new to Elastic File System and assumes basic knowledge of the AWS CLI and our [`awslocal`](https://github.com/localstack/awscli-local) wrapper script. -Start your LocalStack container using your preferred method. We will demonstrate how to create a file system, apply an IAM resource-based policy, and create a lifecycle configuration using the AWS CLI. +Start your LocalStack container using your preferred method. +We will demonstrate how to create a file system, apply an IAM resource-based policy, and create a lifecycle configuration using the AWS CLI. ### Create a filesystem -To create a new, empty file system you can use the [`CreateFileSystem`](https://docs.aws.amazon.com/goto/WebAPI/elasticfilesystem-2015-02-01/CreateFileSystem) API. Run the following command to create a new file system: +To create a new, empty file system you can use the [`CreateFileSystem`](https://docs.aws.amazon.com/goto/WebAPI/elasticfilesystem-2015-02-01/CreateFileSystem) API. +Run the following command to create a new file system: {{< command >}} $ awslocal efs create-file-system \ @@ -51,7 +56,8 @@ The following output would be retrieved: } ``` -You can also describe the locally available file systems using the [`DescribeFileSystems`](https://docs.aws.amazon.com/efs/latest/ug/API_DescribeFileSystems.html) API. Run the following command to describe the local file systems available: +You can also describe the locally available file systems using the [`DescribeFileSystems`](https://docs.aws.amazon.com/efs/latest/ug/API_DescribeFileSystems.html) API. +Run the following command to describe the local file systems available: {{< command >}} $ awslocal efs describe-file-systems @@ -61,7 +67,8 @@ You can alternatively pass the `--file-system-id` parameter to the `describe-fil ### Put file system policy -You can apply an EFS `FileSystemPolicy` to an EFS file system using the [`PutFileSystemPolicy`](https://docs.aws.amazon.com/efs/latest/ug/API_PutFileSystemPolicy.html) API. Run the following command to apply a policy to the file system created in the previous step: +You can apply an EFS `FileSystemPolicy` to an EFS file system using the [`PutFileSystemPolicy`](https://docs.aws.amazon.com/efs/latest/ug/API_PutFileSystemPolicy.html) API. 
+Run the following command to apply a policy to the file system created in the previous step: {{< command >}} $ awslocal efs put-file-system-policy \ @@ -69,18 +76,21 @@ $ awslocal efs put-file-system-policy \ --policy "{\"Version\":\"2012-10-17\",\"Id\":\"ExamplePolicy01\",\"Statement\":[{\"Sid\":\"ExampleSatement01\",\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"*\"},\"Action\":[\"elasticfilesystem:ClientMount\",\"elasticfilesystem:ClientWrite\"],\"Resource\":\"arn:aws:elasticfilesystem:us-east-1:000000000000:file-system/fs-34feac549e66b814\"}]}" {{< /command >}} -You can list the file system policies using the [`DescribeFileSystemPolicy`](https://docs.aws.amazon.com/efs/latest/ug/API_DescribeFileSystemPolicy.html) API. Run the following command to list the file system policies: +You can list the file system policies using the [`DescribeFileSystemPolicy`](https://docs.aws.amazon.com/efs/latest/ug/API_DescribeFileSystemPolicy.html) API. +Run the following command to list the file system policies: {{< command >}} $ awslocal efs describe-file-system-policy \ --file-system-id {{< /command >}} -Replace `` with the ID of the file system you want to list the policies for. The output will return the `FileSystemPolicy` for the specified EFS file system. +Replace `` with the ID of the file system you want to list the policies for. +The output will return the `FileSystemPolicy` for the specified EFS file system. ### Create a lifecycle configuration -You can create a lifecycle configuration for an EFS file system using the [`PutLifecycleConfiguration`](https://docs.aws.amazon.com/efs/latest/ug/API_PutLifecycleConfiguration.html) API. Run the following command to create a lifecycle configuration for the file system created in the previous step: +You can create a lifecycle configuration for an EFS file system using the [`PutLifecycleConfiguration`](https://docs.aws.amazon.com/efs/latest/ug/API_PutLifecycleConfiguration.html) API. +Run the following command to create a lifecycle configuration for the file system created in the previous step: {{< command >}} $ awslocal efs put-lifecycle-configuration \ @@ -102,4 +112,5 @@ The following output would be retrieved: ## Current Limitations -LocalStack's EFS implementation is limited and lacks support for functionalities like creating mount targets, configuring access points, and generating tags. LocalStack uses Moto to emulate the EFS APIs, and efforts are underway to incorporate support for these features in upcoming updates. +LocalStack's EFS implementation is limited and lacks support for functionalities like creating mount targets, configuring access points, and generating tags. +LocalStack uses Moto to emulate the EFS APIs, and efforts are underway to incorporate support for these features in upcoming updates. diff --git a/content/en/user-guide/aws/eks/index.md b/content/en/user-guide/aws/eks/index.md index c4ea48eaad..1274474344 100644 --- a/content/en/user-guide/aws/eks/index.md +++ b/content/en/user-guide/aws/eks/index.md @@ -8,23 +8,30 @@ tags: ["Pro image"] ## Introduction -Elastic Kubernetes Service (EKS) is a managed Kubernetes service that makes it easy to run Kubernetes on AWS without installing, operating, and maintaining your own Kubernetes control plane or worker nodes. Kubernetes is an open-source system for automating containerized applications' deployment, scaling, and management. 
+Elastic Kubernetes Service (EKS) is a managed Kubernetes service that makes it easy to run Kubernetes on AWS without installing, operating, and maintaining your own Kubernetes control plane or worker nodes. +Kubernetes is an open-source system for automating containerized applications' deployment, scaling, and management. -LocalStack allows you to use the EKS APIs in your local environment to spin up embedded Kubernetes clusters in your local Docker engine or use an existing Kubernetes installation you can access from your local machine (defined in `$HOME/.kube/config`). The supported APIs are available on our [API coverage page](https://docs.localstack.cloud/references/coverage/coverage_eks/), which provides information on the extent of EKS's integration with LocalStack. +LocalStack allows you to use the EKS APIs in your local environment to spin up embedded Kubernetes clusters in your local Docker engine or use an existing Kubernetes installation you can access from your local machine (defined in `$HOME/.kube/config`). +The supported APIs are available on our [API coverage page](https://docs.localstack.cloud/references/coverage/coverage_eks/), which provides information on the extent of EKS's integration with LocalStack. ## Getting started -This guide is designed for users new to Elastic Kubernetes Service and assumes basic knowledge of the AWS CLI and our [`awslocal`](https://github.com/localstack/awscli-local) wrapper script. To interact with the Kubernetes cluster, you should also install [`kubectl`](https://kubernetes.io/docs/tasks/tools/). +This guide is designed for users new to Elastic Kubernetes Service and assumes basic knowledge of the AWS CLI and our [`awslocal`](https://github.com/localstack/awscli-local) wrapper script. +To interact with the Kubernetes cluster, you should also install [`kubectl`](https://kubernetes.io/docs/tasks/tools/). -Start your LocalStack container using your preferred method. We will demonstrate how you can auto-install an embedded Kubernetes cluster, configure ingress, and deploy a sample service with ECR. +Start your LocalStack container using your preferred method. +We will demonstrate how you can auto-install an embedded Kubernetes cluster, configure ingress, and deploy a sample service with ECR. ### Create an embedded Kubernetes cluster -The default approach for creating Kubernetes clusters using the local EKS API is by setting up an embedded [k3d](https://k3d.io/) kube cluster within Docker. LocalStack seamlessly manages the download and installation process, making it hassle-free for users. In most cases, the installation is automatic, eliminating the need for any manual customizations. +The default approach for creating Kubernetes clusters using the local EKS API is by setting up an embedded [k3d](https://k3d.io/) kube cluster within Docker. +LocalStack seamlessly manages the download and installation process, making it hassle-free for users. +In most cases, the installation is automatic, eliminating the need for any manual customizations. A new cluster can be created using the following command: -You can create a new cluster using the [`CreateCluster`](https://docs.aws.amazon.com/eks/latest/APIReference/API_CreateCluster.html) API. Run the following command: +You can create a new cluster using the [`CreateCluster`](https://docs.aws.amazon.com/eks/latest/APIReference/API_CreateCluster.html) API. 
+Run the following command: {{< command >}} $ awslocal eks create-cluster \ @@ -67,7 +74,8 @@ f05770ec8523 rancher/k3s:v1.21.5-k3s2 "/bin/k3s server --t…" 1 minut {{< / command >}} -After successfully creating and initializing the cluster, we can easily find the server endpoint, using the [`DescribeCluster`](https://docs.aws.amazon.com/eks/latest/APIReference/API_DescribeCluster.html) API. Run the following command: +After successfully creating and initializing the cluster, we can easily find the server endpoint, using the [`DescribeCluster`](https://docs.aws.amazon.com/eks/latest/APIReference/API_DescribeCluster.html) API. +Run the following command: {{< command >}} $ awslocal eks describe-cluster --name cluster1 @@ -105,9 +113,10 @@ To modify the return value of resource URIs for most services, including ECR, yo By default, ECR returns a `repositoryUri` starting with `localhost.localstack.cloud`, such as: `localhost.localstack.cloud:/`. {{< callout >}} -In this section, we assume that `localhost.localstack.cloud` resolves in your environment, and LocalStack is connected to a non-default bridge network. For more information, refer to the article about [DNS rebind protection]({{< ref "dns-server#dns-rebind-protection" >}}). +In this section, we assume that `localhost.localstack.cloud` resolves in your environment, and LocalStack is connected to a non-default bridge network. +For more information, refer to the article about [DNS rebind protection]({{< ref "dns-server#dns-rebind-protection" >}}). -If the domain `localhost.localstack.cloud` does not resolve on your host, you can still proceed by setting `LOCALSTACK_HOST=localhost` (not recommended). +If the domain `localhost.localstack.cloud` does not resolve on your host, you can still proceed by setting `LOCALSTACK_HOST=localhost` (not recommended). LocalStack will take care of the DNS resolution of `localhost.localstack.cloud` within ECR itself, allowing you to use the `localhost:/` URI for tagging and pushing the image on your host. {{< /callout >}} @@ -116,9 +125,11 @@ Once you have configured this correctly, you can seamlessly use your ECR image w #### Deploying a sample application from an ECR image -To showcase this behavior, let's go through a concise step-by-step guide that will lead us to the successful pulling of an image from local ECR. For the purpose of this guide, we will retag the `nginx` image to be pushed to a local ECR repository under a different name, and then utilize it for a pod configuration. +To showcase this behavior, let's go through a concise step-by-step guide that will lead us to the successful pulling of an image from local ECR. +For the purpose of this guide, we will retag the `nginx` image to be pushed to a local ECR repository under a different name, and then utilize it for a pod configuration. -You can create a new ECR repository using the [`CreateRepository`](https://docs.aws.amazon.com/AmazonECR/latest/APIReference/API_CreateRepository.html) API. Run the following command: +You can create a new ECR repository using the [`CreateRepository`](https://docs.aws.amazon.com/AmazonECR/latest/APIReference/API_CreateRepository.html) API. +Run the following command: {{< command >}} $ awslocal ecr create-repository --repository-name "fancier-nginx" @@ -143,9 +154,11 @@ $ awslocal ecr create-repository --repository-name "fancier-nginx" {{< / command >}} {{< callout >}} -When creating an ECR repository, a port from the [external service port range]({{< ref "external-ports" >}}) is dynamically assigned. 
As a result, the port can differ from the static value `4510` used in the examples below. +When creating an ECR repository, a port from the [external service port range]({{< ref "external-ports" >}}) is dynamically assigned. +As a result, the port can differ from the static value `4510` used in the examples below. -To ensure the correct URL and port, it's important to use the `repositoryUrl` obtained from the `create-repository` request. This ensures that you have the accurate endpoint to access the repository. +To ensure the correct URL and port, it's important to use the `repositoryUrl` obtained from the `create-repository` request. +This ensures that you have the accurate endpoint to access the repository. {{< /callout >}} You can now pull the `nginx` image from Docker Hub using the `docker` CLI: @@ -168,7 +181,8 @@ $ docker push localhost.localstack.cloud:4510/fancier-nginx Now, let us set up the EKS cluster using the image pushed to local ECR. -Next, we can configure `kubectl` to use the EKS cluster, using the [`UpdateKubeconfig`](https://docs.aws.amazon.com/eks/latest/APIReference/API_UpdateClusterConfig.html) API. Run the following command: +Next, we can configure `kubectl` to use the EKS cluster, using the [`UpdateKubeconfig`](https://docs.aws.amazon.com/eks/latest/APIReference/API_UpdateClusterConfig.html) API. +Run the following command: {{< command >}} $ awslocal eks update-kubeconfig --name cluster1 && \ @@ -181,7 +195,7 @@ Switched to context "arn:aws:eks:us-east-1:000000000000:cluster/cluster1". {{< / command >}} -You can now go ahead and add a deployment configuration for the `fancier-nginx` image. +You can now go ahead and add a deployment configuration for the `fancier-nginx` image. {{< command >}} $ cat <}} -The `ls-secret-tls` secret is created in the `default` namespace. If your ingress and services are residing in a custom namespace, it is essential to copy the secret to that custom namespace to make use of it. +The `ls-secret-tls` secret is created in the `default` namespace. +If your ingress and services are residing in a custom namespace, it is essential to copy the secret to that custom namespace to make use of it. {{< /callout >}} ## Use an existing Kubernetes installation -You can also access the EKS API using your existing local Kubernetes installation. This can be achieved by mounting the `$HOME/.kube/config` file into the LocalStack container, especially when using a `docker-compose.yml` file: +You can also access the EKS API using your existing local Kubernetes installation. +This can be achieved by mounting the `$HOME/.kube/config` file into the LocalStack container, especially when using a `docker-compose.yml` file: ```yaml volumes: - "${HOME}/.kube/config:/root/.kube/config" ``` -In recent versions of Docker, you can enable Kubernetes as an embedded service running inside Docker. The picture below illustrates the Kubernetes settings in Docker for macOS (similar configurations apply for Linux/Windows). By default, the Kubernetes API is assumed to run on the local TCP port `6443`. +In recent versions of Docker, you can enable Kubernetes as an embedded service running inside Docker. +The picture below illustrates the Kubernetes settings in Docker for macOS (similar configurations apply for Linux/Windows). +By default, the Kubernetes API is assumed to run on the local TCP port `6443`. 
Kubernetes in Docker @@ -363,15 +382,18 @@ $ awslocal eks list-clusters {{< / command >}} -To interact with your Kubernetes cluster, configure your Kubernetes client (such as `kubectl` or other SDKs) to point to the `endpoint` provided in the `create-cluster` output mentioned earlier. However, depending on whether you're calling the Kubernetes API from your local machine or from within a Lambda function, you might need to use different endpoint URLs. +To interact with your Kubernetes cluster, configure your Kubernetes client (such as `kubectl` or other SDKs) to point to the `endpoint` provided in the `create-cluster` output mentioned earlier. +However, depending on whether you're calling the Kubernetes API from your local machine or from within a Lambda function, you might need to use different endpoint URLs. -For local machine interactions, use `https://localhost:6443` as the endpoint URL. If you are accessing the Kubernetes API from within a Lambda function, you should use `https://172.17.0.1:6443` as the endpoint URL, assuming that `172.17.0.1` is the IP address of the Docker network bridge. +For local machine interactions, use `https://localhost:6443` as the endpoint URL. +If you are accessing the Kubernetes API from within a Lambda function, you should use `https://172.17.0.1:6443` as the endpoint URL, assuming that `172.17.0.1` is the IP address of the Docker network bridge. By using the appropriate endpoint URL based on your context, you can effectively communicate with your Kubernetes cluster and manage your resources as needed. ## Customizing the Kubernetes Load Balancer Ports -By default, the Kubernetes load balancer (LB) is exposed on port `8081`. If you need to customize the port or expose the load balancer on multiple ports, you can utilize the special tag name `_lb_ports_` during the cluster creation process. +By default, the Kubernetes load balancer (LB) is exposed on port `8081`. +If you need to customize the port or expose the load balancer on multiple ports, you can utilize the special tag name `_lb_ports_` during the cluster creation process. For instance, if you want to expose the load balancer on ports 8085 and 8086, you can use the following tag definition when creating the cluster: @@ -386,7 +408,8 @@ $ awslocal eks create-cluster \ When working with EKS, a common scenario is to access multiple Kubernetes services behind different endpoints. -For instance, you might have multiple microservices, each following a common path versioning scheme, such as API request paths starting with `/v1/...`. In such cases, path-based routing may not be ideal if you need the services to be accessible in a uniform manner. +For instance, you might have multiple microservices, each following a common path versioning scheme, such as API request paths starting with `/v1/...`. +In such cases, path-based routing may not be ideal if you need the services to be accessible in a uniform manner. 
To address this requirement, we recommend utilizing host-based routing rules, as demonstrated in the example below: @@ -400,20 +423,20 @@ metadata: ingress.kubernetes.io/ssl-redirect: "false" spec: rules: - - host: eks-service-1.localhost.localstack.cloud +- host: eks-service-1.localhost.localstack.cloud http: paths: - - path: /v1 + - path: /v1 pathType: Prefix backend: service: name: service-1 port: number: 80 - - host: eks-service-2.localhost.localstack.cloud +- host: eks-service-2.localhost.localstack.cloud http: paths: - - path: /v1 + - path: /v1 pathType: Prefix backend: service: @@ -425,9 +448,12 @@ EOF The example defines routing rules for two local endpoints - the first rule points to a service `service-1` accessible under `/v1`, and the second rule points to a service `service-2` accessible under the same path `/v1`. -In the provided example, we define routing rules for two local endpoints. The first rule directs traffic to a service named `service-1`, accessible under the path `/v1`. Similarly, the second rule points to a service named `service-2`, also accessible under the same path `/v1`. +In the provided example, we define routing rules for two local endpoints. +The first rule directs traffic to a service named `service-1`, accessible under the path `/v1`. +Similarly, the second rule points to a service named `service-2`, also accessible under the same path `/v1`. -This approach enables us to access the two distinct services using the same path and port number, but with different host names. This host-based routing mechanism ensures that each service is uniquely identified based on its designated host name, allowing for a uniform and organized way of accessing multiple services within the EKS cluster. +This approach enables us to access the two distinct services using the same path and port number, but with different host names. +This host-based routing mechanism ensures that each service is uniquely identified based on its designated host name, allowing for a uniform and organized way of accessing multiple services within the EKS cluster. {{< command >}} $ curl http://eks-service-1.localhost.localstack.cloud:8081/v1 @@ -440,9 +466,12 @@ $ curl http://eks-service-2.localhost.localstack.cloud:8081/v1 {{< /command >}} -It is important to note that the host names `eks-service-1.localhost.localstack.cloud` and `eks-service-2.localhost.localstack.cloud` both resolve to `127.0.0.1` (localhost). Consequently, you can utilize them to communicate with your service endpoints and distinguish between different services within the Kubernetes load balancer. +It is important to note that the host names `eks-service-1.localhost.localstack.cloud` and `eks-service-2.localhost.localstack.cloud` both resolve to `127.0.0.1` (localhost). +Consequently, you can utilize them to communicate with your service endpoints and distinguish between different services within the Kubernetes load balancer. -However, it might encounter issues in scenarios where you intend to run your Load Balancer (LB) on standard ports such as 80/443 since some of these ports may already be occupied on your local machine. For instance, by default, LocalStack allocates port 443 to expose APIs via the HTTPS endpoint (`https://localhost.localstack.cloud`). Hence, it's crucial to ensure that you expose your LB on a custom, non-standard port to prevent conflicts. 
+However, it might encounter issues in scenarios where you intend to run your Load Balancer (LB) on standard ports such as 80/443 since some of these ports may already be occupied on your local machine. +For instance, by default, LocalStack allocates port 443 to expose APIs via the HTTPS endpoint (`https://localhost.localstack.cloud`). +Hence, it's crucial to ensure that you expose your LB on a custom, non-standard port to prevent conflicts. Additionally, note that LocalStack EKS employs [Traefik](https://doc.traefik.io/traefik/providers/kubernetes-ingress) as the Kubernetes ingress controller internally. @@ -478,10 +507,12 @@ $ awslocal eks create-cluster \ {{< / command >}} {{< callout >}} -Note that the tag was previously referred to as `__k3d_volume_mount__`, but it has now been renamed to `_volume_mount_`. As a result, the tag name `__k3d_volume_mount__` is considered deprecated and will be removed in an upcoming release. +Note that the tag was previously referred to as `__k3d_volume_mount__`, but it has now been renamed to `_volume_mount_`. +As a result, the tag name `__k3d_volume_mount__` is considered deprecated and will be removed in an upcoming release. {{< /callout >}} -After creating your cluster with the `_volume_mount_` tag, you can create your path with volume mounts as usual. The configuration for the volume mounts can be set up similar to this: +After creating your cluster with the `_volume_mount_` tag, you can create your path with volume mounts as usual. +The configuration for the volume mounts can be set up similar to this: ```yaml apiVersion: v1 diff --git a/content/en/user-guide/aws/elasticache/index.md b/content/en/user-guide/aws/elasticache/index.md index 094076f2da..1f13fe58f1 100644 --- a/content/en/user-guide/aws/elasticache/index.md +++ b/content/en/user-guide/aws/elasticache/index.md @@ -21,12 +21,10 @@ LocalStack supports ElastiCache via the Pro offering, allowing you to use the El The supported APIs are available on our [API Coverage Page]{{< ref "coverage_elasticache" >}}, which provides information on the extent of ElastiCache integration with LocalStack. - ## Getting started This guide is designed for users new to ElastiCache and assumes basic knowledge of the AWS CLI and our `awslocal` wrapper script. - ### Single cache cluster After starting LocalStack Pro, you can create a cluster with the following command. @@ -61,7 +59,6 @@ $ redis-cli -p 4510 get foo "bar" {{< / command >}} - ### Replication groups in non-cluster mode {{< command >}} @@ -73,7 +70,8 @@ $ awslocal elasticache create-replication-group \ --num-cache-clusters 3 {{< /command >}} -Wait for it to be available. When running the following command, you should see one node group when running: +Wait for it to be available. +When running the following command, you should see one node group when running: {{< command >}} $ awslocal elasticache describe-replication-groups --replication-group-id my-redis-replication-group @@ -86,7 +84,6 @@ $ awslocal elasticache describe-replication-groups --replication-group-id my-red --query "ReplicationGroups[0].NodeGroups[0].PrimaryEndpoint" {{< /command >}} - ### Replication groups in cluster mode The cluster mode is enabled by using `--num-node-groups` and `--replicas-per-node-group`: @@ -101,7 +98,8 @@ $ awslocal elasticache create-replication-group \ --replicas-per-node-group 2 {{< /command >}} -Note that the group nodes do not have a primary endpoint. 
Instead they have a `ConfigurationEndpoint`, which you can connect to using `redis-cli -c` where `-c` is for cluster mode. +Note that the group nodes do not have a primary endpoint. +Instead they have a `ConfigurationEndpoint`, which you can connect to using `redis-cli -c` where `-c` is for cluster mode. {{< command >}} $ awslocal elasticache describe-replication-groups --replication-group-id my-clustered-redis-replication-group \ @@ -130,7 +128,6 @@ In the ElastiCache resource browser you can: * Create new cache clusters {{< img src="elasticache-resource-browser-create.png" alt="Create a ElastiCache cluster in the resource browser" >}} - ## Current Limitations LocalStack currently supports Redis single-node and cluster mode, but not memcached. diff --git a/content/en/user-guide/aws/elasticbeanstalk/index.md b/content/en/user-guide/aws/elasticbeanstalk/index.md index de81f4e66e..f8e9148bb7 100644 --- a/content/en/user-guide/aws/elasticbeanstalk/index.md +++ b/content/en/user-guide/aws/elasticbeanstalk/index.md @@ -8,19 +8,24 @@ tags: ["Pro image"] ## Introduction -Elastic Beanstalk (EB) is a managed platform-as-a-service (PaaS) provided by Amazon Web Services (AWS) that simplifies the process of deploying, managing, and scaling web applications and services. Elastic Beanstalk orchestrates various AWS services, including EC2, S3, SNS, and Elastic Load Balancers. Elastic Beanstalk also supports various application environments, such as Java, .NET, Node.js, PHP, Python, Ruby, Go, and Docker. +Elastic Beanstalk (EB) is a managed platform-as-a-service (PaaS) provided by Amazon Web Services (AWS) that simplifies the process of deploying, managing, and scaling web applications and services. +Elastic Beanstalk orchestrates various AWS services, including EC2, S3, SNS, and Elastic Load Balancers. +Elastic Beanstalk also supports various application environments, such as Java, .NET, Node.js, PHP, Python, Ruby, Go, and Docker. -LocalStack allows you to use the Elastic Beanstalk APIs in your local environment to create and manage applications, environments and versions. The supported APIs are available on our [API coverage page](https://docs.localstack.cloud/references/coverage/coverage_elasticbeanstalk/), which provides information on the extent of Elastic Beanstalk's integration with LocalStack. +LocalStack allows you to use the Elastic Beanstalk APIs in your local environment to create and manage applications, environments and versions. +The supported APIs are available on our [API coverage page](https://docs.localstack.cloud/references/coverage/coverage_elasticbeanstalk/), which provides information on the extent of Elastic Beanstalk's integration with LocalStack. ## Getting started This guide is designed for users new to Elastic Beanstalk and assumes basic knowledge of the AWS CLI and our [`awslocal`](https://github.com/localstack/awscli-local) wrapper script. -Start your LocalStack container using your preferred method. We will demonstrate how to create an Elastic Beanstalk application and environment with the AWS CLI. +Start your LocalStack container using your preferred method. +We will demonstrate how to create an Elastic Beanstalk application and environment with the AWS CLI. ### Create an application -To create an Elastic Beanstalk application, you can use the [`CreateApplication`](https://docs.aws.amazon.com/elasticbeanstalk/latest/api/API_CreateApplication.html) API. 
Run the following command to create an application named `my-app`: +To create an Elastic Beanstalk application, you can use the [`CreateApplication`](https://docs.aws.amazon.com/elasticbeanstalk/latest/api/API_CreateApplication.html) API. +Run the following command to create an application named `my-app`: {{< command >}} $ awslocal elasticbeanstalk create-application \ @@ -39,7 +44,8 @@ The following output would be retrieved: } ``` -You can also use the [`DescribeApplications`](https://docs.aws.amazon.com/elasticbeanstalk/latest/api/API_DescribeApplications.html) API to retrieve information about your application. Run the following command to retrieve information about the `my-app` application, we created earlier: +You can also use the [`DescribeApplications`](https://docs.aws.amazon.com/elasticbeanstalk/latest/api/API_DescribeApplications.html) API to retrieve information about your application. +Run the following command to retrieve information about the `my-app` application, we created earlier: {{< command >}} $ awslocal elasticbeanstalk describe-applications \ @@ -48,7 +54,8 @@ $ awslocal elasticbeanstalk describe-applications \ ### Create an environment -To create an Elastic Beanstalk environment, you can use the [`CreateEnvironment`](https://docs.aws.amazon.com/elasticbeanstalk/latest/api/API_CreateEnvironment.html) API. Run the following command to create an environment named `my-environment`: +To create an Elastic Beanstalk environment, you can use the [`CreateEnvironment`](https://docs.aws.amazon.com/elasticbeanstalk/latest/api/API_CreateEnvironment.html) API. +Run the following command to create an environment named `my-environment`: {{< command >}} $ awslocal elasticbeanstalk create-environment \ @@ -68,7 +75,8 @@ The following output would be retrieved: } ``` -You can also use the [`DescribeEnvironments`](https://docs.aws.amazon.com/elasticbeanstalk/latest/api/API_DescribeEnvironments.html) API to retrieve information about your environment. Run the following command to retrieve information about the `my-environment` environment, we created earlier: +You can also use the [`DescribeEnvironments`](https://docs.aws.amazon.com/elasticbeanstalk/latest/api/API_DescribeEnvironments.html) API to retrieve information about your environment. +Run the following command to retrieve information about the `my-environment` environment, we created earlier: {{< command >}} $ awslocal elasticbeanstalk describe-environments \ @@ -77,7 +85,8 @@ $ awslocal elasticbeanstalk describe-environments \ ### Create an application version -To create an Elastic Beanstalk application version, you can use the [`CreateApplicationVersion`](https://docs.aws.amazon.com/elasticbeanstalk/latest/api/API_CreateApplicationVersion.html) API. Run the following command to create an application version named `v1`: +To create an Elastic Beanstalk application version, you can use the [`CreateApplicationVersion`](https://docs.aws.amazon.com/elasticbeanstalk/latest/api/API_CreateApplicationVersion.html) API. +Run the following command to create an application version named `v1`: {{< command >}} $ awslocal elasticbeanstalk create-application-version \ @@ -98,7 +107,8 @@ The following output would be retrieved: } ``` -You can also use the [`DescribeApplicationVersions`](https://docs.aws.amazon.com/elasticbeanstalk/latest/api/API_DescribeApplicationVersions.html) API to retrieve information about your application version. 
Run the following command to retrieve information about the `v1` application version, we created earlier: +You can also use the [`DescribeApplicationVersions`](https://docs.aws.amazon.com/elasticbeanstalk/latest/api/API_DescribeApplicationVersions.html) API to retrieve information about your application version. +Run the following command to retrieve information about the `v1` application version, we created earlier: {{< command >}} $ awslocal elasticbeanstalk describe-application-versions \ @@ -107,4 +117,6 @@ $ awslocal elasticbeanstalk describe-application-versions \ ## Current Limitations -LocalStack's Elastic Beanstalk implementation is limited and lacks support for installing application and running it in a local Elastic Beanstalk environment. LocalStack also does not support the [`eb`](https://docs.aws.amazon.com/elasticbeanstalk/latest/dg/eb-cli3.html) CLI tool. However, you can use other integrations, such as AWS CLI & Terraform, to mock the Elastic Beanstalk APIs and test your workflow locally. +LocalStack's Elastic Beanstalk implementation is limited and lacks support for installing application and running it in a local Elastic Beanstalk environment. +LocalStack also does not support the [`eb`](https://docs.aws.amazon.com/elasticbeanstalk/latest/dg/eb-cli3.html) CLI tool. +However, you can use other integrations, such as AWS CLI & Terraform, to mock the Elastic Beanstalk APIs and test your workflow locally. diff --git a/content/en/user-guide/aws/elastictranscoder/index.md b/content/en/user-guide/aws/elastictranscoder/index.md index a803c5da08..3a741cb29b 100644 --- a/content/en/user-guide/aws/elastictranscoder/index.md +++ b/content/en/user-guide/aws/elastictranscoder/index.md @@ -7,19 +7,24 @@ tags: ["Pro image"] ## Introduction -Elastic Transcoder is a managed service that facilitates the transcoding of multimedia files into various formats to ensure compatibility across devices. Elastic Transcoder manages the underlying resources, ensuring high availability and fault tolerance. It also supports a wide range of input and output formats, enabling users to efficiently process and deliver video content at scale. +Elastic Transcoder is a managed service that facilitates the transcoding of multimedia files into various formats to ensure compatibility across devices. +Elastic Transcoder manages the underlying resources, ensuring high availability and fault tolerance. +It also supports a wide range of input and output formats, enabling users to efficiently process and deliver video content at scale. -LocalStack allows you to mock the Elastic Transcoder APIs in your local environment. The supported APIs are available on our [API coverage page](https://docs.localstack.cloud/references/coverage/coverage_elastictranscoder/), which provides information on the extent of Elastic Transcoder's integration with LocalStack. +LocalStack allows you to mock the Elastic Transcoder APIs in your local environment. +The supported APIs are available on our [API coverage page](https://docs.localstack.cloud/references/coverage/coverage_elastictranscoder/), which provides information on the extent of Elastic Transcoder's integration with LocalStack. ## Getting started This guide is designed for users new to Elastic Transcoder and assumes basic knowledge of the AWS CLI and our [`awslocal`](https://github.com/localstack/awscli-local) wrapper script. -Start your LocalStack container using your preferred method. 
We will demonstrate how to create an Elastic Transcoder pipeline, read the pipeline, and list all pipelines using the AWS CLI. +Start your LocalStack container using your preferred method. +We will demonstrate how to create an Elastic Transcoder pipeline, read the pipeline, and list all pipelines using the AWS CLI. ### Create S3 buckets -You can create S3 buckets using the [`mb`](https://docs.aws.amazon.com/cli/latest/reference/s3/mb.html) API. Execute the following command to create two buckets named `elasticbucket` and `outputbucket`: +You can create S3 buckets using the [`mb`](https://docs.aws.amazon.com/cli/latest/reference/s3/mb.html) API. +Execute the following command to create two buckets named `elasticbucket` and `outputbucket`: {{< command >}} $ awslocal s3 mb s3://elasticbucket @@ -28,7 +33,8 @@ $ awslocal s3 mb s3://outputbucket ### Create an Elastic Transcoder pipeline -You can create an Elastic Transcoder pipeline using the [`CreatePipeline`](https://docs.aws.amazon.com/elastictranscoder/latest/developerguide/create-pipeline.html) API. Execute the following command to create a pipeline named `test-pipeline`: +You can create an Elastic Transcoder pipeline using the [`CreatePipeline`](https://docs.aws.amazon.com/elastictranscoder/latest/developerguide/create-pipeline.html) API. +Execute the following command to create a pipeline named `test-pipeline`: {{< command >}} $ awslocal elastictranscoder create-pipeline \ @@ -71,7 +77,8 @@ The following output would be retrieved: ### List the pipelines -You can list all pipelines using the [`ListPipelines`](https://docs.aws.amazon.com/elastictranscoder/latest/developerguide/list-pipelines.html) API. Execute the following command to list all pipelines: +You can list all pipelines using the [`ListPipelines`](https://docs.aws.amazon.com/elastictranscoder/latest/developerguide/list-pipelines.html) API. +Execute the following command to list all pipelines: {{< command >}} $ awslocal elastictranscoder list-pipelines @@ -111,7 +118,8 @@ The following output would be retrieved: ### Read the pipeline -You can read a pipeline using the [`ReadPipeline`](https://docs.aws.amazon.com/elastictranscoder/latest/developerguide/read-pipeline.html) API. Execute the following command to read the pipeline with the ID `0998507242379-vltecz`: +You can read a pipeline using the [`ReadPipeline`](https://docs.aws.amazon.com/elastictranscoder/latest/developerguide/read-pipeline.html) API. +Execute the following command to read the pipeline with the ID `0998507242379-vltecz`: {{< command >}} $ awslocal elastictranscoder read-pipeline --id 0998507242379-vltecz diff --git a/content/en/user-guide/aws/elb/index.md b/content/en/user-guide/aws/elb/index.md index bbfce1b778..e1ca0a94c9 100644 --- a/content/en/user-guide/aws/elb/index.md +++ b/content/en/user-guide/aws/elb/index.md @@ -7,15 +7,19 @@ tags: ["Pro image"] ## Introduction -Elastic Load Balancing (ELB) is a service that allows users to distribute incoming traffic across multiple targets, such as EC2 instances, containers, IP addresses, and lambda functions and automatically scales its request handling capacity in response to incoming traffic. It also monitors the health of its registered targets and ensures that it routes traffic only to healthy targets. You can check [the official AWS documentation](https://docs.aws.amazon.com/elasticloadbalancing/latest/userguide/what-is-load-balancing.html) to understand the basic terms and concepts used in the ELB. 
+Elastic Load Balancing (ELB) is a service that allows users to distribute incoming traffic across multiple targets, such as EC2 instances, containers, IP addresses, and Lambda functions, and automatically scales its request handling capacity in response to incoming traffic. +It also monitors the health of its registered targets and ensures that it routes traffic only to healthy targets. +You can check [the official AWS documentation](https://docs.aws.amazon.com/elasticloadbalancing/latest/userguide/what-is-load-balancing.html) to understand the basic terms and concepts used in the ELB. -Localstack allows you to use the Elastic Load Balancing APIs in your local environment to create, edit, and view load balancers, target groups, listeners, and rules. The supported APIs are available on our [API coverage page](https://docs.localstack.cloud/references/coverage/coverage_elbv2/), which provides information on the extent of ELB's integration with LocalStack. +LocalStack allows you to use the Elastic Load Balancing APIs in your local environment to create, edit, and view load balancers, target groups, listeners, and rules. +The supported APIs are available on our [API coverage page](https://docs.localstack.cloud/references/coverage/coverage_elbv2/), which provides information on the extent of ELB's integration with LocalStack. ## Getting started This guide is designed for users new to Elastic Load Balancing and assumes basic knowledge of the AWS CLI and our [`awslocal`](https://github.com/localstack/awscli-local) wrapper script. -Start your LocalStack container using your preferred method. We will demonstrate how to create an Application Load Balancer, along with its target group, listener, and rule, and forward requests to an IP target. +Start your LocalStack container using your preferred method. +We will demonstrate how to create an Application Load Balancer, along with its target group, listener, and rule, and forward requests to an IP target. ### Start a target server @@ -27,7 +31,8 @@ $ docker run --rm -itd -p 5678:80 ealen/echo-server ### Create a load balancer -To specify the subnet and VPC in which the load balancer will be created, you can use the [`DescribeSubnets`](https://docs.aws.amazon.com/elasticloadbalancing/latest/APIReference/API_DescribeSubnets.html) API to retrieve the subnet ID and VPC ID. In this example, we will use the subnet and VPC in the `us-east-1f` availability zone. +To specify the subnet and VPC in which the load balancer will be created, you can use the [`DescribeSubnets`](https://docs.aws.amazon.com/elasticloadbalancing/latest/APIReference/API_DescribeSubnets.html) API to retrieve the subnet ID and VPC ID. +In this example, we will use the subnet and VPC in the `us-east-1f` availability zone. {{< command >}} $ subnet_info=$(awslocal ec2 describe-subnets --filters Name=availability-zone,Values=us-east-1f \ @@ -38,7 +43,8 @@ $ subnet_id=$(echo $subnet_info | jq -r '.SubnetId') $ vpc_id=$(echo $subnet_info | jq -r '.VpcId') {{< /command >}} -To create a load balancer, you can use the [`CreateLoadBalancer`](https://docs.aws.amazon.com/elasticloadbalancing/latest/APIReference/API_CreateLoadBalancer.html) API.
+The following command creates an Application Load Balancer named `example-lb`: {{< command >}} $ loadBalancer=$(awslocal elbv2 create-load-balancer --name example-lb \ @@ -47,7 +53,8 @@ $ loadBalancer=$(awslocal elbv2 create-load-balancer --name example-lb \ ### Create a target group -To create a target group, you can use the [`CreateTargetGroup`](https://docs.aws.amazon.com/elasticloadbalancing/latest/APIReference/API_CreateTargetGroup.html) API. The following command creates a target group named `example-target-group`: +To create a target group, you can use the [`CreateTargetGroup`](https://docs.aws.amazon.com/elasticloadbalancing/latest/APIReference/API_CreateTargetGroup.html) API. +The following command creates a target group named `example-target-group`: {{< command >}} $ targetGroup=$(awslocal elbv2 create-target-group --name example-target-group \ @@ -57,7 +64,8 @@ $ targetGroup=$(awslocal elbv2 create-target-group --name example-target-group \ ### Register a target -To register a target, you can use the [`RegisterTargets`](https://docs.aws.amazon.com/elasticloadbalancing/latest/APIReference/API_RegisterTargets.html) API. The following command registers the target with the target group created in the previous step: +To register a target, you can use the [`RegisterTargets`](https://docs.aws.amazon.com/elasticloadbalancing/latest/APIReference/API_RegisterTargets.html) API. +The following command registers the target with the target group created in the previous step: {{< command >}} $ awslocal elbv2 register-targets --targets Id=127.0.0.1,Port=5678,AvailabilityZone=all \ @@ -71,7 +79,8 @@ You can find the gateway address by running `docker inspect `. ### Create a listener and a rule -We create a for the load balancer using the [`CreateListener`](https://docs.aws.amazon.com/elasticloadbalancing/latest/APIReference/API_CreateListener.html) API. The following command creates a listener for the load balancer created in the previous step: +We create a listener for the load balancer using the [`CreateListener`](https://docs.aws.amazon.com/elasticloadbalancing/latest/APIReference/API_CreateListener.html) API. +The following command creates a listener for the load balancer created in the previous step: {{< command >}} $ listenerArn=$(awslocal elbv2 create-listener \ @@ -79,7 +88,8 @@ $ listenerArn=$(awslocal elbv2 create-listener \ --load-balancer-arn $loadBalancer | jq -r '.Listeners[]|.ListenerArn') {{< /command >}} -To create a rule for the listener, you can use the [`CreateRule`](https://docs.aws.amazon.com/elasticloadbalancing/latest/APIReference/API_CreateRule.html) API. The following command creates a rule for the listener created above: +To create a rule for the listener, you can use the [`CreateRule`](https://docs.aws.amazon.com/elasticloadbalancing/latest/APIReference/API_CreateRule.html) API. +The following command creates a rule for the listener created above: {{< command >}} $ listenerRule=$(awslocal elbv2 create-rule \ diff --git a/content/en/user-guide/aws/emr/index.md index 4fc31a4a1e..d43bcc7c86 100644 --- a/content/en/user-guide/aws/emr/index.md +++ b/content/en/user-guide/aws/emr/index.md @@ -11,21 +11,29 @@ aliases: ## Introduction -Amazon Elastic MapReduce (EMR) is a fully managed big data processing service that allows developers to effortlessly create, deploy, and manage big data applications. EMR supports various big data processing frameworks, including Hadoop MapReduce, Apache Spark, Apache Hive, and Apache Pig.
Developers can leverage these frameworks and their rich ecosystem of tools and libraries to perform complex data transformations, machine learning tasks, and real-time data processing. +Amazon Elastic MapReduce (EMR) is a fully managed big data processing service that allows developers to effortlessly create, deploy, and manage big data applications. +EMR supports various big data processing frameworks, including Hadoop MapReduce, Apache Spark, Apache Hive, and Apache Pig. +Developers can leverage these frameworks and their rich ecosystem of tools and libraries to perform complex data transformations, machine learning tasks, and real-time data processing. -LocalStack Pro supports EMR and allows developers to run data analytics workloads locally. EMR utilizes various tools in the [Hadoop](https://hadoop.apache.org/) and [Spark](https://spark.apache.org) ecosystem, and your EMR instance is automatically configured to connect seamlessly to LocalStack's S3 API. LocalStack also supports EMR Serverless to create applications and job runs, to run your Spark/PySpark jobs locally. +LocalStack Pro supports EMR and allows developers to run data analytics workloads locally. +EMR utilizes various tools in the [Hadoop](https://hadoop.apache.org/) and [Spark](https://spark.apache.org) ecosystem, and your EMR instance is automatically configured to connect seamlessly to LocalStack's S3 API. +LocalStack also supports EMR Serverless to create applications and job runs, to run your Spark/PySpark jobs locally. The supported APIs are available on our [API coverage page](https://docs.localstack.cloud/references/coverage/coverage_emr/), which provides information on the extent of EMR's integration with LocalStack. {{< callout >}} -To utilize the EMR API, certain additional dependencies need to be downloaded from the network (including Hadoop, Hive, Spark, etc). These dependencies are fetched automatically during service startup, hence it is important to ensure a reliable internet connection when retrieving the dependencies for the first time. Alternatively, you can use one of our `*-bigdata` Docker image tags which already ship with the required libraries baked in and may provide better stability (see [here](https://docs.localstack.cloud/user-guide/ci/#ci-images) for more details). +To utilize the EMR API, certain additional dependencies need to be downloaded from the network (including Hadoop, Hive, Spark, etc). +These dependencies are fetched automatically during service startup, hence it is important to ensure a reliable internet connection when retrieving the dependencies for the first time. +Alternatively, you can use one of our `*-bigdata` Docker image tags which already ship with the required libraries baked in and may provide better stability (see [here](https://docs.localstack.cloud/user-guide/ci/#ci-images) for more details). {{< /callout >}} ## Getting started This guide is designed for users new to EMR and assumes basic knowledge of the AWS CLI and our [`awslocal`](https://github.com/localstack/awscli-local) wrapper script. -Start your LocalStack container using your preferred method. We will create a virtual EMR cluster using the AWS CLI. To create an EMR cluster, run the following command: +Start your LocalStack container using your preferred method. +We will create a virtual EMR cluster using the AWS CLI. 
+To create an EMR cluster, run the following command: {{< command >}} $ awslocal emr create-cluster \ diff --git a/content/en/user-guide/aws/es/index.md b/content/en/user-guide/aws/es/index.md index 864dcbfb2a..21eb52cdfe 100644 --- a/content/en/user-guide/aws/es/index.md +++ b/content/en/user-guide/aws/es/index.md @@ -12,7 +12,6 @@ The Elasticsearch Service in LocalStack lets you create one or more single-node This service is, like its AWS counterpart, heavily linked with the [OpenSearch Service](../opensearch). Any cluster created with the Elasticsearch Service will show up in the OpenSearch Service and vice versa. - ## Creating an Elasticsearch cluster You can go ahead and use [awslocal]({{< ref "aws-cli.md#localstack-aws-cli-awslocal" >}}) to create a new elasticsearch domain via the `aws es create-elasticsearch-domain` command. @@ -127,7 +126,6 @@ $ curl -s http://my-domain.us-east-1.es.localhost.localstack.cloud:4566/_cluster } {{< / command >}} - ## Advanced topics ### Endpoints @@ -162,7 +160,6 @@ Once the domain processing is complete, you can access the cluster: $ curl http://localhost:4566/my-custom-endpoint/_cluster/health {{< / command >}} - ### Re-using a single cluster instance In some cases, you may not want to create a new cluster instance for each domain, @@ -171,7 +168,6 @@ In this case, you can set `OPENSEARCH_MULTI_CLUSTER=0` (previously `ES_MULTI_CLU This will multiplex all domains to the same cluster, or return the same port every time when using the `port` endpoint strategy. This can however lead to unexpected behavior when persisting data into Elasticsearch, or creating clusters with different versions, so we do not recommend it. - ### Storage Layout Elasticsearch will be organized in your state directory as follows: @@ -191,13 +187,12 @@ localstack@machine % tree -L 4 volume/state │ │ └── tmp ``` - ### Advanced Security Options + Since LocalStack 1.4.0, the OpenSearch and ElasticSearch services support "Advanced Security Options". This feature is currently only supported for OpenSearch domains (which can also be created by the elasticsearch service). More info can be found on [the OpenSearch Service docs page](../opensearch#advanced-security-options). - ## Custom Elasticsearch backends LocalStack downloads elasticsearch asynchronously the first time you run the `aws es create-elasticsearch-domain`, so you will get the response from localstack first and then (after download/install) you will have your elasticsearch cluster running locally. @@ -294,7 +289,8 @@ $ awslocal es create-elasticsearch-domain \ } {{< /command >}} -3. If the `Processing` status is true, it means that the cluster is not yet healthy. You can run `describe-elasticsearch-domain` to receive the status: +3. If the `Processing` status is true, it means that the cluster is not yet healthy. + You can run `describe-elasticsearch-domain` to receive the status: {{< command >}} $ awslocal es describe-elasticsearch-domain --domain-name mylogs-2 {{< /command >}} @@ -311,7 +307,6 @@ $ curl -X PUT mylogs-2.us-east-1.es.localhost.localstack.cloud:4566/my-index {"acknowledged":true,"shards_acknowledged":true,"index":"my-index"} {{< /command >}} - ## Differences to AWS * By default, AWS only sets the `Endpoint` attribute of the cluster status once the cluster is up. @@ -320,4 +315,5 @@ $ curl -X PUT mylogs-2.us-east-1.es.localhost.localstack.cloud:4566/my-index ## Current Limitations -The default Elasticsearch version used is 7.10.0. 
This is a slight deviation from the default version used in AWS (Elasticsearch 1.5), which is not supported in LocalStack. +The default Elasticsearch version used is 7.10.0. +This is a slight deviation from the default version used in AWS (Elasticsearch 1.5), which is not supported in LocalStack. diff --git a/content/en/user-guide/aws/events/index.md b/content/en/user-guide/aws/events/index.md index 04ed0fdcc6..8956de7d7d 100644 --- a/content/en/user-guide/aws/events/index.md +++ b/content/en/user-guide/aws/events/index.md @@ -6,19 +6,27 @@ description: Get started with EventBridge on LocalStack ## Introduction -EventBridge provides a centralized mechanism to discover and communicate events across various AWS services and applications. EventBridge allows you to register, track, and resolve events, which indicates a change in the environment and then applies a rule to route the event to a target. EventBridge rules are tied to an Event Bus to manage event-driven workflows. You can use either identity-based or resource-based policies to control access to EventBridge resources, where the former can be attached to IAM users, groups, and roles, and the latter can be attached to specific AWS resources. +EventBridge provides a centralized mechanism to discover and communicate events across various AWS services and applications. +EventBridge allows you to register, track, and resolve events, which indicates a change in the environment and then applies a rule to route the event to a target. +EventBridge rules are tied to an Event Bus to manage event-driven workflows. +You can use either identity-based or resource-based policies to control access to EventBridge resources, where the former can be attached to IAM users, groups, and roles, and the latter can be attached to specific AWS resources. -LocalStack allows you to use the EventBridge APIs in your local environment to create rules that route events to a target. The supported APIs are available on our [API coverage page](https://docs.localstack.cloud/references/coverage/coverage_events/), which provides information on the extent of EventBridge's integration with LocalStack. For information on EventBridge Pipes, please refer to the [EventBridge Pipes]({{< ref "user-guide/aws/pipes" >}}) section. +LocalStack allows you to use the EventBridge APIs in your local environment to create rules that route events to a target. +The supported APIs are available on our [API coverage page](https://docs.localstack.cloud/references/coverage/coverage_events/), which provides information on the extent of EventBridge's integration with LocalStack. +For information on EventBridge Pipes, please refer to the [EventBridge Pipes]({{< ref "user-guide/aws/pipes" >}}) section. {{< callout >}} -We have introduced an all-new LocalStack-native EventBridge provider available behind a feature flag. You can activate it by configuring `PROVIDER_OVERRIDE_EVENTS=v2` in your LocalStack configuration. Learn more about the new provider in the [EventBridge v2 Discuss post](https://discuss.localstack.cloud/t/introducing-eventbridge-v2-in-localstack/946). +We have introduced an all-new LocalStack-native EventBridge provider available behind a feature flag. +You can activate it by configuring `PROVIDER_OVERRIDE_EVENTS=v2` in your LocalStack configuration. +Learn more about the new provider in the [EventBridge v2 Discuss post](https://discuss.localstack.cloud/t/introducing-eventbridge-v2-in-localstack/946). 
{{< /callout >}} ## Getting Started This guide is designed for users new to EventBridge and assumes basic knowledge of the AWS CLI and our [`awslocal`](https://github.com/localstack/awscli-local) wrapper script. -Start your LocalStack container using your preferred method. We will demonstrate creating an EventBridge rule to run a Lambda function on a schedule. +Start your LocalStack container using your preferred method. +We will demonstrate creating an EventBridge rule to run a Lambda function on a schedule. ### Create a Lambda Function @@ -59,7 +67,8 @@ $ awslocal events put-rule \ --schedule-expression 'rate(2 minutes)' {{< /command >}} -In the above command, we have specified a schedule expression of `rate(2 minutes)`, which will run the rule every two minutes. It means that the Lambda function will be invoked every two minutes. +In the above command, we have specified a schedule expression of `rate(2 minutes)`, which will run the rule every two minutes. +It means that the Lambda function will be invoked every two minutes. Next, grant the EventBridge service principal (`events.amazonaws.com`) permission to run the rule, using the [`AddPermission`](https://docs.aws.amazon.com/cli/latest/reference/events/add-permission.html) API: @@ -95,7 +104,8 @@ $ awslocal events put-targets \ ### Verify the Lambda invocation -You can verify the Lambda invocation by checking the CloudWatch logs. However, wait at least 2 minutes after running the last command before checking the logs. +You can verify the Lambda invocation by checking the CloudWatch logs. +However, wait at least 2 minutes after running the last command before checking the logs. Run the following command to list the CloudWatch log groups: @@ -142,14 +152,15 @@ At this time LocalStack supports the following [target types](https://docs.aws.a - Kinesis - CloudWatch log group - ## Resource Browser -The LocalStack Web Application provides a Resource Browser for managing EventBridge Buses. You can access the Resource Browser by opening the LocalStack Web Application in your browser, navigating to the **Resources** section, and then clicking on **EventBridge** under the **App Integration** section. +The LocalStack Web Application provides a Resource Browser for managing EventBridge Buses. +You can access the Resource Browser by opening the LocalStack Web Application in your browser, navigating to the **Resources** section, and then clicking on **EventBridge** under the **App Integration** section. The Resource Browser allows you to perform the following actions: - **View the Event Buses**: You can view the list of EventBridge Buses running locally, alongside their Amazon Resource Names (ARNs) and Policies. - **Create Event Rule**: You can create a new Event Rule by specifying **Name**, **Description**, **Event Pattern**, **Schedule Expressions**, **State**, **Role ARN**, and **Tags**. -- **Trigger Event**: You can trigger an Event by specifying the **Entries** and **Endpoint Id**. While creating an Entry, you must specify **Source**, **Event Bus Name**, **Detail**, **Resources**, **Detail Type**, and **Trace Header**. +- **Trigger Event**: You can trigger an Event by specifying the **Entries** and **Endpoint Id**. + While creating an Entry, you must specify **Source**, **Event Bus Name**, **Detail**, **Resources**, **Detail Type**, and **Trace Header**. - **Remove Selected**: You can remove the selected EventBridge Bus. 
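Beyond scheduled rules, you can also exercise pattern-based rules by publishing your own events onto a bus. The snippet below is a minimal sketch against the default event bus; the source `my.app`, detail type `order-created`, and the detail payload are illustrative values, not part of the walkthrough above:

{{< command >}}
# Publish a single custom event to the default bus (values are placeholders)
$ awslocal events put-events \
    --entries '[{"Source": "my.app", "DetailType": "order-created", "Detail": "{\"orderId\": \"1234\"}"}]'
{{< / command >}}

Any rule whose event pattern matches the `Source` and `DetailType` of the entry should then forward the event to its configured targets.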
diff --git a/content/en/user-guide/aws/feature-coverage.md b/content/en/user-guide/aws/feature-coverage.md index 4344d256d1..a110eb34a2 100644 --- a/content/en/user-guide/aws/feature-coverage.md +++ b/content/en/user-guide/aws/feature-coverage.md @@ -17,17 +17,17 @@ LocalStack provides emulation services for different AWS APIs (e.g., Lambda, SQS | | | |----------|------------------------------------------------------------------------------------------------------------------------| -| ⭐⭐⭐⭐⭐ | Feature fully supported by LocalStack maintainers; feature is guaranteed to pass all or the majority of tests | -| ⭐⭐⭐⭐ | Feature partially supported by LocalStack maintainers | -| ⭐⭐⭐ | Feature supports basic functionalities (e.g., CRUD operations) | -| ⭐⭐ | Feature should be considered unstable | -| ⭐ | Feature is experimental and regressions should be expected | -| **-** | Feature is not yet implemented | - +| ⭐⭐⭐⭐⭐ | Feature fully supported by LocalStack maintainers; feature is guaranteed to pass all or the majority of tests | +| ⭐⭐⭐⭐ | Feature partially supported by LocalStack maintainers | +| ⭐⭐⭐ | Feature supports basic functionalities (e.g., CRUD operations) | +| ⭐⭐ | Feature should be considered unstable | +| ⭐ | Feature is experimental and regressions should be expected | +| **-** | Feature is not yet implemented | ## Emulation Levels -* CRUD: The service accepts requests and returns proper (potentially static) responses. No additional business logic besides storing entities. +* CRUD: The service accepts requests and returns proper (potentially static) responses. + No additional business logic besides storing entities. * Emulated: The service imitates the functionality, including synchronous and asynchronous business logic operating on service entities. ## AWS Feature Coverage diff --git a/content/en/user-guide/aws/firehose/index.md b/content/en/user-guide/aws/firehose/index.md index a4b83abbad..8d44f6125f 100644 --- a/content/en/user-guide/aws/firehose/index.md +++ b/content/en/user-guide/aws/firehose/index.md @@ -13,19 +13,23 @@ Amazon recently renamed Kinesis Data Firehose to Data Firehose. ## Introduction -Kinesis Data Firehose is a service provided by AWS that allows you to extract, transform and load streaming data into various destinations, such as Amazon S3, Amazon Redshift, and Elasticsearch. With Kinesis Data Firehose, you can ingest and deliver real-time data from different sources as it automates data delivery, handles buffering and compression, and scales according to the data volume. +Kinesis Data Firehose is a service provided by AWS that allows you to extract, transform and load streaming data into various destinations, such as Amazon S3, Amazon Redshift, and Elasticsearch. +With Kinesis Data Firehose, you can ingest and deliver real-time data from different sources as it automates data delivery, handles buffering and compression, and scales according to the data volume. -LocalStack allows you to use the Kinesis Data Firehose APIs in your local environment to load and transform real-time data. The supported APIs are available on our [API coverage page](https://docs.localstack.cloud/references/coverage/coverage_firehose/), which provides information on the extent of Kinesis Data Firehose's integration with LocalStack. +LocalStack allows you to use the Kinesis Data Firehose APIs in your local environment to load and transform real-time data. 
+The supported APIs are available on our [API coverage page](https://docs.localstack.cloud/references/coverage/coverage_firehose/), which provides information on the extent of Kinesis Data Firehose's integration with LocalStack. ## Getting started This guide is designed for users new to Kinesis Data Firehouse and assumes basic knowledge of the AWS CLI and our [`awslocal`](https://github.com/localstack/awscli-local) wrapper script. -Start your LocalStack container using your preferred method. We will demonstrate how to use Firehose to load Kinesis data into Elasticsearch with S3 Backup with the AWS CLI. +Start your LocalStack container using your preferred method. +We will demonstrate how to use Firehose to load Kinesis data into Elasticsearch with S3 Backup with the AWS CLI. ### Create an Elasticsearch domain -You can create an Elasticsearch domain using the [`create-elasticsearch-domain`](https://docs.aws.amazon.com/cli/latest/reference/es/create-elasticsearch-domain.html) command. Execute the following command to create a domain named `es-local`: +You can create an Elasticsearch domain using the [`create-elasticsearch-domain`](https://docs.aws.amazon.com/cli/latest/reference/es/create-elasticsearch-domain.html) command. +Execute the following command to create a domain named `es-local`: {{< command >}} $ awslocal es create-elasticsearch-domain --domain-name es-local @@ -37,7 +41,8 @@ Save the value of the `Endpoint` field from the response, as it will be required Now let us create our target S3 bucket and our source Kinesis stream: -Before creating the stream, we need to create an S3 bucket to store our backup data. You can do this using the [`mb`](https://docs.aws.amazon.com/cli/latest/reference/s3/mb.html) command: +Before creating the stream, we need to create an S3 bucket to store our backup data. +You can do this using the [`mb`](https://docs.aws.amazon.com/cli/latest/reference/s3/mb.html) command: {{< command >}} $ awslocal s3 mb s3://kinesis-activity-backup-local @@ -53,12 +58,16 @@ $ awslocal kinesis create-stream \ ### Create a Firehouse delivery stream -You can now create the Firehose delivery stream. In this configuration, Elasticsearch serves as the destination, while S3 serves as the repository for our AllDocuments backup. Within the `kinesis-stream-source-configuration`, it is required to specify the ARN of our Kinesis stream and the role that will allow you the access to the stream. +You can now create the Firehose delivery stream. +In this configuration, Elasticsearch serves as the destination, while S3 serves as the repository for our AllDocuments backup. +Within the `kinesis-stream-source-configuration`, it is required to specify the ARN of our Kinesis stream and the role that will allow you the access to the stream. -The `elasticsearch-destination-configuration` sets vital parameters, which includes the access role, `DomainARN` of the Elasticsearch domain where you wish to publish, and the settings including the `IndexName` and `TypeName` for the Elasticsearch setup. Additionally to backup all documents to S3, the `S3BackupMode` parameter is set to `AllDocuments`, which is accompanied by `S3Configuration`. +The `elasticsearch-destination-configuration` sets vital parameters, which includes the access role, `DomainARN` of the Elasticsearch domain where you wish to publish, and the settings including the `IndexName` and `TypeName` for the Elasticsearch setup. 
+Additionally to backup all documents to S3, the `S3BackupMode` parameter is set to `AllDocuments`, which is accompanied by `S3Configuration`. {{< callout >}} -Within LocalStack's default configuration, IAM roles remain unverified and no strict validation is applied on ARNs. However, when operating within the AWS environment, you need to check the access rights of the specified role for the task. +Within LocalStack's default configuration, IAM roles remain unverified and no strict validation is applied on ARNs. +However, when operating within the AWS environment, you need to check the access rights of the specified role for the task. {{< /callout >}} You can use the [`CreateDeliveryStream`](https://docs.aws.amazon.com/firehose/latest/APIReference/API_CreateDeliveryStream.html) API to create a Firehose delivery stream named `activity-to-elasticsearch-local`: @@ -81,16 +90,20 @@ On successful execution, the command will return the `DeliveryStreamARN` of the ### Testing the setup -Before testing the integration, it's necessary to confirm if the local Elasticsearch cluster is up. You can use the [`describe-elasticsearch-domain`](https://docs.aws.amazon.com/cli/latest/reference/es/describe-elasticsearch-domain.html) command to check the status of the Elasticsearch cluster. Run the following command: +Before testing the integration, it's necessary to confirm if the local Elasticsearch cluster is up. +You can use the [`describe-elasticsearch-domain`](https://docs.aws.amazon.com/cli/latest/reference/es/describe-elasticsearch-domain.html) command to check the status of the Elasticsearch cluster. +Run the following command: {{< command >}} $ awslocal es describe-elasticsearch-domain \ --domain-name es-local | jq ".DomainStatus.Processing" {{< / command >}} -Once the command returns `false`, you can move forward with data ingestion. The data can be added to the source Kinesis stream or directly to the Firehose delivery stream. +Once the command returns `false`, you can move forward with data ingestion. +The data can be added to the source Kinesis stream or directly to the Firehose delivery stream. -You can add data to the Kinesis stream using the [`PutRecord`](https://docs.aws.amazon.com/kinesis/latest/APIReference/API_PutRecord.html) API. The following command adds a record to the stream: +You can add data to the Kinesis stream using the [`PutRecord`](https://docs.aws.amazon.com/kinesis/latest/APIReference/API_PutRecord.html) API. +The following command adds a record to the stream: {{< command >}} $ awslocal kinesis put-record \ @@ -103,7 +116,8 @@ $ awslocal kinesis put-record \ For users using AWS CLI v2, consider adding `--cli-binary-format raw-in-base64-out` to the command mentioned above. {{< /callout >}} -You can use the [`PutRecord`](https://docs.aws.amazon.com/firehose/latest/APIReference/API_PutRecord.html) API to add data to the Firehose delivery stream. The following command adds a record to the stream: +You can use the [`PutRecord`](https://docs.aws.amazon.com/firehose/latest/APIReference/API_PutRecord.html) API to add data to the Firehose delivery stream. +The following command adds a record to the stream: {{< command >}} $ awslocal firehose put-record \ @@ -111,7 +125,8 @@ $ awslocal firehose put-record \ --record '{ "Data": "eyJ0YXJnZXQiOiAiSGVsbG8gd29ybGQifQ==" }' {{< / command >}} -To review the entries in Elasticsearch, you can employ `cURL` for simplicity. Remember to replace the URL with the `Endpoint` field from the initial `create-elasticsearch-domain` operation. 
+To review the entries in Elasticsearch, you can employ `cURL` for simplicity. +Remember to replace the URL with the `Endpoint` field from the initial `create-elasticsearch-domain` operation. {{< command >}} $ curl -s http://es-local.us-east-1.es.localhost.localstack.cloud:443/activity/_search | jq '.hits.hits' @@ -142,7 +157,8 @@ You will get an output similar to the following: ] ``` -If you receive a comparable output, your Firehose delivery stream setup is accurate! Additionally, take a look at the designated S3 bucket to ensure the backup process is functioning correctly. +If you receive a comparable output, your Firehose delivery stream setup is accurate! +Additionally, take a look at the designated S3 bucket to ensure the backup process is functioning correctly. ## Examples diff --git a/content/en/user-guide/aws/fis/index.md b/content/en/user-guide/aws/fis/index.md index f23ab4a6a0..dafbcbf7e6 100644 --- a/content/en/user-guide/aws/fis/index.md +++ b/content/en/user-guide/aws/fis/index.md @@ -85,7 +85,6 @@ Run the following command to create an FIS experiment template using the configu $ awslocal fis create-experiment-template --cli-input-json file://create-experiment.json {{< /command >}} - The following output would be retrieved: ```json @@ -233,17 +232,21 @@ The following actions are deprecated and marked for removal: - **`localstack:generic:api-error`**: Raise a custom HTTP error. This action accepts the following parameters. - - `region`: The region name where faults will be introduced, e.g. `us-west-1`. Default: region of the experiment - - `service`: The service name to limit faults to, e.g. `kms`. Default: all services - - `operation`: The operation name for the specified service to limit faults to, e.g. `ListKeys` - - `percentage`: The percentage of API calls to fail among matching calls. Default: 100 - - `exception`: The name of the exception to raise for affected API calls. Default: `InternalError` - - `errorCode`: The HTTP error code to return for impacted API calls. Default: 500 + - `region`: The region name where faults will be introduced, e.g. `us-west-1`. + Default: region of the experiment + - `service`: The service name to limit faults to, e.g. `kms`. + Default: all services + - `operation`: The operation name for the specified service to limit faults to, e.g. `ListKeys` + - `percentage`: The percentage of API calls to fail among matching calls. + Default: 100 + - `exception`: The name of the exception to raise for affected API calls. + Default: `InternalError` + - `errorCode`: The HTTP error code to return for impacted API calls. + Default: 500 - **`localstack:kms:inject-api-internal-error`**: Special case of the previous action which injects an InternalError for KMS operations. - **`localstack:log-debug`**: Prints a debug message in the LocalStack logs when experiment is started and stopped. - **`localstack:generic:latency`**: Introduces a latency in the network call. - ## Current Limitations - LocalStack does not implement the [selection mode](https://docs.aws.amazon.com/fis/latest/userguide/targets.html#target-selection-mode) mechanism available on AWS. 
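To see these fault-injection actions in effect, you can start an experiment from a previously created template and poll its state. The commands below are a minimal sketch, assuming a template already exists (for example the one created from `create-experiment.json` earlier); `<template-id>` and `<experiment-id>` are placeholders for the IDs returned by the respective calls:

{{< command >}}
# Start an experiment from an existing template (ID is a placeholder)
$ awslocal fis start-experiment --experiment-template-id <template-id>
# Check the state of the running experiment (ID is a placeholder)
$ awslocal fis get-experiment --id <experiment-id>
{{< / command >}}

While the experiment is running, API calls matching the configured action parameters should start failing with the configured error.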
diff --git a/content/en/user-guide/aws/glacier/index.md index 3a91bb8a34..9281d4fa78 100644 --- a/content/en/user-guide/aws/glacier/index.md +++ b/content/en/user-guide/aws/glacier/index.md @@ -136,7 +136,7 @@ $ awslocal glacier get-job-output --vault-name sample-vault --account-id - --job {{< /command >}} {{< callout >}} -Please not that currently, this operation is only mocked, and will create an empty file named `my-archive.jpg`, not containing the contents of your archive. +Please note that currently, this operation is only mocked, and will create an empty file named `my-archive.jpg`, not containing the contents of your archive. {{< /callout >}} ### Retrieve the inventory information @@ -164,6 +164,7 @@ $ awslocal glacier get-job-output \ {{< /command >}} Inspecting the content of the `inventory.json` file, we can find an inventory of the vault: + ```json { "VaultARN": "arn:aws:glacier:us-east-1:000000000000:vaults/sample-vault", diff --git a/content/en/user-guide/aws/glue/index.md index 58956bbc7c..25a7db192f 100644 --- a/content/en/user-guide/aws/glue/index.md +++ b/content/en/user-guide/aws/glue/index.md @@ -19,7 +19,8 @@ Start your LocalStack container using your preferred method. We will demonstrate how to create databases and table metadata in Glue, run Glue ETL jobs, import databases from Athena, and run Glue Crawlers with the AWS CLI. {{< callout >}} -In order to run Glue jobs, some additional dependencies have to be fetched from the network, including a Docker image of apprx. 1.5GB which includes Spark, Presto, Hive and other tools. +In order to run Glue jobs, some additional dependencies have to be fetched from the network, including a Docker image of approx. 1.5 GB which includes Spark, Presto, Hive and other tools. These dependencies are automatically fetched when you start up the service, so please make sure you're on a decent internet connection when pulling the dependencies for the first time. {{< /callout >}} @@ -33,6 +34,7 @@ $ awslocal glue get-tables --database db1 {{< /command >}} You should see the following output: + ```json { "TableList": [ @@ -46,7 +48,6 @@ You should see the following output: ### Running Scripts with Scala and PySpark - Create a new PySpark script named `job.py` with the following code: ```python @@ -91,6 +92,7 @@ $ awslocal glue get-job-run --job-name job1 --run-id {{< / command >}} You should see the following output: + ```json { "JobRun": { @@ -108,6 +110,7 @@ For a more detailed example illustrating how to run a local Glue PySpark job, pl The Glue data catalog is integrated with Athena, and the database/table definitions can be imported via the `import-catalog-to-glue` API.
Assume you are running the following Athena queries to create databases and table definitions: + ```sql CREATE DATABASE db2 CREATE EXTERNAL TABLE db2.table1 (a1 Date, a2 STRING, a3 INT) LOCATION 's3://test/table1' @@ -127,6 +130,7 @@ $ awslocal glue get-databases {{< /command >}} You should see the following output: + ```json { "DatabaseList": [ @@ -148,6 +152,7 @@ And you can query the databases with the `get-databases` operation: $ awslocal glue get-tables --database-name db2 {{< / command >}} You should see the following output: + ```json { "TableList": [ @@ -203,6 +208,7 @@ Finally, you can query the table metadata that has been created by the crawler: $ awslocal glue get-tables --database-name db1 {{< / command >}} You should see the following output: + ```json { "TableList": [{ @@ -217,6 +223,7 @@ You can also query the created table partitions: $ awslocal glue get-partitions --database-name db1 --table-name table1 {{< / command >}} You should see the following output: + ```json { "Partitions": [{ @@ -236,6 +243,7 @@ You can first create the local Redshift cluster via: $ awslocal redshift create-cluster --cluster-identifier c1 --node-type dc1.large --master-username test --master-user-password test --db-name db1 {{< / command >}} The output of this command contains the endpoint address of the created Redshift database: + ```json ... "Endpoint": { @@ -286,6 +294,7 @@ $ awslocal glue create-schema --schema-name demo-schema --registry-id RegistryNa --schema-definition '{"type":"record","namespace":"Demo","name":"Person","fields":[{"name":"Name","type":"string"}]}' {{< /command >}} You should see the following output: + ```json { "RegistryName": "demo-registry", @@ -310,6 +319,7 @@ $ awslocal glue register-schema-version --schema-id SchemaName=demo-schema,Regis {{< /command >}} You should see the following output: + ```json { "SchemaVersionId": "ee38732b-b299-430d-a88b-4c429d9e1208", @@ -376,6 +386,7 @@ $ awslocal glue start-job-run --job-name job1 {{< / command >}} The execution of the Glue job can take a few moments - once the job has finished executing, you should see a log line with the query results in the LocalStack container logs, similar to the output below: + ```text 2023-10-17 12:59:20,088 INFO scheduler.DAGScheduler: Job 15 finished: collect at /private/tmp/script-90e5371e.py:28, took 0,158257 s SQL result: ['{"name":"test1","key":123}', '{"name":"test2","key":456}'] @@ -412,7 +423,6 @@ The Resource Browser allows you to perform the following actions: The following Developer Hub applications are using Glue: {{< applications service_filter="glu">}} - The following tutorials are using Glue: {{< tutorials "/tutorials/schema-evolution-glue-msk">}} diff --git a/content/en/user-guide/aws/iam/index.md b/content/en/user-guide/aws/iam/index.md index 7e3f9aecf1..b1f48086d9 100644 --- a/content/en/user-guide/aws/iam/index.md +++ b/content/en/user-guide/aws/iam/index.md @@ -8,17 +8,22 @@ persistence: supported ## Introduction -Identity and Access Management (IAM) is a web service provided by Amazon Web Services (AWS) that enables users to control access to AWS resources securely. IAM allows organizations to create and manage AWS users, groups, and roles, defining granular permissions to access specific AWS services and resources. By centralizing access control, administrators can enforce the principle of least privilege, ensuring users have only the necessary permissions for their tasks. 
+Identity and Access Management (IAM) is a web service provided by Amazon Web Services (AWS) that enables users to control access to AWS resources securely. +IAM allows organizations to create and manage AWS users, groups, and roles, defining granular permissions to access specific AWS services and resources. +By centralizing access control, administrators can enforce the principle of least privilege, ensuring users have only the necessary permissions for their tasks. -LocalStack allows you to use the IAM APIs in your local environment to create and manage users, groups, and roles, granting permissions that adhere to the principle of least privilege. The supported APIs are available on our [API coverage page](https://docs.localstack.cloud/references/coverage/coverage_iam/), which provides information on the extent of IAM's integration with LocalStack. +LocalStack allows you to use the IAM APIs in your local environment to create and manage users, groups, and roles, granting permissions that adhere to the principle of least privilege. +The supported APIs are available on our [API coverage page](https://docs.localstack.cloud/references/coverage/coverage_iam/), which provides information on the extent of IAM's integration with LocalStack. ## Getting started This guide is designed for users new to IAM and assumes basic knowledge of the AWS CLI and our [`awslocal`](https://github.com/localstack/awscli-local) wrapper script. -Start your LocalStack container using your preferred method. We will demonstrate how you can create a new user named `test`, create an access key pair for the user, and assert that the user is recognized after the access keys are configured in the environment. +Start your LocalStack container using your preferred method. +We will demonstrate how you can create a new user named `test`, create an access key pair for the user, and assert that the user is recognized after the access keys are configured in the environment. -By default, in the absence of custom credentials configuration, all requests to LocalStack run under the administrative root user. Run the following command to use the [`GetCallerIdentity`](https://docs.aws.amazon.com/cli/latest/reference/sts/get-caller-identity.html) API to confirm that the request is running under the root user: +By default, in the absence of custom credentials configuration, all requests to LocalStack run under the administrative root user. +Run the following command to use the [`GetCallerIdentity`](https://docs.aws.amazon.com/cli/latest/reference/sts/get-caller-identity.html) API to confirm that the request is running under the root user: {{< command >}} $ awslocal sts get-caller-identity @@ -34,13 +39,15 @@ You can see an output similar to the following: } ``` -You can now create a new user named `test` using the [`CreateUser`](https://docs.aws.amazon.com/cli/latest/reference/iam/create-user.html) API. Run the following command: +You can now create a new user named `test` using the [`CreateUser`](https://docs.aws.amazon.com/cli/latest/reference/iam/create-user.html) API. +Run the following command: {{< command >}} $ awslocal iam create-user --user-name test {{< / command >}} -You can now create an access key pair for the user using the [`CreateAccessKey`](https://docs.aws.amazon.com/cli/latest/reference/iam/create-access-key.html) API. Run the following command: +You can now create an access key pair for the user using the [`CreateAccessKey`](https://docs.aws.amazon.com/cli/latest/reference/iam/create-access-key.html) API. 
+Run the following command: {{< command >}} $ awslocal iam create-access-key --user-name test @@ -61,7 +68,8 @@ You can see an output similar to the following: ... ``` -You can save the `AccessKeyId` and `SecretAccessKey` values, and export them in the environment to run commands under the `test` user. Run the following command: +You can save the `AccessKeyId` and `SecretAccessKey` values, and export them in the environment to run commands under the `test` user. +Run the following command: {{< command >}} $ export AWS_ACCESS_KEY_ID=LKIAQAAAAAAAGFWKCM5F AWS_SECRET_ACCESS_KEY=DUulXk2N2yD6rgoBBR9A/5iXa6dBcLyDknr925Q5 @@ -77,7 +85,8 @@ You can see that the request is now running under the `test` user. ## Resource Browser -The LocalStack Web Application provides a Resource Browser for managing IAM users, groups, and roles. You can access the Resource Browser by opening the LocalStack Web Application in your browser, navigating to the **Resources** section, and then clicking on **IAM** under the **Security Identity Compliance** section. +The LocalStack Web Application provides a Resource Browser for managing IAM users, groups, and roles. +You can access the Resource Browser by opening the LocalStack Web Application in your browser, navigating to the **Resources** section, and then clicking on **IAM** under the **Security Identity Compliance** section. IAM Resource Browser @@ -90,7 +99,8 @@ The Resource Browser allows you to perform the following actions: ## Supported APIs -IAM security enforcement is comprehensively available for all AWS APIs in LocalStack and has undergone thorough testing across multiple services. The services that have been rigorously tested include: +IAM security enforcement is comprehensively available for all AWS APIs in LocalStack and has undergone thorough testing across multiple services. +The services that have been rigorously tested include: - ACM - API Gateway diff --git a/content/en/user-guide/aws/identitystore/index.md b/content/en/user-guide/aws/identitystore/index.md index 955d7710f1..13479673f1 100644 --- a/content/en/user-guide/aws/identitystore/index.md +++ b/content/en/user-guide/aws/identitystore/index.md @@ -7,19 +7,24 @@ tags: ["Pro image"] ## Introduction -Identity Store is a managed service that enables the creation and management of groups within your AWS environment. Groups are used to manage access to AWS resources, and Identity Store provides a central location to create and manage groups across your AWS accounts. +Identity Store is a managed service that enables the creation and management of groups within your AWS environment. +Groups are used to manage access to AWS resources, and Identity Store provides a central location to create and manage groups across your AWS accounts. -LocalStack allows you to use the Identity Store APIs to create and manage groups in your local environment. The supported APIs are available on our [API Coverage Page](https://docs.localstack.cloud/references/coverage/coverage_identitystore/), which provides information on the extent of Identity Store integration with LocalStack. +LocalStack allows you to use the Identity Store APIs to create and manage groups in your local environment. +The supported APIs are available on our [API Coverage Page](https://docs.localstack.cloud/references/coverage/coverage_identitystore/), which provides information on the extent of Identity Store integration with LocalStack. 
## Getting started -This guide is aimed at users who are familiar with the AWS CLI and [`awslocal`](https://github.com/localstack/awscli-local) wrapper script. It will walk you through the basics of setting up and managing groups within the AWS Identity Store using LocalStack. +This guide is aimed at users who are familiar with the AWS CLI and [`awslocal`](https://github.com/localstack/awscli-local) wrapper script. +It will walk you through the basics of setting up and managing groups within the AWS Identity Store using LocalStack. -Start your LocalStack container using your preferred method. This guide will demonstrate how to create a group within Identity Store, list all groups, and describe a specific group. +Start your LocalStack container using your preferred method. +This guide will demonstrate how to create a group within Identity Store, list all groups, and describe a specific group. ### Create a Group in Identity Store -You can create a new group in the Identity Store using the [`CreateGroup`](https://docs.aws.amazon.com/singlesignon/latest/IdentityStoreAPIReference/API_CreateGroup.html) API. Execute the following command to create a group with an identity store ID of `testls`: +You can create a new group in the Identity Store using the [`CreateGroup`](https://docs.aws.amazon.com/singlesignon/latest/IdentityStoreAPIReference/API_CreateGroup.html) API. +Execute the following command to create a group with an identity store ID of `testls`: {{< command >}} $ awslocal identitystore create-group --identity-store-id testls @@ -35,7 +40,8 @@ Copy the `GroupId` value from the output, as it will be needed in subsequent ste ### List all Groups in Identity Store -After creating groups, you might want to list all groups within the Identity Store to manage or review them. Run the following command to list all groups using the [`ListGroups`](https://docs.aws.amazon.com/singlesignon/latest/IdentityStoreAPIReference/API_ListGroups.html) API: +After creating groups, you might want to list all groups within the Identity Store to manage or review them. +Run the following command to list all groups using the [`ListGroups`](https://docs.aws.amazon.com/singlesignon/latest/IdentityStoreAPIReference/API_ListGroups.html) API: {{< command >}} $ awslocal identitystore list-groups --identity-store-id testls @@ -56,7 +62,8 @@ This command returns a list of all groups, including the group you created in th ### Describe a Group in Identity Store -To view details about a specific group, use the [`DescribeGroup`](https://docs.aws.amazon.com/singlesignon/latest/IdentityStoreAPIReference/API_DescribeGroup.html) API. Run the following command to describe the group you created in the previous step: +To view details about a specific group, use the [`DescribeGroup`](https://docs.aws.amazon.com/singlesignon/latest/IdentityStoreAPIReference/API_DescribeGroup.html) API. 
+Run the following command to describe the group you created in the previous step: {{< command >}} $ awslocal describe-group --identity-store-id testls --group-id 38cec731-de22-45bf-9af7-b74457bba884 diff --git a/content/en/user-guide/aws/iot/index.md b/content/en/user-guide/aws/iot/index.md index 6eb0d3f9db..984c7c92d4 100644 --- a/content/en/user-guide/aws/iot/index.md +++ b/content/en/user-guide/aws/iot/index.md @@ -10,7 +10,7 @@ aliases: ## Introduction -AWS IoT provides cloud services to manage IoT fleet and integrate them with other AWS services +AWS IoT provides cloud services to manage IoT fleet and integrate them with other AWS services LocalStack Pro supports IoT Core, IoT Data, IoT Analytics and related APIs as well as an in-built MQTT broker. Common operations for creating and updating things, groups, policies, certificates and other entities are implemented with full CloudFormation support. @@ -73,7 +73,7 @@ For more information, see [this](https://docs.aws.amazon.com/iot/latest/develope {{< /callout >}} When connecting to the endpoints, you will need to provide this root CA certificate for authentication. -This is illustrated below with Python [AWS IoT SDK](https://docs.aws.amazon.com/iot/latest/developerguide/iot-sdks.html), +This is illustrated below with Python [AWS IoT SDK](https://docs.aws.amazon.com/iot/latest/developerguide/iot-sdks.html), ```py import awscrt @@ -151,7 +151,6 @@ mqtt.connect().result() mqtt.subscribe(...) ``` - ## Lifecycle Events LocalStack publishes the [lifecycle events](https://docs.aws.amazon.com/iot/latest/developerguide/life-cycle-events.html) to the standard endpoints. @@ -186,7 +185,6 @@ For example, you can use the [`CreateTopicRule`](https://docs.aws.amazon.com/iot Supported triggers include Kinesis, Lambda, SQS, Firehose and DynamoDB v2. - ## Device Shadows LocalStack supports both unnamed (classic) and named device shadows. @@ -195,7 +193,6 @@ You can use AWS CLI and [MQTT topics](https://docs.aws.amazon.com/iot/latest/dev The endpoint as returned by `DescribeEndpoint` currently does not support the [device shadow REST API](https://docs.aws.amazon.com/iot/latest/developerguide/device-shadow-rest-api.html#API_GetThingShadow) - ## Current Limitations LocalStack MQTT broker does not support multi-account/multi-region namespacing. diff --git a/content/en/user-guide/aws/iotanalytics/index.md b/content/en/user-guide/aws/iotanalytics/index.md index e7968428d6..4fe327a8ca 100644 --- a/content/en/user-guide/aws/iotanalytics/index.md +++ b/content/en/user-guide/aws/iotanalytics/index.md @@ -7,19 +7,23 @@ description: Get started with IoT Analytics on LocalStack ## Introduction -IoT Analytics is a managed service that enables you to collect, store, process, and analyze data generated by your IoT devices. It provides a set of tools to build IoT applications without having to manage the underlying infrastructure. +IoT Analytics is a managed service that enables you to collect, store, process, and analyze data generated by your IoT devices. +It provides a set of tools to build IoT applications without having to manage the underlying infrastructure. -LocalStack allows you to use the IoT Analytics APIs to create and manage channels, data stores, and pipelines in your local environment. The supported APIs are available on our [API Coverage Page](https://docs.localstack.cloud/references/coverage/coverage_iotanalytics/), which provides information on the extent of IoT Analytics integration with LocalStack. 
+LocalStack allows you to use the IoT Analytics APIs to create and manage channels, data stores, and pipelines in your local environment. +The supported APIs are available on our [API Coverage Page](https://docs.localstack.cloud/references/coverage/coverage_iotanalytics/), which provides information on the extent of IoT Analytics integration with LocalStack. ## Getting started This guide is designed for users new to Iot Analytics and assumes basic knowledge of the AWS CLI and our [`awslocal`](https://github.com/localstack/awscli-local) wrapper script. -Start your LocalStack container using your preferred method. We will demonstrate how to create a channel, data store, and pipeline within IoT Analytics using LocalStack. +Start your LocalStack container using your preferred method. +We will demonstrate how to create a channel, data store, and pipeline within IoT Analytics using LocalStack. ### Create a channel -You can create a channel using the [`CreateChannel`](https://docs.aws.amazon.com/iotanalytics/latest/APIReference/API_CreateChannel.html) API. Run the following command to create a channel named `mychannel`: +You can create a channel using the [`CreateChannel`](https://docs.aws.amazon.com/iotanalytics/latest/APIReference/API_CreateChannel.html) API. +Run the following command to create a channel named `mychannel`: {{< command >}} $ awslocal iotanalytics create-channel --channel-name mychannel @@ -44,7 +48,8 @@ The following output is displayed: ### Create a data store -You can create a data store using the [`CreateDatastore`](https://docs.aws.amazon.com/iotanalytics/latest/APIReference/API_CreateDatastore.html) API. Run the following command to create a data store named `mydatastore`: +You can create a data store using the [`CreateDatastore`](https://docs.aws.amazon.com/iotanalytics/latest/APIReference/API_CreateDatastore.html) API. +Run the following command to create a data store named `mydatastore`: {{< command >}} $ awslocal iotanalytics create-datastore --datastore-name mydatastore @@ -69,7 +74,8 @@ The following output is displayed: ### Create a pipeline -You can create a pipeline using the [`CreatePipeline`](https://docs.aws.amazon.com/iotanalytics/latest/APIReference/API_CreatePipeline.html) API. Run the following command to create a pipeline named `mypipeline`: +You can create a pipeline using the [`CreatePipeline`](https://docs.aws.amazon.com/iotanalytics/latest/APIReference/API_CreatePipeline.html) API. +Run the following command to create a pipeline named `mypipeline`: {{< command >}} $ awslocal iotanalytics create-pipeline --cli-input-json file://mypipeline.json diff --git a/content/en/user-guide/aws/iotwireless/index.md b/content/en/user-guide/aws/iotwireless/index.md index 0e46f18acd..7764d654db 100644 --- a/content/en/user-guide/aws/iotwireless/index.md +++ b/content/en/user-guide/aws/iotwireless/index.md @@ -7,19 +7,23 @@ tags: ["Pro image"] ## Introduction -AWS IoT Wireless is a managed service that enables customers to connect and manage wireless devices. The service provides a set of APIs to manage wireless devices, gateways, and destinations. +AWS IoT Wireless is a managed service that enables customers to connect and manage wireless devices. +The service provides a set of APIs to manage wireless devices, gateways, and destinations. -LocalStack allows you to use the IoT Wireless APIs in your local environment from creating wireless devices and gateways. 
The supported APIs are available on our [API coverage page](https://docs.localstack.cloud/references/coverage/coverage_iotwireless/), which provides information on the extent of IoT Wireless's integration with LocalStack. +LocalStack allows you to use the IoT Wireless APIs in your local environment from creating wireless devices and gateways. +The supported APIs are available on our [API coverage page](https://docs.localstack.cloud/references/coverage/coverage_iotwireless/), which provides information on the extent of IoT Wireless's integration with LocalStack. ## Getting started This guide is designed for users new to IoT Wireless and assumes basic knowledge of the AWS CLI and our [`awslocal`](https://github.com/localstack/awscli-local) wrapper script. -Start your LocalStack container using your preferred method. We will demonstrate how to use IoT Wireless to create wireless devices and gateways with the AWS CLI. +Start your LocalStack container using your preferred method. +We will demonstrate how to use IoT Wireless to create wireless devices and gateways with the AWS CLI. ### Create a Wireless Device -You can create a wireless device using the [`CreateWirelessDevice`](https://docs.aws.amazon.com/iot-wireless/2020-11-22/API_CreateWirelessDevice.html) API. Run the following command to create a wireless device: +You can create a wireless device using the [`CreateWirelessDevice`](https://docs.aws.amazon.com/iot-wireless/2020-11-22/API_CreateWirelessDevice.html) API. +Run the following command to create a wireless device: {{< command >}} $ awslocal iotwireless create-device-profile @@ -33,7 +37,8 @@ The following output would be retrieved: } ``` -You can list the device profiles using the [`ListDeviceProfiles`](https://docs.aws.amazon.com/iot-wireless/2020-11-22/API_ListDeviceProfiles.html) API. Run the following command to list the device profiles: +You can list the device profiles using the [`ListDeviceProfiles`](https://docs.aws.amazon.com/iot-wireless/2020-11-22/API_ListDeviceProfiles.html) API. +Run the following command to list the device profiles: {{< command >}} $ awslocal iotwireless list-device-profiles @@ -53,7 +58,8 @@ The following output would be retrieved: ### Create a Wireless device -You can create a wireless device using the [`CreateWirelessDevice`](https://docs.aws.amazon.com/iot-wireless/2020-11-22/API_CreateWirelessDevice.html) API. Run the following command to create a wireless device: +You can create a wireless device using the [`CreateWirelessDevice`](https://docs.aws.amazon.com/iot-wireless/2020-11-22/API_CreateWirelessDevice.html) API. +Run the following command to create a wireless device: {{< command >}} $ awslocal iotwireless create-wireless-device \ @@ -81,7 +87,8 @@ The `input.json` file contains the following content: } ``` -You can list the wireless devices using the [`ListWirelessDevices`](https://docs.aws.amazon.com/iot-wireless/2020-11-22/API_ListWirelessDevices.html) API. Run the following command to list the wireless devices: +You can list the wireless devices using the [`ListWirelessDevices`](https://docs.aws.amazon.com/iot-wireless/2020-11-22/API_ListWirelessDevices.html) API. +Run the following command to list the wireless devices: {{< command >}} $ awslocal iotwireless list-wireless-devices @@ -107,7 +114,8 @@ The following output would be retrieved: ### Create a Wireless Gateway -You can create a wireless gateway using the [`CreateWirelessGateway`](https://docs.aws.amazon.com/iot-wireless/2020-11-22/API_CreateWirelessGateway.html) API. 
Run the following command to create a wireless gateway: +You can create a wireless gateway using the [`CreateWirelessGateway`](https://docs.aws.amazon.com/iot-wireless/2020-11-22/API_CreateWirelessGateway.html) API. +Run the following command to create a wireless gateway: {{< command >}} $ awslocal iotwireless create-wireless-gateway \ @@ -124,7 +132,8 @@ The following output would be retrieved: } ``` -You can list the wireless gateways using the [`ListWirelessGateways`](https://docs.aws.amazon.com/iot-wireless/2020-11-22/API_ListWirelessGateways.html) API. Run the following command to list the wireless gateways: +You can list the wireless gateways using the [`ListWirelessGateways`](https://docs.aws.amazon.com/iot-wireless/2020-11-22/API_ListWirelessGateways.html) API. +Run the following command to list the wireless gateways: {{< command >}} $ awslocal iotwireless list-wireless-gateways diff --git a/content/en/user-guide/aws/kinesis/index.md b/content/en/user-guide/aws/kinesis/index.md index be639378f6..0105cc3657 100644 --- a/content/en/user-guide/aws/kinesis/index.md +++ b/content/en/user-guide/aws/kinesis/index.md @@ -8,19 +8,25 @@ persistence: supported ## Introduction -Kinesis is a platform provided by Amazon Web Services (AWS) that enables your application to ingest, buffer, and process data in real-time. Kinesis is suitable for applications that require processing and deriving insights from data streams such as logs, metrics, user interactions, and IoT sensor readings. Kinesis offers three main services: Kinesis Data Streams, Kinesis Data Firehose, and Kinesis Data Analytics. In this page, we take a look at Kinesis Data Streams which allows you to capture and store real-time data streams. +Kinesis is a platform provided by Amazon Web Services (AWS) that enables your application to ingest, buffer, and process data in real-time. +Kinesis is suitable for applications that require processing and deriving insights from data streams such as logs, metrics, user interactions, and IoT sensor readings. +Kinesis offers three main services: Kinesis Data Streams, Kinesis Data Firehose, and Kinesis Data Analytics. +In this page, we take a look at Kinesis Data Streams which allows you to capture and store real-time data streams. -LocalStack allows you to use the Kinesis APIs in your local environment from setting up data streams and configuring data processing to building real-time analytics applications. The supported APIs are available on our [API coverage page](https://docs.localstack.cloud/references/coverage/coverage_kinesis/), which provides information on the extent of Kinesis's integration with LocalStack. +LocalStack allows you to use the Kinesis APIs in your local environment from setting up data streams and configuring data processing to building real-time analytics applications. +The supported APIs are available on our [API coverage page](https://docs.localstack.cloud/references/coverage/coverage_kinesis/), which provides information on the extent of Kinesis's integration with LocalStack. ## Getting started This guide is designed for users new to Kinesis and assumes basic knowledge of the AWS CLI and our [`awslocal`](https://github.com/localstack/awscli-local) wrapper script. -Start your LocalStack container using your preferred method. We will demonstrate how to create a Lambda function to consume events from a Kinesis stream with the AWS CLI. +Start your LocalStack container using your preferred method. 
+We will demonstrate how to create a Lambda function to consume events from a Kinesis stream with the AWS CLI. ### Create a Lambda function -You need to create a Lambda function that receives a Kinesis event input and processes the messages that it contains. Create a file named `index.mjs` with the following content: +You need to create a Lambda function that receives a Kinesis event input and processes the messages that it contains. +Create a file named `index.mjs` with the following content: ```javascript console.log('Loading function'); @@ -33,7 +39,8 @@ export const handler = (event, context) => { }; ``` -You can create a Lambda function using the [`CreateFunction`](https://docs.aws.amazon.com/lambda/latest/dg/API_CreateFunction.html) API. Run the following command to create a Lambda function named `ProcessKinesisRecords`: +You can create a Lambda function using the [`CreateFunction`](https://docs.aws.amazon.com/lambda/latest/dg/API_CreateFunction.html) API. +Run the following command to create a Lambda function named `ProcessKinesisRecords`: {{< command >}} $ zip function.zip index.mjs @@ -85,7 +92,9 @@ Create a file named `input.txt` with the following JSON content: } ``` -The JSON contains a sample Kinesis event. You can use the [`Invoke`](https://docs.aws.amazon.com/lambda/latest/dg/API_Invoke.html) API to invoke the Lambda function with the Kinesis event as input. Execute the following command: +The JSON contains a sample Kinesis event. +You can use the [`Invoke`](https://docs.aws.amazon.com/lambda/latest/dg/API_Invoke.html) API to invoke the Lambda function with the Kinesis event as input. +Execute the following command: {{< command >}} $ awslocal lambda invoke \ @@ -95,7 +104,8 @@ $ awslocal lambda invoke \ ### Create a Kinesis Stream -You can create a Kinesis Stream using the [`CreateStream`](https://docs.aws.amazon.com/kinesis/latest/APIReference/API_CreateStream.html) API. Run the following command to create a Kinesis Stream named `lambda-stream`: +You can create a Kinesis Stream using the [`CreateStream`](https://docs.aws.amazon.com/kinesis/latest/APIReference/API_CreateStream.html) API. +Run the following command to create a Kinesis Stream named `lambda-stream`: {{< command >}} $ awslocal kinesis create-stream \ @@ -103,7 +113,8 @@ $ awslocal kinesis create-stream \ --shard-count 1 {{< / command >}} -You can retrieve the Stream ARN using the [`DescribeStream`](https://docs.aws.amazon.com/kinesis/latest/APIReference/API_DescribeStream.html) API. Execute the following command: +You can retrieve the Stream ARN using the [`DescribeStream`](https://docs.aws.amazon.com/kinesis/latest/APIReference/API_DescribeStream.html) API. +Execute the following command: {{< command >}} $ awslocal kinesis describe-stream \ @@ -135,7 +146,8 @@ You can save the `StreamARN` value for later use. ### Add an Event Source in Lambda -You can add an Event Source to your Lambda function using the [`CreateEventSourceMapping`](https://docs.aws.amazon.com/lambda/latest/dg/API_CreateEventSourceMapping.html) API. Run the following command to add the Kinesis Stream as an Event Source to your Lambda function: +You can add an Event Source to your Lambda function using the [`CreateEventSourceMapping`](https://docs.aws.amazon.com/lambda/latest/dg/API_CreateEventSourceMapping.html) API. 
+Run the following command to add the Kinesis Stream as an Event Source to your Lambda function: {{< command >}} $ awslocal lambda create-event-source-mapping \ @@ -147,7 +159,8 @@ $ awslocal lambda create-event-source-mapping \ ### Test the Event Source mapping -You can test the event source mapping by adding a record to the Kinesis Stream using the [`PutRecord`](https://docs.aws.amazon.com/kinesis/latest/APIReference/API_PutRecord.html) API. Run the following command to add a record to the Kinesis Stream: +You can test the event source mapping by adding a record to the Kinesis Stream using the [`PutRecord`](https://docs.aws.amazon.com/kinesis/latest/APIReference/API_PutRecord.html) API. +Run the following command to add a record to the Kinesis Stream: {{< command >}} $ awslocal kinesis put-record \ @@ -169,7 +182,8 @@ You can fetch the CloudWatch logs for your Lambda function reading records from ## Resource Browser -The LocalStack Web Application provides a Resource Browser for managing Kinesis Streams & Kafka Clusters. You can access the Resource Browser by opening the LocalStack Web Application in your browser, navigating to the **Resources** section, and then clicking on **Kinesis** under the **Analytics** section. +The LocalStack Web Application provides a Resource Browser for managing Kinesis Streams & Kafka Clusters. +You can access the Resource Browser by opening the LocalStack Web Application in your browser, navigating to the **Resources** section, and then clicking on **Kinesis** under the **Analytics** section. Kinesis Resource Browser
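If you prefer the CLI over the Web Application for verifying the setup above, you can check that the event source mapping is enabled and inspect the function's logs. This is a quick sketch that assumes the function name used earlier and the default `/aws/lambda/<function-name>` log group naming:

{{< command >}}
$ awslocal lambda list-event-source-mappings --function-name ProcessKinesisRecords
$ awslocal logs filter-log-events --log-group-name /aws/lambda/ProcessKinesisRecords
{{< /command >}}

The filtered events should contain the output logged by the function for each record put on the stream.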
@@ -192,4 +206,5 @@ The following code snippets and sample applications provide practical examples o ## Current Limitations -In multi-account setups, each AWS account launches a separate instance of Kinesis Mock, which is very resource intensive when a large number of AWS accounts are used. An [open Kinesis Mock issue](https://github.com/etspaceman/kinesis-mock/issues/377) is being used to keep track of this feature. +In multi-account setups, each AWS account launches a separate instance of Kinesis Mock, which is very resource intensive when a large number of AWS accounts are used. +An [open Kinesis Mock issue](https://github.com/etspaceman/kinesis-mock/issues/377) is being used to keep track of this feature. diff --git a/content/en/user-guide/aws/kinesisanalytics/index.md b/content/en/user-guide/aws/kinesisanalytics/index.md index 1c4bd8554f..03b6377848 100644 --- a/content/en/user-guide/aws/kinesisanalytics/index.md +++ b/content/en/user-guide/aws/kinesisanalytics/index.md @@ -107,5 +107,6 @@ The following output would be retrieved: ## Current Limitations -- LocalStack supports basic emulation for the version 1 of the Kinesis Data Analytics API. However, the queries are not fully supported and lack parity with AWS. -- LocalStack supports CRUD mocking for the version 2 of the Kinesis Data Analytics API. +* LocalStack supports basic emulation for the version 1 of the Kinesis Data Analytics API. + However, the queries are not fully supported and lack parity with AWS. +* LocalStack supports CRUD mocking for the version 2 of the Kinesis Data Analytics API. diff --git a/content/en/user-guide/aws/kms/index.md b/content/en/user-guide/aws/kms/index.md index 5edbe013dd..56c0b97c7b 100644 --- a/content/en/user-guide/aws/kms/index.md +++ b/content/en/user-guide/aws/kms/index.md @@ -8,25 +8,32 @@ persistence: supported ## Introduction -Key Management Service (KMS) is a managed service that allows users to handle encryption keys within the Amazon Web Services ecosystem. KMS allows users to create, control, and utilize keys to encrypt and decrypt data, as well as to sign and verify messages. KMS allows you to create, delete, list, and update aliases, friendly names for your KMS keys, and tag them for identification and automation. You can check [the official AWS documentation](https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html) to understand the basic terms and concepts used in the KMS. +Key Management Service (KMS) is a managed service that allows users to handle encryption keys within the Amazon Web Services ecosystem. +KMS allows users to create, control, and utilize keys to encrypt and decrypt data, as well as to sign and verify messages. +KMS allows you to create, delete, list, and update aliases, friendly names for your KMS keys, and tag them for identification and automation. +You can check [the official AWS documentation](https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html) to understand the basic terms and concepts used in the KMS. -LocalStack allows you to use the KMS APIs in your local environment to create, edit, and view symmetric and asymmetric KMS keys, including HMAC keys. The supported APIs are available on our [API coverage page](https://docs.localstack.cloud/references/coverage/coverage_kms/), which provides information on the extent of KMS's integration with LocalStack. +LocalStack allows you to use the KMS APIs in your local environment to create, edit, and view symmetric and asymmetric KMS keys, including HMAC keys. 
+The supported APIs are available on our [API coverage page](https://docs.localstack.cloud/references/coverage/coverage_kms/), which provides information on the extent of KMS's integration with LocalStack. ## Getting started This guide is designed for users new to KMS and assumes basic knowledge of the AWS CLI and our [`awslocal`](https://github.com/localstack/awscli-local) wrapper script. -Start your LocalStack container using your preferred method. We will demonstrate how to create a simple symmetric encryption key and use it to encrypt/decrypt data. +Start your LocalStack container using your preferred method. +We will demonstrate how to create a simple symmetric encryption key and use it to encrypt/decrypt data. ### Create a key -To generate a new key within the KMS, you can use the [`CreateKey`](https://docs.aws.amazon.com/kms/latest/APIReference/API_CreateKey.html) API. Execute the following command to create a new key: +To generate a new key within the KMS, you can use the [`CreateKey`](https://docs.aws.amazon.com/kms/latest/APIReference/API_CreateKey.html) API. +Execute the following command to create a new key: {{< command >}} $ awslocal kms create-key {{}} -By default, this command generates a symmetric encryption key, eliminating the need for any additional arguments. You can take a look at the `KeyId` of the freshly generated key in the output, and save it for future use. +By default, this command generates a symmetric encryption key, eliminating the need for any additional arguments. +You can take a look at the `KeyId` of the freshly generated key in the output, and save it for future use. In case the key ID is misplaced, it is possible to retrieve a comprehensive list of IDs and [Amazon Resource Names](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) (ARNs) for all available keys through the following command: @@ -42,7 +49,10 @@ $ awslocal kms describe-key --key-id ### Encrypt the data -You can now leverage the generated key for encryption purposes. For instance, let's consider encrypting "_some important stuff_". To do so, you can use the [`Encrypt`](https://docs.aws.amazon.com/kms/latest/APIReference/API_Encrypt.html) API. Execute the following command to encrypt the data: +You can now leverage the generated key for encryption purposes. +For instance, let's consider encrypting "_some important stuff_". +To do so, you can use the [`Encrypt`](https://docs.aws.amazon.com/kms/latest/APIReference/API_Encrypt.html) API. +Execute the following command to encrypt the data: {{< command >}} $ awslocal kms encrypt \ @@ -53,11 +63,14 @@ $ awslocal kms encrypt \ | base64 --decode > my_encrypted_data {{}} -You will notice that a new file named `my_encrypted_data` has been created in your current directory. This file contains the encrypted data, which can be decrypted using the same key. +You will notice that a new file named `my_encrypted_data` has been created in your current directory. +This file contains the encrypted data, which can be decrypted using the same key. ### Decrypt the data -To decrypt the data, you can use the [`Decrypt`](https://docs.aws.amazon.com/kms/latest/APIReference/API_Decrypt.html) API. You don't need to specify the `KEY_ID` while decrypting the file, since AWS includes the Key ID into the encrypted data. However, with asymmetric keys the `KEY_ID` has to be specified. +To decrypt the data, you can use the [`Decrypt`](https://docs.aws.amazon.com/kms/latest/APIReference/API_Decrypt.html) API. 
+You don't need to specify the `KEY_ID` while decrypting the file, since AWS includes the Key ID into the encrypted data. +However, with asymmetric keys the `KEY_ID` has to be specified. Execute the following command to decrypt the data: @@ -69,7 +82,9 @@ $ awslocal kms decrypt \ | base64 --decode {{}} -Similar to the previous `Encrypt` operation, to retrieve the actual data, it's necessary to decode the Base64-encoded output. To achieve this, employ the `output` and `query` parameters along with the `base64` tool as before. Upon successful execution, the output will correspond to our original text: +Similar to the previous `Encrypt` operation, to retrieve the actual data, it's necessary to decode the Base64-encoded output. +To achieve this, employ the `output` and `query` parameters along with the `base64` tool as before. +Upon successful execution, the output will correspond to our original text: ```sh some important stuff @@ -77,7 +92,8 @@ some important stuff ## Resource Browser -The LocalStack Web Application provides a Resource Browser for managing KMS keys. You can access the Resource Browser by opening the LocalStack Web Application in your browser, navigating to the **Resources** section, and then clicking on **KMS** under the **Security Identity Compliance** section. +The LocalStack Web Application provides a Resource Browser for managing KMS keys. +You can access the Resource Browser by opening the LocalStack Web Application in your browser, navigating to the **Resources** section, and then clicking on **KMS** under the **Security Identity Compliance** section. KMS Resource Browser
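Back on the CLI, a common next step is envelope encryption with data keys. The sketch below assumes the symmetric key created earlier (replace `<key-id>` with its `KeyId`):

{{< command >}}
$ awslocal kms generate-data-key --key-id <key-id> --key-spec AES_256
$ awslocal kms list-aliases
{{< /command >}}

The response to `generate-data-key` contains a plaintext data key for encrypting data locally and a `CiphertextBlob` that you can store alongside the encrypted data and later pass to `Decrypt`.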
@@ -92,7 +108,8 @@ The Resource Browser allows you to perform the following actions: ## Custom IDs for KMS keys via tags -You can assign custom IDs to KMS keys using the `_custom_id_` tag during key creation. This can be useful to pre-seed a test environment and use a static `KeyId` for your keys. +You can assign custom IDs to KMS keys using the `_custom_id_` tag during key creation. +This can be useful to pre-seed a test environment and use a static `KeyId` for your keys. Below is a simple example to create a key with a custom `KeyId` (note that the `KeyId` should have the format of a UUID): @@ -113,7 +130,7 @@ The following output will be displayed: ## Custom Key Material for KMS Keys via Tags -You can seed a KMS key with custom key material using the `_custom_key_material_` tag during creation. +You can seed a KMS key with custom key material using the `_custom_key_material_` tag during creation. This can be useful to pre-seed a development environment so values encrypted with KMS can be decrypted later. Here is an example of using custom key material with the value being base64 encoded: @@ -138,13 +155,16 @@ $ awslocal kms create-key --tags '[{"TagKey":"_custom_key_material_","TagValue": ### Encryption data format -In LocalStack's KMS implementation, the encryption process is uniformly symmetric, even when an asymmetric key is requested. Furthermore, LocalStack utilizes an encrypted data format distinct from that employed by AWS. +In LocalStack's KMS implementation, the encryption process is uniformly symmetric, even when an asymmetric key is requested. +Furthermore, LocalStack utilizes an encrypted data format distinct from that employed by AWS. -This could lead to decryption failures if a key is manually generated outside the local KMS environment, imported to KMS using the [ImportKeyMaterial](https://docs.aws.amazon.com/kms/latest/APIReference/API_ImportKeyMaterial.html) API, utilized for encryption within local KMS, and later decryption is attempted externally using the self-generated key. However, conventional setups are likely to function seamlessly. +This could lead to decryption failures if a key is manually generated outside the local KMS environment, imported to KMS using the [ImportKeyMaterial](https://docs.aws.amazon.com/kms/latest/APIReference/API_ImportKeyMaterial.html) API, utilized for encryption within local KMS, and later decryption is attempted externally using the self-generated key. +However, conventional setups are likely to function seamlessly. ### Key states -In AWS KMS, cryptographic keys exhibit [multiple states](https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html). However, LocalStack's KMS implementation provides only a subset of these states +In AWS KMS, cryptographic keys exhibit [multiple states](https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html). +However, LocalStack's KMS implementation provides only a subset of these states - `Enabled` - `Disabled` @@ -152,14 +172,19 @@ In AWS KMS, cryptographic keys exhibit [multiple states](https://docs.aws.amazon - `PendingImport` - `PendingDeletion` -### Multi-region keys +### Multi-region keys -LocalStack's KMS implementation is equipped to facilitate [multi-region keys](https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html), but there's a distinct behavior compared to AWS KMS. Unlike AWS KMS, the replication of multi-region key replicas in LocalStack KMS isn't automatically synchronized with their corresponding primary key. 
Consequently, adjustments made to the primary key's settings won't propagate automatically to the replica. +LocalStack's KMS implementation is equipped to facilitate [multi-region keys](https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html), but there's a distinct behavior compared to AWS KMS. +Unlike AWS KMS, the replication of multi-region key replicas in LocalStack KMS isn't automatically synchronized with their corresponding primary key. +Consequently, adjustments made to the primary key's settings won't propagate automatically to the replica. -### Key aliases +### Key aliases -While AWS KMS conveniently establishes [aliases](https://docs.aws.amazon.com/kms/latest/developerguide/kms-alias.html), LocalStack follows suit by supporting these pre-configured aliases. However, it's important to note that in LocalStack, these aliases come into picture after the initial access attempt. Until that point, they are not visible. +While AWS KMS conveniently establishes [aliases](https://docs.aws.amazon.com/kms/latest/developerguide/kms-alias.html), LocalStack follows suit by supporting these pre-configured aliases. +However, it's important to note that in LocalStack, these aliases come into picture after the initial access attempt. +Until that point, they are not visible. -### Key specs +### Key specs -In AWS KMS, [SM2](https://docs.aws.amazon.com/kms/latest/developerguide/asymmetric-key-specs.html#key-spec-sm:~:text=the%20message%20digest.-,SM2%20key%20spec%20(China%20Regions%20only),-The%20SM2%20key) is a supported key spec for asymmetric keys. However, LocalStack's KMS implementation doesn't support this key spec. +In AWS KMS, [SM2](https://docs.aws.amazon.com/kms/latest/developerguide/asymmetric-key-specs.html#key-spec-sm:~:text=the%20message%20digest.-,SM2%20key%20spec%20(China%20Regions%20only),-The%20SM2%20key) is a supported key spec for asymmetric keys. +However, LocalStack's KMS implementation doesn't support this key spec. diff --git a/content/en/user-guide/aws/lakeformation/index.md b/content/en/user-guide/aws/lakeformation/index.md index 68da980518..93fcd85c67 100644 --- a/content/en/user-guide/aws/lakeformation/index.md +++ b/content/en/user-guide/aws/lakeformation/index.md @@ -6,15 +6,18 @@ description: Get started with Lake Formation on LocalStack ## Introduction -Lake Formation is a managed service that allows users to build, secure, and manage data lakes. Lake Formation allows users to define and enforce fine-grained access controls, manage metadata, and discover and share data across multiple data sources. +Lake Formation is a managed service that allows users to build, secure, and manage data lakes. +Lake Formation allows users to define and enforce fine-grained access controls, manage metadata, and discover and share data across multiple data sources. -LocalStack allows you to use the Lake Formation APIs in your local environment to register resources, grant permissions, and list resources and permissions. The supported APIs are available on our [API coverage page](https://docs.localstack.cloud/references/coverage/coverage_lakeformation/), which provides information on the extent of Lake Formation's integration with LocalStack. +LocalStack allows you to use the Lake Formation APIs in your local environment to register resources, grant permissions, and list resources and permissions. 
+The supported APIs are available on our [API coverage page](https://docs.localstack.cloud/references/coverage/coverage_lakeformation/), which provides information on the extent of Lake Formation's integration with LocalStack. ## Getting started This guide is designed for users new to Lake Formation and assumes basic knowledge of the AWS CLI and our [`awslocal`](https://github.com/localstack/awscli-local) wrapper script. -Start your LocalStack container using your preferred method. We will demonstrate how to register an S3 bucket as a resource in Lake Formation, grant permissions to a user, and list the resources and permissions. +Start your LocalStack container using your preferred method. +We will demonstrate how to register an S3 bucket as a resource in Lake Formation, grant permissions to a user, and list the resources and permissions. ### Register the resource @@ -24,7 +27,8 @@ Create a new S3 bucket named `test-bucket` using the `mb` command: $ awslocal s3 mb s3://test-bucket {{}} -You can now register the S3 bucket as a resource in Lake Formation using the [`RegisterResource`](https://docs.aws.amazon.com/lake-formation/latest/dg/API_RegisterResource.html) API. Create a file named `input.json` with the following content: +You can now register the S3 bucket as a resource in Lake Formation using the [`RegisterResource`](https://docs.aws.amazon.com/lake-formation/latest/dg/API_RegisterResource.html) API. +Create a file named `input.json` with the following content: ```json { @@ -42,7 +46,8 @@ awslocal lakeformation register-resource \ ### List resources -You can list the registered resources using the [`ListResources`](https://docs.aws.amazon.com/lake-formation/latest/dg/API_ListResources.html) API. Execute the following command to list the resources: +You can list the registered resources using the [`ListResources`](https://docs.aws.amazon.com/lake-formation/latest/dg/API_ListResources.html) API. +Execute the following command to list the resources: {{< command >}} awslocal lakeformation list-resources @@ -63,7 +68,8 @@ The following output is displayed: ### Grant permissions -You can grant permissions to a user or group using the [`GrantPermissions`](https://docs.aws.amazon.com/lake-formation/latest/dg/API_GrantPermissions.html) API. Create a file named `permissions.json` with the following content: +You can grant permissions to a user or group using the [`GrantPermissions`](https://docs.aws.amazon.com/lake-formation/latest/dg/API_GrantPermissions.html) API. +Create a file named `permissions.json` with the following content: ```json { @@ -94,7 +100,8 @@ $ awslocal lakeformation grant-permissions \ ### List permissions -You can list the permissions granted to a user or group using the [`ListPermissions`](https://docs.aws.amazon.com/lake-formation/latest/dg/API_ListPermissions.html) API. Execute the following command to list the permissions: +You can list the permissions granted to a user or group using the [`ListPermissions`](https://docs.aws.amazon.com/lake-formation/latest/dg/API_ListPermissions.html) API. 
+Execute the following command to list the permissions: {{< command >}} $ awslocal lakeformation list-permissions diff --git a/content/en/user-guide/aws/lambda/index.md b/content/en/user-guide/aws/lambda/index.md index ee87041988..30596afac7 100644 --- a/content/en/user-guide/aws/lambda/index.md +++ b/content/en/user-guide/aws/lambda/index.md @@ -13,15 +13,20 @@ persistence: supported with limitations ## Introduction -AWS Lambda is a Serverless Function as a Service (FaaS) platform that lets you run code in your preferred programming language on the AWS ecosystem. AWS Lambda automatically scales your code to meet demand and handles server provisioning, management, and maintenance. AWS Lambda allows you to break down your application into smaller, independent functions that integrate seamlessly with AWS services. +AWS Lambda is a Serverless Function as a Service (FaaS) platform that lets you run code in your preferred programming language on the AWS ecosystem. +AWS Lambda automatically scales your code to meet demand and handles server provisioning, management, and maintenance. +AWS Lambda allows you to break down your application into smaller, independent functions that integrate seamlessly with AWS services. -LocalStack allows you to use the Lambda APIs to create, deploy, and test your Lambda functions. The supported APIs are available on our [Lambda coverage page](https://docs.localstack.cloud/references/coverage/coverage_lambda/), which provides information on the extent of Lambda's integration with LocalStack. +LocalStack allows you to use the Lambda APIs to create, deploy, and test your Lambda functions. +The supported APIs are available on our [Lambda coverage page](https://docs.localstack.cloud/references/coverage/coverage_lambda/), which provides information on the extent of Lambda's integration with LocalStack. ## Getting started This guide is designed for users new to Lambda and assumes basic knowledge of the AWS CLI and our [`awslocal`](https://github.com/localstack/awscli-local) wrapper script. -Start your LocalStack container using your preferred method. We will demonstrate how to create a Lambda function with a Function URL. With the Function URL property, you can call a Lambda Function via an HTTP API call. +Start your LocalStack container using your preferred method. +We will demonstrate how to create a Lambda function with a Function URL. +With the Function URL property, you can call a Lambda Function via an HTTP API call. ### Create a Lambda function @@ -69,16 +74,21 @@ $ awslocal lambda create-function-url-config \ .... } {{< / command >}} -You must specify the `_custom_id_` tag **before** using the `create-function-url-config` command. After the URL configuration is set up, any modifications to the tag will not affect it. At present, custom IDs can be assigned only to the `$LATEST` version of the function. LocalStack does not yet support custom IDs for function version aliases. +You must specify the `_custom_id_` tag **before** using the `create-function-url-config` command. +After the URL configuration is set up, any modifications to the tag will not affect it. +At present, custom IDs can be assigned only to the `$LATEST` version of the function. +LocalStack does not yet support custom IDs for function version aliases. {{< /callout >}} {{< callout >}} -In the old Lambda provider, you could create a function with any arbitrary string as the role, such as `r1`. 
However, the new provider requires the role ARN to be in the format `arn:aws:iam::000000000000:role/lambda-role` and validates it using an appropriate regex. However, it currently does not check whether the role exists. +In the old Lambda provider, you could create a function with any arbitrary string as the role, such as `r1`. +However, the new provider requires the role ARN to be in the format `arn:aws:iam::000000000000:role/lambda-role` and validates it using an appropriate regex. However, it currently does not check whether the role exists. {{< /callout >}} ### Invoke the Function -To invoke the Lambda function, you can use the [`Invoke` API](https://docs.aws.amazon.com/lambda/latest/dg/API_Invoke.html). Run the following command to invoke the function: +To invoke the Lambda function, you can use the [`Invoke` API](https://docs.aws.amazon.com/lambda/latest/dg/API_Invoke.html). +Run the following command to invoke the function: {{< tabpane text=true persist=false >}} {{% tab header="AWS CLI v1" lang="shell" %}} @@ -102,7 +112,8 @@ To invoke the Lambda function, you can use the [`Invoke` API](https://docs.aws.a [Response streaming](https://docs.aws.amazon.com/lambda/latest/dg/configuration-response-streaming.html) is currently not supported, so it will still return a synchronous/full response instead. {{< /callout >}} -With the Function URL property, there is now a new way to call a Lambda Function via HTTP API call using the [`CreateFunctionURLConfig` API](https://docs.aws.amazon.com/lambda/latest/dg/API_CreateFunctionUrlConfig.html). To create a URL for invoking the function, run the following command: +With the Function URL property, there is now a new way to call a Lambda Function via HTTP API call using the [`CreateFunctionURLConfig` API](https://docs.aws.amazon.com/lambda/latest/dg/API_CreateFunctionUrlConfig.html). +To create a URL for invoking the function, run the following command: {{< command >}} $ awslocal lambda create-function-url-config \ @@ -110,7 +121,8 @@ $ awslocal lambda create-function-url-config \ --auth-type NONE {{< / command >}} -This will generate a HTTP URL that can be used to invoke the Lambda function. The URL will be in the format `http://.lambda-url.us-east-1.localhost.localstack.cloud:4566`. +This will generate a HTTP URL that can be used to invoke the Lambda function. +The URL will be in the format `http://.lambda-url.us-east-1.localhost.localstack.cloud:4566`. ### Trigger the Lambda function URL @@ -136,20 +148,24 @@ LocalStack now supports a new event rule engine for [Lambda event filtering](htt You can [configure]({{< ref "configuration" >}}) `EVENT_RULE_ENGINE=java` (preview) to use the AWS [event-ruler](https://github.com/aws/event-ruler), which offers better parity. {{< /callout >}} -[Lambda event source mappings](https://docs.aws.amazon.com/lambda/latest/dg/invocation-eventsourcemapping.html) allows you to connect Lambda functions to other AWS services. The following event sources are supported in LocalStack: +[Lambda event source mappings](https://docs.aws.amazon.com/lambda/latest/dg/invocation-eventsourcemapping.html) allows you to connect Lambda functions to other AWS services. 
+The following event sources are supported in LocalStack: -- [DynamoDB](https://docs.aws.amazon.com/lambda/latest/dg/with-ddb.html) -- [Kinesis](https://docs.aws.amazon.com/lambda/latest/dg/with-kinesis.html) -- [Managed Streaming for Apache Kafka (MSK)](https://docs.aws.amazon.com/lambda/latest/dg/with-msk.html) -- [Simple Queue Service (SQS)](https://docs.aws.amazon.com/lambda/latest/dg/with-sqs.html) +- [DynamoDB](https://docs.aws.amazon.com/lambda/latest/dg/with-ddb.html) +- [Kinesis](https://docs.aws.amazon.com/lambda/latest/dg/with-kinesis.html) +- [Managed Streaming for Apache Kafka (MSK)](https://docs.aws.amazon.com/lambda/latest/dg/with-msk.html) +- [Simple Queue Service (SQS)](https://docs.aws.amazon.com/lambda/latest/dg/with-sqs.html) ## Lambda Layers (Pro) -[Lambda layers](https://docs.aws.amazon.com/lambda/latest/dg/configuration-layers.html) let you include additional code and dependencies in your Lambda functions. The LocalStack Pro image allows you to deploy Lambda Layers locally to streamline your development and testing process. The Community image also allows creating, updating, and deleting Lambda Layers, but they are not applied when invoking a Lambda function. +[Lambda layers](https://docs.aws.amazon.com/lambda/latest/dg/configuration-layers.html) let you include additional code and dependencies in your Lambda functions. +The LocalStack Pro image allows you to deploy Lambda Layers locally to streamline your development and testing process. +The Community image also allows creating, updating, and deleting Lambda Layers, but they are not applied when invoking a Lambda function. ### Creating and using a Lambda Layer Locally -To create a Lambda Layer locally, you can use the [`PublishLayerVersion` API](https://docs.aws.amazon.com/lambda/latest/dg/API_PublishLayerVersion.html) in LocalStack. Here's a simple example using Python: +To create a Lambda Layer locally, you can use the [`PublishLayerVersion` API](https://docs.aws.amazon.com/lambda/latest/dg/API_PublishLayerVersion.html) in LocalStack. +Here's a simple example using Python: {{< command >}} $ mkdir -p /tmp/python/ @@ -176,9 +192,11 @@ $ awslocal lambda create-function \ --layers $LAYER_ARN {{< / command >}} -Here, we've defined a Lambda function called `handler()` that imports the `util()` function from our `layer1` Lambda Layer. We then used the [`CreateFunction` API](https://docs.aws.amazon.com/lambda/latest/dg/API_CreateFunction.html) to create this Lambda function in LocalStack, specifying the `layer1` Lambda Layer as a dependency. +Here, we've defined a Lambda function called `handler()` that imports the `util()` function from our `layer1` Lambda Layer. +We then used the [`CreateFunction` API](https://docs.aws.amazon.com/lambda/latest/dg/API_CreateFunction.html) to create this Lambda function in LocalStack, specifying the `layer1` Lambda Layer as a dependency. -To test our Lambda function and see the output from the Lambda Layer, we can invoke the function and check the logs (with `DEBUG=1` enabled). Here's an example: +To test our Lambda function and see the output from the Lambda Layer, we can invoke the function and check the logs (with `DEBUG=1` enabled). 
+Here's an example: ```shell > START RequestId: a8bc4ce6-e2e8-189e-cf58-c2eb72827c23 Version: $LATEST @@ -189,7 +207,8 @@ To test our Lambda function and see the output from the Lambda Layer, we can inv ### Referencing Lambda layers from AWS -If your Lambda function references a layer in real AWS, you can integrate it into your local dev environment by making it accessible to the `886468871268` AWS account ID. This account is managed by LocalStack on AWS. +If your Lambda function references a layer in real AWS, you can integrate it into your local dev environment by making it accessible to the `886468871268` AWS account ID. +This account is managed by LocalStack on AWS. To grant access to your layer, run the following command: @@ -220,8 +239,10 @@ but these configurations are primarily intended for LocalStack developers and co The LocalStack [configuration]({{< ref "configuration" >}}) `LAMBDA_DOCKER_FLAGS` can be used to configure all Lambda containers, for example `LAMBDA_DOCKER_FLAGS=-e LOCALSTACK_INIT_LOG_LEVEL=debug`. Some noteworthy configurations include: -* `LOCALSTACK_INIT_LOG_LEVEL` defines the log level of the Golang binary. Values: `trace`, `debug`, `info`, `warn` (default), `error`, `fatal`, `panic` -* `LOCALSTACK_USER` defines the system user executing the Lambda runtime. Values: `sbx_user1051` (default), `root` (skip dropping root privileges) +- `LOCALSTACK_INIT_LOG_LEVEL` defines the log level of the Golang binary. + Values: `trace`, `debug`, `info`, `warn` (default), `error`, `fatal`, `panic` +- `LOCALSTACK_USER` defines the system user executing the Lambda runtime. + Values: `sbx_user1051` (default), `root` (skip dropping root privileges) The full list of configurations is defined in the Golang function [InitLsOpts](https://github.com/localstack/lambda-runtime-init/blob/localstack/cmd/localstack/main.go#L43). @@ -230,23 +251,30 @@ The full list of configurations is defined in the Golang function LocalStack provides various tools to help you develop, debug, and test your AWS Lambda functions more efficiently. -* **Hot reloading**: With Lambda hot reloading, you can continuously apply code changes to your Lambda functions without needing to redeploy them manually. To learn more about how to use hot reloading with LocalStack, check out our [hot reloading documentation]({{< ref "hot-reloading" >}}). -* **Remote debugging**: LocalStack's remote debugging functionality allows you to attach a debugger to your Lambda function using your preferred IDE. To get started with remote debugging in LocalStack, see our [debugging documentation]({{< ref "debugging" >}}). -* **Lambda VS Code Extension**: LocalStack's Lambda VS Code Extension supports deploying and invoking Python Lambda functions through AWS SAM or AWS CloudFormation. To get started with the Lambda VS Code Extension, see our [Lambda VS Code Extension documentation]({{< ref "user-guide/lambda-tools/vscode-extension" >}}). -* **API for querying Lambda runtimes**: LocalStack offers a metadata API to query the list of Lambda runtimes via `GET http://localhost.localstack.cloud:4566/_aws/lambda/runtimes`. It returns the [Supported Runtimes](https://docs.aws.amazon.com/lambda/latest/dg/lambda-runtimes.html) matching AWS parity (i.e., excluding deprecated runtimes) and offers additional filters for `deprecated` runtimes and `all` runtimes (`GET /_aws/lambda/runtimes?filter=all`). 
+- **Hot reloading**: With Lambda hot reloading, you can continuously apply code changes to your Lambda functions without needing to redeploy them manually. + To learn more about how to use hot reloading with LocalStack, check out our [hot reloading documentation]({{< ref "hot-reloading" >}}). +- **Remote debugging**: LocalStack's remote debugging functionality allows you to attach a debugger to your Lambda function using your preferred IDE. + To get started with remote debugging in LocalStack, see our [debugging documentation]({{< ref "debugging" >}}). +- **Lambda VS Code Extension**: LocalStack's Lambda VS Code Extension supports deploying and invoking Python Lambda functions through AWS SAM or AWS CloudFormation. + To get started with the Lambda VS Code Extension, see our [Lambda VS Code Extension documentation]({{< ref "user-guide/lambda-tools/vscode-extension" >}}). +- **API for querying Lambda runtimes**: LocalStack offers a metadata API to query the list of Lambda runtimes via `GET http://localhost.localstack.cloud:4566/_aws/lambda/runtimes`. + It returns the [Supported Runtimes](https://docs.aws.amazon.com/lambda/latest/dg/lambda-runtimes.html) matching AWS parity (i.e., excluding deprecated runtimes) and offers additional filters for `deprecated` runtimes and `all` runtimes (`GET /_aws/lambda/runtimes?filter=all`). ## Resource Browser -The LocalStack Web Application provides a [Resource Browser](https://docs.localstack.cloud/user-guide/web-application/resource-browser/) for managing Lambda resources. You can access the Resource Browser by opening the LocalStack Web Application in your browser, navigating to the **Resources** section, and then clicking on **Lambda** under the **Compute** section. +The LocalStack Web Application provides a [Resource Browser](https://docs.localstack.cloud/user-guide/web-application/resource-browser/) for managing Lambda resources. +You can access the Resource Browser by opening the LocalStack Web Application in your browser, navigating to the **Resources** section, and then clicking on **Lambda** under the **Compute** section. -The Resource Browser displays [Functions](https://app.localstack.cloud/resources/lambda/functions) and [Layers](https://app.localstack.cloud/resources/lambda/layers) resources. You can click on individual resources to view their details. +The Resource Browser displays [Functions](https://app.localstack.cloud/resources/lambda/functions) and [Layers](https://app.localstack.cloud/resources/lambda/layers) resources. +You can click on individual resources to view their details. Lambda Resource Browser The Resource Browser allows you to perform the following actions: - **Create Functions & Layers**: Create a new [Lambda function](https://app.localstack.cloud/resources/lambda/functions/new) or a new [Lambda Layer](https://app.localstack.cloud/resources/lambda/layers/new) by clicking on **Create API** button on top-right and creating a new configuration by clicking on **Submit** button. -- **View Function & Layer Details**: Click on any function or layer to view detailed information such as the resource's name, ARN, runtime, handler, and more. You can also navigate across different versions of the resource. +- **View Function & Layer Details**: Click on any function or layer to view detailed information such as the resource's name, ARN, runtime, handler, and more. + You can also navigate across different versions of the resource. 
- **Delete Functions & Layers**: To delete a function or layer, select the resource from the Resource Browser, click on the **Remove Selected** button at the top-right of the screen, and confirm the deletion by clicking on the **Continue** button. ## Migrating to Lambda v2 @@ -258,34 +286,56 @@ The legacy Lambda implementation has been removed since LocalStack 3.0 (Doc As part of the [LocalStack 2.0 release](https://discuss.localstack.cloud/t/new-lambda-implementation-in-localstack-2-0/258), the Lambda provider has been migrated to `v2` (formerly known as `asf`). With the new implementation, the following changes have been introduced: -- To run Lambda functions in LocalStack, mount the Docker socket into the LocalStack container. Add the following Docker volume mount to your LocalStack startup configuration: `/var/run/docker.sock:/var/run/docker.sock`. You can find an example of this configuration in our official [`docker-compose.yml` file](https://docs.localstack.cloud/getting-started/installation/#starting-localstack-with-docker-compose). -- The `v2` provider discontinues Lambda Executor Modes such as `LAMBDA_EXECUTOR=local`. Previously, this mode was used as a fallback when the Docker socket was unavailable in the LocalStack container, but many users unintentionally used it instead of the configured `LAMBDA_EXECUTOR=docker`. The new provider now behaves similarly to the old `docker-reuse` executor and does not require such configuration. -- The Lambda containers are now reused between invocations. The changes made to the filesystem (such as in `/tmp`) will persist between subsequent invocations if the function is dispatched to the same container. This is known as a **warm start** (see [Operating Lambda](https://aws.amazon.com/blogs/compute/operating-lambda-performance-optimization-part-1/) for more information). To ensure that each invocation starts with a fresh container, you can set the `LAMBDA_KEEPALIVE_MS` configuration option to 0 milliseconds, to force **cold starts**. -- The platform uses [official Docker base images](https://docs.aws.amazon.com/lambda/latest/dg/runtimes-images.html) pulled from `public.ecr.aws/lambda/`, instead of `lambci`, and supports both `arm64` and `x86_64` architectures. The Lambda functions filesystem now matches the AWS Lambda production environment. The ARM containers for compatible runtimes are based on Amazon Linux 2, and ARM-compatible hosts can create functions with the `arm64` architecture. -- Lambda functions in LocalStack resolve AWS domains, such as `s3.amazonaws.com`, to the LocalStack container. This domain resolution is DNS-based and can be disabled by setting `DNS_ADDRESS=0`. For more information, refer to [Transparent Endpoint Injection]({{< ref "user-guide/tools/transparent-endpoint-injection" >}}). Previously, LocalStack provided patched AWS SDKs to redirect AWS API calls transparently to LocalStack. -- The new provider may generate more exceptions due to invalid input. For instance, while the old provider accepted arbitrary strings (such as `r1`) as Lambda roles when creating a function, the new provider validates role ARNs using a regular expression that requires them to be in the format `arn:aws:iam::000000000000:role/lambda-role`. However, it currently does not verify whether the role actually exists. -- The new Lambda provider now follows the [AWS Lambda state model](https://aws.amazon.com/blogs/compute/tracking-the-state-of-lambda-functions/), while creating and updating Lambda functions, which allows for asynchronous processing. 
Functions are always created in the `Pending state` and move to `Active` once they are ready to accept invocations. Previously, the functions were created synchronously by blocking until the function state was active. The configuration `LAMBDA_SYNCHRONOUS_CREATE=1` can force synchronous function creation, but it is not recommended. -- LocalStack's Lambda implementation, allows you to customize the Lambda execution environment using the [Lambda Extensions API](https://docs.aws.amazon.com/lambda/latest/dg/runtimes-extensions-api.html). This API allows for advanced monitoring, observability, or developer tooling, providing greater control and flexibility over your Lambda functions. Lambda functions can also be run on hosts with [multi-architecture support](https://docs.localstack.cloud/references/arm64-support/#lambda-multi-architecture-support), allowing you to leverage LocalStack's Lambda API to develop and test Lambda functions with high parity. +- To run Lambda functions in LocalStack, mount the Docker socket into the LocalStack container. + Add the following Docker volume mount to your LocalStack startup configuration: `/var/run/docker.sock:/var/run/docker.sock`. + You can find an example of this configuration in our official [`docker-compose.yml` file](https://docs.localstack.cloud/getting-started/installation/#starting-localstack-with-docker-compose). +- The `v2` provider discontinues Lambda Executor Modes such as `LAMBDA_EXECUTOR=local`. + Previously, this mode was used as a fallback when the Docker socket was unavailable in the LocalStack container, but many users unintentionally used it instead of the configured `LAMBDA_EXECUTOR=docker`. + The new provider now behaves similarly to the old `docker-reuse` executor and does not require such configuration. +- The Lambda containers are now reused between invocations. + The changes made to the filesystem (such as in `/tmp`) will persist between subsequent invocations if the function is dispatched to the same container. + This is known as a **warm start** (see [Operating Lambda](https://aws.amazon.com/blogs/compute/operating-lambda-performance-optimization-part-1/) for more information). + To ensure that each invocation starts with a fresh container, you can set the `LAMBDA_KEEPALIVE_MS` configuration option to 0 milliseconds, to force **cold starts**. +- The platform uses [official Docker base images](https://docs.aws.amazon.com/lambda/latest/dg/runtimes-images.html) pulled from `public.ecr.aws/lambda/`, instead of `lambci`, and supports both `arm64` and `x86_64` architectures. + The Lambda functions filesystem now matches the AWS Lambda production environment. + The ARM containers for compatible runtimes are based on Amazon Linux 2, and ARM-compatible hosts can create functions with the `arm64` architecture. +- Lambda functions in LocalStack resolve AWS domains, such as `s3.amazonaws.com`, to the LocalStack container. + This domain resolution is DNS-based and can be disabled by setting `DNS_ADDRESS=0`. + For more information, refer to [Transparent Endpoint Injection]({{< ref "user-guide/tools/transparent-endpoint-injection" >}}). + Previously, LocalStack provided patched AWS SDKs to redirect AWS API calls transparently to LocalStack. +- The new provider may generate more exceptions due to invalid input. 
+ For instance, while the old provider accepted arbitrary strings (such as `r1`) as Lambda roles when creating a function, the new provider validates role ARNs using a regular expression that requires them to be in the format `arn:aws:iam::000000000000:role/lambda-role`. + However, it currently does not verify whether the role actually exists. +- The new Lambda provider now follows the [AWS Lambda state model](https://aws.amazon.com/blogs/compute/tracking-the-state-of-lambda-functions/), while creating and updating Lambda functions, which allows for asynchronous processing. + Functions are always created in the `Pending state` and move to `Active` once they are ready to accept invocations. + Previously, the functions were created synchronously by blocking until the function state was active. + The configuration `LAMBDA_SYNCHRONOUS_CREATE=1` can force synchronous function creation, but it is not recommended. +- LocalStack's Lambda implementation, allows you to customize the Lambda execution environment using the [Lambda Extensions API](https://docs.aws.amazon.com/lambda/latest/dg/runtimes-extensions-api.html). + This API allows for advanced monitoring, observability, or developer tooling, providing greater control and flexibility over your Lambda functions. + Lambda functions can also be run on hosts with [multi-architecture support](https://docs.localstack.cloud/references/arm64-support/#lambda-multi-architecture-support), allowing you to leverage LocalStack's Lambda API to develop and test Lambda functions with high parity. The following configuration options from the old provider are discontinued in the new provider: -* The `LAMBDA_EXECUTOR` and specifically, the `LAMBDA_EXECUTOR=local` options are no longer supported. -* The `LAMBDA_STAY_OPEN_MODE` is now the default behavior and can be removed. Instead, use the `LAMBDA_KEEPALIVE_MS` option to configure how long containers should be kept running in between invocations. -* The `LAMBDA_REMOTE_DOCKER` option is not used anymore since the new provider automatically copies zip files and configures hot reloading. -* The `LAMBDA_CODE_EXTRACT_TIME` option is no longer used because function creation is now asynchronous. -* The `LAMBDA_FALLBACK_URL`, `SYNCHRONOUS_KINESIS_EVENTS`, `SYNCHRONOUS_SNS_EVENTS` and `LAMBDA_FORWARD_URL` options are currently not supported. -* The `LAMBDA_CONTAINER_REGISTRY` option is not used anymore. Instead, use the more flexible `LAMBDA_RUNTIME_IMAGE_MAPPING` option to customize individual runtimes. -* The `LAMBDA_XRAY_INIT` option is no longer needed because the X-Ray daemon is always initialized. +- The `LAMBDA_EXECUTOR` and specifically, the `LAMBDA_EXECUTOR=local` options are no longer supported. +- The `LAMBDA_STAY_OPEN_MODE` is now the default behavior and can be removed. + Instead, use the `LAMBDA_KEEPALIVE_MS` option to configure how long containers should be kept running in between invocations. +- The `LAMBDA_REMOTE_DOCKER` option is not used anymore since the new provider automatically copies zip files and configures hot reloading. +- The `LAMBDA_CODE_EXTRACT_TIME` option is no longer used because function creation is now asynchronous. +- The `LAMBDA_FALLBACK_URL`, `SYNCHRONOUS_KINESIS_EVENTS`, `SYNCHRONOUS_SNS_EVENTS` and `LAMBDA_FORWARD_URL` options are currently not supported. +- The `LAMBDA_CONTAINER_REGISTRY` option is not used anymore. + Instead, use the more flexible `LAMBDA_RUNTIME_IMAGE_MAPPING` option to customize individual runtimes. 
+- The `LAMBDA_XRAY_INIT` option is no longer needed because the X-Ray daemon is always initialized. However, the new provider still supports the following configuration options: -* The `BUCKET_MARKER_LOCAL` option has a new default value, `hot-reload`. The former default value `__local__` is an invalid bucket name. -* The `LAMBDA_TRUNCATE_STDOUT` option. -* The `LAMBDA_DOCKER_NETWORK` option. -* The `LAMBDA_DOCKER_FLAGS` option. -* The `LAMBDA_REMOVE_CONTAINERS` option. -* The `LAMBDA_DOCKER_DNS` option since LocalStack 2.2. -* The `HOSTNAME_FROM_LAMBDA` option since LocalStack 3.0. +- The `BUCKET_MARKER_LOCAL` option has a new default value, `hot-reload`. + The former default value `__local__` is an invalid bucket name. +- The `LAMBDA_TRUNCATE_STDOUT` option. +- The `LAMBDA_DOCKER_NETWORK` option. +- The `LAMBDA_DOCKER_FLAGS` option. +- The `LAMBDA_REMOVE_CONTAINERS` option. +- The `LAMBDA_DOCKER_DNS` option since LocalStack 2.2. +- The `HOSTNAME_FROM_LAMBDA` option since LocalStack 3.0. ## Examples @@ -303,29 +353,35 @@ The following code snippets and sample applications provide practical examples o ### Docker not available -In the old Lambda provider, Lambda functions were executed within the LocalStack container using the local executor mode. This mode was used as a fallback if the Docker socket was unavailable in the LocalStack container. However, many users inadvertently used the local executor mode instead of the intended Docker executor mode, which caused unexpected behavior. +In the old Lambda provider, Lambda functions were executed within the LocalStack container using the local executor mode. +This mode was used as a fallback if the Docker socket was unavailable in the LocalStack container. +However, many users inadvertently used the local executor mode instead of the intended Docker executor mode, which caused unexpected behavior. If you encounter the following error message, you may be using the local executor mode: {{< tabpane lang="bash" >}} {{< tab header="LocalStack Logs" lang="shell" >}} -Lambda 'arn:aws:lambda:us-east-1:000000000000:function:my-function:$LATEST' changed to failed. Reason: Docker not available +Lambda 'arn:aws:lambda:us-east-1:000000000000:function:my-function:$LATEST' changed to failed. +Reason: Docker not available ... raise DockerNotAvailable("Docker not available") {{< /tab >}} {{< tab header="AWS CLI" lang="shell" >}} -An error occurred (ResourceConflictException) when calling the Invoke operation (reached max retries: 0): The operation cannot be performed at this time. The function is currently in the following state: Failed +An error occurred (ResourceConflictException) when calling the Invoke operation (reached max retries: 0): The operation cannot be performed at this time. +The function is currently in the following state: Failed {{< /tab >}} {{< tab header="SAM" lang="shell" >}} Error: Failed to create/update the stack: sam-app, Waiter StackCreateComplete failed: Waiter encountered a terminal failure state: For expression "Stacks[].StackStatus" we matched expected path: "CREATE_FAILED" at least once {{< /tab >}} {{< /tabpane >}} -To fix this issue, add the Docker volume mount `/var/run/docker.sock:/var/run/docker.sock` to your LocalStack startup. Refer to our [sample `docker-compose.yml` file](https://github.com/localstack/localstack/blob/master/docker-compose.yml) as an example. +To fix this issue, add the Docker volume mount `/var/run/docker.sock:/var/run/docker.sock` to your LocalStack startup. 
+Refer to our [sample `docker-compose.yml` file](https://github.com/localstack/localstack/blob/master/docker-compose.yml) as an example. ### Function in Pending state -If you receive a `ResourceConflictException` when trying to invoke a function, it is currently in a `Pending` state and cannot be executed yet. To wait until the function becomes `active`, you can use the following command: +If you receive a `ResourceConflictException` when trying to invoke a function, it is currently in a `Pending` state and cannot be executed yet. +To wait until the function becomes `active`, you can use the following command: {{< command >}} $ awslocal lambda get-function --function-name my-function @@ -363,8 +419,10 @@ $ awslocal lambda get-function --function-name my-function } {{< / command >}} -If the function is still in the `Pending` state, the output will include a `"State": "Pending"` field and a `"StateReason": "The function is being created."` message. Once the function is active, the `"State"` field will change to `"Active"` and the `"LastUpdateStatus"` field will indicate the status of the last update. +If the function is still in the `Pending` state, the output will include a `"State": "Pending"` field and a `"StateReason": "The function is being created."` message. +Once the function is active, the `"State"` field will change to `"Active"` and the `"LastUpdateStatus"` field will indicate the status of the last update. ### Not implemented error -If you are using LocalStack versions prior to 2.0, and encounter a `NotImplementedError` in the LocalStack logs and an `InternalFailure (501) error` in the client while creating a Lambda function using the [`CreateFunction` API](https://docs.aws.amazon.com/lambda/latest/dg/API_CreateFunction.html), check your `PROVIDER_OVERRIDE_LAMBDA` configuration. You might encounter this error if it is set to `legacy`. +If you are using LocalStack versions prior to 2.0, and encounter a `NotImplementedError` in the LocalStack logs and an `InternalFailure (501) error` in the client while creating a Lambda function using the [`CreateFunction` API](https://docs.aws.amazon.com/lambda/latest/dg/API_CreateFunction.html), check your `PROVIDER_OVERRIDE_LAMBDA` configuration. +You might encounter this error if it is set to `legacy`. diff --git a/content/en/user-guide/aws/logs/index.md b/content/en/user-guide/aws/logs/index.md index 0c02143a92..613af3e1ca 100644 --- a/content/en/user-guide/aws/logs/index.md +++ b/content/en/user-guide/aws/logs/index.md @@ -8,24 +8,31 @@ persistence: supported --- -[CloudWatch Logs](https://docs.aws.amazon.com/cloudwatch/index.html) allows to store and retrieve logs. While some services automatically create and write logs (e.g. Lambda), logs can also be added manually. -CloudWatch Logs is available in the Community version. However, some specific features are only available in Pro. +[CloudWatch Logs](https://docs.aws.amazon.com/cloudwatch/index.html) allows to store and retrieve logs. +While some services automatically create and write logs (e.g. Lambda), logs can also be added manually. +CloudWatch Logs is available in the Community version. +However, some specific features are only available in Pro. ## Subscription Filters -Subscription filters can be used to forward logs to certain services, e.g. Kinesis, Lambda, and Kinesis Data Firehose. You can read upon details in the [official AWS docs](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/SubscriptionFilters.html). 
+
+Subscription filters can be used to forward logs to certain services, e.g. Kinesis, Lambda, and Kinesis Data Firehose.
+You can read up on the details in the [official AWS docs](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/SubscriptionFilters.html).
+Below, we show an example with Kinesis; a minimal Lambda variant is sketched at the end of this page.

 ### Subscription Filters with Kinesis Example
+
 In the following we setup a little example on how to use subscription filters with kinesis.

-First, we setup the required resources. Therefore, we create a kinesis stream, a log group and log stream. Then we can configure the subscription filter.
+First, we set up the required resources.
+For this, we create a Kinesis stream, a log group, and a log stream.
+Then we can configure the subscription filter.

 {{< command >}}
 $ awslocal kinesis create-stream --stream-name "logtest" --shard-count 1
 $ kinesis_arn=$(awslocal kinesis describe-stream --stream-name "logtest" | jq -r .StreamDescription.StreamARN)
 $ awslocal logs create-log-group --log-group-name test
 $ awslocal logs create-log-stream \
-      --log-group-name test \
-      --log-stream-name test
+    --log-group-name test \
+    --log-stream-name test

 $ awslocal logs put-subscription-filter \
     --log-group-name "test" \
@@ -41,7 +48,9 @@ $ timestamp=$(($(date +'%s * 1000 + %-N / 1000000')))
 $ awslocal logs put-log-events --log-group-name test --log-stream-name test --log-events "[{\"timestamp\": ${timestamp} , \"message\": \"hello from cloudwatch\"}]"
 {{< / command >}}

-Now we can retrieve the data. In our example, there will only be one record. The data record is base64 encoded and compressed in gzip format:
+Now we can retrieve the data.
+In our example, there will only be one record.
+The data record is base64-encoded and compressed in gzip format:
 {{< command >}}
 $ shard_iterator=$(awslocal kinesis get-shard-iterator --stream-name logtest --shard-id shardId-000000000000 --shard-iterator-type TRIM_HORIZON | jq -r .ShardIterator)
 $ record=$(awslocal kinesis get-records --limit 10 --shard-iterator $shard_iterator | jq -r '.Records[0].Data')
@@ -49,20 +58,22 @@ $ echo $record | base64 -d | zcat
 {{< / command >}}

 ## Filter Pattern (Pro only)
+
-[Filter patterns](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/FilterAndPatternSyntax.html) can be used to select certain logs only.
+[Filter patterns](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/FilterAndPatternSyntax.html) can be used to select certain logs only.
 LocalStack currently supports simple json-property filter.

 ### Metric Filter Example
-Metric filters can be used to automatically create CloudWatch metrics.
-In the following example we are interested in logs that include a key-value pair `"foo": "bar"` and create a metric filter.
+Metric filters can be used to automatically create CloudWatch metrics.
+
+In the following example, we create a metric filter that matches logs containing the key-value pair `"foo": "bar"`.

 {{< command >}}
 $ awslocal logs create-log-group --log-group-name test-filter
 $ awslocal logs create-log-stream \
-      --log-group-name test-filter \
-      --log-stream-name test-filter-stream
+    --log-group-name test-filter \
+    --log-stream-name test-filter-stream

 $ awslocal logs put-metric-filter \
     --log-group-name test-filter \
@@ -92,11 +103,13 @@ awslocal cloudwatch get-metric-statistics --namespace MyNamespace \
 {{< / command >}}

 ### Filter Log Events
+
 Similarly, you can use filter-pattern to filter logs with different kinds of patterns as described by [AWS](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/FilterAndPatternSyntax.html).
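+
+If you have not already pushed a matching event as part of the metric filter example, you can first write a JSON-formatted log event to try the patterns below against.
+The snippet is a small sketch that reuses the `test-filter` log group and `test-filter-stream` log stream created above; any log group works as long as the message body is valid JSON:
+
+{{< command >}}
+$ timestamp=$(($(date +'%s * 1000 + %-N / 1000000')))
+$ awslocal logs put-log-events \
+    --log-group-name test-filter \
+    --log-stream-name test-filter-stream \
+    --log-events "[{\"timestamp\": ${timestamp}, \"message\": \"{\\\"foo\\\": \\\"bar\\\"}\"}]"
+{{< / command >}}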
#### JSON Filter Pattern + For purely JSON structured log messages, you can use JSON filter patterns to traverse the JSON object. -Enclose your pattern in curly braces, like this: +Enclose your pattern in curly braces, like this: {{< command >}} $ awslocal logs filter-log-events --log-group-name test-filter --filter-pattern "{$.foo = \"bar\"}" {{< / command >}} @@ -104,6 +117,7 @@ $ awslocal logs filter-log-events --log-group-name test-filter --filter-pattern This returns all events whose top level "foo" key has the "bar" value. #### Regular Expression Filter Pattern + You can use a simplified regex syntax for regular expression matching. Enclose your pattern in percentage signs like this: {{< command >}} @@ -113,6 +127,7 @@ This returns all events containing "Foo" or "foo". For a complete set of the supported syntax, check [the official AWS documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/FilterAndPatternSyntax.html#regex-expressions) #### Unstructured Filter Pattern + If not specified otherwise in the pattern, we look for a match in the whole event message: {{< command >}} $ awslocal logs filter-log-events --log-group-name test-filter --filter-pattern "foo" @@ -120,7 +135,8 @@ $ awslocal logs filter-log-events --log-group-name test-filter --filter-pattern ## Resource Browser -The LocalStack Web Application provides a Resource Browser for exploring CloudWatch Logs. You can access the Resource Browser by opening the LocalStack Web Application in your browser, navigating to the **Resources** section, and then clicking on **CloudWatch Logs** under the **Management/Governance** section. +The LocalStack Web Application provides a Resource Browser for exploring CloudWatch Logs. +You can access the Resource Browser by opening the LocalStack Web Application in your browser, navigating to the **Resources** section, and then clicking on **CloudWatch Logs** under the **Management/Governance** section. CloudWatch Logs Resource Browser
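+
+As noted in the Subscription Filters section above, log events can also be forwarded directly to a Lambda function.
+The following is a minimal, untested sketch: it assumes a function named `log-consumer` already exists (the name is only an example) and uses an empty filter pattern so that every event in the `test` log group is forwarded:
+
+{{< command >}}
+$ awslocal logs put-subscription-filter \
+    --log-group-name "test" \
+    --filter-name "lambda-test" \
+    --filter-pattern "" \
+    --destination-arn arn:aws:lambda:us-east-1:000000000000:function:log-consumer
+{{< / command >}}
+
+The function then receives the same gzip-compressed, base64-encoded `awslogs` payload that was decoded by hand in the Kinesis example.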
diff --git a/content/en/user-guide/aws/managedblockchain/index.md b/content/en/user-guide/aws/managedblockchain/index.md index da82de8ae2..dacc682ff9 100644 --- a/content/en/user-guide/aws/managedblockchain/index.md +++ b/content/en/user-guide/aws/managedblockchain/index.md @@ -6,19 +6,23 @@ description: > tags: ["Pro image"] --- -Managed Blockchain (AMB) is a managed service that enables the creation and management of blockchain networks, such as Hyperledger Fabric, Bitcoin, Polygon and Ethereum. Blockchain enables the development of applications in which multiple entities can conduct transactions and exchange data securely and transparently, eliminating the requirement for a central, trusted authority. +Managed Blockchain (AMB) is a managed service that enables the creation and management of blockchain networks, such as Hyperledger Fabric, Bitcoin, Polygon and Ethereum. +Blockchain enables the development of applications in which multiple entities can conduct transactions and exchange data securely and transparently, eliminating the requirement for a central, trusted authority. -LocalStack allows you to use the AMB APIs to develop and deploy decentralized applications in your local environment. The supported APIs are available on our [API Coverage Page](https://docs.localstack.cloud/references/coverage/coverage_managedblockchain/), which provides information on the extent of AMB integration with LocalStack. +LocalStack allows you to use the AMB APIs to develop and deploy decentralized applications in your local environment. +The supported APIs are available on our [API Coverage Page](https://docs.localstack.cloud/references/coverage/coverage_managedblockchain/), which provides information on the extent of AMB integration with LocalStack. ## Getting started This guide is designed for users new to AMB and assumes basic knowledge of the AWS CLI and our [`awslocal`](https://github.com/localstack/awscli-local) wrapper script. -Start your LocalStack container using your preferred method. We will demonstrate how to create a blockchain network, a node, and a proposal. +Start your LocalStack container using your preferred method. +We will demonstrate how to create a blockchain network, a node, and a proposal. ### Create a blockchain network -You can create a blockchain network using the [`CreateNetwork`](https://docs.aws.amazon.com/managed-blockchain/latest/APIReference/API_CreateNetwork.html) API. Run the following command to create a network named `OurBlockchainNet` which uses the Hyperledger Fabric with the following configuration: +You can create a blockchain network using the [`CreateNetwork`](https://docs.aws.amazon.com/managed-blockchain/latest/APIReference/API_CreateNetwork.html) API. +Run the following command to create a network named `OurBlockchainNet` which uses the Hyperledger Fabric with the following configuration: {{< command >}} $ awslocal managedblockchain create-network \ @@ -59,7 +63,7 @@ $ awslocal managedblockchain create-network \ } } }' - + { "NetworkId": "n-X24AF1AK2GC6MDW11HYW5I5DQC", "MemberId": "m-6VWBWHP2Y15F7TQ2DS093RTCW2" @@ -71,7 +75,8 @@ Copy the `NetworkId` and `MemberId` values from the output of the above command, ### Create a node -You can create a node using the [`CreateNode`](https://docs.aws.amazon.com/managed-blockchain/latest/APIReference/API_CreateNode.html) API. 
Run the following command to create a node with the following configuration: +You can create a node using the [`CreateNode`](https://docs.aws.amazon.com/managed-blockchain/latest/APIReference/API_CreateNode.html) API. +Run the following command to create a node with the following configuration: {{< command >}} $ awslocal managedblockchain create-node \ @@ -96,7 +101,7 @@ $ awslocal managedblockchain create-node \ --network-id n-X24AF1AK2GC6MDW11HYW5I5DQC \ --member-id m-6VWBWHP2Y15F7TQ2DS093RTCW2 -{ +{ "NodeId": "nd-77K8AI0O5BEQD1IW4L8OGKMXV7" } @@ -106,7 +111,8 @@ Replace the `NetworkId` and `MemberId` values in the above command with the valu ### Create a proposal -You can create a proposal using the [`CreateProposal`](https://docs.aws.amazon.com/managed-blockchain/latest/APIReference/API_CreateProposal.html) API. Run the following command to create a proposal with the following configuration: +You can create a proposal using the [`CreateProposal`](https://docs.aws.amazon.com/managed-blockchain/latest/APIReference/API_CreateProposal.html) API. +Run the following command to create a proposal with the following configuration: {{< command >}} $ awslocal managedblockchain create-proposal \ diff --git a/content/en/user-guide/aws/mediastore/index.md b/content/en/user-guide/aws/mediastore/index.md index 29c38c9d5d..401a55c3a9 100644 --- a/content/en/user-guide/aws/mediastore/index.md +++ b/content/en/user-guide/aws/mediastore/index.md @@ -7,8 +7,8 @@ tags: ["Pro image"] ## Introduction -MediaStore is a scalable and highly available object storage service designed specifically for media content. -It provides a reliable way to store, manage, and serve media assets, such as audio, video, and images, with low latency and high performance. +MediaStore is a scalable and highly available object storage service designed specifically for media content. +It provides a reliable way to store, manage, and serve media assets, such as audio, video, and images, with low latency and high performance. MediaStore seamlessly integrates with other AWS services like Elemental MediaConvert, Elemental MediaLive, Elemental MediaPackage, and CloudFront. LocalStack allows you to use the Elemental MediaStore APIs as a high-performance storage solution for media content in your local environment. @@ -18,11 +18,12 @@ The supported APIs are available on our [API Coverage Page](https://docs.localst This guide is designed for users new to Elemental MediaStore and assumes basic knowledge of the AWS CLI and our `awslocal` wrapper script. -Start your LocalStack container using your preferred method. We will demonstrate how you can create a MediaStore container, upload an asset, and download the asset. +Start your LocalStack container using your preferred method. +We will demonstrate how you can create a MediaStore container, upload an asset, and download the asset. ### Create a container -You can create a container using the [`CreateContainer`](https://docs.aws.amazon.com/mediastore/latest/apireference/API_CreateContainer.html) API. +You can create a container using the [`CreateContainer`](https://docs.aws.amazon.com/mediastore/latest/apireference/API_CreateContainer.html) API. 
Run the following command to create a container and retrieve the the `Endpoint` value which should be used in subsequent requests: {{< command >}} @@ -44,9 +45,9 @@ You should see the following output: ### Upload an asset -To upload a file named `myfile.txt` to the container, utilize the [`PutObject`](https://docs.aws.amazon.com/mediastore/latest/apireference/API_PutObject.html) API. -This action will transfer the file to the specified path, `/myfolder/myfile.txt`, within the container. -Provide the `endpoint` obtained in the previous step for the operation to be successful. +To upload a file named `myfile.txt` to the container, utilize the [`PutObject`](https://docs.aws.amazon.com/mediastore/latest/apireference/API_PutObject.html) API. +This action will transfer the file to the specified path, `/myfolder/myfile.txt`, within the container. +Provide the `endpoint` obtained in the previous step for the operation to be successful. Run the following command to upload the file: {{< command >}} @@ -68,9 +69,9 @@ You should see the following output: ### Download an asset -To retrieve the file from the container, utilize the [`GetObject`](https://docs.aws.amazon.com/mediastore/latest/apireference/API_GetObject.html) API. -In this process, you need to specify the endpoint, the path for downloading the file, and the location where the output file, such as `/tmp/out.txt`, will be stored. -The downloaded file will then be accessible at the specified output path. +To retrieve the file from the container, utilize the [`GetObject`](https://docs.aws.amazon.com/mediastore/latest/apireference/API_GetObject.html) API. +In this process, you need to specify the endpoint, the path for downloading the file, and the location where the output file, such as `/tmp/out.txt`, will be stored. +The downloaded file will then be accessible at the specified output path. Run the following command to download the file: {{< command >}} @@ -94,4 +95,5 @@ You should see the following output: ## Troubleshooting -The Elemental MediaStore service requires the use of a custom HTTP/HTTPS endpoint. In case you encounter any issues, please consult our [Networking documentation]({{< ref "references/network-troubleshooting" >}}) for assistance. +The Elemental MediaStore service requires the use of a custom HTTP/HTTPS endpoint. +In case you encounter any issues, please consult our [Networking documentation]({{< ref "references/network-troubleshooting" >}}) for assistance. diff --git a/content/en/user-guide/aws/memorydb/index.md b/content/en/user-guide/aws/memorydb/index.md index baa9d1278b..953c35e776 100644 --- a/content/en/user-guide/aws/memorydb/index.md +++ b/content/en/user-guide/aws/memorydb/index.md @@ -13,26 +13,30 @@ aliases: MemoryDB is a fully managed, Redis-compatible, in-memory database tailored for workloads demanding ultra-fast, primary database functionality. It streamlines the deployment and management of in-memory databases within the AWS cloud environment, acting as a replacement for using a cache in front of a database for improved durability and performance. -LocalStack's Pro offering contains support for the main MemoryDB APIs surrounding cluster creation, allowing developers to utilize the MemoryDB functionalities in their local development environment. The supported APIs are available on our [API Coverage Page](https://docs.localstack.cloud/references/coverage/coverage_memorydb/), which provides information on the extent of MemoryDB's integration with LocalStack. 
+LocalStack's Pro offering contains support for the main MemoryDB APIs surrounding cluster creation, allowing developers to utilize the MemoryDB functionalities in their local development environment. +The supported APIs are available on our [API Coverage Page](https://docs.localstack.cloud/references/coverage/coverage_memorydb/), which provides information on the extent of MemoryDB's integration with LocalStack. ## Getting started This guide is designed for users new to MemoryDB and assumes basic knowledge of the AWS CLI and our `awslocal` wrapper script. -Start your LocalStack container using your preferred method. We will demonstrate how you can create a MemoryDB cluster and connect to it. +Start your LocalStack container using your preferred method. +We will demonstrate how you can create a MemoryDB cluster and connect to it. ### Basic cluster creation -You can create a MemoryDB cluster using the [`CreateCluster`](https://docs.aws.amazon.com/memorydb/latest/APIReference/API_CreateCluster.html) API. Run the following command to create a cluster: +You can create a MemoryDB cluster using the [`CreateCluster`](https://docs.aws.amazon.com/memorydb/latest/APIReference/API_CreateCluster.html) API. +Run the following command to create a cluster: {{< command >}} $ awslocal memorydb create-cluster \ --cluster-name my-redis-cluster \ --node-type db.t4g.small \ - --acl-name open-access + --acl-name open-access {{< /command>}} -Once it becomes available, you will be able to use the cluster endpoint for Redis operations. Run the following command to retrieve the cluster endpoint using the [`DescribeClusters`](https://docs.aws.amazon.com/memorydb/latest/APIReference/API_DescribeClusters.html) API: +Once it becomes available, you will be able to use the cluster endpoint for Redis operations. +Run the following command to retrieve the cluster endpoint using the [`DescribeClusters`](https://docs.aws.amazon.com/memorydb/latest/APIReference/API_DescribeClusters.html) API: {{< command >}} $ awslocal memorydb describe-clusters --query "Clusters[0].ClusterEndpoint" @@ -63,13 +67,15 @@ $ redis-cli -c -p 4510 cluster nodes ## Container mode -To start Redis clusters of a specific version, enable container mode for Redis-based services in LocalStack. -This approach directs LocalStack to launch Redis instances in distinct containers, utilizing your chosen image tag. -Additionally, container mode is beneficial for independently examining the logs of each Redis instance. To activate this, set the `REDIS_CONTAINER_MODE` configuration variable to `1`. +To start Redis clusters of a specific version, enable container mode for Redis-based services in LocalStack. +This approach directs LocalStack to launch Redis instances in distinct containers, utilizing your chosen image tag. +Additionally, container mode is beneficial for independently examining the logs of each Redis instance. +To activate this, set the `REDIS_CONTAINER_MODE` configuration variable to `1`. ## Current Limitations -LocalStack's emulation support for MemoryDB primarily focuses on the creation and termination of Redis servers in cluster mode. Essential resources for running a cluster, such as parameter groups, security groups, and subnet groups, are mocked but have no effect on the Redis servers' operation. +LocalStack's emulation support for MemoryDB primarily focuses on the creation and termination of Redis servers in cluster mode. 
+Essential resources for running a cluster, such as parameter groups, security groups, and subnet groups, are mocked but have no effect on the Redis servers' operation. LocalStack currently doesn't support MemoryDB snapshots, failovers, users/passwords, service updates, replication scaling, SSL, migrations, service integration (like CloudWatch/Kinesis log delivery, SNS notifications) or tests. diff --git a/content/en/user-guide/aws/mq/index.md b/content/en/user-guide/aws/mq/index.md index b21772e853..4c1b4a2aaf 100644 --- a/content/en/user-guide/aws/mq/index.md +++ b/content/en/user-guide/aws/mq/index.md @@ -7,19 +7,24 @@ tags: ["Pro image"] ## Introduction -MQ is a managed message broker service offered by Amazon Web Services (AWS). It facilitates the exchange of messages between various components of distributed applications, enabling reliable and scalable communication. AWS MQ supports popular messaging protocols like MQTT, AMQP, and STOMP, making it suitable for a wide range of messaging use cases. +MQ is a managed message broker service offered by Amazon Web Services (AWS). +It facilitates the exchange of messages between various components of distributed applications, enabling reliable and scalable communication. +AWS MQ supports popular messaging protocols like MQTT, AMQP, and STOMP, making it suitable for a wide range of messaging use cases. -LocalStack allows you to use the MQ APIs to implement pub/sub messaging, request/response patterns, or distributed event-driven architectures in your local environment. The supported APIs are available on our [API Coverage Page](https://docs.localstack.cloud/references/coverage/coverage_mq/), which provides information on the extent of MQ integration with LocalStack. +LocalStack allows you to use the MQ APIs to implement pub/sub messaging, request/response patterns, or distributed event-driven architectures in your local environment. +The supported APIs are available on our [API Coverage Page](https://docs.localstack.cloud/references/coverage/coverage_mq/), which provides information on the extent of MQ integration with LocalStack. ## Getting started This guide is designed for users new to MQ and assumes basic knowledge of the AWS CLI and our [`awslocal`](https://github.com/localstack/awscli-local) wrapper script. -Start your LocalStack container using your preferred method. We will demonstrate how to create an MQ broker and send a message to a sample queue. +Start your LocalStack container using your preferred method. +We will demonstrate how to create an MQ broker and send a message to a sample queue. ### Create a broker -You can create a broker using the [`CreateBroker`](https://docs.aws.amazon.com/amazon-mq/latest/api-reference/brokers.html#brokerspost) API. Run the following command to create a broker named `test-broker` with the following configuration: +You can create a broker using the [`CreateBroker`](https://docs.aws.amazon.com/amazon-mq/latest/api-reference/brokers.html#brokerspost) API. +Run the following command to create a broker named `test-broker` with the following configuration: {{< command >}} $ awslocal mq create-broker \ @@ -41,11 +46,12 @@ $ awslocal mq create-broker \ ### Describe the broker -You can use the [`DescribeBroker`](https://docs.aws.amazon.com/amazon-mq/latest/api-reference/brokers.html#brokersget) API to get more detailed information about the broker. 
Run the following command to get information about the broker we created above: +You can use the [`DescribeBroker`](https://docs.aws.amazon.com/amazon-mq/latest/api-reference/brokers.html#brokersget) API to get more detailed information about the broker. +Run the following command to get information about the broker we created above: {{< command >}} $ awslocal mq describe-broker --broker-id - + b-f503abb7-66bc-47fb-b1a9-8d8c51ef6545 { "BrokerArn": "arn:aws:mq:us-east-1:000000000000:broker:test-broker:b-f503abb7-66bc-47fb-b1a9-8d8c51ef6545", @@ -72,7 +78,8 @@ b-f503abb7-66bc-47fb-b1a9-8d8c51ef6545 ### Send a message -Now that the broker is actively listening, we can use curl to send a message to a sample queue. Run the following command to send a message to the `orders.input` queue: +Now that the broker is actively listening, we can use curl to send a message to a sample queue. +Run the following command to send a message to the `orders.input` queue: {{< command >}} $ curl -XPOST -d "body=message" http://admin:admin@localhost:4513/api/message\?destination\=queue://orders.input @@ -80,7 +87,8 @@ $ curl -XPOST -d "body=message" http://admin:admin@localhost:4513/api/message\?d ## Resource Browser -The LocalStack Web Application provides a Resource Browser for managing MQ brokers. You can access the Resource Browser by opening the LocalStack Web Application in your browser, navigating to the **Resource Browser** section, and then clicking on **MQ** under the **App Integration** section. +The LocalStack Web Application provides a Resource Browser for managing MQ brokers. +You can access the Resource Browser by opening the LocalStack Web Application in your browser, navigating to the **Resource Browser** section, and then clicking on **MQ** under the **App Integration** section. MQ Resource Browser
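+
+Going back to the broker itself: to verify that the message sent above arrived, you can read it back through the same web console REST API.
+This is a minimal, untested sketch that assumes the embedded ActiveMQ web console exposes the standard ActiveMQ REST consume endpoint, where `oneShot=true` returns a single message without keeping a consumer session open:
+
+{{< command >}}
+$ curl -XGET "http://admin:admin@localhost:4513/api/message?destination=queue://orders.input&oneShot=true"
+{{< / command >}}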
@@ -102,8 +110,9 @@ The following code snippets and sample applications provide practical examples o Currently, our MQ emulation offers only fundamental capabilities, and it comes with certain limitations: -- **ActiveMQ Version Limitation:** Presently, only ActiveMQ version 5.16.6 is supported. RabbitMQ is not supported at this time. +- **ActiveMQ Version Limitation:** Presently, only ActiveMQ version 5.16.6 is supported. + RabbitMQ is not supported at this time. - **IAM User Management:** IAM Users are not actively enforced, although they are necessary for making correct calls within the system. -- **Configuration Enforcement:** While it is feasible to create configurations, they are not actively enforced within the broker. +- **Configuration Enforcement:** While it is feasible to create configurations, they are not actively enforced within the broker. - **Persistence and Cloud Pods:** LocalStack does not provide support for Persistence and Cloud Pods at this time. - **API Coverage:** Please note that there is limited API coverage available as part of the current emulation capabilities. diff --git a/content/en/user-guide/aws/msk/index.md b/content/en/user-guide/aws/msk/index.md index 71a0d886f0..40a9d3c561 100644 --- a/content/en/user-guide/aws/msk/index.md +++ b/content/en/user-guide/aws/msk/index.md @@ -9,21 +9,26 @@ persistence: supported with limitations ## Introduction -Managed Streaming for Apache Kafka (MSK) is a fully managed Apache Kafka service that allows you to build and run applications that process streaming data. MSK offers a centralized platform to facilitate seamless communication between various AWS services and applications through event-driven architectures, facilitating data ingestion, processing, and analytics for various applications. MSK also features automatic scaling and built-in monitoring, allowing users to build robust, high-throughput data pipelines. +Managed Streaming for Apache Kafka (MSK) is a fully managed Apache Kafka service that allows you to build and run applications that process streaming data. +MSK offers a centralized platform to facilitate seamless communication between various AWS services and applications through event-driven architectures, facilitating data ingestion, processing, and analytics for various applications. +MSK also features automatic scaling and built-in monitoring, allowing users to build robust, high-throughput data pipelines. -LocalStack allows you to use the MSK APIs in your local environment to spin up Kafka clusters on the local machine, create topics for exchanging messages, and define event source mappings that trigger Lambda functions when messages are received on a certain topic. The supported APIs are available on our [API coverage page](https://docs.localstack.cloud/references/coverage/coverage_kafka/), which provides information on the extent of MSK's integration with LocalStack. +LocalStack allows you to use the MSK APIs in your local environment to spin up Kafka clusters on the local machine, create topics for exchanging messages, and define event source mappings that trigger Lambda functions when messages are received on a certain topic. +The supported APIs are available on our [API coverage page](https://docs.localstack.cloud/references/coverage/coverage_kafka/), which provides information on the extent of MSK's integration with LocalStack. 
## Getting started This guide is designed for users new to Managed Streaming for Kafka and assumes basic knowledge of the AWS CLI and our [`awslocal`](https://github.com/localstack/awscli-local) wrapper script. -Start your LocalStack container using your preferred method. We will demonstrate how to configure an MSK Cluster locally, create a Kafka topic, and produce and consume messages. +Start your LocalStack container using your preferred method. +We will demonstrate how to configure an MSK Cluster locally, create a Kafka topic, and produce and consume messages. ### Create a local MSK Cluster -To set up a local MSK (Managed Streaming for Apache Kafka) cluster, you can use the [`CreateCluster`](https://docs.aws.amazon.com/msk/1.0/apireference/clusters.html#CreateCluster) API to create a cluster named `EventsCluster` with three broker nodes. +To set up a local MSK (Managed Streaming for Apache Kafka) cluster, you can use the [`CreateCluster`](https://docs.aws.amazon.com/msk/1.0/apireference/clusters.html#CreateCluster) API to create a cluster named `EventsCluster` with three broker nodes. -In this process, you'll need a JSON file named `brokernodegroupinfo.json` which specifies the three subnets where you want your local Amazon MSK to distribute the broker nodes. Create the file and add the following content to it: +In this process, you'll need a JSON file named `brokernodegroupinfo.json` which specifies the three subnets where you want your local Amazon MSK to distribute the broker nodes. +Create the file and add the following content to it: ```json { @@ -57,7 +62,9 @@ The output of the command looks similar to this: } ``` -The cluster creation process might take a few minutes. You can describe the cluster using the [`DescribeCluster`](https://docs.aws.amazon.com/msk/1.0/apireference/clusters.html#DescribeCluster) API. Run the following command, replacing `ClusterArn` with the Amazon Resource Name (ARN) you obtained above when you created cluster. +The cluster creation process might take a few minutes. +You can describe the cluster using the [`DescribeCluster`](https://docs.aws.amazon.com/msk/1.0/apireference/clusters.html#DescribeCluster) API. +Run the following command, replacing `ClusterArn` with the Amazon Resource Name (ARN) you obtained above when you created cluster. {{< command >}} $ awslocal kafka describe-cluster \ @@ -103,7 +110,8 @@ $ wget https://archive.apache.org/dist/kafka/2.8.0/kafka_2.12-2.8.0.tgz $ tar -xzf kafka_2.12-2.8.0.tgz {{< / command >}} -Navigate to the **kafka_2.12-2.8.0** directory. Execute the following command, replacing `ZookeeperConnectString` with the value you saved after running the [`DescribeCluster`](https://docs.aws.amazon.com/msk/1.0/apireference/clusters.html#DescribeCluster) API: +Navigate to the **kafka_2.12-2.8.0** directory. +Execute the following command, replacing `ZookeeperConnectString` with the value you saved after running the [`DescribeCluster`](https://docs.aws.amazon.com/msk/1.0/apireference/clusters.html#DescribeCluster) API: {{< command >}} $ bin/kafka-topics.sh \ @@ -122,9 +130,11 @@ Created topic LocalMSKTopic. ### Interacting with the topic -You can now utilize the JVM truststore to establish communication with the MSK cluster. Create a folder named `/tmp` on the client machine, and navigate to the bin folder of the Apache Kafka installation. +You can now utilize the JVM truststore to establish communication with the MSK cluster. 
+Create a folder named `/tmp` on the client machine, and navigate to the bin folder of the Apache Kafka installation. -Run the following command, replacing `java_home` with the path of your `java_home`. For this instance, the java_home path is `/Library/Internet\ Plug-Ins/JavaAppletPlugin.plugin/Contents/Home`. +Run the following command, replacing `java_home` with the path of your `java_home`. +For this instance, the java_home path is `/Library/Internet\ Plug-Ins/JavaAppletPlugin.plugin/Contents/Home`. {{< callout >}} The following step is optional and may not be required, depending on the operating system environment being used. @@ -147,7 +157,8 @@ $ awslocal kafka get-bootstrap-brokers \ --cluster-arn ClusterArn {{< / command >}} -To proceed with the following commands, save the value associated with the string named `BootstrapBrokerStringTls` from the JSON result obtained from the previous command. It should look like this: +To proceed with the following commands, save the value associated with the string named `BootstrapBrokerStringTls` from the JSON result obtained from the previous command. +It should look like this: ```bash { @@ -164,11 +175,13 @@ $ ./kafka-console-producer.sh \ --topic LocalMSKTopic {{< / command >}} -To send messages to your Apache Kafka cluster, enter any desired message and press Enter. You can repeat this process twice or thrice, sending each line as a separate message to the Kafka cluster. +To send messages to your Apache Kafka cluster, enter any desired message and press Enter. +You can repeat this process twice or thrice, sending each line as a separate message to the Kafka cluster. -Keep the connection to the client machine open, and open a separate connection to the same machine in a new window. +Keep the connection to the client machine open, and open a separate connection to the same machine in a new window. -In this new connection, navigate to the `bin` folder and run a command, replacing `BootstrapBrokerStringTls` with the value you saved earlier. This command will allow you to interact with the Apache Kafka cluster using the saved value for secure communication. +In this new connection, navigate to the `bin` folder and run a command, replacing `BootstrapBrokerStringTls` with the value you saved earlier. +This command will allow you to interact with the Apache Kafka cluster using the saved value for secure communication. {{< command >}} $ ./kafka-console-consumer.sh \ @@ -178,11 +191,14 @@ $ ./kafka-console-consumer.sh \ --from-beginning {{< / command >}} -You should start seeing the messages you entered earlier when you used the console producer command. These messages are TLS encrypted in transit. Enter more messages in the producer window, and watch them appear in the consumer window. +You should start seeing the messages you entered earlier when you used the console producer command. +These messages are TLS encrypted in transit. +Enter more messages in the producer window, and watch them appear in the consumer window. -### Adding a local MSK trigger +### Adding a local MSK trigger -You can add a Lambda Event Source Mapping API to create a mapping between a Lambda function, named `my-kafka-function`, and a Kafka topic called `LocalMSKTopic`. The configuration for this mapping sets the starting position of the topic to `LATEST`. +You can add a Lambda Event Source Mapping API to create a mapping between a Lambda function, named `my-kafka-function`, and a Kafka topic called `LocalMSKTopic`. 
+The configuration for this mapping sets the starting position of the topic to `LATEST`. Run the following command to use the [`CreateEventSourceMapping`](https://docs.aws.amazon.com/lambda/latest/dg/API_CreateEventSourceMapping.html) API by specifying the Event Source ARN, the topic name, the starting position, and the Lambda function name. @@ -214,13 +230,16 @@ Upon successful completion of the operation to create the Lambda Event Source Ma } ``` -With the event source mapping feature, LocalStack offers an automated process for spawning Lambda functions whenever a message is published to the designated Kafka topic. +With the event source mapping feature, LocalStack offers an automated process for spawning Lambda functions whenever a message is published to the designated Kafka topic. -You can use the `kafka-console-producer.sh` client script to publish messages to the topic. By doing so, you can closely monitor the execution of Lambda functions within Docker containers as new messages arrive by simply observing the LocalStack log output. +You can use the `kafka-console-producer.sh` client script to publish messages to the topic. +By doing so, you can closely monitor the execution of Lambda functions within Docker containers as new messages arrive by simply observing the LocalStack log output. ## Delete the local MSK cluster -You can delete the local MSK cluster using the [`DeleteCluster`](https://docs.aws.amazon.com/cli/latest/reference/kafka/delete-cluster.html) API. To do so, you must first obtain the ARN of the cluster you want to delete. Run the following command to list all the clusters in the region: +You can delete the local MSK cluster using the [`DeleteCluster`](https://docs.aws.amazon.com/cli/latest/reference/kafka/delete-cluster.html) API. +To do so, you must first obtain the ARN of the cluster you want to delete. +Run the following command to list all the clusters in the region: {{< command >}} $ awslocal kafka list-clusters --region us-east-1 @@ -234,7 +253,7 @@ awslocal kafka delete-cluster --cluster-arn ClusterArn ## Resource Browser -The LocalStack Web Application provides a Resource Browser for managing MSK clusters. +The LocalStack Web Application provides a Resource Browser for managing MSK clusters. You can access the Resource Browser by opening the LocalStack Web Application in your browser, navigating to the **Resources** section, and then clicking on **Kafka** under the **Analytics** section. MSK Resource Browser diff --git a/content/en/user-guide/aws/mwaa/index.md b/content/en/user-guide/aws/mwaa/index.md index 5a6e052d70..03eba0d305 100644 --- a/content/en/user-guide/aws/mwaa/index.md +++ b/content/en/user-guide/aws/mwaa/index.md @@ -26,7 +26,6 @@ We will demonstrate how to create an Airflow environment and access the Airflow Create a S3 bucket that will be used for Airflow resources. Run the following command to create a bucket using the [`mb`](https://docs.aws.amazon.com/cli/latest/reference/s3/mb.html) command. - {{< command >}} $ awslocal s3 mb s3://my-mwaa-bucket {{< /command >}} @@ -81,8 +80,8 @@ To configure Airflow environments effectively, you can utilize the `AirflowConfi These options are transformed into corresponding environment variables and passed to Airflow. For instance: -- `agent.code`:`007` is transformed into `AIRFLOW__AGENT__CODE:007`. -- `agent.name`:`bond` is transformed into `AIRFLOW__AGENT__NAME:bond`. +- `agent.code`:`007` is transformed into `AIRFLOW__AGENT__CODE:007`. 
+- `agent.name`:`bond` is transformed into `AIRFLOW__AGENT__NAME:bond`. This transformation process ensures that your configuration settings are easily applied within the Airflow environment. @@ -93,8 +92,8 @@ Just upload your DAGs to the designated S3 bucket path, configured by the `DagS3 For example, the command below uploads a sample DAG named `sample_dag.py` to your S3 bucket named `my-mwaa-bucket`: -{{< command >}} -$ awslocal s3 cp sample_dag.py s3://my-mwaa-bucket/dags +{{< command >}} +$ awslocal s3 cp sample_dag.py s3://my-mwaa-bucket/dags {{< /command >}} LocalStack syncs new and changed objects in the S3 bucket to the Airflow container every 30 seconds. diff --git a/content/en/user-guide/aws/neptune/index.md b/content/en/user-guide/aws/neptune/index.md index 0fa8203d0c..3cbc667f72 100644 --- a/content/en/user-guide/aws/neptune/index.md +++ b/content/en/user-guide/aws/neptune/index.md @@ -8,7 +8,7 @@ tags: ["Pro image"] ## Introduction -Neptune is a fully managed, highly available, and scalable graph database service offered by AWS. +Neptune is a fully managed, highly available, and scalable graph database service offered by AWS. It is designed for storing and querying highly connected data for applications that require complex relationship modeling, such as social networks, recommendation engines, and fraud detection. Neptune supports popular graph query languages like Gremlin and SPARQL, making it compatible with a wide range of graph applications and tools. @@ -31,9 +31,10 @@ The supported APIs are available on our [API coverage page](https://docs.localst ## Getting started -This guide is designed for users new to Neptune and assumes basic knowledge of the AWS CLI and our `awslocal` wrapper script. +This guide is designed for users new to Neptune and assumes basic knowledge of the AWS CLI and our `awslocal` wrapper script. -Start your LocalStack container using your preferred method. We will demonstrate the following with AWS CLI & Python: +Start your LocalStack container using your preferred method. +We will demonstrate the following with AWS CLI & Python: - Creating a Neptune cluster. - Starting a connection to the Neptune cluster. @@ -41,7 +42,7 @@ Start your LocalStack container using your preferred method. We will demonstrate ### Create a Neptune cluster -To create a Neptune cluster you can use the [`CreateDBCluster`](https://docs.aws.amazon.com/neptune/latest/userguide/api-clusters.html#CreateDBCluster) API. +To create a Neptune cluster you can use the [`CreateDBCluster`](https://docs.aws.amazon.com/neptune/latest/userguide/api-clusters.html#CreateDBCluster) API. Run the following command to create a Neptune cluster: {{< command >}} @@ -66,7 +67,7 @@ You should see the following output: ### Add an instance to the cluster -To add an instance you can use the [`CreateDBInstance`](https://docs.aws.amazon.com/neptune/latest/userguide/api-instances.html#CreateDBInstance) API. +To add an instance you can use the [`CreateDBInstance`](https://docs.aws.amazon.com/neptune/latest/userguide/api-instances.html#CreateDBInstance) API. Run the following command to create a Neptune instance: {{< command >}} @@ -134,7 +135,8 @@ if __name__ == '__main__': ## Resource Browser -The LocalStack Web Application provides a Resource Browser for managing Neptune databases and clusters. You can access the Resource Browser by opening the LocalStack Web Application in your browser, navigating to the **Resources** section, and then clicking on **Neptune** under the **Database** section. 
+The LocalStack Web Application provides a Resource Browser for managing Neptune databases and clusters. +You can access the Resource Browser by opening the LocalStack Web Application in your browser, navigating to the **Resources** section, and then clicking on **Neptune** under the **Database** section. Neptune Resource Browser
@@ -142,17 +144,19 @@ The LocalStack Web Application provides a Resource Browser for managing Neptune The Resource Browser allows you to perform the following actions: -* **Create Cluster**: Create a new Neptune cluster by clicking on **Create Cluster** under the **Clusters** tab and providing the required parameters. -* **List Clusters**: View a list of all Neptune clusters in your LocalStack environment by clicking on the **Clusters** tab. -* **View Cluster Details**: Click on a cluster name to view detailed information about the cluster, including its status, endpoint, and other configuration details. -* **Graph Browser**: Access the Neptune Graph Browser by clicking on the **Graph Browser** tab in the cluster details. The Graph Browser allows you to interactively query and visualize the graph data stored in your Neptune cluster. -* **Quick Actions**: Perform quick actions on the cluster, such as adding a new Node, modifying an existing one or creating a new Edge between 2 nodes. You can access the **Quick Actions** by clicking in the respective tab from the cluster details page. -* **Create instance**: Create a new Neptune database by clicking on **Create Instance** under the **Instances** tab and providing the required parameters. -* **List Instances**: View a list of all Neptune databases in your LocalStack environment by clicking on the **Instances** tab. -* **View Instance Details**: Click on a database name to view detailed information about the database, including its status, endpoint, and other configuration details. -* **Edit Instance**: Edit the configuration of a Neptune database by clicking on the **Edit Instance** button in the instance details. +- **Create Cluster**: Create a new Neptune cluster by clicking on **Create Cluster** under the **Clusters** tab and providing the required parameters. +- **List Clusters**: View a list of all Neptune clusters in your LocalStack environment by clicking on the **Clusters** tab. +- **View Cluster Details**: Click on a cluster name to view detailed information about the cluster, including its status, endpoint, and other configuration details. +- **Graph Browser**: Access the Neptune Graph Browser by clicking on the **Graph Browser** tab in the cluster details. + The Graph Browser allows you to interactively query and visualize the graph data stored in your Neptune cluster. +- **Quick Actions**: Perform quick actions on the cluster, such as adding a new Node, modifying an existing one or creating a new Edge between 2 nodes. + You can access the **Quick Actions** by clicking in the respective tab from the cluster details page. +- **Create instance**: Create a new Neptune database by clicking on **Create Instance** under the **Instances** tab and providing the required parameters. +- **List Instances**: View a list of all Neptune databases in your LocalStack environment by clicking on the **Instances** tab. +- **View Instance Details**: Click on a database name to view detailed information about the database, including its status, endpoint, and other configuration details. +- **Edit Instance**: Edit the configuration of a Neptune database by clicking on the **Edit Instance** button in the instance details. 
-## Examples
+## Examples

The following code snippets and sample applications provide practical examples of how to use Neptune in LocalStack for various use cases:

@@ -162,17 +166,25 @@ The following code snippets and sample applications provide practical examples o

### Gremlin Transactions

-Gremlin transactions can be enabled by setting the environment `NEPTUNE_ENABLE_TRANSACTION=1`. Be aware that the `engine_version` provided when creating your cluster will be ignored and LocalStack will use `3.7.2` Gremlin Server. This feature is in beta and any feedback is appreciated.
+Gremlin transactions can be enabled by setting the environment variable `NEPTUNE_ENABLE_TRANSACTION=1`.
+Be aware that the `engine_version` provided when creating your cluster will be ignored and LocalStack will use Gremlin Server `3.7.2`.
+This feature is in beta and any feedback is appreciated.

#### Current Limitations

- Fixed id
-  - Creating a Vertex with an id in a transaction, then deleting it. Trying to recreate a vertex with the same id will fail.
+  - Creating a Vertex with an id in a transaction, then deleting it.
+    Trying to recreate a vertex with the same id will fail.
- Serializer considerations
-  - While it is possible to connect to the server with a lower version of Gremlin Language Variants, there are breaking changes to the default `GraphBinarySerializersV1` serializer used by most languages. One possible fix is to use the matching version for your language variant. Otherwise, using the `GraphSONSerializersV3d0` serializer also seems to be working. See example below.
-  - If using Neptune <= `1.2.0.2`, the Gryo message serializer is no longer supported. Only affects users explicitly using that serializer.
+  - While it is possible to connect to the server with a lower version of Gremlin Language Variants, there are breaking changes to the default `GraphBinarySerializersV1` serializer used by most languages.
+    One possible fix is to use the matching version for your language variant.
+    Otherwise, using the `GraphSONSerializersV3d0` serializer also seems to be working.
+    See example below.
+  - If using Neptune <= `1.2.0.2`, the Gryo message serializer is no longer supported.
+    Only affects users explicitly using that serializer.

Example using `gremlinpython==3.6.2`
+
```python
from gremlin_python.driver import serializer
from gremlin_python.driver.driver_remote_connection import DriverRemoteConnection
diff --git a/content/en/user-guide/aws/opensearch/index.md b/content/en/user-guide/aws/opensearch/index.md
index 71cd345584..4b84ede8b0 100644
--- a/content/en/user-guide/aws/opensearch/index.md
+++ b/content/en/user-guide/aws/opensearch/index.md
@@ -7,9 +7,11 @@ description: >

## Introduction

-OpenSearch Service is an open-source search and analytics engine, offering developers and organizations advanced search capabilities, robust data analysis, and insightful visualizations. OpenSearch Service also offers log analytics, real-time application monitoring, and clickstream analysis.
+OpenSearch Service is an open-source search and analytics engine, offering developers and organizations advanced search capabilities, robust data analysis, and insightful visualizations.
+OpenSearch Service also offers log analytics, real-time application monitoring, and clickstream analysis.

-LocalStack allows you to use the OpenSearch Service APIs in your local environment to create, manage, and operate the OpenSearch clusters. The supported APIs are available on our [API coverage page](https://docs.localstack.cloud/references/coverage/coverage_opensearch/), which provides information on the extent of OpenSearch's integration with LocalStack.
+LocalStack allows you to use the OpenSearch Service APIs in your local environment to create, manage, and operate the OpenSearch clusters.
+The supported APIs are available on our [API coverage page](https://docs.localstack.cloud/references/coverage/coverage_opensearch/), which provides information on the extent of OpenSearch's integration with LocalStack.

The following versions of OpenSearch Service are supported by LocalStack:

@@ -22,32 +24,40 @@ The following versions of OpenSearch Service are supported by LocalStack:
- 2.9
- 2.11 (**default**)

-OpenSearch is closely coupled with the [Elasticsearch Service](../elasticsearch). Clusters generated through the OpenSearch Service will be visible within the Elasticsearch Service interface, and vice versa. You can select an Elasticsearch version with the `--engine-version` parameter while creating an OpenSearch Service domain.
+OpenSearch is closely coupled with the [Elasticsearch Service](../elasticsearch).
+Clusters generated through the OpenSearch Service will be visible within the Elasticsearch Service interface, and vice versa.
+You can select an Elasticsearch version with the `--engine-version` parameter while creating an OpenSearch Service domain.

## Getting started

This guide is designed for users new to OpenSearch Service and assumes basic knowledge of the AWS CLI and our [`awslocal`](https://github.com/localstack/awscli-local) wrapper script.

-Start your LocalStack container using your preferred method. We will demonstrate how to create a new OpenSearch Service cluster and interact with it, using the AWS CLI.
+Start your LocalStack container using your preferred method.
+We will demonstrate how to create a new OpenSearch Service cluster and interact with it, using the AWS CLI.

### Creating an OpenSearch cluster

-To create an OpenSearch Service cluster, you can use the [`CreateDomain`](https://docs.aws.amazon.com/opensearch-service/latest/APIReference/API_CreateDomain.html) API. OpenSearch Service domain is synonymous with an OpenSearch cluster. Execute the following command to create a new OpenSearch domain:
+To create an OpenSearch Service cluster, you can use the [`CreateDomain`](https://docs.aws.amazon.com/opensearch-service/latest/APIReference/API_CreateDomain.html) API.
+An OpenSearch Service domain is synonymous with an OpenSearch cluster.
+Execute the following command to create a new OpenSearch domain:

{{< command >}}
$ awslocal opensearch create-domain --domain-name my-domain
{{< / command >}}

-Each time you establish a cluster using a new version of OpenSearch, the corresponding OpenSearch binary must be downloaded, a process that might require some time to complete. In the LocalStack log you will see something like, where you can see the cluster starting up in the background.
+Each time you establish a cluster using a new version of OpenSearch, the corresponding OpenSearch binary must be downloaded, a process that might require some time to complete.
+In the LocalStack logs, you can see the cluster starting up in the background.

-You can open the LocalStack logs, to see that the OpenSearch Service cluster is being created in the background.
You can use the [`DescribeDomain`](https://docs.aws.amazon.com/opensearch-service/latest/APIReference/API_DescribeDomain.html) API to check the status of the cluster: +You can open the LocalStack logs, to see that the OpenSearch Service cluster is being created in the background. +You can use the [`DescribeDomain`](https://docs.aws.amazon.com/opensearch-service/latest/APIReference/API_DescribeDomain.html) API to check the status of the cluster: {{< command >}} $ awslocal opensearch describe-domain \ --domain-name my-domain | jq ".DomainStatus.Processing" {{< / command >}} -The `Processing` attribute will be `false` once the cluster is up and running. Once the cluster is up, you can interact with the cluster. +The `Processing` attribute will be `false` once the cluster is up and running. +Once the cluster is up, you can interact with the cluster. ### Interact with the cluster @@ -90,7 +100,8 @@ The following output will be visible on your terminal: ## Domain Endpoints -There are two configurable strategies that govern how domain endpoints are created. The strategy can be configured via the `OPENSEARCH_ENDPOINT_STRATEGY` environment variable. +There are two configurable strategies that govern how domain endpoints are created. +The strategy can be configured via the `OPENSEARCH_ENDPOINT_STRATEGY` environment variable. | Value | Format | Description | | ------- | ----------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------- | @@ -98,11 +109,14 @@ There are two configurable strategies that govern how domain endpoints are creat | `path` | `localhost:4566///` | An alternative strategy useful if resolving LocalStack's localhost domain poses difficulties. | | `port` | `localhost:` | Directly exposes cluster(s) via ports from [the external service port range]({{< ref "external-ports" >}}). | -Irrespective of the originating service for the clusters, the domain of each cluster consistently aligns with its engine type, be it OpenSearch or Elasticsearch. Consequently, OpenSearch clusters incorporate `opensearch` within their domains (e.g., `my-domain.us-east-1.opensearch.localhost.localstack.cloud:4566`), while Elasticsearch clusters feature `es` in their domains (e.g., `my-domain.us-east-1.es.localhost.localstack.cloud:4566`). +Irrespective of the originating service for the clusters, the domain of each cluster consistently aligns with its engine type, be it OpenSearch or Elasticsearch. +Consequently, OpenSearch clusters incorporate `opensearch` within their domains (e.g., `my-domain.us-east-1.opensearch.localhost.localstack.cloud:4566`), while Elasticsearch clusters feature `es` in their domains (e.g., `my-domain.us-east-1.es.localhost.localstack.cloud:4566`). ## Custom Endpoints -LocalStack allows you to define arbitrary endpoints for your clusters within the domain endpoint options. This functionality can be used to overwrite the behavior of the aforementioned endpoint strategies. Moreover, you can opt for custom domains, though it's important to incorporate the edge port (80/443, or the default 4566). +LocalStack allows you to define arbitrary endpoints for your clusters within the domain endpoint options. +This functionality can be used to overwrite the behavior of the aforementioned endpoint strategies. +Moreover, you can opt for custom domains, though it's important to incorporate the edge port (80/443, or the default 4566). 
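
The custom endpoint is supplied through the domain endpoint options of the [`CreateDomain`](https://docs.aws.amazon.com/opensearch-service/latest/APIReference/API_CreateDomain.html) call.
As a rough sketch, and assuming the `my-custom-endpoint` path used in the example below, the options take a shape similar to the following:

```json
{
  "CustomEndpointEnabled": true,
  "CustomEndpoint": "http://localhost:4566/my-custom-endpoint"
}
```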
Run the following command to create a new OpenSearch domain with a custom endpoint: @@ -119,9 +133,13 @@ $ curl http://localhost:4566/my-custom-endpoint/_cluster/health ## Re-using a single cluster instance -In certain scenarios, creating a distinct cluster instance for each domain might not align with your use-case. For example, if your focus is solely on testing API interactions rather than actual OpenSearch functionality, individual clusters might be excessive. In such situations, the option to set `OPENSEARCH_MULTI_CLUSTER=0` exists, allowing all domains to be funneled into a single cluster instance. +In certain scenarios, creating a distinct cluster instance for each domain might not align with your use-case. +For example, if your focus is solely on testing API interactions rather than actual OpenSearch functionality, individual clusters might be excessive. +In such situations, the option to set `OPENSEARCH_MULTI_CLUSTER=0` exists, allowing all domains to be funneled into a single cluster instance. -However, it's important to be aware that it can introduce unexpected complications. This is particularly true when dealing with data persistence within OpenSearch or when working with clusters of varying versions. As a result, we advise caution when considering this approach and generally recommend against it. +However, it's important to be aware that it can introduce unexpected complications. +This is particularly true when dealing with data persistence within OpenSearch or when working with clusters of varying versions. +As a result, we advise caution when considering this approach and generally recommend against it. ## Storage Layout @@ -144,9 +162,13 @@ $ tree -L 4 ./volume/state ## Advanced Security Options -Both OpenSearch and Elasticsearch services offer **Advanced Security Options**. Presently, OpenSearch domains are equipped with support for an internal user database. However, Elasticsearch domains are not currently covered, whether through the OpenSearch or the Elasticsearch service. IAM support is also not yet available. +Both OpenSearch and Elasticsearch services offer **Advanced Security Options**. +Presently, OpenSearch domains are equipped with support for an internal user database. +However, Elasticsearch domains are not currently covered, whether through the OpenSearch or the Elasticsearch service. +IAM support is also not yet available. -A secure OpenSearch domain can be spawned with this example CLI input. Save it in a file named `opensearch_domain.json`. +A secure OpenSearch domain can be spawned with this example CLI input. +Save it in a file named `opensearch_domain.json`. ```json { @@ -236,7 +258,7 @@ Now you can start another container for OpenSearch Dashboards, which is configur {{< command >}} docker inspect localstack-main | \ - jq -r '.[0].NetworkSettings.Networks | to_entries | .[].value.IPAddress' + jq -r '.[0].NetworkSettings.Networks | to_entries | .[].value.IPAddress' # prints 172.22.0.2 docker run --rm -p 5601:5601 \ @@ -251,11 +273,15 @@ Once the container is running, you can reach OpenSearch Dashboards at `http://lo ## Custom OpenSearch backends -LocalStack employs an asynchronous approach to download OpenSearch the first time you create an OpenSearch cluster. Consequently, you'll receive a prompt response from LocalStack initially, followed by the setup of your local OpenSearch cluster once the download and installation are completed. +LocalStack employs an asynchronous approach to download OpenSearch the first time you create an OpenSearch cluster. 
+Consequently, you'll receive a prompt response from LocalStack initially, followed by the setup of your local OpenSearch cluster once the download and installation are completed. -However, there might be scenarios where this behavior is not desirable. For instance, you may prefer to use an existing OpenSearch cluster that is already up and running. This approach can also prove beneficial when you require a cluster with a customized configuration that isn't supported by LocalStack. +However, there might be scenarios where this behavior is not desirable. +For instance, you may prefer to use an existing OpenSearch cluster that is already up and running. +This approach can also prove beneficial when you require a cluster with a customized configuration that isn't supported by LocalStack. -To tailor the OpenSearch backend according to your needs, you can initiate your own local OpenSearch cluster and then direct LocalStack to utilize it through the `OPENSEARCH_CUSTOM_BACKEND` environment variable. It's important to bear in mind that only a single backend configuration is possible, resulting in behavior akin to the approach of [re-using a single cluster instance](#re-using-a-single-cluster-instance). +To tailor the OpenSearch backend according to your needs, you can initiate your own local OpenSearch cluster and then direct LocalStack to utilize it through the `OPENSEARCH_CUSTOM_BACKEND` environment variable. +It's important to bear in mind that only a single backend configuration is possible, resulting in behavior akin to the approach of [re-using a single cluster instance](#re-using-a-single-cluster-instance). Here is a sample `docker-compose.yaml` file that contains a single-node OpenSearch cluster and a basic LocalStack setup. @@ -314,7 +340,8 @@ You can now create an OpenSearch cluster using the `awslocal` CLI: $ awslocal opensearch create-domain --domain-name my-domain {{< /command >}} -If the `Processing` status shows as `true`, the cluster isn't fully operational yet. You can use the `describe-domain` command to retrieve the current status: +If the `Processing` status shows as `true`, the cluster isn't fully operational yet. +You can use the `describe-domain` command to retrieve the current status: {{< command >}} $ awslocal opensearch describe-domain --domain-name my-domain @@ -336,7 +363,7 @@ $ curl -X PUT my-domain.us-east-1.opensearch.localhost.localstack.cloud:4566/my- ## Resource Browser -The LocalStack Web Application provides a Resource Browser for managing OpenSearch domains. +The LocalStack Web Application provides a Resource Browser for managing OpenSearch domains. You can access the Resource Browser by opening the LocalStack Web Application in your browser, navigating to the **Resources** section, and then clicking on **OpenSearch Service** under the **Analytics** section. OpenSearch Resource Browser @@ -352,12 +379,16 @@ The Resource Browser allows you to perform the following actions: ## Current Limitations -Internally, LocalStack makes use of the [OpenSearch Python client 2.x](https://github.com/opensearch-project/opensearch-py). The functionalities marked as deprecated in OpenSearch 1.x and subsequently removed in OpenSearch 2.x may not operate reliably when interacting with OpenSearch 1.x clusters through LocalStack. You can refer to the [compatibility documentation](https://github.com/opensearch-project/opensearch-py/blob/main/COMPATIBILITY.md) provided by the [OpenSearch Python client repository](https://github.com/opensearch-project/opensearch-py). 
+Internally, LocalStack makes use of the [OpenSearch Python client 2.x](https://github.com/opensearch-project/opensearch-py). +The functionalities marked as deprecated in OpenSearch 1.x and subsequently removed in OpenSearch 2.x may not operate reliably when interacting with OpenSearch 1.x clusters through LocalStack. +You can refer to the [compatibility documentation](https://github.com/opensearch-project/opensearch-py/blob/main/COMPATIBILITY.md) provided by the [OpenSearch Python client repository](https://github.com/opensearch-project/opensearch-py). + +AWS typically populates the `Endpoint` attribute of the cluster status only after the cluster is fully operational. +In contrast, LocalStack provides the endpoint information immediately but retains `Processing = "true"` until the cluster initialization is complete. -AWS typically populates the `Endpoint` attribute of the cluster status only after the cluster is fully operational. In contrast, LocalStack provides the endpoint information immediately but retains `Processing = "true"` until the cluster initialization is complete. - The `CustomEndpointOptions` in LocalStack offers the flexibility to utilize arbitrary endpoint URLs, a feature that diverges from the constraints imposed by AWS. ## Troubleshooting -If you encounter difficulties resolving subdomains while employing the `OPENSEARCH_ENDPOINT_STRATEGY=domain` (the default setting), it's advisable to investigate whether your DNS configuration might be obstructing rebind queries. For further insights on addressing this issue, refer to the section on [DNS rebind protection]({{< ref "dns-server#dns-rebind-protection" >}}). +If you encounter difficulties resolving subdomains while employing the `OPENSEARCH_ENDPOINT_STRATEGY=domain` (the default setting), it's advisable to investigate whether your DNS configuration might be obstructing rebind queries. +For further insights on addressing this issue, refer to the section on [DNS rebind protection]({{< ref "dns-server#dns-rebind-protection" >}}). diff --git a/content/en/user-guide/aws/organizations/index.md b/content/en/user-guide/aws/organizations/index.md index 98a2d6cf98..b5d197094f 100644 --- a/content/en/user-guide/aws/organizations/index.md +++ b/content/en/user-guide/aws/organizations/index.md @@ -8,13 +8,17 @@ aliases: - /user-guide/aws/organization/ --- -Amazon Web Services Organizations is an account management service that allows you to consolidate multiple different AWS accounts into an organization. It allows you to manage different accounts in a single organization and consolidate billing. With Organizations, you can also attach different policies to your organizational units (OUs) or individual accounts in your organization. +Amazon Web Services Organizations is an account management service that allows you to consolidate multiple different AWS accounts into an organization. +It allows you to manage different accounts in a single organization and consolidate billing. +With Organizations, you can also attach different policies to your organizational units (OUs) or individual accounts in your organization. Organizations is available over LocalStack Pro and the supported APIs are available over our [configuration page]({{< ref "configuration" >}}). ## Getting started -In this getting started guide, you'll learn how to create your local AWS Organization and configure it with member accounts. 
This guide is intended for users who wish to get more acquainted with Organizations, and assumes you have basic knowledge of the AWS CLI (and our `awslocal` wrapper script). To get started, start your LocalStack instance using your preferred method:
+In this getting started guide, you'll learn how to create your local AWS Organization and configure it with member accounts.
+This guide is intended for users who wish to get more acquainted with Organizations, and assumes you have basic knowledge of the AWS CLI (and our `awslocal` wrapper script).
+To get started, start your LocalStack instance using your preferred method:

1. Create a new local AWS Organization with the feature set flag set to `ALL`:
   {{< command >}}
@@ -32,7 +36,8 @@ In this getting started guide, you'll learn how to create your local AWS Organiz
      --email example@example.com \
      --account-name "Test Account"
   {{< /command >}}
-   Since LocalStack essentially mocks AWS, the account creation is instantaneous. You can now run the `list-accounts` command to see the details of your organization:
+   Since LocalStack essentially mocks AWS, the account creation is instantaneous.
+   You can now run the `list-accounts` command to see the details of your organization:
   {{< command >}}
   $ awslocal organizations list-accounts
   {{< /command >}}
@@ -47,7 +52,8 @@ In this getting started guide, you'll learn how to create your local AWS Organiz
   $ awslocal organizations close-account --account-id 000000000000
   {{< /command >}}

-6. You can use organizational units (OUs) to group accounts together to administer as a single unit. To create an OU, you can run:
+6. You can use organizational units (OUs) to group accounts together to administer as a single unit.
+   To create an OU, you can run:
   {{< command >}}
   $ awslocal organizations list-roots
   $ awslocal organizations list-children \
@@ -58,7 +64,8 @@ In this getting started guide, you'll learn how to create your local AWS Organiz
      --name New-Child-OU
   {{< /command >}}

-7. Before you can create and attach a policy to your organization, you must enable a policy type. To enable a policy type, you can run:
+7. Before you can create and attach a policy to your organization, you must enable a policy type.
+   To enable a policy type, you can run:
   {{< command >}}
   $ awslocal organizations enable-policy-type \
    --root-id \
diff --git a/content/en/user-guide/aws/pca/index.md b/content/en/user-guide/aws/pca/index.md
index 4c3a2f841c..c7ecacaf39 100644
--- a/content/en/user-guide/aws/pca/index.md
+++ b/content/en/user-guide/aws/pca/index.md
@@ -7,11 +7,11 @@ tags: ["Pro image"]

## Introduction

-AWS Private Certificate Authority (ACM PCA) is a managed private Certificate Authority (CA) service that manages the lifecycle of your private certificates.
+AWS Private Certificate Authority (ACM PCA) is a managed private Certificate Authority (CA) service that manages the lifecycle of your private certificates.
ACM PCA extends ACM's certificate management capabilities to private certificates, enabling you to manage public and private certificates centrally.

LocalStack allows you to use the ACM PCA APIs to create, list, and delete private certificates.
-You can creating, describing, tagging, and listing tags for a CA using ACM PCA.
+You can create, describe, and tag a CA, as well as list its tags, using ACM PCA.
The supported APIs are available on our [API coverage page](https://docs.localstack.cloud/references/coverage/coverage_acm-pca/), which provides information on the extent of ACM PCA's integration with LocalStack.
## Getting started @@ -20,7 +20,7 @@ This guide is designed for users who are new to ACM PCA and assumes basic knowle ### Create a Certificate Authority (CA) -Start by creating a new Certificate Authority with ACM PCA using the [`CreateCertificateAuthority`](https://docs.aws.amazon.com/privateca/latest/APIReference/API_CreateCertificateAuthority.html) API. +Start by creating a new Certificate Authority with ACM PCA using the [`CreateCertificateAuthority`](https://docs.aws.amazon.com/privateca/latest/APIReference/API_CreateCertificateAuthority.html) API. This command sets up a new CA with specified configurations for key algorithm, signing algorithm, and subject information. {{< command >}} @@ -55,7 +55,7 @@ Note the `CertificateAuthorityArn` from the output as it will be needed for subs ### Describe the Certificate Authority -To retrieve the detailed information about the created Certificate Authority, use the [`DescribeCertificateAuthority`](https://docs.aws.amazon.com/privateca/latest/APIReference/API_DescribeCertificateAuthority.html) API. +To retrieve the detailed information about the created Certificate Authority, use the [`DescribeCertificateAuthority`](https://docs.aws.amazon.com/privateca/latest/APIReference/API_DescribeCertificateAuthority.html) API. This command returns the detailed information about the CA, including the CA's ARN, status, and configuration. {{< command >}} @@ -95,7 +95,8 @@ $ awslocal acm-pca describe-certificate-authority \ ### Tag the Certificate Authority -Tagging resources in AWS helps in managing and identifying them. Use the [`TagCertificateAuthority`](https://docs.aws.amazon.com/privateca/latest/APIReference/API_TagCertificateAuthority.html) API to tag the created Certificate Authority. +Tagging resources in AWS helps in managing and identifying them. +Use the [`TagCertificateAuthority`](https://docs.aws.amazon.com/privateca/latest/APIReference/API_TagCertificateAuthority.html) API to tag the created Certificate Authority. This command adds the specified tags to the specified CA. {{< command >}} @@ -104,7 +105,7 @@ $ awslocal acm-pca tag-certificate-authority \ --tags Key=Admin,Value=Alice {{< /command >}} -After tagging your Certificate Authority, you may want to view these tags. +After tagging your Certificate Authority, you may want to view these tags. You can use the [`ListTags`](https://docs.aws.amazon.com/privateca/latest/APIReference/API_ListTags.html) API to list all the tags associated with the specified CA. {{< command >}} diff --git a/content/en/user-guide/aws/pinpoint/index.md b/content/en/user-guide/aws/pinpoint/index.md index 126630fe90..cde19d4343 100644 --- a/content/en/user-guide/aws/pinpoint/index.md +++ b/content/en/user-guide/aws/pinpoint/index.md @@ -9,19 +9,23 @@ persistence: supported ## Introduction -Pinpoint is a customer engagement service to facilitate communication across multiple channels, including email, SMS, and push notifications. Pinpoint allows developers to create and manage customer segments based on various attributes, such as user behavior and demographics, while integrating with other AWS services to send targeted messages to customers. +Pinpoint is a customer engagement service to facilitate communication across multiple channels, including email, SMS, and push notifications. +Pinpoint allows developers to create and manage customer segments based on various attributes, such as user behavior and demographics, while integrating with other AWS services to send targeted messages to customers. 
-LocalStack allows you to mock the Pinpoint APIs in your local environment. The supported APIs are available on our [API coverage page](https://docs.localstack.cloud/references/coverage/coverage_pinpoint/), which provides information on the extent of Pinpoint's integration with LocalStack. +LocalStack allows you to mock the Pinpoint APIs in your local environment. +The supported APIs are available on our [API coverage page](https://docs.localstack.cloud/references/coverage/coverage_pinpoint/), which provides information on the extent of Pinpoint's integration with LocalStack. ## Getting started This guide is designed for users new to Pinpoint and assumes basic knowledge of the AWS CLI and our [`awslocal`](https://github.com/localstack/awscli-local) wrapper script. -Start your LocalStack container using your preferred method. We will demonstrate how to create a Pinpoint application, retrieve all applications, and list tags for the resource. +Start your LocalStack container using your preferred method. +We will demonstrate how to create a Pinpoint application, retrieve all applications, and list tags for the resource. ### Create an application -Create a Pinpoint application using the [`CreateApp`](https://docs.aws.amazon.com/pinpoint/latest/apireference/apps-application-id.html) API. Execute the following command: +Create a Pinpoint application using the [`CreateApp`](https://docs.aws.amazon.com/pinpoint/latest/apireference/apps-application-id.html) API. +Execute the following command: {{< command >}} $ awslocal pinpoint create-app \ @@ -43,7 +47,8 @@ The following output would be retrieved: ### List applications -You can list all applications using the [`GetApps`](https://docs.aws.amazon.com/pinpoint/latest/apireference/apps.html) API. Execute the following command: +You can list all applications using the [`GetApps`](https://docs.aws.amazon.com/pinpoint/latest/apireference/apps.html) API. +Execute the following command: {{< command >}} $ awslocal pinpoint get-apps @@ -68,14 +73,16 @@ The following output would be retrieved: ### List tags for the application -You can list all tags for the application using the [`GetApp`](https://docs.aws.amazon.com/pinpoint/latest/apireference/apps-application-id.html) API. Execute the following command: +You can list all tags for the application using the [`GetApp`](https://docs.aws.amazon.com/pinpoint/latest/apireference/apps-application-id.html) API. +Execute the following command: {{< command >}} $ awslocal pinpoint list-tags-for-resource \ --resource-arn arn:aws:mobiletargeting:us-east-1:000000000000:apps/4487a55ac6fb4a2699a1b90727c978e7 {{< /command >}} -Replace the `resource-arn` with the ARN of the application you created earlier. The following output would be retrieved: +Replace the `resource-arn` with the ARN of the application you created earlier. +The following output would be retrieved: ```bash { diff --git a/content/en/user-guide/aws/pipes/index.md b/content/en/user-guide/aws/pipes/index.md index 2c4152baca..70fa43d01e 100644 --- a/content/en/user-guide/aws/pipes/index.md +++ b/content/en/user-guide/aws/pipes/index.md @@ -7,11 +7,15 @@ tags: ["Pro image"] ## Introduction -EventBridge Pipes allows users to create point-to-point integrations between event producers and consumers with transform, filter and enrichment steps. Pipes are particularly useful for scenarios involving real-time data processing, application integration, and automated workflows, while simplifying the process of routing events between AWS services. 
Pipes offer a point-to-point connection from one source to one target (one-to-one). In contrast, EventBridge Event Bus offers a one-to-many integration where an event router delivers one event to zero or more destinations. +EventBridge Pipes allows users to create point-to-point integrations between event producers and consumers with transform, filter and enrichment steps. +Pipes are particularly useful for scenarios involving real-time data processing, application integration, and automated workflows, while simplifying the process of routing events between AWS services. +Pipes offer a point-to-point connection from one source to one target (one-to-one). +In contrast, EventBridge Event Bus offers a one-to-many integration where an event router delivers one event to zero or more destinations. -LocalStack allows you to use the Pipes APIs in your local environment to create Pipes with SQS queues and Kinesis streams as source and target. You can also filter events using EventBridge event patterns and enrich events using Lambda. +LocalStack allows you to use the Pipes APIs in your local environment to create Pipes with SQS queues and Kinesis streams as source and target. +You can also filter events using EventBridge event patterns and enrich events using Lambda. -The supported APIs are available on our [API coverage page]({{< ref "coverage_pipes" >}}), which provides information on the extent of Pipe's integration with LocalStack. +The supported APIs are available on our [API coverage page]({{< ref "coverage_pipes" >}}), which provides information on the extent of Pipe's integration with LocalStack. {{< callout >}} The implementation of EventBridge Pipes is currently in **preview** stage and under active development. @@ -27,11 +31,13 @@ You can [configure]({{< ref "configuration" >}}) `EVENT_RULE_ENGINE=java` (previ This guide is designed for users new to EventBridge Pipes and assumes basic knowledge of the AWS CLI and our [`awslocal`](https://github.com/localstack/awscli-local) wrapper script. -Start your LocalStack container using your preferred method. We will demonstrate how to create a Pipe with SQS queues as source and target, and send events to the source queue which will be routed to the target queue. +Start your LocalStack container using your preferred method. +We will demonstrate how to create a Pipe with SQS queues as source and target, and send events to the source queue which will be routed to the target queue. ### Create an SQS queue -Create two SQS queues that will be used as source and target for the Pipe. Run the following command to create a queue using the [`CreateQueue`](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_CreateQueue.html) API: +Create two SQS queues that will be used as source and target for the Pipe. +Run the following command to create a queue using the [`CreateQueue`](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_CreateQueue.html) API: {{< command >}} $ awslocal sqs create-queue --queue-name source-queue @@ -47,7 +53,8 @@ $ TARGET_QUEUE_ARN=$(awslocal sqs get-queue-attributes --queue-url http://sqs.us ### Create a Pipe -You can now create a Pipe, using the [`CreatePipe`](https://docs.aws.amazon.com/eventbridge/latest/APIReference/API_CreatePipe.html) API. Run the following command, by specifying the source and target queue ARNs we created earlier: +You can now create a Pipe, using the [`CreatePipe`](https://docs.aws.amazon.com/eventbridge/latest/APIReference/API_CreatePipe.html) API. 
+Run the following command, specifying the source and target queue ARNs we created earlier:

{{< command >}}
$ awslocal pipes create-pipe --name sample-pipe \
@@ -104,7 +111,8 @@ The following output would be retrieved:

### Send events to the source queue

-You can now send events to the source queue, which will be routed to the target queue. Run the following command to send an event to the source queue:
+You can now send events to the source queue, which will be routed to the target queue.
+Run the following command to send an event to the source queue:

{{< command >}}
$ awslocal sqs send-message \
@@ -172,7 +180,7 @@ or Timestream for LiveAnalytics table.

## Supported log destinations

-LocalStack supports the following [log destinations](https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-pipes-logs.html) for detailed Pipes logging:
+LocalStack supports the following [log destinations](https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-pipes-logs.html) for detailed Pipes logging:

* CloudWatch Logs

diff --git a/content/en/user-guide/aws/qldb/index.md b/content/en/user-guide/aws/qldb/index.md
index f105322e92..18fd75ccf8 100644
--- a/content/en/user-guide/aws/qldb/index.md
+++ b/content/en/user-guide/aws/qldb/index.md
@@ -8,15 +8,16 @@ description: Get started with Quantum Ledger Database (QLDB) on LocalStack

## Introduction

Amazon Quantum Ledger Database is a fully managed ledger database service offered by Amazon Web
-Services. It is designed to provide transparent, immutable, and cryptographically verifiable
+Services.
+It is designed to provide transparent, immutable, and cryptographically verifiable
transaction
-log functionality to applications. QLDB is particularly useful for applications that need a secure
+log functionality to applications.
+QLDB is particularly useful for applications that need a secure
and scalable way to maintain a complete and verifiable history of data changes over time.

-
LocalStack allows you to use the QLDB APIs in your local environment to create and manage ledgers.
-The supported APIs are available on the [API coverage page]({{< ref "/references/coverage/coverage_qldb/index.md" >}} "QLDB service coverage page"), which provides information on the extent of QLDB's integration with LocalStack.
+The supported APIs are available on the [API coverage page]({{< ref "/references/coverage/coverage_qldb/index.md" >}} "QLDB service coverage page"), which provides information on the extent of QLDB's integration with LocalStack.

## Getting started

@@ -32,7 +33,8 @@ QLDB supports PartiQL, a SQL-compatible query language, which allows you to quer
data stored in QLDB.
You can write PartiQL statements to perform complex queries, aggregations, and
transformations on your data.

-Amazon QLDB provides a command line shell for interaction with the transactional data API. With the
+Amazon QLDB provides a command line shell for interaction with the transactional data API.
+With the
QLDB shell, you can run PartiQL statements on ledger data.

@@ -73,7 +75,8 @@ the ledger.

**Standard** (Recommended)

- A permissions mode that enables access control with finer granularity for ledgers,
-tables, and PartiQL commands. It is recommended using this permissions mode to maximize the security
+tables, and PartiQL commands.
+It is recommended to use this permissions mode to maximize the security
of your ledger data.

By default, this mode denies all requests to run any PartiQL commands on any tables in this ledger.
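
For illustration, a ledger using this permissions mode could be created locally as follows (a minimal sketch; the ledger name here is arbitrary and may differ from the one used elsewhere in this guide):

{{< command >}}
$ awslocal qldb create-ledger \
    --name vehicle-registration \
    --permissions-mode STANDARD
{{< /command >}}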
@@ -93,8 +96,8 @@ The user can continue from here to create tables, populate and interrogate them. ### Creating tables and sample data -PartiQL is a query language designed for processing structured data, allowing you to perform -various data manipulation tasks using familiar SQL-like syntax. +PartiQL is a query language designed for processing structured data, allowing you to perform +various data manipulation tasks using familiar SQL-like syntax. {{< command >}} qldb> CREATE TABLE VehicleRegistration @@ -120,7 +123,8 @@ qldb> CREATE TABLE VehicleRegistration 1 document in bag (read-ios: 0, server-time: 0ms, total-time: 31ms) ``` -The `VehicleRegistration` table was created. Now it's time to add some items: +The `VehicleRegistration` table was created. +Now it's time to add some items: {{< command >}} qldb> INSERT INTO VehicleRegistration VALUE @@ -188,12 +192,14 @@ person ID. qldb> UPDATE VehicleRegistration AS r SET r.Owners.PrimaryOwner.PersonId = '112233445566NO' WHERE r.VIN = 'KM8SRDHF6EU074761' {{< / command >}} The command will return the updated document ID. + ```bash { documentId: "3TYR9BamzyqHWBjYOfHegE" } 1 document in bag (read-ios: 0, server-time: 0ms, total-time: 62ms) ``` + The next step is to check on the updates made to the `PersonId` field of the `PrimaryOwner`: {{< command >}} qldb> SELECT r.Owners FROM VehicleRegistration AS r WHERE r.VIN = 'KM8SRDHF6EU074761' @@ -228,6 +234,7 @@ First the unique `id` of the document must be found. {{< command >}} qldb> SELECT r_id FROM VehicleRegistration AS r BY r_id WHERE r.VIN = 'KM8SRDHF6EU074761' {{< / command >}} + ```bash { r_id: "3TYR9BamzyqHWBjYOfHegE" @@ -282,7 +289,8 @@ qldb> SELECT h.data.VIN, h.data.City, h.data.Owners FROM history(VehicleRegistra ### Cleaning up resources -Unused ledgers can be deleted. You'll notice that directly running the following command will lead +Unused ledgers can be deleted. +You'll notice that directly running the following command will lead to an error message. {{< command >}} @@ -314,7 +322,8 @@ Now the `delete-ledger` command can be repeated without errors. ## Resource Browser -The LocalStack Web Application provides a Resource Browser for managing QLDB ledgers. You can access the Resource Browser by opening the LocalStack Web Application in your browser, navigating to the **Resources** section, and then clicking on **QLDB** under the **Database** section. +The LocalStack Web Application provides a Resource Browser for managing QLDB ledgers. +You can access the Resource Browser by opening the LocalStack Web Application in your browser, navigating to the **Resources** section, and then clicking on **QLDB** under the **Database** section. QLDB Resource Browser
@@ -331,7 +340,8 @@ The Resource Browser allows you to perform the following actions: Interacting with Amazon QLDB (Quantum Ledger Database) is typically done using language-specific software -development kits (SDKs) provided by AWS. These SDKs make it easier for developers to interact with +development kits (SDKs) provided by AWS. +These SDKs make it easier for developers to interact with QLDB and perform operations such as managing ledgers, executing PartiQL queries, and processing the results. When interacting with QLDB, it's common to use a combination of SDKs and PartiQL queries to achieve diff --git a/content/en/user-guide/aws/ram/index.md b/content/en/user-guide/aws/ram/index.md index e5d1594ae5..91e8d24469 100644 --- a/content/en/user-guide/aws/ram/index.md +++ b/content/en/user-guide/aws/ram/index.md @@ -37,5 +37,5 @@ $ awslocal ram create-resource-share \ ## Current Limitations RAM on LocalStack currently functions as a CRUD interface only. -Resource shares do not lead to IAM policies being created or attached to resources. +Resource shares do not lead to IAM policies being created or attached to resources. This means that the specified principals do not end up being granted access to the specified resources. diff --git a/content/en/user-guide/aws/rds/index.md b/content/en/user-guide/aws/rds/index.md index 727c901eb6..0e4ee51b0f 100644 --- a/content/en/user-guide/aws/rds/index.md +++ b/content/en/user-guide/aws/rds/index.md @@ -9,26 +9,30 @@ persistence: supported with limitations ## Introduction -Relational Database Service (RDS) is a managed database service provided by Amazon Web Services (AWS) that allows users to setup, operate, and scale relational databases in the cloud. RDS allows you to deploy and manage various relational database engines like MySQL, PostgreSQL, MariaDB, and Microsoft SQL Server. RDS handles routine database tasks such as provisioning, patching, backup, recovery, and scaling. +Relational Database Service (RDS) is a managed database service provided by Amazon Web Services (AWS) that allows users to setup, operate, and scale relational databases in the cloud. +RDS allows you to deploy and manage various relational database engines like MySQL, PostgreSQL, MariaDB, and Microsoft SQL Server. +RDS handles routine database tasks such as provisioning, patching, backup, recovery, and scaling. -LocalStack allows you to use the RDS APIs in your local environment to create and manage RDS clusters and instances for testing & integration purposes. The supported APIs are available on our [API coverage page](https://docs.localstack.cloud/references/coverage/coverage_rds/), which provides information on the extent of RDS's integration with LocalStack. +LocalStack allows you to use the RDS APIs in your local environment to create and manage RDS clusters and instances for testing & integration purposes. +The supported APIs are available on our [API coverage page](https://docs.localstack.cloud/references/coverage/coverage_rds/), which provides information on the extent of RDS's integration with LocalStack. ## Getting started This guide is designed for users new to RDS and assumes basic knowledge of the AWS CLI and our [`awslocal`](https://github.com/localstack/awscli-local) wrapper script. -Start your LocalStack container using your preferred method. We will demonstrate the following with the AWS CLI: +Start your LocalStack container using your preferred method. +We will demonstrate the following with the AWS CLI: 1. Creating an RDS cluster. 2. 
Generating a `SecretsManager` secret containing the database password. 3. Executing a basic `SELECT 123 query` through the RDS Data API. -LocalStack's RDS implementation also supports the [RDS Data API](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/data-api.html), which allows executing data queries against RDS clusters over a JSON/REST interface. +LocalStack's RDS implementation also supports the [RDS Data API](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/data-api.html), which allows executing data queries against RDS clusters over a JSON/REST interface. ### Create an RDS cluster -To create an RDS cluster, you can use the [`CreateDBCluster`](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_CreateDBCluster.html) API. -The following command creates a new cluster with the name `db1` and the engine `aurora-postgresql`. +To create an RDS cluster, you can use the [`CreateDBCluster`](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_CreateDBCluster.html) API. +The following command creates a new cluster with the name `db1` and the engine `aurora-postgresql`. Instances for the cluster must be added manually. {{< command >}} @@ -66,12 +70,14 @@ $ awslocal rds create-db-instance \ ### Create a SecretsManager secret -To create a `SecretsManager` secret, you can use the [`CreateSecret`](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_CreateSecret.html) API. Before creating the secret, you need to create a JSON file containing the credentials for the database. The following command creates a file called `mycreds.json` with the credentials for the database. +To create a `SecretsManager` secret, you can use the [`CreateSecret`](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_CreateSecret.html) API. +Before creating the secret, you need to create a JSON file containing the credentials for the database. +The following command creates a file called `mycreds.json` with the credentials for the database. {{< command >}} $ cat << 'EOF' > mycreds.json { - "engine": "aurora-postgresql", + "engine": "aurora-postgresql", "username": "myuser", "password": "mypassword", "host": "localhost", @@ -101,11 +107,12 @@ You should see the following output: ### Execute a query -To execute a query, you can use the [`ExecuteStatement`](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_ExecuteStatement.html) API. +To execute a query, you can use the [`ExecuteStatement`](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_ExecuteStatement.html) API. Make sure to replace the `secret-arn` with the ARN from the secret you just created in the previous step, and check that the `resource-arn` matches the `cluster-arn` that you have created before. -The following command executes a query against the database. The query returns the value `123`. +The following command executes a query against the database. +The query returns the value `123`. {{< command >}} $ awslocal rds-data execute-statement \ @@ -148,7 +155,8 @@ You should see the following output: } ``` -Alternative clients, such as `psql`, can also be employed to interact with the database. You can retrieve the hostname and port of your created instance either from the preceding output or by using the [`DescribeDbInstances`](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_DescribeDBInstances.html) API. +Alternative clients, such as `psql`, can also be employed to interact with the database. 
+You can retrieve the hostname and port of your created instance either from the preceding output or by using the [`DescribeDbInstances`](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_DescribeDBInstances.html) API. {{< command >}} $ psql -d test -U test -p 4513 -h localhost -W @@ -156,67 +164,89 @@ $ psql -d test -U test -p 4513 -h localhost -W ## Supported DB engines -Presently, you can spin up PostgreSQL, MariaDB, MySQL, and MSSQL (SQL Server) databases directly on your local machine, using LocalStack's RDS implementation. However, certain configurations of RDS clusters and instances currently offer only CRUD functionality. For instance, the `storage-encrypted` flag is returned as configured, but active support for actual storage encryption is not yet available. +Presently, you can spin up PostgreSQL, MariaDB, MySQL, and MSSQL (SQL Server) databases directly on your local machine, using LocalStack's RDS implementation. +However, certain configurations of RDS clusters and instances currently offer only CRUD functionality. +For instance, the `storage-encrypted` flag is returned as configured, but active support for actual storage encryption is not yet available. ### PostgreSQL Engine -When you establish an RDS DB cluster or instance using the `postgres`/`aurora-postgresql` DB engine along with a specified `EngineVersion`, LocalStack will dynamically install and configure the corresponding PostgreSQL version as required. Presently, you have the option to choose major versions ranging from 10 to 15. If you select a major version beyond this range, the system will automatically default to version 11. +When you establish an RDS DB cluster or instance using the `postgres`/`aurora-postgresql` DB engine along with a specified `EngineVersion`, LocalStack will dynamically install and configure the corresponding PostgreSQL version as required. +Presently, you have the option to choose major versions ranging from 10 to 15. +If you select a major version beyond this range, the system will automatically default to version 11. -It's important to note that the selection of minor versions is not available. The latest major version will be installed within the Docker environment. If you wish to prevent the installation of customized versions, adjusting the `RDS_PG_CUSTOM_VERSIONS` environment variable to `0` will enforce the use of the default PostgreSQL version 11. +It's important to note that the selection of minor versions is not available. +The latest major version will be installed within the Docker environment. +If you wish to prevent the installation of customized versions, adjusting the `RDS_PG_CUSTOM_VERSIONS` environment variable to `0` will enforce the use of the default PostgreSQL version 11. {{< callout >}} -While the [`DescribeDbCluster`](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_DescribeDBClusters.html) and [`DescribeDbInstances`](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_DescribeDBInstances.html) APIs will still reflect the initially defined `engine-version`, the actual installed PostgreSQL engine might differ. This can have implications, particularly when employing a Terraform configuration, where unexpected changes should be avoided. 
+While the [`DescribeDbCluster`](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_DescribeDBClusters.html) and [`DescribeDbInstances`](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_DescribeDBInstances.html) APIs will still reflect the initially defined `engine-version`, the actual installed PostgreSQL engine might differ. +This can have implications, particularly when employing a Terraform configuration, where unexpected changes should be avoided. {{< /callout >}} Instances and clusters with the PostgreSQL engine have the capability to both create and restore snapshots. ### MariaDB Engine -MariaDB will be set up as an operating system package within LocalStack. However, currently, the option to choose a particular version is not available. As of now, snapshots are not supported for MariaDB. +MariaDB will be set up as an operating system package within LocalStack. +However, currently, the option to choose a particular version is not available. +As of now, snapshots are not supported for MariaDB. ### MySQL Engine -A MySQL community server will be launched in a new Docker container upon requesting the MySQL engine. +A MySQL community server will be launched in a new Docker container upon requesting the MySQL engine. -The `engine-version` will serve as the tag for the Docker image, allowing you to freely select the desired MySQL version from those available on the [official MySQL Docker Hub](https://hub.docker.com/_/mysql). If you have a specific image in mind, you can also use the environment variable `MYSQL_IMAGE=`. +The `engine-version` will serve as the tag for the Docker image, allowing you to freely select the desired MySQL version from those available on the [official MySQL Docker Hub](https://hub.docker.com/_/mysql). +If you have a specific image in mind, you can also use the environment variable `MYSQL_IMAGE=`. {{< callout >}} -The `arm64` MySQL images are limited to newer versions. For more information about availability, check the [MySQL Docker Hub repository](https://hub.docker.com/_/mysql). +The `arm64` MySQL images are limited to newer versions. +For more information about availability, check the [MySQL Docker Hub repository](https://hub.docker.com/_/mysql). {{< /callout >}} -It's essential to understand that the `MasterUserPassword` you define for the database cluster/instance will be used as the `MYSQL_ROOT_PASSWORD` environment variable for the `root` user within the MySQL container. The user specified in `MasterUserName` will use the same password and will have complete access to the database. As of now, snapshots are not supported for MySQL. +It's essential to understand that the `MasterUserPassword` you define for the database cluster/instance will be used as the `MYSQL_ROOT_PASSWORD` environment variable for the `root` user within the MySQL container. +The user specified in `MasterUserName` will use the same password and will have complete access to the database. +As of now, snapshots are not supported for MySQL. ### Microsoft SQL Server Engine -To utilize MSSQL databases, it's necessary to expressly agree to the terms of the [Microsoft SQL Server End-User Licensing Agreement (EULA)](https://hub.docker.com/_/microsoft-mssql-server) by configuring `MSSQL_ACCEPT_EULA=Y` within the LocalStack container environment. The `arm64` architecture is not currently officially supported for MSSQL. 
+To utilize MSSQL databases, it's necessary to expressly agree to the terms of the [Microsoft SQL Server End-User Licensing Agreement (EULA)](https://hub.docker.com/_/microsoft-mssql-server) by configuring `MSSQL_ACCEPT_EULA=Y` within the LocalStack container environment. +The `arm64` architecture is not currently officially supported for MSSQL. -For the MSSQL engine, the database server is initiated in a fresh Docker container using the `latest` image. As of now, snapshots are not supported for MSSQL. +For the MSSQL engine, the database server is initiated in a fresh Docker container using the `latest` image. +As of now, snapshots are not supported for MSSQL. ## Default Usernames and Passwords The following details concern default usernames, passwords, and database names for local RDS clusters created by LocalStack: -- The default values for `master-username` and `db-name` are both **test**. For the `master-user-password`, the default is **test**, except for MSSQL databases, which employ **Test123!** as the default master password. -- When setting up a new RDS instance, you have the flexibility to utilize any `master-username`, with the exception of **postgres**. The system will automatically generate the user. -- It's important to remember that the username **postgres** has special significance, preventing the creation of a new RDS instance under this particular name. -- For clarity, please avoid using the `db-name` **postgres**, as it is already allocated for use by LocalStack. +- The default values for `master-username` and `db-name` are both **test**. + For the `master-user-password`, the default is **test**, except for MSSQL databases, which employ **Test123!** as the default master password. +- When setting up a new RDS instance, you have the flexibility to utilize any `master-username`, with the exception of **postgres**. + The system will automatically generate the user. +- It's important to remember that the username **postgres** has special significance, preventing the creation of a new RDS instance under this particular name. +- For clarity, please avoid using the `db-name` **postgres**, as it is already allocated for use by LocalStack. ## IAM Authentication Support -IAM authentication tokens can be employed to establish connections with RDS. As of now, this functionality is supported for PostgreSQL within LocalStack. However, IAM authentication is not yet validated at this stage. Consequently, any database user assigned the `rds_iam` role will obtain a valid token, thereby gaining the ability to connect to the database. +IAM authentication tokens can be employed to establish connections with RDS. +As of now, this functionality is supported for PostgreSQL within LocalStack. +However, IAM authentication is not yet validated at this stage. +Consequently, any database user assigned the `rds_iam` role will obtain a valid token, thereby gaining the ability to connect to the database. In this example, you will be able to verify the IAM authentication process for RDS Postgres: -1. Establish a database instance and obtain the corresponding host and port information. -2. Connect to the database using the master username and password. Subsequently, generate a new user and assign the `rds_iam` role as follows: - - `CREATE USER WITH LOGIN` - - `GRANT rds_iam TO ` -3. Create a token for the `` using the `generate-db-auth-token` command. -4. Connect to the database utilizing the user you generated and the token obtained in the previous step as the password. +1. 
Establish a database instance and obtain the corresponding host and port information. +2. Connect to the database using the master username and password. + Subsequently, generate a new user and assign the `rds_iam` role as follows: + - `CREATE USER WITH LOGIN` + - `GRANT rds_iam TO ` +3. Create a token for the `` using the `generate-db-auth-token` command. +4. Connect to the database utilizing the user you generated and the token obtained in the previous step as the password. ### Create a database instance -The following command creates a new database instance with the name `mydb` and the engine `postgres`. The database will be created with a single instance, which will be used as the master instance. +The following command creates a new database instance with the name `mydb` and the engine `postgres`. +The database will be created with a single instance, which will be used as the master instance. {{< command >}} $ MASTER_USER=hello @@ -234,7 +264,8 @@ $ awslocal rds create-db-instance \ ### Connect to the database -You can retrieve the hostname and port of your created instance either from the preceding output or by using the [`DescribeDbInstances`](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_DescribeDBInstances.html) API. Run the following command to retrieve the host and port of the instance: +You can retrieve the hostname and port of your created instance either from the preceding output or by using the [`DescribeDbInstances`](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_DescribeDBInstances.html) API. +Run the following command to retrieve the host and port of the instance: {{< command >}} $ PORT=$(awslocal rds describe-db-instances --db-instance-identifier mydb | jq -r ".DBInstances[0].Endpoint.Port") @@ -266,7 +297,8 @@ $ PGPASSWORD=$TOKEN psql -d $DB_NAME -U myiam -w -p $PORT -h $HOST LocalStack extends support for [Aurora Global Database](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-global-database.html) with certain limitations: -- Creating a global database will result in the generation of a single local database. All clusters and instances associated with the global database will share a common endpoint. +- Creating a global database will result in the generation of a single local database. + All clusters and instances associated with the global database will share a common endpoint. - It's important to note that clusters removed from a global database lose their ability to function as standalone clusters, differing from their intended behavior on AWS. - At present, the capability for persistence within global databases is not available. @@ -280,6 +312,7 @@ At the moment, primarily extension functions for the PostgreSQL engine are suppo The [`aws_lambda` extension](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/PostgreSQL-Lambda.html) can be used in local RDS PostgreSQL databases to interact with the Lambda API. 
For example, in the SQL code snippet below, we are loading the `aws_lambda` extension, then generate a full ARN from a function name, and finally invoke the Lambda function directly from the SQL query: + ```sql CREATE EXTENSION IF NOT EXISTS aws_lambda CASCADE; -- create a Lambda function ARN @@ -303,6 +336,7 @@ SELECT aws_s3.table_import_from_s3( ``` Analogously, we can use the `query_export_to_s3(..)` extension function to export data from a table `table2` into a CSV file `test.csv` in local S3 bucket `mybucket2`: + ```sql CREATE EXTENSION IF NOT EXISTS aws_s3 CASCADE; SELECT aws_s3.query_export_to_s3( @@ -316,16 +350,17 @@ SELECT aws_s3.query_export_to_s3( In addition to the `aws_*` extensions described in the sections above, LocalStack RDS supports the following PostgreSQL extensions (some of which are bundled with the [`PostGIS` extension](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Appendix.PostgreSQL.CommonDBATasks.PostGIS.html)): -* `address_standardizer_data_us` -* `fuzzystrmatch` -* `postgis` -* `postgis_raster` -* `postgis_tiger_geocoder` -* `postgis_topology` +- `address_standardizer_data_us` +- `fuzzystrmatch` +- `postgis` +- `postgis_raster` +- `postgis_tiger_geocoder` +- `postgis_topology` ## Resource Browser -The LocalStack Web Application provides a Resource Browser for managing RDS instances and clusters. You can access the Resource Browser by opening the LocalStack Web Application in your browser, navigating to the **Resources** section, and then clicking on **RDS** under the **Database** section. +The LocalStack Web Application provides a Resource Browser for managing RDS instances and clusters. +You can access the Resource Browser by opening the LocalStack Web Application in your browser, navigating to the **Resources** section, and then clicking on **RDS** under the **Database** section. RDS Resource Browser
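For readers driving the RDS Data API from code rather than the CLI, here is a hedged boto3 sketch of the `ExecuteStatement` call covered in this section; the endpoint, credentials, and both ARNs are placeholders, so substitute the cluster ARN and the SecretsManager secret ARN returned by the earlier create commands.

```python
import boto3

# Sketch only: endpoint and credentials assume LocalStack defaults, and the
# ARNs below are placeholders for the values returned by the create calls.
rds_data = boto3.client(
    "rds-data",
    endpoint_url="http://localhost:4566",
    region_name="us-east-1",
    aws_access_key_id="test",
    aws_secret_access_key="test",
)

response = rds_data.execute_statement(
    resourceArn="arn:aws:rds:us-east-1:000000000000:cluster:db1",             # placeholder cluster ARN
    secretArn="arn:aws:secretsmanager:us-east-1:000000000000:secret:dbpass",  # placeholder secret ARN
    database="test",
    sql="SELECT 123",
)

# The result rows come back as typed value objects.
print(response["records"])
```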
diff --git a/content/en/user-guide/aws/redshift/index.md index 1805fc7740..f510186534 100644 --- a/content/en/user-guide/aws/redshift/index.md +++ b/content/en/user-guide/aws/redshift/index.md @@ -7,23 +7,24 @@ tags: ["Pro image"] ## Introduction -RedShift is a cloud-based data warehouse solution which allows end users to aggregate huge volumes of data and parallel processing of data. -RedShift is fully managed by AWS and serves as a petabyte-scale service which allows users to create visualization reports and critically analyze collected data. +RedShift is a cloud-based data warehouse solution which allows end users to aggregate huge volumes of data and process it in parallel. +RedShift is fully managed by AWS and serves as a petabyte-scale service which allows users to create visualization reports and critically analyze collected data. The query results can be saved to an S3 Data Lake while additional analytics can be provided by Athena or SageMaker. -LocalStack allows you to use the RedShift APIs in your local environment to analyze structured and semi-structured data across local data warehouses and data lakes. +LocalStack allows you to use the RedShift APIs in your local environment to analyze structured and semi-structured data across local data warehouses and data lakes. The supported APIs are available on our [API coverage page](https://docs.localstack.cloud/references/coverage/coverage_redshift/), which provides information on the extent of RedShift's integration with LocalStack. ## Getting started This guide is designed for users new to RedShift and assumes basic knowledge of the AWS CLI and our [`awslocal`](https://github.com/localstack/awscli-local) wrapper script. -Start your LocalStack container using your preferred method. +Start your LocalStack container using your preferred method. We will demonstrate how to create a RedShift cluster and database while using a Glue Crawler to populate the metadata store with the schema of the RedShift database tables using the AWS CLI. ### Define the variables -First, we will define the variables we will use throughout this guide. Export the following variables in your shell: +First, we will define the variables we will use throughout this guide. +Export the following variables in your shell: ```bash REDSHIFT_CLUSTER_IDENTIFIER="redshiftcluster" @@ -37,11 +38,13 @@ GLUE_CONNECTION_NAME="glueconnection" GLUE_CRAWLER_NAME="gluecrawler" ``` -The above variables will be used to create a RedShift cluster, database, table, and user. You will also create a Glue database, connection, and crawler to populate the Glue Data Catalog with the schema of the RedShift database tables. +The above variables will be used to create a RedShift cluster, database, table, and user. +You will also create a Glue database, connection, and crawler to populate the Glue Data Catalog with the schema of the RedShift database tables. ### Create a RedShift cluster and database -You can create a RedShift cluster using the [`CreateCluster`](https://docs.aws.amazon.com/redshift/latest/APIReference/API_CreateCluster.html) API. The following command will create a RedShift cluster with the variables defined above: +You can create a RedShift cluster using the [`CreateCluster`](https://docs.aws.amazon.com/redshift/latest/APIReference/API_CreateCluster.html) API.
+The following command will create a RedShift cluster with the variables defined above: {{< command >}} $ awslocal redshift create-cluster \ @@ -52,7 +55,8 @@ $ awslocal redshift create-cluster \ --node-type n1 {{< / command >}} -You can fetch the status of the cluster using the [`DescribeClusters`](https://docs.aws.amazon.com/redshift/latest/APIReference/API_DescribeClusters.html) API. Run the following command to extract the URL of the cluster: +You can fetch the status of the cluster using the [`DescribeClusters`](https://docs.aws.amazon.com/redshift/latest/APIReference/API_DescribeClusters.html) API. +Run the following command to extract the URL of the cluster: {{< command >}} $ REDSHIFT_URL=$(awslocal redshift describe-clusters \ @@ -61,21 +65,24 @@ $ REDSHIFT_URL=$(awslocal redshift describe-clusters \ ### Create a Glue database, connection, and crawler -You can create a Glue database using the [`CreateDatabase`](https://docs.aws.amazon.com/glue/latest/webapi/API_CreateDatabase.html) API. The following command will create a Glue database: +You can create a Glue database using the [`CreateDatabase`](https://docs.aws.amazon.com/glue/latest/webapi/API_CreateDatabase.html) API. +The following command will create a Glue database: {{< command >}} $ awslocal glue create-database \ --database-input "{\"Name\": \"$GLUE_DATABASE_NAME\"}" {{< / command >}} -You can create a connection to the RedShift cluster using the [`CreateConnection`](https://docs.aws.amazon.com/glue/latest/webapi/API_CreateConnection.html) API. The following command will create a Glue connection with the RedShift cluster: +You can create a connection to the RedShift cluster using the [`CreateConnection`](https://docs.aws.amazon.com/glue/latest/webapi/API_CreateConnection.html) API. +The following command will create a Glue connection with the RedShift cluster: {{< command >}} $ awslocal glue create-connection \ --connection-input "{\"Name\":\"$GLUE_CONNECTION_NAME\", \"ConnectionType\": \"JDBC\", \"ConnectionProperties\": {\"USERNAME\": \"$REDSHIFT_USERNAME\", \"PASSWORD\": \"$REDSHIFT_PASSWORD\", \"JDBC_CONNECTION_URL\": \"jdbc:redshift://$REDSHIFT_URL/$REDSHIFT_DATABASE_NAME\"}}" {{< / command >}} -Finally, you can create a Glue crawler using the [`CreateCrawler`](https://docs.aws.amazon.com/glue/latest/webapi/API_CreateCrawler.html) API. The following command will create a Glue crawler: +Finally, you can create a Glue crawler using the [`CreateCrawler`](https://docs.aws.amazon.com/glue/latest/webapi/API_CreateCrawler.html) API. +The following command will create a Glue crawler: {{< command >}} $ awslocal glue create-crawler \ @@ -87,7 +94,8 @@ $ awslocal glue create-crawler \ ### Create table in RedShift -You can create a table in RedShift using the [`CreateTable`](https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_TABLE_NEW.html) API. The following command will create a table in RedShift: +You can create a table in RedShift using the [`CreateTable`](https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_TABLE_NEW.html) API. 
+The following command will create a table in RedShift: {{< command >}} $ REDSHIFT_STATEMENT_ID=$(awslocal redshift-data execute-statement \ @@ -97,7 +105,8 @@ $ REDSHIFT_STATEMENT_ID=$(awslocal redshift-data execute-statement \ "create table $REDSHIFT_TABLE_NAME(salesid integer not null, listid integer not null, sellerid integer not null, buyerid integer not null, eventid integer not null, dateid smallint not null, qtysold smallint not null, pricepaid decimal(8,2), commission decimal(8,2), saletime timestamp)" | jq -r .Id) {{< / command >}} -You can check the status of the statement using the [`DescribeStatement`](https://docs.aws.amazon.com/redshift-data/latest/APIReference/API_DescribeStatement.html) API. The following command will check the status of the statement: +You can check the status of the statement using the [`DescribeStatement`](https://docs.aws.amazon.com/redshift-data/latest/APIReference/API_DescribeStatement.html) API. +The following command will check the status of the statement: {{< command >}} $ wait "awslocal redshift-data describe-statement \ @@ -106,21 +115,24 @@ $ wait "awslocal redshift-data describe-statement \ ### Run the crawler -You can run the crawler using the [`StartCrawler`](https://docs.aws.amazon.com/glue/latest/webapi/API_StartCrawler.html) API. The following command will run the crawler: +You can run the crawler using the [`StartCrawler`](https://docs.aws.amazon.com/glue/latest/webapi/API_StartCrawler.html) API. +The following command will run the crawler: {{< command >}} $ awslocal glue start-crawler \ --name $GLUE_CRAWLER_NAME {{< / command >}} -You can wait for the crawler to finish using the [`GetCrawler`](https://docs.aws.amazon.com/glue/latest/webapi/API_GetCrawler.html) API. The following command will wait for the crawler to finish: +You can wait for the crawler to finish using the [`GetCrawler`](https://docs.aws.amazon.com/glue/latest/webapi/API_GetCrawler.html) API. +The following command will wait for the crawler to finish: {{< command >}} $ wait "awslocal glue get-crawler \ --name $GLUE_CRAWLER_NAME" ".Crawler.State" "READY" {{< / command >}} -You can finally retrieve the schema of the table using the [`GetTable`](https://docs.aws.amazon.com/glue/latest/webapi/API_GetTable.html) API. The following command will retrieve the schema of the table: +You can finally retrieve the schema of the table using the [`GetTable`](https://docs.aws.amazon.com/glue/latest/webapi/API_GetTable.html) API. +The following command will retrieve the schema of the table: {{< command >}} $ awslocal glue get-table \ diff --git a/content/en/user-guide/aws/resource_groups/index.md b/content/en/user-guide/aws/resource_groups/index.md index e24837231c..c495ca5a0b 100644 --- a/content/en/user-guide/aws/resource_groups/index.md +++ b/content/en/user-guide/aws/resource_groups/index.md @@ -29,7 +29,8 @@ However, you can also use CloudFormation stack-based queries to create a resourc ### Create a Resource Group Resource Groups in AWS are built around the concept of queries, which serve as a fundamental component. -The tag-based queries list the resource types in the format `AWS::::` (e.g. `AWS::Lambda::Function` along with specified tags. A tag-based group is created based on a query of type `TAG_FILTERS_1_0`. +The tag-based queries list the resource types in the format `AWS::::` (e.g. `AWS::Lambda::Function` along with specified tags. +A tag-based group is created based on a query of type `TAG_FILTERS_1_0`. 
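To make the query shape concrete, a `TAG_FILTERS_1_0` resource query is a JSON document pairing resource-type filters with tag filters; the sketch below shows an equivalent `CreateGroup` call from boto3, where the group name and tag values are hypothetical and the client settings assume LocalStack defaults.

```python
import json

import boto3

# Illustrative sketch: group name and tags are made up, and the client
# targets LocalStack's default edge endpoint with dummy test credentials.
rg = boto3.client(
    "resource-groups",
    endpoint_url="http://localhost:4566",
    region_name="us-east-1",
    aws_access_key_id="test",
    aws_secret_access_key="test",
)

# A TAG_FILTERS_1_0 query lists the resource types to include and the tags
# a resource must carry to become part of the group.
query = {
    "ResourceTypeFilters": ["AWS::Lambda::Function"],
    "TagFilters": [{"Key": "stage", "Values": ["test"]}],
}

rg.create_group(
    Name="my-tagged-functions",
    ResourceQuery={"Type": "TAG_FILTERS_1_0", "Query": json.dumps(query)},
)
```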
Use the [`CreateGroup`](https://docs.aws.amazon.com/resource-groups/latest/APIReference/API_CreateGroup.html) API to create a Resource Group. Run the following command to create a Resource Group named `my-resource-group`: diff --git a/content/en/user-guide/aws/route53/index.md index c3e5888893..e481014e53 100644 --- a/content/en/user-guide/aws/route53/index.md +++ b/content/en/user-guide/aws/route53/index.md @@ -8,24 +8,32 @@ persistence: supported ## Introduction -Route 53 is a highly scalable and reliable domain name system (DNS) web service provided by Amazon Web Services. Route 53 allows you to register domain names, and associate them with IP addresses or other resources. In addition to basic DNS functionality, Route 53 offers advanced features like health checks and DNS failover. Route 53 integrates seamlessly with other AWS services, such as route traffic to CloudFront distributions, S3 buckets configured for static website hosting, EC2 instances, and more. +Route 53 is a highly scalable and reliable domain name system (DNS) web service provided by Amazon Web Services. +Route 53 allows you to register domain names, and associate them with IP addresses or other resources. +In addition to basic DNS functionality, Route 53 offers advanced features like health checks and DNS failover. +Route 53 integrates seamlessly with other AWS services, such as routing traffic to CloudFront distributions, S3 buckets configured for static website hosting, EC2 instances, and more. LocalStack allows you to use the Route53 APIs in your local environment to create hosted zones and to manage DNS entries. -The supported APIs are available on our [API coverage page](https://docs.localstack.cloud/references/coverage/coverage_route53/), which provides information on the extent of Route53's integration with LocalStack. LocalStack Pro image integrates with our DNS server to respond to DNS queries with these domains. +The supported APIs are available on our [API coverage page](https://docs.localstack.cloud/references/coverage/coverage_route53/), which provides information on the extent of Route53's integration with LocalStack. +The LocalStack Pro image integrates with our DNS server to respond to DNS queries for these domains. {{< callout "note">}} -LocalStack CLI does not publish port `53` anymore by default. Use the CLI flag `--host-dns` to expose the port on the host. This would be required if you want to reach out to Route53 domain names from your host machine, using the LocalStack DNS server. +LocalStack CLI does not publish port `53` anymore by default. +Use the CLI flag `--host-dns` to expose the port on the host. +This would be required if you want to reach Route53 domain names from your host machine, using the LocalStack DNS server. {{< /callout >}} ## Getting started This guide is designed for users new to Route53 and assumes basic knowledge of the AWS CLI and our [`awslocal`](https://github.com/localstack/awscli-local) wrapper script. -Start your LocalStack container using your preferred method. We will demonstrate how to create a hosted zone and query the DNS record with the AWS CLI. +Start your LocalStack container using your preferred method. +We will demonstrate how to create a hosted zone and query the DNS record with the AWS CLI. ### Create a hosted zone -You can created a hosted zone for `example.com` using the [`CreateHostedZone`](https://docs.aws.amazon.com/Route53/latest/APIReference/API_CreateHostedZone.html) API.
Run the following command: +You can create a hosted zone for `example.com` using the [`CreateHostedZone`](https://docs.aws.amazon.com/Route53/latest/APIReference/API_CreateHostedZone.html) API. +Run the following command: {{< command >}} $ zone_id=$(awslocal route53 create-hosted-zone \ @@ -42,7 +50,8 @@ The following output would be retrieved: ### Change resource record sets -You can now change the resource record sets for the hosted zone `example.com` using the [`ChangeResourceRecordSets`](https://docs.aws.amazon.com/Route53/latest/APIReference/API_ChangeResourceRecordSets.html) API. Run the following command: +You can now change the resource record sets for the hosted zone `example.com` using the [`ChangeResourceRecordSets`](https://docs.aws.amazon.com/Route53/latest/APIReference/API_ChangeResourceRecordSets.html) API. +Run the following command: {{< command >}} $ awslocal route53 change-resource-record-sets \ @@ -94,11 +103,17 @@ The DNS name `localhost.localstack.cloud`, along with its subdomains like `mybuc It facilitates communication between a LocalStack compute environment (such as a Lambda function) and the LocalStack APIs, as well as your containerised applications with the LocalStack APIs. For example configurations, see the [Network Troubleshooting guide]({{< ref "references/network-troubleshooting/endpoint-url/#from-your-container" >}}). -For most use-cases, the default configuration of the internal LocalStack DNS name requires no modification. It functions seamlessly in typical scenarios. However, there are instances where adjusting the external resolution of this DNS name becomes necessary. For instance, this might be required when your LocalStack instance operates on a distinct Docker network compared to your application code or even on a separate machine. +For most use-cases, the default configuration of the internal LocalStack DNS name requires no modification. +It functions seamlessly in typical scenarios. +However, there are instances where adjusting the external resolution of this DNS name becomes necessary. +For instance, this might be required when your LocalStack instance operates on a distinct Docker network compared to your application code or even on a separate machine. -Suppose you intend to achieve a scenario in which all subdomains in the format `*.localhost.localstack.cloud` resolve to the IP address `5.6.7.8`. This IP signifies the accessibility of your LocalStack instance. This can be accomplished using Route53. +Suppose you intend to achieve a scenario in which all subdomains in the format `*.localhost.localstack.cloud` resolve to the IP address `5.6.7.8`. +This is the IP address at which your LocalStack instance is reachable. +This can be accomplished using Route53. -Create a hosted zone for the domain `localhost.localstack.cloud` using the [`CreateHostedZone` API](https://docs.aws.amazon.com/Route53/latest/APIReference/API_CreateHostedZone.html) API. Run the following command: +Create a hosted zone for the domain `localhost.localstack.cloud` using the [`CreateHostedZone`](https://docs.aws.amazon.com/Route53/latest/APIReference/API_CreateHostedZone.html) API.
+Run the following command: {{< command >}} $ zone_id=$(awslocal route53 create-hosted-zone \ @@ -113,7 +128,8 @@ The following output would be retrieved: /hostedzone/3NF6SEGOB5EBHS1 ``` -You can now use the [`ChangeResourceRecordSets`](https://docs.aws.amazon.com/Route53/latest/APIReference/API_ChangeResourceRecordSets.html) API to create a record set for the domain `localhost.localstack.cloud` using the `zone_id` retrieved in the previous step. Run the following command to accomplish this: +You can now use the [`ChangeResourceRecordSets`](https://docs.aws.amazon.com/Route53/latest/APIReference/API_ChangeResourceRecordSets.html) API to create a record set for the domain `localhost.localstack.cloud` using the `zone_id` retrieved in the previous step. +Run the following command to accomplish this: {{< command >}} $ awslocal route53 change-resource-record-sets \ @@ -157,17 +173,23 @@ localhost.localstack.cloud. 300 IN A 5.6.7.8 ## Resource Browser -The LocalStack Web Application provides a Route53 for creating hosted zones and to manage DNS entries. You can access the Resource Browser by opening the LocalStack Web Application in your browser, navigating to the **Resources** section, and then clicking on **Route53** under the **Analytics** section. +The LocalStack Web Application provides a Route53 Resource Browser for creating hosted zones and managing DNS entries. +You can access the Resource Browser by opening the LocalStack Web Application in your browser, navigating to the **Resources** section, and then clicking on **Route53** under the **Analytics** section. Route53 Resource Browser The Resource Browser allows you to perform the following actions: -- **Create Hosted Zone**: Create a hosted zone for a domain name by clicking on the **Create Hosted Zone** button. This will open a modal where you can enter the name, VPC, and other parameters and click on the **Submit** button to create the hosted zone. -- **View Hosted Zone**: View the details of a hosted zone by clicking on the specific hosted zone name. This will open a modal where you can view the hosted zone details. -- **Create Record**: Click on the **Records** button on the individual hosted zone page, followed by clicking **Create Record** to create a record for the hosted zone. This will open a modal where you can enter the name, type, and other parameters and click on the **Submit** button to create the record. -- **Edit Record**: Click on the **Records** button on the individual hosted zone page, followed by clicking **Edit** on the specific record to edit the record. This will open a modal where you can edit the record details and click on the **Submit** button to save the changes.s -- **View Records**: Click on the **Records** button on the individual hosted zone page, followed by clicking on the specific record to view the record details. This will open a modal where you can view the record details. +- **Create Hosted Zone**: Create a hosted zone for a domain name by clicking on the **Create Hosted Zone** button. + This will open a modal where you can enter the name, VPC, and other parameters and click on the **Submit** button to create the hosted zone. +- **View Hosted Zone**: View the details of a hosted zone by clicking on the specific hosted zone name. + This will open a modal where you can view the hosted zone details. +- **Create Record**: Click on the **Records** button on the individual hosted zone page, followed by clicking **Create Record** to create a record for the hosted zone.
+ This will open a modal where you can enter the name, type, and other parameters and click on the **Submit** button to create the record. +- **Edit Record**: Click on the **Records** button on the individual hosted zone page, followed by clicking **Edit** on the specific record to edit the record. + This will open a modal where you can edit the record details and click on the **Submit** button to save the changes. +- **View Records**: Click on the **Records** button on the individual hosted zone page, followed by clicking on the specific record to view the record details. + This will open a modal where you can view the record details. - **Delete Hosted Zone**: Select the hosted zones you want to delete by clicking on the checkbox next to the hosted zone name, followed by clicking on the **Actions** button and then clicking on **Remove Selected**. - **Delete Record**: Click on the **Records** button on the individual hosted zone page, followed by clicking on checkbox next to the specific record, and then clicking on the **Actions** button and then clicking on **Remove Selected**. @@ -175,4 +197,4 @@ The following code snippets and sample applications provide practical examples of how to use Route53 in LocalStack for various use cases: - - [DNS Failover with Route53 on LocalStack](https://github.com/localstack/localstack-pro-samples/tree/master/route53-dns-failover) +- [DNS Failover with Route53 on LocalStack](https://github.com/localstack/localstack-pro-samples/tree/master/route53-dns-failover) diff --git a/content/en/user-guide/aws/route53resolver/index.md index 152bac96f0..a7ce9d649e 100644 --- a/content/en/user-guide/aws/route53resolver/index.md +++ b/content/en/user-guide/aws/route53resolver/index.md @@ -8,7 +8,9 @@ persistence: supported ## Introduction -Route 53 Resolver allows you to route DNS queries between your virtual private cloud (VPC) and your network. Route 53 Resolver forwards DNS queries for domain names to the appropriate DNS service based on the configuration you set up. Route 53 Resolver can be used to resolve domain names between your VPC and your network, and to resolve domain names between your VPCs. +Route 53 Resolver allows you to route DNS queries between your virtual private cloud (VPC) and your network. +Route 53 Resolver forwards DNS queries for domain names to the appropriate DNS service based on the configuration you set up. +Route 53 Resolver can be used to resolve domain names between your VPC and your network, and to resolve domain names between your VPCs. LocalStack allows you to use the Route 53 Resolver endpoints in your local environment. The supported APIs are available on our [API coverage page](https://docs.localstack.cloud/references/coverage/coverage_route53resolver/), which provides information on the extent of Route 53 Resolver's integration with LocalStack. @@ -17,7 +19,8 @@ This guide is designed for users new to Route53 Resolver and assumes basic knowledge of the AWS CLI and our [`awslocal`](https://github.com/localstack/awscli-local) wrapper script. -Start your LocalStack container using your preferred method. We will demonstrate how to create a resolver endpoint, list the endpoints, and delete the endpoint with the AWS CLI. +Start your LocalStack container using your preferred method.
+We will demonstrate how to create a resolver endpoint, list the endpoints, and delete the endpoint with the AWS CLI. ### Fetch the IP addresses & Security Group ID @@ -63,7 +66,8 @@ $ awslocal ec2 describe-subnets --subnet-ids subnet-bdd58a47 --query 'Subnets[*]
{{< / command >}} -Save the CIDR blocks of the subnets as you will need them later. Lastly fetch the security group ID of the default VPC: +Save the CIDR blocks of the subnets as you will need them later. +Lastly fetch the security group ID of the default VPC: {{< command >}} $ awslocal ec2 describe-security-groups \ @@ -107,7 +111,8 @@ Create a new file named `create-outbound-resolver-endpoint.json` and add the fol Replace the `Ip` and `SubnetId` values with the CIDR blocks and subnet IDs you fetched earlier. -You can now use the [`CreateResolverEndpoint`](https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_CreateResolverEndpoint.html) API to create an outbound resolver endpoint. Run the following command: +You can now use the [`CreateResolverEndpoint`](https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_CreateResolverEndpoint.html) API to create an outbound resolver endpoint. +Run the following command: {{< command >}} $ awslocal route53resolver create-resolver-endpoint \ @@ -139,7 +144,8 @@ The following output would be retrieved: ### List the resolver endpoints -You can list the resolver endpoints using the [`ListResolverEndpoints`](https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_ListResolverEndpoints.html) API. Run the following command: +You can list the resolver endpoints using the [`ListResolverEndpoints`](https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_ListResolverEndpoints.html) API. +Run the following command: {{< command >}} $ awslocal route53resolver list-resolver-endpoints @@ -173,7 +179,8 @@ The following output would be retrieved: ### Delete the resolver endpoint -You can delete the resolver endpoint using the [`DeleteResolverEndpoint`](https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_DeleteResolverEndpoint.html) API. Run the following command: +You can delete the resolver endpoint using the [`DeleteResolverEndpoint`](https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_DeleteResolverEndpoint.html) API. +Run the following command: {{< command >}} $ awslocal route53resolver delete-resolver-endpoint \ @@ -184,13 +191,18 @@ Replace `rslvr-out-5d61abaff9de06b99` with the ID of the resolver endpoint you w ## Resource Browser -The LocalStack Web Application provides a Route53 Resolver for creating and managing resolver endpoints. You can access the Resource Browser by opening the LocalStack Web Application in your browser, navigating to the **Resource Browser** section, and then clicking on **Route53** under the **Analytics** section. Navigate to the **Resolver Endpoints** tab to view the resolver endpoints. +The LocalStack Web Application provides a Route53 Resolver for creating and managing resolver endpoints. +You can access the Resource Browser by opening the LocalStack Web Application in your browser, navigating to the **Resource Browser** section, and then clicking on **Route53** under the **Analytics** section. +Navigate to the **Resolver Endpoints** tab to view the resolver endpoints. Route53Resolver Resource Browser The Resource Browser allows you to perform the following actions: -- **Create resolver endpoint**: Create a resolver endpoint by clicking on the **Create Endpoint** button. This will open a modal where you can enter the name, VPC, and other parameters and click on the **Submit** button to create the resolver endpoint. 
-- **View resolver endpoint**: View the details of a resolver endpoint by clicking on the specific resolver endpoint name. This will open a modal where you can view the resolver endpoint details. -- **Edit resolver endpoint**: Edit the details of a resolver endpoint by clicking on the **Edit Endpoint** button in the specific resolver endpoint page. This will open a modal where you can edit the resolver endpoint details. +- **Create resolver endpoint**: Create a resolver endpoint by clicking on the **Create Endpoint** button. + This will open a modal where you can enter the name, VPC, and other parameters and click on the **Submit** button to create the resolver endpoint. +- **View resolver endpoint**: View the details of a resolver endpoint by clicking on the specific resolver endpoint name. + This will open a modal where you can view the resolver endpoint details. +- **Edit resolver endpoint**: Edit the details of a resolver endpoint by clicking on the **Edit Endpoint** button in the specific resolver endpoint page. + This will open a modal where you can edit the resolver endpoint details. - **Delete resolver endpoint**: Select the resolver endpoints you want to delete by clicking on the checkbox next to the resolver endpoint name, followed by clicking on the **Actions** button and then clicking on **Remove Selected**. diff --git a/content/en/user-guide/aws/s3/index.md b/content/en/user-guide/aws/s3/index.md index 88779a979c..a8aa681d92 100644 --- a/content/en/user-guide/aws/s3/index.md +++ b/content/en/user-guide/aws/s3/index.md @@ -8,25 +8,32 @@ persistence: supported ## Introduction -Simple Storage Service (S3) is an object storage service that provides a highly scalable and durable solution for storing and retrieving data. In S3, a bucket represents a directory, while an object corresponds to a file. Each object or file within S3 encompasses essential attributes such as a unique key denoting its name, the actual content it holds, a version ID for versioning support, and accompanying metadata. S3 can store unlimited objects, allowing you to store, retrieve, and manage your data in a highly adaptable and reliable manner. +Simple Storage Service (S3) is an object storage service that provides a highly scalable and durable solution for storing and retrieving data. +In S3, a bucket represents a directory, while an object corresponds to a file. +Each object or file within S3 encompasses essential attributes such as a unique key denoting its name, the actual content it holds, a version ID for versioning support, and accompanying metadata. +S3 can store unlimited objects, allowing you to store, retrieve, and manage your data in a highly adaptable and reliable manner. -LocalStack allows you to use the S3 APIs in your local environment to create new buckets, manage your S3 objects, and test your S3 configurations locally. The supported APIs are available on our [API coverage page](https://docs.localstack.cloud/references/coverage/coverage_s3/), which provides information on the extent of S3's integration with LocalStack. +LocalStack allows you to use the S3 APIs in your local environment to create new buckets, manage your S3 objects, and test your S3 configurations locally. +The supported APIs are available on our [API coverage page](https://docs.localstack.cloud/references/coverage/coverage_s3/), which provides information on the extent of S3's integration with LocalStack. 
## Getting started This guide is designed for users new to S3 and assumes basic knowledge of the AWS CLI and our [`awslocal`](https://github.com/localstack/awscli-local) wrapper script. -Start your LocalStack container using your preferred method. We will demonstrate how you can create an S3 bucket, manage S3 objects, and generate pre-signed URLs for S3 objects. +Start your LocalStack container using your preferred method. +We will demonstrate how you can create an S3 bucket, manage S3 objects, and generate pre-signed URLs for S3 objects. ### Create an S3 bucket -You can create an S3 bucket using the [`CreateBucket`](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-bucket.html) API. Run the following command to create an S3 bucket named `sample-bucket`: +You can create an S3 bucket using the [`CreateBucket`](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-bucket.html) API. +Run the following command to create an S3 bucket named `sample-bucket`: {{< command >}} $ awslocal s3api create-bucket --bucket sample-bucket {{< / command >}} -You can list your S3 buckets using the [`ListBuckets`](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-buckets.html) API. Run the following command to list your S3 buckets: +You can list your S3 buckets using the [`ListBuckets`](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-buckets.html) API. +Run the following command to list your S3 buckets: {{< command >}} $ awslocal s3api list-buckets @@ -51,7 +58,9 @@ On successful creation of the S3 bucket, you will see the following output: ### Managing S3 objects -To upload a file to your S3 bucket, you can use the [`PutObject`](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html) API. Download a random image from the internet and save it as `image.jpg`. Run the following command to upload the file to your S3 bucket: +To upload a file to your S3 bucket, you can use the [`PutObject`](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html) API. +Download a random image from the internet and save it as `image.jpg`. +Run the following command to upload the file to your S3 bucket: {{< command >}} $ awslocal s3api put-object \ @@ -60,7 +69,8 @@ $ awslocal s3api put-object \ --body image.jpg {{< / command >}} -You can list the objects in your S3 bucket using the [`ListObjects`](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-objects.html) API. Run the following command to list the objects in your S3 bucket: +You can list the objects in your S3 bucket using the [`ListObjects`](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-objects.html) API. +Run the following command to list the objects in your S3 bucket: {{< command >}} $ awslocal s3api list-objects \ @@ -100,7 +110,8 @@ $ awslocal s3api put-object --bucket sample-bucket --key index.html --body index ### Generate a pre-signed URL for S3 object -You can generate a pre-signed URL for your S3 object using the [`presign`](https://docs.aws.amazon.com/cli/latest/reference/s3/presign.html) command. Pre-signed URL allows anyone to retrieve the S3 object with an HTTP GET request. +You can generate a pre-signed URL for your S3 object using the [`presign`](https://docs.aws.amazon.com/cli/latest/reference/s3/presign.html) command. +Pre-signed URL allows anyone to retrieve the S3 object with an HTTP GET request. 
Run the following command to generate a pre-signed URL for your S3 object: @@ -108,7 +119,8 @@ Run the following command to generate a pre-signed URL for your S3 object: $ awslocal s3 presign s3://sample-bucket/image.jpg {{< / command >}} -You will see a generated pre-signed URL for your S3 object. You can use [`cURL`](https://curl.se/) or [`wget`](https://www.gnu.org/software/wget/) to retrieve the S3 object using the pre-signed URL. +You will see a generated pre-signed URL for your S3 object. +You can use [`cURL`](https://curl.se/) or [`wget`](https://www.gnu.org/software/wget/) to retrieve the S3 object using the pre-signed URL. ## Path-Style and Virtual Hosted-Style Requests @@ -125,7 +137,7 @@ http://localhost:4566// # path-style request A **Virtual-Hosted style** request will have the `bucket` as part of the `Host` header of your request. In order for LocalStack to be able to parse the bucket name from your request, your endpoint needs to be prefixed with `s3.`, like `s3.localhost.localstack.cloud`. -If your endpoint cannot be prefixed with `s3.`, you should configure your SDK to use **Path style** request instead, and make the bucket part of the path. +If your endpoint cannot be prefixed with `s3.`, you should configure your SDK to use **Path style** request instead, and make the bucket part of the path. By default, most SDKs will try to use **Virtual-Hosted style** requests and prepend your endpoint with the bucket name. However, if the endpoint is not prefixed by `s3.`, LocalStack will not be able to understand the request and it will most likely result in an error. @@ -134,7 +146,8 @@ You can either change the endpoint to an S3-specific one, or configure your SDK Check out our [SDK documentation](https://docs.localstack.cloud/user-guide/integrations/sdks/) to learn how you can configure AWS SDKs to access LocalStack and S3. {{< callout "tip" >}} -While using [AWS SDKs](https://aws.amazon.com/developer/tools/#SDKs), you would need to configure the `ForcePathStyle` parameter to `true` in the S3 client configuration to use **Path style** requests. If you want to use virtual host addressing of buckets, you can remove `ForcePathStyle` from the configuration. +While using [AWS SDKs](https://aws.amazon.com/developer/tools/#SDKs), you would need to configure the `ForcePathStyle` parameter to `true` in the S3 client configuration to use **Path style** requests. +If you want to use virtual host addressing of buckets, you can remove `ForcePathStyle` from the configuration. The `ForcePathStyle` parameter name can vary between SDK and languages, please check our [SDK documentation](https://docs.localstack.cloud/user-guide/integrations/sdks/) {{< /callout >}} @@ -143,9 +156,15 @@ Using the `s3.localhost.localstack.cloud` endpoint URL is recommended for all re ## Configuring Cross-Origin Resource Sharing on S3 -You can configure Cross-Origin Resource Sharing (CORS) on a LocalStack S3 bucket using AWS Command Line Interface (CLI). It would allow your local application to communicate directly with an S3 bucket in LocalStack. By default, LocalStack will apply specific CORS rules to all requests to allow you to display and access your resources through [LocalStack Web Application](https://app.localstack.cloud). If no CORS rules are configured for your S3 bucket, LocalStack will apply default rules unless specified otherwise. +You can configure Cross-Origin Resource Sharing (CORS) on a LocalStack S3 bucket using AWS Command Line Interface (CLI). 
+It would allow your local application to communicate directly with an S3 bucket in LocalStack. +By default, LocalStack will apply specific CORS rules to all requests to allow you to display and access your resources through [LocalStack Web Application](https://app.localstack.cloud). +If no CORS rules are configured for your S3 bucket, LocalStack will apply default rules unless specified otherwise. -To configure CORS rules for your S3 bucket, you can use the `awslocal` wrapper. Optionally, you can run a local web application on [localhost:3000](http://localhost:3000). You can emulate the same behaviour with an AWS SDK or an integration you use. Follow this step-by-step guide to configure CORS rules on your S3 bucket. +To configure CORS rules for your S3 bucket, you can use the `awslocal` wrapper. +Optionally, you can run a local web application on [localhost:3000](http://localhost:3000). +You can emulate the same behaviour with an AWS SDK or an integration you use. +Follow this step-by-step guide to configure CORS rules on your S3 bucket. Run the following command on your terminal to create your S3 bucket: @@ -156,7 +175,8 @@ $ awslocal s3api create-bucket --bucket cors-bucket } {{< / command >}} -Next, create a JSON file with the CORS configuration. The file should have the following format: +Next, create a JSON file with the CORS configuration. +The file should have the following format: ```json { @@ -175,7 +195,8 @@ Next, create a JSON file with the CORS configuration. The file should have the f Note that this configuration is a sample, and you can tailor it to fit your needs better, for example, restricting the **AllowedHeaders** to specific ones. {{< /callout >}} -Save the file locally with a name of your choice, for example, `cors-config.json`. Run the following command to apply the CORS configuration to your S3 bucket: +Save the file locally with a name of your choice, for example, `cors-config.json`. +Run the following command to apply the CORS configuration to your S3 bucket: {{< command >}} $ awslocal s3api put-bucket-cors --bucket cors-bucket --cors-configuration file://cors-config.json @@ -187,9 +208,11 @@ You can further verify that the CORS configuration was applied successfully by r $ awslocal s3api get-bucket-cors --bucket cors-bucket {{< / command >}} -On applying the configuration successfully, you should see the same JSON configuration file you created earlier. Your S3 bucket is configured to allow cross-origin resource sharing, and if you try to send requests from your local application running on [localhost:3000](http://localhost:3000), they should be successful. +On applying the configuration successfully, you should see the same JSON configuration file you created earlier. +Your S3 bucket is configured to allow cross-origin resource sharing, and if you try to send requests from your local application running on [localhost:3000](http://localhost:3000), they should be successful. -However, if you try to access your bucket from [LocalStack Web Application](https://app.localstack.cloud), you'll see errors, and your bucket won't be accessible anymore. We can edit the JSON file `cors-config.json` you created earlier with the following configuration and save it: +However, if you try to access your bucket from [LocalStack Web Application](https://app.localstack.cloud), you'll see errors, and your bucket won't be accessible anymore. 
+We can edit the JSON file `cors-config.json` you created earlier with the following configuration and save it: ```json { @@ -219,7 +242,8 @@ You can try again to upload files in your bucket from the [LocalStack Web Applic ## S3 Docker image -LocalStack provides a Docker image for S3, which you can use to run S3 in a Docker container. The image is available on [Docker Hub](https://hub.docker.com/r/localstack/localstack) and can be pulled using the following command: +LocalStack provides a Docker image for S3, which you can use to run S3 in a Docker container. +The image is available on [Docker Hub](https://hub.docker.com/r/localstack/localstack) and can be pulled using the following command: {{< command >}} $ docker pull localstack/localstack:s3-latest @@ -254,18 +278,21 @@ docker run \ {{< /tab >}} {{< /tabpane >}} -The S3 Docker image has similar parity with the S3 APIs supported by LocalStack Docker image. You can use similar [configuration options](https://docs.localstack.cloud/references/configuration/#s3) to alter the behaviour of the S3 Docker image, such as `DEBUG` or `S3_SKIP_SIGNATURE_VALIDATION`. +The S3 Docker image has similar parity with the S3 APIs supported by LocalStack Docker image. +You can use similar [configuration options](https://docs.localstack.cloud/references/configuration/#s3) to alter the behaviour of the S3 Docker image, such as `DEBUG` or `S3_SKIP_SIGNATURE_VALIDATION`. ## Resource Browser -The LocalStack Web Application provides a [Resource Browser](https://docs.localstack.cloud/user-guide/web-application/resource-browser/) for managing S3 buckets & configurations. You can access the Resource Browser by opening the LocalStack Web Application in your browser, navigating to the **Resources** section, and then clicking on **S3** under the **Storage** section. +The LocalStack Web Application provides a [Resource Browser](https://docs.localstack.cloud/user-guide/web-application/resource-browser/) for managing S3 buckets & configurations. +You can access the Resource Browser by opening the LocalStack Web Application in your browser, navigating to the **Resources** section, and then clicking on **S3** under the **Storage** section. S3 Resource Browser The Resource Browser allows you to perform the following actions: - **Create Bucket**: Create a new S3 bucket by specifying a **Bucket Name**, **Bucket Configuration**, **ACL**, **Object Ownership**, and more. -- **Objects & Permissions**: View, upload, download, and delete objects in your S3 buckets. You can also view and edit the permissions, like the CORS Configuration for the bucket. +- **Objects & Permissions**: View, upload, download, and delete objects in your S3 buckets. + You can also view and edit the permissions, like the CORS Configuration for the bucket. - **Create Folder**: Create a new folder in your S3 bucket by clicking on the **Create Folder** button and specifying a **Folder Name**. - **Delete Bucket**: Delete an S3 bucket by selecting the S3 bucket and clicking on **Actions** button and clicking on **Remove Selected**. 
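The path-style configuration discussed above can also be set directly on an SDK client. Below is a minimal sketch using the Python SDK (boto3) against a default LocalStack setup; the endpoint, dummy credentials, and bucket name are assumptions, and boto3 exposes the option as `addressing_style` rather than `ForcePathStyle`:

```python
# Minimal sketch: a boto3 S3 client that sends path-style requests to LocalStack.
# Endpoint, dummy credentials, and bucket name are illustrative assumptions.
import boto3
from botocore.config import Config

s3 = boto3.client(
    "s3",
    endpoint_url="http://localhost:4566",              # default LocalStack edge endpoint
    aws_access_key_id="test",
    aws_secret_access_key="test",
    region_name="us-east-1",
    config=Config(s3={"addressing_style": "path"}),    # boto3's equivalent of ForcePathStyle
)

s3.create_bucket(Bucket="sample-bucket")
print([b["Name"] for b in s3.list_buckets()["Buckets"]])
```

If you point the client at `http://s3.localhost.localstack.cloud:4566` instead, the `addressing_style` override can usually be omitted and virtual-hosted-style requests will work as well.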
diff --git a/content/en/user-guide/aws/sagemaker/index.md b/content/en/user-guide/aws/sagemaker/index.md index ba21e4767a..a7cc5c00b0 100644 --- a/content/en/user-guide/aws/sagemaker/index.md +++ b/content/en/user-guide/aws/sagemaker/index.md @@ -5,14 +5,18 @@ description: Get started with SageMaker on LocalStack tags: ["Pro image"] --- -## Introduction +## Introduction -Amazon SageMaker is a fully managed service provided by Amazon Web Services (AWS) that provides the tools to build, train, and deploy machine-learning models in the cloud for predictive analytics applications. It streamlines the machine learning development process, reduces the time and effort required to build and deploy models, and offers the scalability and flexibility needed for large-scale machine learning projects in the AWS cloud. +Amazon SageMaker is a fully managed service provided by Amazon Web Services (AWS) that provides the tools to build, train, and deploy machine-learning models in the cloud for predictive analytics applications. +It streamlines the machine learning development process, reduces the time and effort required to build and deploy models, and offers the scalability and flexibility needed for large-scale machine learning projects in the AWS cloud. -LocalStack Pro provides a local version of the SageMaker API, which allows running jobs to create machine learning models (e.g., using PyTorch) and to deploy them. The supported APIs are available on our [API coverage page](https://docs.localstack.cloud/references/coverage/coverage_sagemaker/), which provides information on the extent of Sagemaker's integration with LocalStack. +LocalStack Pro provides a local version of the SageMaker API, which allows running jobs to create machine learning models (e.g., using PyTorch) and to deploy them. +The supported APIs are available on our [API coverage page](https://docs.localstack.cloud/references/coverage/coverage_sagemaker/), which provides information on the extent of Sagemaker's integration with LocalStack. {{< callout >}} -LocalStack supports custom-built models in SageMaker. You can push your Docker image to LocalStack's Elastic Container Registry (ECR) and use it in SageMaker. LocalStack will use the local ECR image to create a SageMaker model. +LocalStack supports custom-built models in SageMaker. +You can push your Docker image to LocalStack's Elastic Container Registry (ECR) and use it in SageMaker. +LocalStack will use the local ECR image to create a SageMaker model. {{< /callout >}} ## Getting started @@ -26,7 +30,8 @@ We will demonstrate an application illustrating running a machine learning job u - Invokes the endpoint directly on the container via Boto3 {{< callout >}} -SageMaker is a fairly comprehensive API for now. Currently a subset of the functionality is provided locally, but new features are being added on a regular basis. +SageMaker is a fairly comprehensive API for now. +Currently a subset of the functionality is provided locally, but new features are being added on a regular basis. {{< /callout >}} ### Download the sample application @@ -58,7 +63,8 @@ $ docker pull 763104351884.dkr.ecr.us-east-1.amazonaws.com/pytorch-inference:1.5 ### Run the sample application -Start your LocalStack container using your preferred method. Run the sample application by executing the following command: +Start your LocalStack container using your preferred method. 
+Run the sample application by executing the following command: {{< command >}} $ python3 main.py @@ -86,9 +92,11 @@ You can also invoke a serverless endpoint, by navigating to `main.py` and uncomm ## Resource Browser -The LocalStack Web Application provides a [Resource Browser](https://docs.localstack.cloud/user-guide/web-application/resource-browser/) for managing Lambda resources. You can access the Resource Browser by opening the LocalStack Web Application in your browser, navigating to the **Resources** section, and then clicking on **Sagemaker** under the **Compute** section. +The LocalStack Web Application provides a [Resource Browser](https://docs.localstack.cloud/user-guide/web-application/resource-browser/) for managing Lambda resources. +You can access the Resource Browser by opening the LocalStack Web Application in your browser, navigating to the **Resources** section, and then clicking on **Sagemaker** under the **Compute** section. -The Resource Browser displays Models, Endpoint Configurations and Endpoint. You can click on individual resources to view their details. +The Resource Browser displays Models, Endpoint Configurations and Endpoints. +You can click on individual resources to view their details. Sagemaker Resource Browser @@ -96,13 +104,12 @@ The Resource Browser allows you to perform the following actions: - **Create and Remove Models**: You can remove existing model and create a new model with the required configuration - Sagemaker Resource Browser +- **Endpoint Configurations & Endpoints**: You can create endpoints from the resource browser that host your deployed machine learning model. + You can also create an endpoint configuration that specifies the type and number of instances that will be used to serve your model on an endpoint. -- **Endpoint Configurations & Endpoints**: You can create endpoints from the resource browser that hosts your deployed machine learning model. You can also create endpoint configuration that specifies the type and number of instances that will be used to serve your model on an endpoint. - -## Examples +## Examples The following code snippets and sample applications provide practical examples of how to use Sagemaker in LocalStack for various use cases: diff --git a/content/en/user-guide/aws/scheduler/index.md b/content/en/user-guide/aws/scheduler/index.md index 539a710981..a20562d61a 100644 --- a/content/en/user-guide/aws/scheduler/index.md +++ b/content/en/user-guide/aws/scheduler/index.md @@ -6,26 +6,32 @@ description: Get started with EventBridge Scheduler on LocalStack ## Introduction -EventBridge Scheduler is a service that enables you to schedule the execution of your AWS Lambda functions, Amazon ECS tasks, and Amazon Batch jobs. You can use EventBridge Scheduler to create schedules that run at a specific time or at regular intervals. You can also use EventBridge Scheduler to create schedules that run within a flexible time window. +EventBridge Scheduler is a service that enables you to schedule the execution of your AWS Lambda functions, Amazon ECS tasks, and Amazon Batch jobs. +You can use EventBridge Scheduler to create schedules that run at a specific time or at regular intervals. +You can also use EventBridge Scheduler to create schedules that run within a flexible time window. -LocalStack allows you to use the Scheduler APIs in your local environment to create and run schedules. 
The supported APIs are available on our [API coverage page](https://docs.localstack.cloud/references/coverage/coverage_scheduler/), which provides information on the extent of EventBridge Scheduler's integration with LocalStack. +LocalStack allows you to use the Scheduler APIs in your local environment to create and run schedules. +The supported APIs are available on our [API coverage page](https://docs.localstack.cloud/references/coverage/coverage_scheduler/), which provides information on the extent of EventBridge Scheduler's integration with LocalStack. ## Getting started This guide is designed for users new to EventBridge Scheduler and assumes basic knowledge of the AWS CLI and our [`awslocal`](https://github.com/localstack/awscli-local) wrapper script. -Start your LocalStack container using your preferred method. We will demonstrate how you can create a new schedule, list all schedules, and tag a schedule using the EventBridge Scheduler APIs. +Start your LocalStack container using your preferred method. +We will demonstrate how you can create a new schedule, list all schedules, and tag a schedule using the EventBridge Scheduler APIs. ### Create a new SQS queue -You can create a new SQS queue using the [`CreateQueue`](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_CreateQueue.html) API. Run the following command to create a new SQS queue: +You can create a new SQS queue using the [`CreateQueue`](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_CreateQueue.html) API. +Run the following command to create a new SQS queue: {{< command >}} $ awslocal sqs create-queue --queue-name local-notifications {{< /command >}} -You can fetch the Queue ARN using the [`GetQueueAttributes`](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_GetQueueAttributes.html) API. Run the following command to fetch the Queue ARN by specifying the Queue URL: - +You can fetch the Queue ARN using the [`GetQueueAttributes`](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_GetQueueAttributes.html) API. +Run the following command to fetch the Queue ARN by specifying the Queue URL: + {{< command >}} $ awslocal sqs get-queue-attributes \ --queue-url http://sqs.us-east-1.localhost.localstack.cloud:4566/000000000000/local-notifications \ @@ -36,7 +42,8 @@ Save the Queue ARN for later use. ### Create a new schedule -You can create a new schedule using the [`CreateSchedule`](https://docs.aws.amazon.com/eventbridge/latest/APIReference/API_CreateSchedule.html) API. Run the following command to create a new schedule: +You can create a new schedule using the [`CreateSchedule`](https://docs.aws.amazon.com/eventbridge/latest/APIReference/API_CreateSchedule.html) API. +Run the following command to create a new schedule: {{< command >}} $ awslocal scheduler create-schedule \ @@ -56,7 +63,8 @@ The following output is displayed: ### List all schedules -You can list all schedules using the [`ListSchedules`](https://docs.aws.amazon.com/eventbridge/latest/APIReference/API_ListSchedules.html) API. Run the following command to list all schedules: +You can list all schedules using the [`ListSchedules`](https://docs.aws.amazon.com/eventbridge/latest/APIReference/API_ListSchedules.html) API. 
+Run the following command to list all schedules: {{< command >}} $ awslocal scheduler list-schedules @@ -84,7 +92,8 @@ The following output is displayed: ### Tag a schedule -You can tag a schedule using the [`TagResource`](https://docs.aws.amazon.com/eventbridge/latest/APIReference/API_TagResource.html) API. Run the following command to tag a schedule: +You can tag a schedule using the [`TagResource`](https://docs.aws.amazon.com/eventbridge/latest/APIReference/API_TagResource.html) API. +Run the following command to tag a schedule: {{< command >}} $ awslocal scheduler tag-resource \ @@ -92,7 +101,8 @@ $ awslocal scheduler tag-resource \ --tags Key=Name,Value=Test {{< /command >}} -You can view the tags associated with a schedule using the [`ListTagsForResource`](https://docs.aws.amazon.com/eventbridge/latest/APIReference/API_ListTagsForResource.html) API. Run the following command to list the tags associated with a schedule: +You can view the tags associated with a schedule using the [`ListTagsForResource`](https://docs.aws.amazon.com/eventbridge/latest/APIReference/API_ListTagsForResource.html) API. +Run the following command to list the tags associated with a schedule: {{< command >}} $ awslocal scheduler list-tags-for-resource \ diff --git a/content/en/user-guide/aws/secretsmanager/index.md b/content/en/user-guide/aws/secretsmanager/index.md index c693d22cf2..c8b33d1288 100644 --- a/content/en/user-guide/aws/secretsmanager/index.md +++ b/content/en/user-guide/aws/secretsmanager/index.md @@ -46,7 +46,8 @@ $ awslocal secretsmanager create-secret \ --secret-string file://secrets.json {{}} -Upon successful execution, the output will provide you with the ARN of the newly created secret. This identifier will be useful for further operations or integrations. +Upon successful execution, the output will provide you with the ARN of the newly created secret. +This identifier will be useful for further operations or integrations. The following output would be retrieved: @@ -104,7 +105,7 @@ $ awslocal secretsmanager get-secret-value \ {{}} The following output would be retrieved: - + {{}} { "ARN": "arn:aws:secretsmanager:us-east-1:000000000000:secret:test-secret-pyfjVP", @@ -129,7 +130,8 @@ $ awslocal secretsmanager tag-resource \ ### Rotate the secret -To rotate a secret, you need a Lambda function that can rotate the secret. You can copy the code from a [Secrets Manager template](https://docs.aws.amazon.com/secretsmanager/latest/userguide/reference_available-rotation-templates.html) or you can use a [generic Lambda function](https://github.com/aws-samples/aws-secrets-manager-rotation-lambdas/blob/master/SecretsManagerRotationTemplate/lambda_function.py) that rotates the secret. +To rotate a secret, you need a Lambda function that can rotate the secret. +You can copy the code from a [Secrets Manager template](https://docs.aws.amazon.com/secretsmanager/latest/userguide/reference_available-rotation-templates.html) or you can use a [generic Lambda function](https://github.com/aws-samples/aws-secrets-manager-rotation-lambdas/blob/master/SecretsManagerRotationTemplate/lambda_function.py) that rotates the secret. Zip the Lambda function and create a Lambda function using the [`CreateFunction`](https://docs.aws.amazon.com/lambda/latest/dg/API_CreateFunction.html) API. 
Execute the following command: @@ -165,7 +167,7 @@ Execute the following command: $ awslocal secretsmanager rotate-secret \ --secret-id MySecret \ --rotation-lambda-arn arn:aws:lambda:us-east-1:000000000000:function:my-rotation-function \ - --rotation-rules "{\"ScheduleExpression\": \"cron(0 16 1,15 * ? *)\", \"Duration\": \"2h\"}" + --rotation-rules "{\"ScheduleExpression\": \"cron(0 16 1,15 * ? *)\", \"Duration\": \"2h\"}" {{}} ## Resource Browser @@ -179,7 +181,8 @@ You can access the Resource Browser by opening the LocalStack Web Application in The Resource Browser allows you to perform the following actions: - **Create Secret**: Create a new secret by clicking **Add a Secret** and providing the required details, such as Name, Tags, Kms Key Id, Secret String, and more. -- **View Secrets**: View the details of a secret by clicking on the secret name. You can also see the secret value by clicking on **Display Secret**. +- **View Secrets**: View the details of a secret by clicking on the secret name. + You can also see the secret value by clicking on **Display Secret**. - **Edit Secret**: Edit the details of a secret by clicking on the secret name and then clicking **Edit Secret** and adding the new secret value. - **Delete Secret**: Delete a secret by clicking on the secret name and then clicking **Actions** and then **Remove Selected**. diff --git a/content/en/user-guide/aws/serverlessrepo/index.md b/content/en/user-guide/aws/serverlessrepo/index.md index b744d6babf..7a8e0af817 100644 --- a/content/en/user-guide/aws/serverlessrepo/index.md +++ b/content/en/user-guide/aws/serverlessrepo/index.md @@ -30,7 +30,7 @@ To create a sample SAM application using the `samlocal` CLI, execute the followi $ samlocal init --runtime python3.9 {{< /command >}} -This command downloads a sample SAM application template and generates a `template.yml` file in the current directory. +This command downloads a sample SAM application template and generates a `template.yml` file in the current directory. The template includes a Lambda function and an API Gateway endpoint that supports a `GET` operation. ### Package the SAM application diff --git a/content/en/user-guide/aws/servicediscovery/index.md b/content/en/user-guide/aws/servicediscovery/index.md index c29001788f..31f8287ef4 100644 --- a/content/en/user-guide/aws/servicediscovery/index.md +++ b/content/en/user-guide/aws/servicediscovery/index.md @@ -196,6 +196,7 @@ This API allows you to query the DNS records associated with the service and per To explore the DNS records of your service and perform other operations, refer to the [AWS CLI documentation](https://docs.aws.amazon.com/cli/latest/reference/servicediscovery/index.html) for comprehensive instructions and examples. ### Using filters + Filters can be used to narrow down the results of a list operation. Filters are supported for the following operations: diff --git a/content/en/user-guide/aws/ses/index.md b/content/en/user-guide/aws/ses/index.md index ac4badbb02..85bba613cb 100644 --- a/content/en/user-guide/aws/ses/index.md +++ b/content/en/user-guide/aws/ses/index.md @@ -56,7 +56,9 @@ $ awslocal ses send-email \ {{< /command >}} {{< callout >}} -In the Community image, no emails are actually sent. Hence this operation is mocked. In the Pro image however, emails are sent via an SMTP server. +In the Community image, no emails are actually sent. +Hence this operation is mocked. +In the Pro image however, emails are sent via an SMTP server. 
{{< /callout >}} ## Sent Emails @@ -97,10 +99,10 @@ $ curl --silent localhost.localstack.cloud:4566/_aws/ses?email=hello@example.com - **Filesystem:** All messages are saved to the state directory (see [filesystem layout]({{< ref "filesystem" >}})). The files are saved as JSON in the `ses/` subdirectory and named by the message ID. - ## SMTP Integration -The Pro image supports sending emails via an SMTP server. Refer to the [Configuration]({{< ref "configuration#emails" >}}) guide for instructions on how to configure the connection parameters of your SMTP server (`SMTP_HOST`/`SMTP_USER`/`SMTP_PASS`). +The Pro image supports sending emails via an SMTP server. +Refer to the [Configuration]({{< ref "configuration#emails" >}}) guide for instructions on how to configure the connection parameters of your SMTP server (`SMTP_HOST`/`SMTP_USER`/`SMTP_PASS`). {{< callout "tip" >}} If you do not have access to a proper SMTP server, you can use tools like [MailDev](https://github.com/maildev/maildev) or [smtp4dev](https://github.com/rnwood/smtp4dev). @@ -118,7 +120,8 @@ LocalStack Web Application provides a resource browser for managing email identi The Resource Browser allows you to perform following actions: - **Create Email Identity**: Create an email identity by clicking **Create Identity** and specifying the email address. -- **View Sent Emails**: View all sent emails from an email identity by clicking the email address. You can the view the details of a sent email by selecting them from the list. +- **View Sent Emails**: View all sent emails from an email identity by clicking the email address. + You can then view the details of a sent email by selecting it from the list. - **Send Emails**: On selecting an email identity, click **Send Message** and specify destination fields (To, CC and BCC addresses) and the body (Plaintext, HTML) to send an email. ## Current Limitations diff --git a/content/en/user-guide/aws/sns/index.md b/content/en/user-guide/aws/sns/index.md index 3d4133d7d1..c8a4df4da3 100644 --- a/content/en/user-guide/aws/sns/index.md +++ b/content/en/user-guide/aws/sns/index.md @@ -8,25 +8,31 @@ persistence: supported ## Introduction -Simple Notification Service (SNS) is a serverless messaging service that can distribute a massive number of messages to multiple subscribers and can be used to send messages to mobile devices, email addresses, and HTTP(s) endpoints. SNS employs the Publish/Subscribe, an asynchronous messaging pattern that decouples services that produce events from services that process events. +Simple Notification Service (SNS) is a serverless messaging service that can distribute a massive number of messages to multiple subscribers and can be used to send messages to mobile devices, email addresses, and HTTP(s) endpoints. +SNS employs the Publish/Subscribe, an asynchronous messaging pattern that decouples services that produce events from services that process events. -LocalStack allows you to use the SNS APIs in your local environment to coordinate the delivery of messages to subscribing endpoints or clients. The supported APIs are available on our [API coverage page](https://docs.localstack.cloud/references/coverage/coverage_sns/), which provides information on the extent of SNS's integration with LocalStack. +LocalStack allows you to use the SNS APIs in your local environment to coordinate the delivery of messages to subscribing endpoints or clients. 
+The supported APIs are available on our [API coverage page](https://docs.localstack.cloud/references/coverage/coverage_sns/), which provides information on the extent of SNS's integration with LocalStack. ## Getting started -This guide is intended for users who wish to get more acquainted with SNS over LocalStack. It assumes you have basic knowledge of the AWS CLI (and our [`awslocal`](https://github.com/localstack/awscli-local) wrapper script). +This guide is intended for users who wish to get more acquainted with SNS over LocalStack. +It assumes you have basic knowledge of the AWS CLI (and our [`awslocal`](https://github.com/localstack/awscli-local) wrapper script). -Start your LocalStack container using your preferred method. We will demonstrate how to create an SNS topic, publish messages, and subscribe to the topic. +Start your LocalStack container using your preferred method. +We will demonstrate how to create an SNS topic, publish messages, and subscribe to the topic. ### Create an SNS topic -To create an SNS topic, use the [`CreateTopic`](https://docs.aws.amazon.com/sns/latest/api/API_CreateTopic.html) API. Run the following command to create a topic named `localstack-topic`: +To create an SNS topic, use the [`CreateTopic`](https://docs.aws.amazon.com/sns/latest/api/API_CreateTopic.html) API. +Run the following command to create a topic named `localstack-topic`: {{< command >}} $ awslocal sns create-topic --name localstack-topic {{< /command >}} -You can set the SNS topic attribute using the SNS topic you created previously by using the [`SetTopicAttributes`](https://docs.aws.amazon.com/sns/latest/api/API_SetTopicAttributes.html) API. Run the following command to set the `DisplayName` attribute for the topic: +You can set the SNS topic attribute using the SNS topic you created previously by using the [`SetTopicAttributes`](https://docs.aws.amazon.com/sns/latest/api/API_SetTopicAttributes.html) API. +Run the following command to set the `DisplayName` attribute for the topic: {{< command >}} $ awslocal sns set-topic-attributes \ @@ -35,7 +41,8 @@ $ awslocal sns set-topic-attributes \ --attribute-value MyTopicDisplayName {{< /command >}} -You can list all the SNS topics using the [`ListTopics`](https://docs.aws.amazon.com/sns/latest/api/API_ListTopics.html) API. Run the following command to list all the SNS topics: +You can list all the SNS topics using the [`ListTopics`](https://docs.aws.amazon.com/sns/latest/api/API_ListTopics.html) API. +Run the following command to list all the SNS topics: {{< command >}} $ awslocal sns list-topics @@ -43,7 +50,8 @@ $ awslocal sns list-topics ### Get attributes and publish messages to SNS topic -You can get attributes for a single SNS topic using the [`GetTopicAttributes`](https://docs.aws.amazon.com/sns/latest/api/API_GetTopicAttributes.html) API. Run the following command to get the attributes for the SNS topic: +You can get attributes for a single SNS topic using the [`GetTopicAttributes`](https://docs.aws.amazon.com/sns/latest/api/API_GetTopicAttributes.html) API. +Run the following command to get the attributes for the SNS topic: {{< command >}} $ awslocal sns get-topic-attributes \ @@ -52,7 +60,8 @@ $ awslocal sns get-topic-attributes \ You can change the `topic-arn` to the ARN of the SNS topic you created previously. -To publish messages to the SNS topic, create a new file named `messages.txt` in your current directory and add some content. 
Run the following command to publish messages to the SNS topic using the [`Publish`](https://docs.aws.amazon.com/sns/latest/api/API_Publish.html) API: +To publish messages to the SNS topic, create a new file named `messages.txt` in your current directory and add some content. +Run the following command to publish messages to the SNS topic using the [`Publish`](https://docs.aws.amazon.com/sns/latest/api/API_Publish.html) API: {{< command >}} $ awslocal sns publish \ @@ -62,7 +71,8 @@ $ awslocal sns publish \ ### Subscribing to SNS topics and setting subscription attributes -You can subscribe to the SNS topic using the [`Subscribe`](https://docs.aws.amazon.com/sns/latest/api/API_Subscribe.html) API. Run the following command to subscribe to the SNS topic: +You can subscribe to the SNS topic using the [`Subscribe`](https://docs.aws.amazon.com/sns/latest/api/API_Subscribe.html) API. +Run the following command to subscribe to the SNS topic: {{< command >}} $ awslocal sns subscribe \ @@ -152,7 +162,7 @@ $ awslocal sns unsubscribe --subscription-arn "arn:aws:sns:us-east-1:00000000000 ## Developer endpoints LocalStack’s SNS implementation offers additional endpoints for developers located at `/_aws/sns`. -These endpoints provide the ability to access different SNS internals, like Platform Endpoint messages which are not sent to those platforms, or Subscription Tokens which you might not be able to retrieve otherwise. +These endpoints provide the ability to access different SNS internals, like Platform Endpoint messages which are not sent to those platforms, or Subscription Tokens which you might not be able to retrieve otherwise. ### Platform Endpoint messages @@ -161,7 +171,7 @@ To learn more about SNS mobile push notifications, refer to the [AWS documentati You can access these messages in JSON format through `GET /_aws/sns/platform-endpoint-messages`. To retrieve specific messages, you can use query parameters to filter by `accountId`, `region`, and `endpointArn`. -You can also call `DELETE /_aws/sns/platform-endpoint-messages` to clear the messages. +You can also call `DELETE /_aws/sns/platform-endpoint-messages` to clear the messages. #### Query parameters @@ -176,16 +186,18 @@ You can also call `DELETE /_aws/sns/platform-endpoint-messages` to clear the mes | Attribute | Description | | - | - | | `platform_endpoint_messages` | Contains endpoints ARN as field names. Each endpoint will have its messages in an Array. | -| `region` | The region of the endpoints and messages. | +| `region` | The region of the endpoints and messages. |
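Any HTTP client can read this developer endpoint in tests. The snippet below is a minimal sketch using Python with the `requests` library (an assumed dependency); the filter values are placeholders for the ARNs created in the example that follows:

```python
# Minimal sketch: fetching platform endpoint messages from LocalStack's SNS
# developer endpoint. The endpointArn filter is a hypothetical placeholder.
import requests

response = requests.get(
    "http://localhost:4566/_aws/sns/platform-endpoint-messages",
    params={
        "region": "us-east-1",
        # "endpointArn": "arn:aws:sns:us-east-1:000000000000:endpoint/APNS/app-test/...",
    },
)
response.raise_for_status()
data = response.json()
for endpoint_arn, messages in data["platform_endpoint_messages"].items():
    print(endpoint_arn, "->", len(messages), "message(s)")
```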
-In this example, we will create a platform endpoint in SNS and publish a message to it. Run the following commands to create a platform endpoint: +In this example, we will create a platform endpoint in SNS and publish a message to it. +Run the following commands to create a platform endpoint: {{< command >}} $ awslocal sns create-platform-application --name app-test --platform APNS --attributes {} {{< /command >}} An example response is shown below: + ```json { "PlatformApplicationArn": "arn:aws:sns:us-east-1:000000000000:app/APNS/app-test" @@ -196,6 +208,7 @@ Using the `PlatformApplicationArn` from the previous call: {{< command >}} $ awslocal sns create-platform-endpoint --platform-application-arn "arn:aws:sns:us-east-1:000000000000:app/APNS/app-test" --token my-fake-token {{< /command >}} + ```json { "EndpointArn": "arn:aws:sns:us-east-1:000000000000:endpoint/APNS/app-test/c25f353e-856b-4b02-a725-6bde35e6e944" @@ -207,6 +220,7 @@ Publish a message to the platform endpoint: {{< command >}} $ awslocal sns publish --target-arn "arn:aws:sns:us-east-1:000000000000:endpoint/APNS/app-test/c25f353e-856b-4b02-a725-6bde35e6e944" --message '{"APNS_PLATFORM": "{\"aps\": {\"content-available\": 1}}"}' --message-structure json {{< /command >}} + ```json { "MessageId": "ed501a7a-caab-45aa-a941-2fcc64b5c227" @@ -218,6 +232,7 @@ Retrieve the messages published to the platform endpoint using `cURL`: {{< command >}} $ curl "http://localhost:4566/_aws/sns/platform-endpoint-messages" | jq . {{< /command >}} + ```json { "platform_endpoint_messages": { @@ -235,8 +250,8 @@ $ curl "http://localhost:4566/_aws/sns/platform-endpoint-messages" | jq . } ``` - -With those same filters, you can reset the saved messages at `DELETE /_aws/sns/platform-endpoint-messages`. Run the following command to reset the saved messages: +With those same filters, you can reset the saved messages at `DELETE /_aws/sns/platform-endpoint-messages`. +Run the following command to reset the saved messages: {{< command >}} $ curl -X "DELETE" "http://localhost:4566/_aws/sns/platform-endpoint-messages" @@ -245,6 +260,7 @@ We can now check that the messages have been properly deleted: {{< command >}} $ curl "http://localhost:4566/_aws/sns/platform-endpoint-messages" | jq . {{< /command >}} + ```json { "platform_endpoint_messages": {}, @@ -274,7 +290,7 @@ You can also call `DELETE /_aws/sns/sms-messages` to clear the messages. | Attribute | Description | | - | - | | `sms_messages` | Contains phone numbers as field names. Each phone number will have its messages in an Array. | -| `region` | The region from where the messages were sent. | +| `region` | The region from where the messages were sent. |
@@ -286,6 +302,7 @@ Publish a message to a phone number: $ awslocal sns publish --phone-number "" --message "Hello World!" {{< /command >}} An example response is shown below: + ```json { "MessageId": "9ce56934-dcc4-45f5-ba40-13691329fc67" @@ -297,6 +314,7 @@ Retrieve the message published using `cURL` and `jq`: {{< command >}} $ curl "http://localhost:4566/_aws/sns/sms-messages" | jq . {{< /command >}} + ```json { "sms_messages": { @@ -328,6 +346,7 @@ We can now check that the messages have been properly deleted: {{< command >}} $ curl "http://localhost:4566/_aws/sns/sms-messages" | jq . {{< /command >}} + ```json { "sms_messages": {}, @@ -337,12 +356,14 @@ $ curl "http://localhost:4566/_aws/sns/sms-messages" | jq . ### Subscription Tokens -In case of email and HTTP(S) subscriptions, a special message is sent to the subscriber with a link to confirm the subscription so that it will be able to receive the messages afterwards. SNS does not send messages to endpoints pending confirmation. +In case of email and HTTP(S) subscriptions, a special message is sent to the subscriber with a link to confirm the subscription so that it will be able to receive the messages afterwards. +SNS does not send messages to endpoints pending confirmation. However, when working with external integrations, the link sent will most probably point to your local environment, which won't be accessible from the external integration to confirm. To still be able to test your external integrations, we expose the subscription tokens so that you can manually confirm the subscription. -The subscription tokens are never deleted from memory, because they can be re-used. To manually confirm the subscription, you will use [`ConfirmSubscription`](https://docs.aws.amazon.com/sns/latest/api/API_ConfirmSubscription.html). +The subscription tokens are never deleted from memory, because they can be re-used. +To manually confirm the subscription, you will use [`ConfirmSubscription`](https://docs.aws.amazon.com/sns/latest/api/API_ConfirmSubscription.html). To learn more about confirming subscriptions, refer to the [AWS documentation](https://docs.aws.amazon.com/sns/latest/dg/SendMessageToHttp.confirm.html). @@ -359,7 +380,7 @@ You can access the subscription tokens in JSON format through `GET /_aws/sns/sub | Attribute | Description | | - | - | | `subscription_token` | The Subscription token to be used with `ConfirmSubscription`. | -| `subscription_arn` | The Subscription ARN provided. | +| `subscription_arn` | The Subscription ARN provided. |
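As a rough sketch of how this can be automated in a test, the snippet below fetches the token from the developer endpoint and confirms the subscription with boto3; the ARNs are placeholders, and the endpoint and dummy credentials assume a default LocalStack setup:

```python
# Minimal sketch: confirm a pending SNS subscription using the token exposed by
# LocalStack's developer endpoint. Topic and subscription ARNs are placeholders.
import boto3
import requests

ENDPOINT = "http://localhost:4566"
topic_arn = "arn:aws:sns:us-east-1:000000000000:test-external-integration"
subscription_arn = topic_arn + ":<subscription-id>"  # use the ARN returned by Subscribe

token = requests.get(
    f"{ENDPOINT}/_aws/sns/subscription-tokens/{subscription_arn}"
).json()["subscription_token"]

sns = boto3.client(
    "sns",
    endpoint_url=ENDPOINT,
    region_name="us-east-1",
    aws_access_key_id="test",
    aws_secret_access_key="test",
)
sns.confirm_subscription(TopicArn=topic_arn, Token=token)
```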
@@ -370,25 +391,30 @@ Create an SNS topic, and create a subscription to a external HTTP SNS integratio {{< command >}} awslocal sns create-topic --name "test-external-integration" {{< /command >}} + ```json { "TopicArn": "arn:aws:sns:us-east-1:000000000000:test-external-integration" } ``` + We now create an HTTP SNS subscription to an external endpoint: {{< command >}} awslocal sns subscribe --topic-arn "arn:aws:sns:us-east-1:000000000000:test-external-integration" --protocol https --notification-endpoint "https://api.opsgenie.com/v1/json/amazonsns?apiKey=b13fd59a-9" --return-subscription-arn {{< /command >}} + ```json { "SubscriptionArn": "arn:aws:sns:us-east-1:000000000000:test-external-integration:c3ab47f3-b964-461d-84eb-903d8765b0c8" } ``` -Now, we can check the `PendingConfirmation` status of our subscription, showing our endpoint did not confirm the subscription. You will need to use the `SubscriptionArn` from the response of your subscribe call: +Now, we can check the `PendingConfirmation` status of our subscription, showing our endpoint did not confirm the subscription. +You will need to use the `SubscriptionArn` from the response of your subscribe call: {{< command >}} awslocal sns get-subscription-attributes --subscription-arn "arn:aws:sns:us-east-1:000000000000:test-external-integration:c3ab47f3-b964-461d-84eb-903d8765b0c8" {{< /command >}} + ```json { "Attributes": { @@ -408,6 +434,7 @@ To manually confirm the subscription, we will fetch its token with our developer {{< command >}} curl "http://localhost:4566/_aws/sns/subscription-tokens/arn:aws:sns:us-east-1:000000000000:test-external-integration:c3ab47f3-b964-461d-84eb-903d8765b0c8" | jq . {{< /command >}} + ```json { "subscription_token": "75732d656173742d312f3b875fb03b875fb03b875fb03b875fb03b875fb03b87", @@ -419,6 +446,7 @@ We can now use this token to manually confirm the subscription: {{< command >}} awslocal sns confirm-subscription --topic-arn "arn:aws:sns:us-east-1:000000000000:test-external-integration" --token 75732d656173742d312f3b875fb03b875fb03b875fb03b875fb03b875fb03b87 {{< /command >}} + ```json { "SubscriptionArn": "arn:aws:sns:us-east-1:000000000000:test-external-integration:c3ab47f3-b964-461d-84eb-903d8765b0c8" @@ -429,6 +457,7 @@ We can now finally verify the subscription has been confirmed: {{< command >}} awslocal sns get-subscription-attributes --subscription-arn "arn:aws:sns:us-east-1:000000000000:test-external-integration:c3ab47f3-b964-461d-84eb-903d8765b0c8" {{< /command >}} + ```json { "Attributes": { @@ -449,7 +478,8 @@ SNS will now publish messages to your HTTP endpoint, even if it did not confirm ## Resource Browser -The LocalStack Web Application provides a Resource Browser for managing SNS topics. You can access the Resource Browser by opening the LocalStack Web Application in your browser, navigating to the **Resources** section, and then clicking on **SNS** under the **App Integration** section. +The LocalStack Web Application provides a Resource Browser for managing SNS topics. +You can access the Resource Browser by opening the LocalStack Web Application in your browser, navigating to the **Resources** section, and then clicking on **SNS** under the **App Integration** section. SNS Resource Browser @@ -457,7 +487,8 @@ The Resource Browser allows you to perform the following actions: - **Create Topic**: Create a new SNS topic by specifying a topic name, attributes, and tags. 
- **View Details and Subscription**: View details and subscription of an SNS topic by selecting the topic name and navigating to the **Details** and **Subscriptions** tabs. -- **Create Subscription**: Create a new subscription for an SNS topic by selecting the topic name, navigating to the **Subscriptions** tab, and clicking the **Create Subscription** button. Fill in the required details such as protocol, endpoint, and attributes, delivery policy, return subscription ARN, and click **Create**. +- **Create Subscription**: Create a new subscription for an SNS topic by selecting the topic name, navigating to the **Subscriptions** tab, and clicking the **Create Subscription** button. + Fill in the required details such as protocol, endpoint, and attributes, delivery policy, return subscription ARN, and click **Create**. - **Delete Topic**: Delete an SNS topic by selecting the topic name and clicking the **Action** button, followed by **Delete Selected**. ## Examples @@ -471,4 +502,5 @@ The following code snippets and sample applications provide practical examples o ## Current Limitations -- LocalStack does not support the `cidr` operator for filter policies. However, [other policies](https://docs.aws.amazon.com/sns/latest/dg/sns-subscription-filter-policies.html) are supported. +- LocalStack does not support the `cidr` operator for filter policies. + However, [other policies](https://docs.aws.amazon.com/sns/latest/dg/sns-subscription-filter-policies.html) are supported. diff --git a/content/en/user-guide/aws/sqs/index.md b/content/en/user-guide/aws/sqs/index.md index 14ff24ac69..1b50ead1a5 100644 --- a/content/en/user-guide/aws/sqs/index.md +++ b/content/en/user-guide/aws/sqs/index.md @@ -113,7 +113,8 @@ $ awslocal sqs purge-queue --queue-url http://sqs.us-east-1.localhost.localstack LocalStack's SQS implementation supports both regular [dead-letter queues (DLQ)](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-dead-letter-queues.html) and [DLQ redrive](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-configure-dead-letter-queue-redrive.html) via move message tasks. Here's an end-to-end example of how to use message move tasks to test DLQ redrive. -First, create three queues. One will serve as original input queue, one as DLQ, and the third as target for DLQ redrive. +First, create three queues. +One will serve as original input queue, one as DLQ, and the third as target for DLQ redrive. 
{{< command >}} $ awslocal sqs create-queue --queue-name input-queue $ awslocal sqs create-queue --queue-name dead-letter-queue @@ -129,7 +130,6 @@ $ awslocal sqs create-queue --queue-name recovery-queue } {{< /command >}} - Configure `dead-letter-queue` to be a DLQ for `input-queue`: {{< command >}} $ awslocal sqs set-queue-attributes \ @@ -151,7 +151,8 @@ $ awslocal sqs receive-message --visibility-timeout 0 --queue-url http://sqs.us- {{< /command >}} In the localstack logs you should see something like the following line, indicating the message was moved to the DLQ: -``` + +```bash 2024-01-24T13:51:16.824 DEBUG --- [ asgi_gw_1] l.services.sqs.models : message SqsMessage(id=5be95a04-93f0-4b9d-8bd5-6695f34758cf,group=None) has been received 2 times, marking it for DLQ ``` @@ -194,7 +195,6 @@ $ awslocal sqs receive-message --queue-url http://sqs.us-east-1.localhost.locals } {{< /command >}} - ## SQS Query API The [SQS Query API](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-making-api-requests.html), provides SQS Queue URLs as endpoints, enabling direct HTTP requests to the queues. @@ -213,14 +213,14 @@ You will see the following output: ```xml - - c6be4e95a26409675447367b3e79f663 - 466144ab-1d03-4ec5-8d70-97535b2957fb - - - JU40AF5GORK0WSR75MOY3VNQ1KZ3TAI7S5KAJYGK9C5P4W4XKMGF - + xmlns="http://queue.amazonaws.com/doc/2012-11-05/"> + + c6be4e95a26409675447367b3e79f663 + 466144ab-1d03-4ec5-8d70-97535b2957fb + + + JU40AF5GORK0WSR75MOY3VNQ1KZ3TAI7S5KAJYGK9C5P4W4XKMGF + ``` @@ -237,15 +237,15 @@ The response will be in JSON format: ```json { - "SendMessageResponse": { - "SendMessageResult": { - "MD5OfMessageBody": "c6be4e95a26409675447367b3e79f663", - "MessageId": "748297f2-4abd-4ec2-afc0-4d1a497fe604" - }, - "ResponseMetadata": { - "RequestId": "XEA5L5AX16RTPET25U3TIRIASN6KNIT820WIT3EY7RCH7164W68T" - } - } + "SendMessageResponse": { + "SendMessageResult": { + "MD5OfMessageBody": "c6be4e95a26409675447367b3e79f663", + "MessageId": "748297f2-4abd-4ec2-afc0-4d1a497fe604" + }, + "ResponseMetadata": { + "RequestId": "XEA5L5AX16RTPET25U3TIRIASN6KNIT820WIT3EY7RCH7164W68T" + } + } } ``` @@ -309,9 +309,12 @@ If you wish to disable all CloudWatch metrics for SQS, including the `Approximat ## Accessing queues from Lambdas or other containers -Using the SQS Query API, Queue URLs act as accessible endpoints via HTTP. Several SDKs, such as the Java SDK, leverage the SQS Query API for SQS interaction. +Using the SQS Query API, Queue URLs act as accessible endpoints via HTTP. +Several SDKs, such as the Java SDK, leverage the SQS Query API for SQS interaction. -By default, Queue URLs are configured to point to `http://localhost:4566`. This configuration can pose problems when Lambdas or other containers attempt to make direct calls to these queue URLs. These issues arise due to the fact that a Lambda function operates within a separate Docker container, and LocalStack is not accessible at the `localhost` address within that container. +By default, Queue URLs are configured to point to `http://localhost:4566`. +This configuration can pose problems when Lambdas or other containers attempt to make direct calls to these queue URLs. +These issues arise due to the fact that a Lambda function operates within a separate Docker container, and LocalStack is not accessible at the `localhost` address within that container. 
For instance, users of the Java SDK often encounter the following error when trying to access an SQS queue from their Lambda functions: @@ -325,15 +328,18 @@ To address this issue, you can consider the steps documented below. ### Lambda -When utilizing the SQS Query API in Lambdas, we suggest configuring `SQS_ENDPOINT_STRATEGY=domain`. This configuration results in queue URLs using `*.queue.localhost.localstack.cloud` as their domain names. Our Lambda implementation automatically resolves these URLs to the LocalStack container, ensuring smooth interaction between your code and the SQS service. +When utilizing the SQS Query API in Lambdas, we suggest configuring `SQS_ENDPOINT_STRATEGY=domain`. +This configuration results in queue URLs using `*.queue.localhost.localstack.cloud` as their domain names. +Our Lambda implementation automatically resolves these URLs to the LocalStack container, ensuring smooth interaction between your code and the SQS service. ### Other containers -When your code run within different containers like ECS tasks or your custom ones, it's advisable to establish your Docker network setup. You can follow these steps: +When your code run within different containers like ECS tasks or your custom ones, it's advisable to establish your Docker network setup. +You can follow these steps: -1. Override the `LOCALSTACK_HOST` variable as outlined in our [network troubleshooting guide]({{< ref "endpoint-url" >}}). -2. Ensure that your containers can resolve `LOCALSTACK_HOST` to the LocalStack container within the Docker network. -3. We recommend employing `SQS_ENDPOINT_STRATEGY=path`, which generates queue URLs in the format `http:///queue/...`. +1. Override the `LOCALSTACK_HOST` variable as outlined in our [network troubleshooting guide]({{< ref "endpoint-url" >}}). +2. Ensure that your containers can resolve `LOCALSTACK_HOST` to the LocalStack container within the Docker network. +3. We recommend employing `SQS_ENDPOINT_STRATEGY=path`, which generates queue URLs in the format `http:///queue/...`. ## Developer endpoints @@ -352,12 +358,12 @@ The endpoint ignores any additional parameters from the `ReceiveMessage` operati You can call the `/_aws/sqs/messages` endpoint in two different ways: -1. Using the query argument `QueueUrl`, like this: +1. Using the query argument `QueueUrl`, like this: {{< command >}} $ http://localhost.localstack.cloud:4566/_aws/sqs/messages?QueueUrl=http://sqs.us-east-1.localhost.localstack.cloud:4566/000000000000/my-queue - {{< / command >}} - -2. Utilizing the path-based endpoint, as shown in this example: + {{< / command >}} + +2. Utilizing the path-based endpoint, as shown in this example: {{< command >}} $ http://localhost.localstack.cloud:4566/_aws/sqs/messages/us-east-1/000000000000/my-queue {{< / command >}} @@ -641,4 +647,4 @@ The following code snippets and sample applications provide practical examples o ## Current Limitations -* Updating a queue's `MessageRetentionPeriod` currently has no effect on existing messages +- Updating a queue's `MessageRetentionPeriod` currently has no effect on existing messages diff --git a/content/en/user-guide/aws/ssm/index.md b/content/en/user-guide/aws/ssm/index.md index 1df205d959..60b8d2e3f2 100644 --- a/content/en/user-guide/aws/ssm/index.md +++ b/content/en/user-guide/aws/ssm/index.md @@ -9,26 +9,34 @@ persistence: supported ## Introduction -Systems Manager (SSM) is a management service provided by Amazon Web Services that helps you effectively manage and control your infrastructure resources. 
SSM simplifies tasks related to system and application management, patching, configuration, and automation, allowing you to maintain the health and compliance of your environment. +Systems Manager (SSM) is a management service provided by Amazon Web Services that helps you effectively manage and control your infrastructure resources. +SSM simplifies tasks related to system and application management, patching, configuration, and automation, allowing you to maintain the health and compliance of your environment. -LocalStack allows you to use the SSM APIs in your local environment to run operational tasks on the Dockerized instances. The supported APIs are available on our [API coverage page](https://docs.localstack.cloud/references/coverage/coverage_ssm/), which provides information on the extent of SSM's integration with LocalStack. +LocalStack allows you to use the SSM APIs in your local environment to run operational tasks on the Dockerized instances. +The supported APIs are available on our [API coverage page](https://docs.localstack.cloud/references/coverage/coverage_ssm/), which provides information on the extent of SSM's integration with LocalStack. ## Getting started This guide is designed for users new to Systems Manager (SSM) and assumes basic knowledge of the AWS CLI and our [`awslocal`](https://github.com/localstack/awscli-local) wrapper script. -Start your LocalStack container using your preferred method with an additional `EC2_VM_MANAGER=docker` configuration variable. We will demonstrate how to use EC2 and SSM functionalities when using the Docker backend with LocalStack with the AWS CLI. +Start your LocalStack container using your preferred method with an additional `EC2_VM_MANAGER=docker` configuration variable. +We will demonstrate how to use EC2 and SSM functionalities when using the Docker backend with LocalStack with the AWS CLI. ### Create an EC2 instance -To get started, pull the `ubuntu:focal` image from Docker Hub and tag it as `localstack-ec2/ubuntu-focal-docker-ami:ami-00a001`. LocalStack uses a naming scheme to recognise and manage the containers and images associated with it. The container are named `localstack-ec2.`, while images are tagged `localstack-ec2/:`. +To get started, pull the `ubuntu:focal` image from Docker Hub and tag it as `localstack-ec2/ubuntu-focal-docker-ami:ami-00a001`. +LocalStack uses a naming scheme to recognise and manage the containers and images associated with it. +The containers are named `localstack-ec2.`, while images are tagged `localstack-ec2/:`. {{< command >}} $ docker pull ubuntu:focal $ docker tag ubuntu:focal localstack-ec2/ubuntu-focal-docker-ami:ami-00a001 {{< / command >}} -LocalStack's Docker backend treats Docker images with the above naming scheme as AMIs. The AMI ID is the last part of the image tag, `ami-00a001` in this case. You can run an EC2 instance using the [`RunInstances`](https://docs.aws.amazon.com/systems-manager/latest/APIReference/API_RunInstances.html) API. Execute the following command to create an EC2 instance using the `ami-00a001` AMI. +LocalStack's Docker backend treats Docker images with the above naming scheme as AMIs. +The AMI ID is the last part of the image tag, `ami-00a001` in this case. +You can run an EC2 instance using the [`RunInstances`](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RunInstances.html) API. +Execute the following command to create an EC2 instance using the `ami-00a001` AMI. 
{{< command >}} $ awslocal ec2 run-instances \ @@ -61,7 +69,8 @@ You can copy the `InstanceId` value and use it in the following commands. ### Send command using SSM -You can use the [`SendCommand`](https://docs.aws.amazon.com/systems-manager/latest/APIReference/API_SendCommand.html) API to send commands to the EC2 instance. The following command sends a `cat lsb-release` command in the `/etc` directory to the EC2 instance. +You can use the [`SendCommand`](https://docs.aws.amazon.com/systems-manager/latest/APIReference/API_SendCommand.html) API to send commands to the EC2 instance. +The following command sends a `cat lsb-release` command in the `/etc` directory to the EC2 instance. {{< command >}} $ awslocal ssm send-command --document-name "AWS-RunShellScript" \ @@ -90,7 +99,8 @@ You can copy the `CommandId` value and use it in the following commands. ### Retrieve the command output -You can use the [`GetCommandInvocation`](https://docs.aws.amazon.com/systems-manager/latest/APIReference/API_GetCommandInvocation.html) API to retrieve the command output. The following command retrieves the output of the command sent in the previous step. +You can use the [`GetCommandInvocation`](https://docs.aws.amazon.com/systems-manager/latest/APIReference/API_GetCommandInvocation.html) API to retrieve the command output. +The following command retrieves the output of the command sent in the previous step. {{< command >}} $ awslocal ssm get-command-invocation \ @@ -98,7 +108,8 @@ $ awslocal ssm get-command-invocation \ --instance-id i-abf6920789a06dd84 {{< / command >}} -Change the `CommandId` and `InstanceId` values to the ones you received in the previous step. The following output would be retrieved: +Change the `CommandId` and `InstanceId` values to the ones you received in the previous step. +The following output would be retrieved: ```bash { @@ -114,7 +125,8 @@ Change the `CommandId` and `InstanceId` values to the ones you received in the p ## Resource Browser -The LocalStack Web Application provides a Resource Browser for managing SSM System Parameters. You can access the Resource Browser by opening the LocalStack Web Application in your browser, navigating to the **Resource Browser** section, and then clicking on **Simple Systems Manager (SSM)** under the **Management/Governance** section. +The LocalStack Web Application provides a Resource Browser for managing SSM System Parameters. +You can access the Resource Browser by opening the LocalStack Web Application in your browser, navigating to the **Resource Browser** section, and then clicking on **Simple Systems Manager (SSM)** under the **Management/Governance** section. SSM Resource Browser @@ -136,6 +148,6 @@ The following table highlights some differences between LocalStack SSM and AWS S The other limitations of LocalStack SSM are: -- Dockerized instances only support `AWS-RunShellScript` commands. -- Commands returning non-zero codes won't capture standard output or error streams, leaving them empty. -- Shell constructs such as job controls (`&&`, `||`), and redirection (`>`) are not supported. +- Dockerized instances only support `AWS-RunShellScript` commands. +- Commands returning non-zero codes won't capture standard output or error streams, leaving them empty. +- Shell constructs such as job controls (`&&`, `||`), and redirection (`>`) are not supported. 
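The same SendCommand/GetCommandInvocation flow can also be driven from the Python SDK. The snippet below is a minimal sketch against a default LocalStack setup; the instance ID is a placeholder for the one returned by `run-instances`, and the short sleep is only a simple way to wait for the command to finish:

```python
# Minimal sketch: run a shell command on a Dockerized instance via SSM and read
# its output. The instance ID is a placeholder; endpoint and credentials assume
# a default LocalStack setup.
import time
import boto3

INSTANCE_ID = "i-abf6920789a06dd84"  # replace with your instance ID

ssm = boto3.client(
    "ssm",
    endpoint_url="http://localhost:4566",
    region_name="us-east-1",
    aws_access_key_id="test",
    aws_secret_access_key="test",
)

command = ssm.send_command(
    InstanceIds=[INSTANCE_ID],
    DocumentName="AWS-RunShellScript",
    Parameters={"commands": ["cat lsb-release"], "workingDirectory": ["/etc"]},
)
command_id = command["Command"]["CommandId"]

time.sleep(2)  # crude wait; poll the status in real tests
result = ssm.get_command_invocation(CommandId=command_id, InstanceId=INSTANCE_ID)
print(result["Status"])
print(result["StandardOutputContent"])
```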
diff --git a/content/en/user-guide/aws/stepfunctions/index.md b/content/en/user-guide/aws/stepfunctions/index.md index 9fb598ddcf..aef63d6625 100644 --- a/content/en/user-guide/aws/stepfunctions/index.md +++ b/content/en/user-guide/aws/stepfunctions/index.md @@ -7,19 +7,25 @@ description: > ## Introduction -Step Functions is a serverless workflow engine that enables the orchestrating of multiple AWS services. It provides a JSON-based structured language called Amazon States Language (ASL) which allows to specify how to manage a sequence of tasks and actions that compose the application's workflow. Thus making it easier to build and maintain complex and distributed applications. +Step Functions is a serverless workflow engine that enables the orchestration of multiple AWS services. +It provides a JSON-based structured language called Amazon States Language (ASL) which allows you to specify how to manage a sequence of tasks and actions that compose the application's workflow. +This makes it easier to build and maintain complex and distributed applications. -LocalStack allows you to use the Step Functions APIs in your local environment to create, execute, update, and delete state machines locally. The supported APIs are available on our [API coverage page](https://docs.localstack.cloud/references/coverage/coverage_stepfunctions/), which provides information on the extent of Step Function's integration with LocalStack. +LocalStack allows you to use the Step Functions APIs in your local environment to create, execute, update, and delete state machines locally. +The supported APIs are available on our [API coverage page](https://docs.localstack.cloud/references/coverage/coverage_stepfunctions/), which provides information on the extent of Step Function's integration with LocalStack. ## Getting started This guide is designed for users new to Step Functions and assumes basic knowledge of the AWS CLI and our [`awslocal`](https://github.com/localstack/awscli-local) wrapper script. -Start your LocalStack container using your preferred method. We will demonstrate how you can create a state machine, execute it, and check the status of the execution. +Start your LocalStack container using your preferred method. +We will demonstrate how you can create a state machine, execute it, and check the status of the execution. ### Create a state machine -You can create a state machine using the [`CreateStateMachine`](https://docs.aws.amazon.com/step-functions/latest/apireference/API_CreateStateMachine.html) API. The API requires the name of the state machine, the state machine definition, and the role ARN that the state machine will assume to call AWS services. Run the following command to create a state machine: +You can create a state machine using the [`CreateStateMachine`](https://docs.aws.amazon.com/step-functions/latest/apireference/API_CreateStateMachine.html) API. +The API requires the name of the state machine, the state machine definition, and the role ARN that the state machine will assume to call AWS services. +Run the following command to create a state machine: {{< command >}} $ awslocal stepfunctions create-state-machine \ @@ -57,7 +63,8 @@ The output of the above command is the ARN of the state machine: ### Execute the state machine -You can execute the state machine using the [`StartExecution`](https://docs.aws.amazon.com/step-functions/latest/apireference/API_StartExecution.html) API. The API requires the state machine's ARN and the state machine's input. 
Run the following command to execute the state machine: +You can execute the state machine using the [`StartExecution`](https://docs.aws.amazon.com/step-functions/latest/apireference/API_StartExecution.html) API. +The API requires the state machine's ARN and the state machine's input. +Run the following command to execute the state machine: {{< command >}} $ awslocal stepfunctions start-execution \ @@ -75,7 +83,8 @@ The output of the above command is the execution ARN: ### Check the execution status -To check the status of the execution, you can use the [`DescribeExecution`](https://docs.aws.amazon.com/step-functions/latest/apireference/API_DescribeExecution.html) API. Run the following command to describe the execution: +To check the status of the execution, you can use the [`DescribeExecution`](https://docs.aws.amazon.com/step-functions/latest/apireference/API_DescribeExecution.html) API. +Run the following command to describe the execution: {{< command >}} $ awslocal stepfunctions describe-execution \ @@ -107,7 +116,8 @@ The output of the above command is the execution status: ## Supported services and operations -Step Functions integrates with AWS services, allowing you to invoke API actions for each service within your workflow. LocalStack's Step Functions emulation supports the following AWS services: +Step Functions integrates with AWS services, allowing you to invoke API actions for each service within your workflow. +LocalStack's Step Functions emulation supports the following AWS services: | Supported service integrations | Service | Request Response | Run a Job (.sync) | Run a Job (.sync2) | Wait for Callback (.waitForTaskToken) | |--------------------------------|-------------------------|:---: |:---: |:---: |:---: | @@ -124,7 +134,8 @@ Step Functions integrates with AWS services, allowing you to invoke API actions ## Resource Browser -The LocalStack Web Application provides a Resource Browser for managing Step Functions state machines. You can access the Resource Browser by opening the LocalStack Web Application in your browser, navigating to the **Resource Browser** section, and then clicking on **Step Functions** under the **App Integration** section. +The LocalStack Web Application provides a Resource Browser for managing Step Functions state machines. +You can access the Resource Browser by opening the LocalStack Web Application in your browser, navigating to the **Resource Browser** section, and then clicking on **Step Functions** under the **App Integration** section. Step Functions Resource Browser
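To tie the `StartExecution` and `DescribeExecution` steps together, a small polling loop along the lines below can be useful. This is only a sketch: the state machine ARN is a placeholder built from LocalStack's default account ID and a hypothetical name, and the loop simply waits until the execution leaves the `RUNNING` state.

```bash
#!/bin/bash
# Minimal sketch: start an execution and wait for it to reach a terminal state.
STATE_MACHINE_ARN="arn:aws:states:us-east-1:000000000000:stateMachine:MyStateMachine"  # placeholder ARN

EXECUTION_ARN=$(awslocal stepfunctions start-execution \
  --state-machine-arn "$STATE_MACHINE_ARN" \
  --input '{}' \
  --query "executionArn" --output text)

# DescribeExecution reports RUNNING until the workflow finishes
while true; do
  STATUS=$(awslocal stepfunctions describe-execution \
    --execution-arn "$EXECUTION_ARN" \
    --query "status" --output text)
  if [ "$STATUS" != "RUNNING" ]; then
    break
  fi
  sleep 1
done

echo "Execution ended with status: $STATUS"
```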
diff --git a/content/en/user-guide/aws/support/index.md b/content/en/user-guide/aws/support/index.md index 1f71171e89..3250453279 100644 --- a/content/en/user-guide/aws/support/index.md +++ b/content/en/user-guide/aws/support/index.md @@ -8,23 +8,30 @@ persistence: supported ## Introduction -AWS Support is a service provided by Amazon Web Services (AWS) that offers technical assistance and resources to help you optimize your AWS environment, troubleshoot issues, and maintain operational efficiency. Support APIs provide programmatic access to AWS Support services, including the ability to create and manage support cases programmatically. You can further automate your support workflow using various AWS services, such as Lambda, CloudWatch, and EventBridge. +AWS Support is a service provided by Amazon Web Services (AWS) that offers technical assistance and resources to help you optimize your AWS environment, troubleshoot issues, and maintain operational efficiency. +Support APIs provide programmatic access to AWS Support services, including the ability to create and manage support cases programmatically. +You can further automate your support workflow using various AWS services, such as Lambda, CloudWatch, and EventBridge. -LocalStack allows you to use the Support APIs in your local environment to create and manage new cases, while testing your configurations locally. LocalStack provides a mock implementation via a mock Support Center provided by [Moto](https://docs.getmoto.org/en/latest/docs/services/support.html), and does not create real cases in the AWS. The supported APIs are available on our [API coverage page](https://docs.localstack.cloud/references/coverage/coverage_support/), which provides information on the extent of Support API's integration with LocalStack. +LocalStack allows you to use the Support APIs in your local environment to create and manage new cases, while testing your configurations locally. +LocalStack provides a mock implementation via a mock Support Center provided by [Moto](https://docs.getmoto.org/en/latest/docs/services/support.html), and does not create real cases in the AWS. +The supported APIs are available on our [API coverage page](https://docs.localstack.cloud/references/coverage/coverage_support/), which provides information on the extent of Support API's integration with LocalStack. {{< callout >}} -For technical support with LocalStack, you can reach out through our [support channels](https://docs.localstack.cloud/getting-started/help-and-support/). It's important to note that LocalStack doesn't offer a programmatic interface to create support cases, and this documentation is only intended to demonstrate how you can use and mock the AWS Support APIs in your local environment. +For technical support with LocalStack, you can reach out through our [support channels](https://docs.localstack.cloud/getting-started/help-and-support/). +It's important to note that LocalStack doesn't offer a programmatic interface to create support cases, and this documentation is only intended to demonstrate how you can use and mock the AWS Support APIs in your local environment. {{< /callout >}} ## Getting started This guide is designed for users new to Support and assumes basic knowledge of the AWS CLI and our [`awslocal`](https://github.com/localstack/awscli-local) wrapper script. -Start your LocalStack container using your preferred method. We will demonstrate how you can create a case in the mock Support Center using the AWS CLI. 
+Start your LocalStack container using your preferred method. +We will demonstrate how you can create a case in the mock Support Center using the AWS CLI. ### Create a support case -To create a support case, you can use the [`CreateCase`](https://docs.aws.amazon.com/goto/WebAPI/support-2013-04-15/CreateCase) API. The following example creates a case with the subject "Test case" and the description "This is a test case" in the category "General guidance". +To create a support case, you can use the [`CreateCase`](https://docs.aws.amazon.com/goto/WebAPI/support-2013-04-15/CreateCase) API. +The following example creates a case with the subject "Test case" and the description "This is a test case" in the category "General guidance". {{< command >}} $ awslocal support create-case \ @@ -44,7 +51,8 @@ The following output would be retrieved: ### List support cases -To list all support cases, you can use the [`DescribeCases`](https://docs.aws.amazon.com/awssupport/latest/APIReference/API_DescribeCases.html) API. The following example lists all cases in the category "General guidance". +To list all support cases, you can use the [`DescribeCases`](https://docs.aws.amazon.com/awssupport/latest/APIReference/API_DescribeCases.html) API. +The following example lists all cases in the category "General guidance". {{< command >}} $ awslocal support describe-cases @@ -78,14 +86,16 @@ The following output would be retrieved: ### Resolve a support case -To resolve a support case, you can use the [`ResolveCase`](https://docs.aws.amazon.com/goto/WebAPI/support-2013-04-15/ResolveCase) API. The following example resolves the case created in the previous step. +To resolve a support case, you can use the [`ResolveCase`](https://docs.aws.amazon.com/goto/WebAPI/support-2013-04-15/ResolveCase) API. +The following example resolves the case created in the previous step. {{< command >}} $ awslocal support resolve-case \ --case-id "case-12345678910-2020-kEa16f90bJE766J4" {{< / command >}} -Replace the case ID with the ID of the case you want to resolve. The following output would be retrieved: +Replace the case ID with the ID of the case you want to resolve. +The following output would be retrieved: ```bash { diff --git a/content/en/user-guide/aws/swf/index.md b/content/en/user-guide/aws/swf/index.md index ac0794e5a0..f15afb1f6b 100644 --- a/content/en/user-guide/aws/swf/index.md +++ b/content/en/user-guide/aws/swf/index.md @@ -7,19 +7,24 @@ description: > ## Introduction -Simple Workflow Service (SWF) is a fully managed service offered by Amazon Web Services (AWS) that enables you to build and manage applications with distributed components and complex workflows. SWF allows you to define workflows in a way that's separate from the actual application code, making it easier to modify and adapt workflows without changing the application logic. SWF also provides a programming framework to design, coordinate, and execute workflows that involve multiple tasks, steps, and decision points. +Simple Workflow Service (SWF) is a fully managed service offered by Amazon Web Services (AWS) that enables you to build and manage applications with distributed components and complex workflows. +SWF allows you to define workflows in a way that's separate from the actual application code, making it easier to modify and adapt workflows without changing the application logic. +SWF also provides a programming framework to design, coordinate, and execute workflows that involve multiple tasks, steps, and decision points. 
-LocalStack allows you to use the SWF APIs in your local environment to monitor and manage workflow design, task coordination, activity implementation, and error handling. The supported APIs are available on our [API coverage page](https://docs.localstack.cloud/references/coverage/coverage_swf/), which provides information on the extent of SWF's integration with LocalStack. +LocalStack allows you to use the SWF APIs in your local environment to monitor and manage workflow design, task coordination, activity implementation, and error handling. +The supported APIs are available on our [API coverage page](https://docs.localstack.cloud/references/coverage/coverage_swf/), which provides information on the extent of SWF's integration with LocalStack. ## Getting started This guide is designed for users new to Simple Workflow Service and assumes basic knowledge of the AWS CLI and our [`awslocal`](https://github.com/localstack/awscli-local) wrapper script. -Start your LocalStack container using your preferred method. We will demonstrate how to register an SWF domain and workflow using the AWS CLI. +Start your LocalStack container using your preferred method. +We will demonstrate how to register an SWF domain and workflow using the AWS CLI. ### Registering a domain -You can register an SWF domain using the [`RegisterDomain`](https://docs.aws.amazon.com/amazonswf/latest/apireference/API_RegisterDomain.html) API. Execute the following command to register a domain named `test-domain`: +You can register an SWF domain using the [`RegisterDomain`](https://docs.aws.amazon.com/amazonswf/latest/apireference/API_RegisterDomain.html) API. +Execute the following command to register a domain named `test-domain`: {{< command >}} $ awslocal swf register-domain \ @@ -27,7 +32,8 @@ $ awslocal swf register-domain \ --workflow-execution-retention-period-in-days 1 {{< /command >}} -You can use the [`DescribeDomain`](https://docs.aws.amazon.com/amazonswf/latest/apireference/API_DescribeDomain.html) API to verify that the domain was registered successfully. Run the following command to describe the `test-domain` domain: +You can use the [`DescribeDomain`](https://docs.aws.amazon.com/amazonswf/latest/apireference/API_DescribeDomain.html) API to verify that the domain was registered successfully. +Run the following command to describe the `test-domain` domain: {{< command >}} $ awslocal swf describe-domain \ @@ -51,17 +57,19 @@ The following output would be retrieved: ### List the domains -You can list all registered domains using the [`ListDomains`](https://docs.aws.amazon.com/amazonswf/latest/apireference/API_ListDomains.html) API. Run the following command to list all registered domains: +You can list all registered domains using the [`ListDomains`](https://docs.aws.amazon.com/amazonswf/latest/apireference/API_ListDomains.html) API. +Run the following command to list all registered domains: {{< command >}} $ awslocal swf list-domains --registration-status REGISTERED {{< /command >}} -To deprecate a domain, use the [`DeprecateDomain`](https://docs.aws.amazon.com/amazonswf/latest/apireference/API_DeprecateDomain.html) API. Run the following command to deprecate the `test-domain` domain: +To deprecate a domain, use the [`DeprecateDomain`](https://docs.aws.amazon.com/amazonswf/latest/apireference/API_DeprecateDomain.html) API. 
+Run the following command to deprecate the `test-domain` domain: {{< command >}} $ awslocal swf deprecate-domain \ - --name test-domain + --name test-domain {{< /command >}} You can now list the deprecated domains using the `--registration-status DEPRECATED` flag: @@ -72,7 +80,8 @@ $ awslocal swf list-domains --registration-status DEPRECATED ### Registering a workflow -You can register a workflow using the [`RegisterWorkflowType`](https://docs.aws.amazon.com/amazonswf/latest/apireference/API_RegisterWorkflowType.html) API. Execute the following command to register a workflow named `test-workflow`: +You can register a workflow using the [`RegisterWorkflowType`](https://docs.aws.amazon.com/amazonswf/latest/apireference/API_RegisterWorkflowType.html) API. +Execute the following command to register a workflow named `test-workflow`: {{< command >}} $ awslocal swf register-workflow-type \ @@ -85,7 +94,8 @@ $ awslocal swf register-workflow-type \ --workflow-version "1.0" {{< /command >}} -You can use the [`DescribeWorkflowType`](https://docs.aws.amazon.com/amazonswf/latest/apireference/API_DescribeWorkflowType.html) API to verify that the workflow was registered successfully. Run the following command to describe the `test-workflow` workflow: +You can use the [`DescribeWorkflowType`](https://docs.aws.amazon.com/amazonswf/latest/apireference/API_DescribeWorkflowType.html) API to verify that the workflow was registered successfully. +Run the following command to describe the `test-workflow` workflow: {{< command >}} $ awslocal swf describe-workflow-type \ @@ -118,7 +128,8 @@ The following output would be retrieved: ### Registering an activity -You can register an activity using the [`RegisterActivityType`](https://docs.aws.amazon.com/amazonswf/latest/apireference/API_RegisterActivityType.html) API. Execute the following command to register an activity named `test-activity`: +You can register an activity using the [`RegisterActivityType`](https://docs.aws.amazon.com/amazonswf/latest/apireference/API_RegisterActivityType.html) API. +Execute the following command to register an activity named `test-activity`: {{< command >}} $ awslocal swf register-activity-type \ @@ -132,7 +143,8 @@ $ awslocal swf register-activity-type \ --activity-version "1.0" {{< /command >}} -You can use the [`DescribeActivityType`](https://docs.aws.amazon.com/amazonswf/latest/apireference/API_DescribeActivityType.html) API to verify that the activity was registered successfully. Run the following command to describe the `test-activity` activity: +You can use the [`DescribeActivityType`](https://docs.aws.amazon.com/amazonswf/latest/apireference/API_DescribeActivityType.html) API to verify that the activity was registered successfully. +Run the following command to describe the `test-activity` activity: {{< command >}} $ awslocal swf describe-activity-type \ @@ -166,7 +178,8 @@ The following output would be retrieved: ### Starting a workflow execution -You can start a workflow execution using the [`StartWorkflowExecution`](https://docs.aws.amazon.com/amazonswf/latest/apireference/API_StartWorkflowExecution.html) API. Execute the following command to start a workflow execution for the `test-workflow` workflow: +You can start a workflow execution using the [`StartWorkflowExecution`](https://docs.aws.amazon.com/amazonswf/latest/apireference/API_StartWorkflowExecution.html) API. 
+Execute the following command to start a workflow execution for the `test-workflow` workflow: {{< command >}} $ awslocal swf start-workflow-execution \ diff --git a/content/en/user-guide/aws/textract/index.md b/content/en/user-guide/aws/textract/index.md index e04b5cb40e..76683d12a1 100644 --- a/content/en/user-guide/aws/textract/index.md +++ b/content/en/user-guide/aws/textract/index.md @@ -7,19 +7,23 @@ persistence: supported --- -Textract is a machine learning service that automatically extracts text, forms, and tables from scanned documents. It simplifies the process of extracting valuable information from a variety of document types, enabling applications to quickly analyze and understand document content. +Textract is a machine learning service that automatically extracts text, forms, and tables from scanned documents. +It simplifies the process of extracting valuable information from a variety of document types, enabling applications to quickly analyze and understand document content. -LocalStack allows you to mock Textract APIs in your local environment. The supported APIs are available on our [API coverage page](https://docs.localstack.cloud/references/coverage/coverage_textract/), providing details on the extent of Textract's integration with LocalStack. +LocalStack allows you to mock Textract APIs in your local environment. +The supported APIs are available on our [API coverage page](https://docs.localstack.cloud/references/coverage/coverage_textract/), providing details on the extent of Textract's integration with LocalStack. ## Getting started This guide is tailored for users new to Textract and assumes basic knowledge of the AWS CLI and our [`awslocal`](https://github.com/localstack/awscli-local) wrapper script. -Start your LocalStack container using your preferred method. We will demonstrate how to perform basic Textract operations, such as mocking text detection in a document. +Start your LocalStack container using your preferred method. +We will demonstrate how to perform basic Textract operations, such as mocking text detection in a document. ### Detect document text -You can use the [`DetectDocumentText`](https://docs.aws.amazon.com/textract/latest/dg/API_DetectDocumentText.html) API to identify and extract text from a document. Execute the following command: +You can use the [`DetectDocumentText`](https://docs.aws.amazon.com/textract/latest/dg/API_DetectDocumentText.html) API to identify and extract text from a document. +Execute the following command: {{< command >}} $ awslocal textract detect-document-text \ @@ -42,7 +46,8 @@ The following output would be retrieved: ### Start document text detection job -You can use the [`StartDocumentTextDetection`](https://docs.aws.amazon.com/textract/latest/dg/API_StartDocumentTextDetection.html) API to asynchronously detect text in a document. Execute the following command: +You can use the [`StartDocumentTextDetection`](https://docs.aws.amazon.com/textract/latest/dg/API_StartDocumentTextDetection.html) API to asynchronously detect text in a document. +Execute the following command: {{< command >}} $ awslocal textract start-document-text-detection \ @@ -61,14 +66,16 @@ Save the `JobId` value to use in the next command. ### Get document text detection job -You can use the [`GetDocumentTextDetection`](https://docs.aws.amazon.com/textract/latest/dg/API_GetDocumentTextDetection.html) API to retrieve the results of a document text detection job. 
Execute the following command: +You can use the [`GetDocumentTextDetection`](https://docs.aws.amazon.com/textract/latest/dg/API_GetDocumentTextDetection.html) API to retrieve the results of a document text detection job. +Execute the following command: {{< command >}} $ awslocal textract get-document-text-detection \ --job-id "501d7251-1249-41e0-a0b3-898064bfc506" {{< /command >}} -Replace `501d7251-1249-41e0-a0b3-898064bfc506` with the `JobId` value retrieved from the previous command. The following output would be retrieved: +Replace `501d7251-1249-41e0-a0b3-898064bfc506` with the `JobId` value retrieved from the previous command. +The following output would be retrieved: ```bash { diff --git a/content/en/user-guide/aws/timestream/index.md b/content/en/user-guide/aws/timestream/index.md index 1c18cbc32b..c6649c171d 100644 --- a/content/en/user-guide/aws/timestream/index.md +++ b/content/en/user-guide/aws/timestream/index.md @@ -54,7 +54,8 @@ $ awslocal timestream-query query --query-string "SELECT CREATE_TIME_SERIES(time ## Resource Browser -The LocalStack Web Application provides a Resource Browser for managing Timestream databases. You can access the Resource Browser by opening the LocalStack Web Application in your browser, navigating to the **Resources** section, and then clicking on **Timestream** under the **Database** section. +The LocalStack Web Application provides a Resource Browser for managing Timestream databases. +You can access the Resource Browser by opening the LocalStack Web Application in your browser, navigating to the **Resources** section, and then clicking on **Timestream** under the **Database** section. Timestream Resource Browser
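For a quick end-to-end check of the Timestream flow outside the Resource Browser, the sketch below creates a database and a table, writes a single record, and queries it back. The database and table names (`sampleDB`, `sampleTable`) and the record contents are purely illustrative.

```bash
#!/bin/bash
# Minimal sketch: create a database and table, write one record, and query it back.
awslocal timestream-write create-database --database-name sampleDB
awslocal timestream-write create-table --database-name sampleDB --table-name sampleTable

# Write one CPU measurement with an illustrative host dimension
awslocal timestream-write write-records \
  --database-name sampleDB \
  --table-name sampleTable \
  --records "[{\"MeasureName\": \"cpu\", \"MeasureValue\": \"60\", \"MeasureValueType\": \"DOUBLE\", \"Time\": \"$(date +%s)\", \"TimeUnit\": \"SECONDS\", \"Dimensions\": [{\"Name\": \"host\", \"Value\": \"host-1\"}]}]"

# Read the record back
awslocal timestream-query query \
  --query-string 'SELECT * FROM "sampleDB"."sampleTable"'
```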
@@ -62,11 +63,11 @@ The LocalStack Web Application provides a Resource Browser for managing Timestre The Resource Browser allows you to perform the following actions: -- **Create Database**: Create a new Timestream database by clicking on the **Create Database** button and providing a name for the database among other optional details. -- **Create Table**: Create a new Timestream table by clicking on the **Create Table** button in the database view and providing a name for the table among other optional details. -- **Run Query**: Run a Timestream query by clicking on the **Run Query** button in the table view and providing a query string. -- **View Database/Table Details**: Click on a database or table to view its details, including the schema, retention policy, and other metadata. -- **Delete Database/Table**: Delete the Timestream database/table by selecting it and clicking on the **Actions** button followed by **Remove Selected** button. +* **Create Database**: Create a new Timestream database by clicking on the **Create Database** button and providing a name for the database among other optional details. +* **Create Table**: Create a new Timestream table by clicking on the **Create Table** button in the database view and providing a name for the table among other optional details. +* **Run Query**: Run a Timestream query by clicking on the **Run Query** button in the table view and providing a query string. +* **View Database/Table Details**: Click on a database or table to view its details, including the schema, retention policy, and other metadata. +* **Delete Database/Table**: Delete the Timestream database/table by selecting it and clicking on the **Actions** button followed by **Remove Selected** button. ## Current Limitations diff --git a/content/en/user-guide/aws/transcribe/index.md b/content/en/user-guide/aws/transcribe/index.md index c78095de4c..6576239fdd 100644 --- a/content/en/user-guide/aws/transcribe/index.md +++ b/content/en/user-guide/aws/transcribe/index.md @@ -8,25 +8,32 @@ persistence: supported ## Introduction -Transcribe is a service provided by Amazon Web Services (AWS) that offers automatic speech recognition (ASR) capabilities. It enables developers to convert spoken language into written text, making it valuable for a wide range of applications, from transcription services to voice analytics. +Transcribe is a service provided by Amazon Web Services (AWS) that offers automatic speech recognition (ASR) capabilities. +It enables developers to convert spoken language into written text, making it valuable for a wide range of applications, from transcription services to voice analytics. -LocalStack allows you to use the Transcribe APIs for offline speech-to-text jobs in your local environment. The supported APIs are available on our [API Coverage Page](https://docs.localstack.cloud/references/coverage/coverage_transcribe/), which provides information on the extent of Transcribe integration with LocalStack. +LocalStack allows you to use the Transcribe APIs for offline speech-to-text jobs in your local environment. +The supported APIs are available on our [API Coverage Page](https://docs.localstack.cloud/references/coverage/coverage_transcribe/), which provides information on the extent of Transcribe integration with LocalStack. {{< callout >}} -LocalStack's Transcribe relies on the offline speech-to-text service called [Vosk](https://alphacephei.com/vosk/). 
Therefore, LocalStack requires an internet connection during the initial creation of a transcription job for a specific language. This initial connection is required to download and cache the language model. +LocalStack's Transcribe relies on the offline speech-to-text service called [Vosk](https://alphacephei.com/vosk/). +Therefore, LocalStack requires an internet connection during the initial creation of a transcription job for a specific language. +This initial connection is required to download and cache the language model. -Once the language model is cached, subsequent transcriptions for the same language can be performed offline. These language models typically have a size of around 50 MiB, and they are saved to the cache directory (for more details, refer to the [Filesystem Layout]({{< ref "filesystem" >}}) section). +Once the language model is cached, subsequent transcriptions for the same language can be performed offline. +These language models typically have a size of around 50 MiB, and they are saved to the cache directory (for more details, refer to the [Filesystem Layout]({{< ref "filesystem" >}}) section). {{< /callout >}} ## Getting Started This guide is designed for users new to Transcribe and assumes basic knowledge of the AWS CLI and our [`awslocal`](https://github.com/localstack/awscli-local) wrapper script. -Start your LocalStack container using your preferred method. We will demonstrate how to create a transcription job and view the transcript in an S3 bucket using the AWS CLI. +Start your LocalStack container using your preferred method. +We will demonstrate how to create a transcription job and view the transcript in an S3 bucket using the AWS CLI. ### Create an S3 bucket -You can create an S3 bucket using the [`mb`](https://docs.aws.amazon.com/cli/latest/reference/s3/mb.html) command. Run the following command to create a bucket named `foo` to upload a sample audio file named `example.wav`: +You can create an S3 bucket using the [`mb`](https://docs.aws.amazon.com/cli/latest/reference/s3/mb.html) command. +Run the following command to create a bucket named `foo` to upload a sample audio file named `example.wav`: {{< command >}} $ awslocal s3 mb s3://foo @@ -35,7 +42,8 @@ $ awslocal s3 cp ~/example.wav s3://foo/example.wav ### Create a transcription job -You can create a transcription job using the [`StartTranscriptionJob`](https://docs.aws.amazon.com/transcribe/latest/APIReference/API_StartTranscriptionJob.html) API. Run the following command to create a transcription job named `example` for the audio file `example.wav`: +You can create a transcription job using the [`StartTranscriptionJob`](https://docs.aws.amazon.com/transcribe/latest/APIReference/API_StartTranscriptionJob.html) API. +Run the following command to create a transcription job named `example` for the audio file `example.wav`: {{< command >}} $ awslocal transcribe start-transcription-job \ @@ -44,11 +52,12 @@ $ awslocal transcribe start-transcription-job \ --language-code en-IN {{< / command >}} -You can list the transcription jobs using the [`ListTranscriptionJobs`](https://docs.aws.amazon.com/transcribe/latest/APIReference/API_ListTranscriptionJobs.html) API. Run the following command to list the transcription jobs: +You can list the transcription jobs using the [`ListTranscriptionJobs`](https://docs.aws.amazon.com/transcribe/latest/APIReference/API_ListTranscriptionJobs.html) API. 
+Run the following command to list the transcription jobs: {{< command >}} $ awslocal transcribe list-transcription-jobs - + { "TranscriptionJobSummaries": [ { @@ -65,11 +74,12 @@ $ awslocal transcribe list-transcription-jobs ### View the transcript -After the job is complete, the transcript can be retrieved from the S3 bucket using the [`GetTranscriptionJob`](https://docs.aws.amazon.com/transcribe/latest/APIReference/API_GetTranscriptionJob.html) API. Run the following command to get the transcript: +After the job is complete, the transcript can be retrieved from the S3 bucket using the [`GetTranscriptionJob`](https://docs.aws.amazon.com/transcribe/latest/APIReference/API_GetTranscriptionJob.html) API. +Run the following command to get the transcript: {{< command >}} $ awslocal transcribe get-transcription-job --transcription-job example - + { "TranscriptionJob": { "TranscriptionJobName": "example", @@ -97,7 +107,8 @@ $ jq .results.transcripts[0].transcript 7844aaa5.json ## Resource Browser -The LocalStack Web Application provides a Resource Browser for managing Transcribe Transcription Jobs. You can access the Resource Browser by opening the LocalStack Web Application in your browser, navigating to the **Resource Browser** section, and then clicking on **Transcribe Service** under the **Machine Learning** section. +The LocalStack Web Application provides a Resource Browser for managing Transcribe Transcription Jobs. +You can access the Resource Browser by opening the LocalStack Web Application in your browser, navigating to the **Resource Browser** section, and then clicking on **Transcribe Service** under the **Machine Learning** section. Transcribe Resource Browser
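If you would rather script the wait than re-run `get-transcription-job` by hand, a loop like the one below works. It assumes the `example` job name used above and only prints the transcript file URI once the job reaches a terminal state.

```bash
#!/bin/bash
# Minimal sketch: wait for the transcription job to finish, then print the transcript location.
JOB_NAME="example"

while true; do
  STATUS=$(awslocal transcribe get-transcription-job \
    --transcription-job-name "$JOB_NAME" \
    --query "TranscriptionJob.TranscriptionJobStatus" --output text)
  if [ "$STATUS" = "COMPLETED" ] || [ "$STATUS" = "FAILED" ]; then
    break
  fi
  sleep 5
done

echo "Job $JOB_NAME finished with status: $STATUS"
awslocal transcribe get-transcription-job \
  --transcription-job-name "$JOB_NAME" \
  --query "TranscriptionJob.Transcript.TranscriptFileUri" --output text
```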
diff --git a/content/en/user-guide/aws/transfer/index.md b/content/en/user-guide/aws/transfer/index.md index 9866e17778..09f75ea3fc 100644 --- a/content/en/user-guide/aws/transfer/index.md +++ b/content/en/user-guide/aws/transfer/index.md @@ -16,6 +16,7 @@ This functionality streamlines file management processes, making it simpler and Whether you're looking to facilitate file transfers or enhance your data access capabilities, the AWS Transfer API simplifies the process and extends the versatility of your cloud storage infrastructure. ## Getting started + This Python code demonstrates a basic workflow for transferring a file between a local machine and AWS S3 using the AWS Transfer Family service and FTP (File Transfer Protocol). ```python @@ -73,10 +74,12 @@ Please note that this code is a simplified example for demonstration purposes. In a production environment, you should use more secure practices, including setting proper IAM roles and handling sensitive credentials securely. Additionally, error handling and cleanup code may be needed to ensure the script behaves robustly in all scenarios. -## Current Limitations +## Current Limitations + The Transfer API does not provide a way to return the endpoint URL of created FTP servers. Hence, in order to determine the server endpoint, the local port is encoded as a suffix within the `ServerId` attribute, constituting the only numeric digits within the ID string. For example, assume the following is the response from the `CreateServer` API call, then the FTP server is accessible on port `4511` (i.e., `ftp://localhost:4511`): + ```json { "ServerId": "s-afcedbffaecca4511" diff --git a/content/en/user-guide/aws/waf/index.md b/content/en/user-guide/aws/waf/index.md index 354abc3ddf..f8d610d397 100644 --- a/content/en/user-guide/aws/waf/index.md +++ b/content/en/user-guide/aws/waf/index.md @@ -7,19 +7,23 @@ tags: ["Pro image"] ## Introduction -Web Application Firewall (WAF) is a service provided by Amazon Web Services (AWS) that helps protect your web applications from common web exploits that could affect application availability, compromise security, or consume excessive resources. WAFv2 is the latest version of WAF, and it allows you to specify a single set of rules to protect your web applications, APIs, and mobile applications from common attack patterns, such as SQL injection and cross-site scripting. +Web Application Firewall (WAF) is a service provided by Amazon Web Services (AWS) that helps protect your web applications from common web exploits that could affect application availability, compromise security, or consume excessive resources. +WAFv2 is the latest version of WAF, and it allows you to specify a single set of rules to protect your web applications, APIs, and mobile applications from common attack patterns, such as SQL injection and cross-site scripting. -LocalStack allows you to use the WAFv2 APIs for offline web application firewall jobs in your local environment. The supported APIs are available on our [API Coverage Page](https://docs.localstack.cloud/references/coverage/coverage_wafv2/), which provides information on the extent of WAFv2 integration with LocalStack. +LocalStack allows you to use the WAFv2 APIs for offline web application firewall jobs in your local environment. +The supported APIs are available on our [API Coverage Page](https://docs.localstack.cloud/references/coverage/coverage_wafv2/), which provides information on the extent of WAFv2 integration with LocalStack. 
## Getting started -This guide is for users who are familiar with the AWS CLI and [`awslocal`](https://github.com/localstack/awscli-local) wrapper script. +This guide is for users who are familiar with the AWS CLI and [`awslocal`](https://github.com/localstack/awscli-local) wrapper script. -Start your LocalStack container using your preferred method. We will walk you through creating, listing, tagging, and viewing tags for Web Access Control Lists (WebACLs) using the Web Application Firewall (WAF) service in a LocalStack environment using the AWS CLI. +Start your LocalStack container using your preferred method. +We will walk you through creating, listing, tagging, and viewing tags for Web Access Control Lists (WebACLs) using the Web Application Firewall (WAF) service in a LocalStack environment using the AWS CLI. ### Create a WebACL -Start by creating a Web Access Control List (WebACL) using the [`CreateWebACL`](https://docs.aws.amazon.com/waf/latest/APIReference/API_CreateWebACL.html) API. Run the following command to create a WebACL named `TestWebAcl`: +Start by creating a Web Access Control List (WebACL) using the [`CreateWebACL`](https://docs.aws.amazon.com/waf/latest/APIReference/API_CreateWebACL.html) API. +Run the following command to create a WebACL named `TestWebAcl`: {{< command >}} $ awslocal wafv2 create-web-acl \ @@ -43,7 +47,8 @@ Note the `Id` and `ARN` from the output, as they will be needed for subsequent c ### List WebACLs -To view all the WebACLs you have created, use the [`ListWebACLs`](https://docs.aws.amazon.com/waf/latest/APIReference/API_ListWebACLs.html) API. Run the following command to list the WebACLs: +To view all the WebACLs you have created, use the [`ListWebACLs`](https://docs.aws.amazon.com/waf/latest/APIReference/API_ListWebACLs.html) API. +Run the following command to list the WebACLs: {{< command >}} $ awslocal wafv2 list-web-acls --scope REGIONAL @@ -64,7 +69,9 @@ $ awslocal wafv2 list-web-acls --scope REGIONAL ### Tag a WebACL -Tagging resources in AWS WAF helps you manage and identify them. Use the [`TagResource`](https://docs.aws.amazon.com/waf/latest/APIReference/API_TagResource.html) API to add tags to a WebACL. Run the following command to add a tag to the WebACL created in the previous step: +Tagging resources in AWS WAF helps you manage and identify them. +Use the [`TagResource`](https://docs.aws.amazon.com/waf/latest/APIReference/API_TagResource.html) API to add tags to a WebACL. +Run the following command to add a tag to the WebACL created in the previous step: {{< command >}} $ awslocal wafv2 tag-resource \ @@ -72,7 +79,9 @@ $ awslocal wafv2 tag-resource \ --tags Key=Name,Value=AWSWAF {{< /command >}} -After tagging your resources, you may want to view these tags. Use the [`ListTagsForResource`](https://docs.aws.amazon.com/waf/latest/APIReference/API_ListTagsForResource.html) API to list the tags for a WebACL. Run the following command to list the tags for the WebACL created in the previous step: +After tagging your resources, you may want to view these tags. +Use the [`ListTagsForResource`](https://docs.aws.amazon.com/waf/latest/APIReference/API_ListTagsForResource.html) API to list the tags for a WebACL. 
+Run the following command to list the tags for the WebACL created in the previous step: {{< command >}} $ awslocal wafv2 list-tags-for-resource \ diff --git a/content/en/user-guide/aws/xray/index.md b/content/en/user-guide/aws/xray/index.md index 5d7215b6b0..7b690f40a0 100644 --- a/content/en/user-guide/aws/xray/index.md +++ b/content/en/user-guide/aws/xray/index.md @@ -8,8 +8,10 @@ tags: ["Pro image"] ## Introduction [X-Ray](https://docs.aws.amazon.com/xray/latest/devguide/aws-xray.html) is a distributed tracing service that -helps to understand cross-service interactions and facilitates debugging of performance bottlenecks. Instrumented applications generate trace data by recording trace segments with information about the work tasks of an -application, such as timestamps, tasks names, or metadata. X-Ray supports different ways of [instrumenting your application](https://docs.aws.amazon.com/xray/latest/devguide/xray-instrumenting-your-app.html) including +helps to understand cross-service interactions and facilitates debugging of performance bottlenecks. +Instrumented applications generate trace data by recording trace segments with information about the work tasks of an +application, such as timestamps, task names, or metadata. +X-Ray supports different ways of [instrumenting your application](https://docs.aws.amazon.com/xray/latest/devguide/xray-instrumenting-your-app.html) including the [AWS X-Ray SDK](https://docs.aws.amazon.com/xray/latest/devguide/xray-instrumenting-your-app.html#xray-instrumenting-xray-sdk) and the [AWS Distro for OpenTelemetry (ADOT)](https://docs.aws.amazon.com/xray/latest/devguide/xray-instrumenting-your-app.html#xray-instrumenting-opentel). [X-Ray daemon](https://docs.aws.amazon.com/xray/latest/devguide/xray-daemon.html) is an application that gathers @@ -26,14 +28,18 @@ which provides information on the extent of X-Ray integration with LocalStack. This guide is designed for users new to X-Ray and assumes basic knowledge of the AWS CLI and our `awslocal` wrapper script. -Start your LocalStack container using your preferred method. We will demonstrate how you can create a minimal [trace segment](https://docs.aws.amazon.com/xray/latest/devguide/xray-api-segmentdocuments.html#api-segmentdocuments-fields) -and manually send it to the X-Ray API. Notice that this trace ingestion typically happens in the background, for example by the X-Ray SDK and X-Ray daemon. +Start your LocalStack container using your preferred method. +We will demonstrate how you can create a minimal [trace segment](https://docs.aws.amazon.com/xray/latest/devguide/xray-api-segmentdocuments.html#api-segmentdocuments-fields) +and manually send it to the X-Ray API. +Notice that this trace ingestion typically happens in the background, for example by the X-Ray SDK and X-Ray daemon. [PutTraceSegments](https://docs.aws.amazon.com/xray/latest/api/API_PutTraceSegments.html). ### Sending trace segments -You can generates a unique trace ID and constructs a JSON document with trace information. It then sends this trace segment to the AWS X-Ray API using the [PutTraceSegments](https://docs.aws.amazon.com/xray/latest/api/API_PutTraceSegments.html) API. Run the following commands in your terminal: +You can generate a unique trace ID and construct a JSON document with trace information. +You can then send this trace segment to the AWS X-Ray API using the [PutTraceSegments](https://docs.aws.amazon.com/xray/latest/api/API_PutTraceSegments.html) API.
+Run the following commands in your terminal: {{< command >}} $ START_TIME=$(date +%s) @@ -57,7 +63,8 @@ Sending trace segment to X-Ray API: {"trace_id": "1-6501ee11-056ec85fafff21f648e ### Retrieve trace summaries -You can now retrieve the trace summaries from the last 10 minutes using the [GetTraceSummaries](https://docs.aws.amazon.com/xray/latest/api/API_GetTraceSummaries.html) API. Run the following commands in your terminal: +You can now retrieve the trace summaries from the last 10 minutes using the [GetTraceSummaries](https://docs.aws.amazon.com/xray/latest/api/API_GetTraceSummaries.html) API. +Run the following commands in your terminal: {{< command >}} $ EPOCH=$(date +%s) @@ -86,7 +93,8 @@ $ awslocal xray get-trace-summaries --start-time $(($EPOCH-600)) --end-time $(($ ### Retrieve full trace -You can retrieve the full trace by providing the `TRACE_ID` using the [BatchGetTraces](https://docs.aws.amazon.com/xray/latest/api/API_BatchGetTraces.html) API. Run the following commands in your terminal (use the same terminal as for the first command): +You can retrieve the full trace by providing the `TRACE_ID` using the [BatchGetTraces](https://docs.aws.amazon.com/xray/latest/api/API_BatchGetTraces.html) API. +Run the following commands in your terminal (use the same terminal as for the first command): {{< command >}} $ awslocal xray batch-get-traces --trace-ids $TRACE_ID diff --git a/content/en/user-guide/chaos-engineering/_index.md b/content/en/user-guide/chaos-engineering/_index.md index a96714f90a..e02de7be1c 100644 --- a/content/en/user-guide/chaos-engineering/_index.md +++ b/content/en/user-guide/chaos-engineering/_index.md @@ -8,21 +8,24 @@ cascade: type: docs --- -The best way to understand concepts is through practice, so dive into our chaos engineering tutorials. Learn how to [build resilient software -by detecting potential outages with the Fault Injection Service]({{< ref "tutorials/fault-injection-service-experiments" >}}), create a +The best way to understand concepts is through practice, so dive into our chaos engineering tutorials. +Learn how to [build resilient software +by detecting potential outages with the Fault Injection Service]({{< ref "tutorials/fault-injection-service-experiments" >}}), create a [strong architecture through Route53 failover experiments]({{< ref "tutorials/route53-failover-with-fis" >}}), and [simulate outages in your application stack]({{< ref "tutorials/simulating-outages-in-your-application-stack" >}}) . ## Introduction -Chaos engineering via LocalStack is a method to enhance system resilience by deliberately introducing controlled disruptions. This technique takes different forms depending on the team: +Chaos engineering via LocalStack is a method to enhance system resilience by deliberately introducing controlled disruptions. +This technique takes different forms depending on the team: - Software developers focus on application behavior and error response - Architects concentrate on the strength of system design -- Operations teams investigate the dependability of infrastructure setup. +- Operations teams investigate the dependability of infrastructure setup. -Integrating chaos tests early in the development process helps identify and mitigate potential flaws, leading to systems that are more robust under stress and can withstand -turbulent conditions. 
Chaos engineering in LocalStack encompasses the following features: +Integrating chaos tests early in the development process helps identify and mitigate potential flaws, leading to systems that are more robust under stress and can withstand +turbulent conditions. +Chaos engineering in LocalStack encompasses the following features: - **Application behavior and error management** through Fault Injection Service (FIS) experiments. - **Robust architecture** tested via failover scenarios using FIS. diff --git a/content/en/user-guide/chaos-engineering/fault-injection-service/index.md b/content/en/user-guide/chaos-engineering/fault-injection-service/index.md index 34fb81be9d..c873effc6d 100644 --- a/content/en/user-guide/chaos-engineering/fault-injection-service/index.md +++ b/content/en/user-guide/chaos-engineering/fault-injection-service/index.md @@ -10,11 +10,14 @@ tags: ["Pro image"] The [AWS Fault Injection Service](https://aws.amazon.com/fis/) is a fully managed service designed to help you improve the resilience of your applications by simulating -real-world outages and operational issues. This service allows you to conduct controlled experiments on your AWS +real-world outages and operational issues. +This service allows you to conduct controlled experiments on your AWS infrastructure, injecting -faults and observing how your system responds under various conditions. By using the Fault Injection Service, you can +faults and observing how your system responds under various conditions. +By using the Fault Injection Service, you can identify weaknesses, -test recovery procedures, and ensure that your applications can withstand unexpected disruptions. This proactive +test recovery procedures, and ensure that your applications can withstand unexpected disruptions. +This proactive approach to reliability engineering enables you to enhance system robustness, minimize downtime, and maintain a high level of service availability for your users. @@ -55,7 +58,7 @@ $ awslocal dynamodb create-table \ --key-schema AttributeName=id,KeyType=HASH \ --billing-mode PAY_PER_REQUEST \ --region us-east-1 - + { "TableDescription": { "AttributeDefinitions": [ @@ -103,14 +106,14 @@ $ awslocal dynamodb put-item --table-name Students --region us-east-1 --item '{ "year": {"S": "Junior"}, "enrolment date": {"S": "2023-03-19"} }' - + $ awslocal dynamodb put-item --table-name Students --region us-east-1 --item '{ "id": {"S": "1748"}, "first name": {"S": "John"}, "last name": {"S": "Doe"}, "year": {"S": "Senior"}, "enrolment date": {"S": "2022-03-19"} - }' + }' {{< /command >}} And then we can look up one of the students by ID, also using the AWS local CLI: @@ -144,27 +147,37 @@ $ awslocal dynamodb get-item --table-name Students --key '{"id": {"S": "1216"}}' Some of the most important concepts associated with a FIS experiment, that we'll see in the following, are: -**1. Experiment Templates**: Experiment templates define the actions, targets, and any stop conditions for your experiment. They serve as -blueprints for conducting fault injection experiments, allowing you to specify what resources are targeted, what faults are injected, +**1. +Experiment Templates**: Experiment templates define the actions, targets, and any stop conditions for your experiment. +They serve as +blueprints for conducting fault injection experiments, allowing you to specify what resources are targeted, what faults are injected, and under what conditions the experiment should automatically stop. -**2. 
Actions**: Actions are the specific fault injection operations that the experiment performs on the target resources. These can be -injecting latency or throttling to API requests, completely blocking access to instances, etc. Actions define the type of fault, parameters for +**2. +Actions**: Actions are the specific fault injection operations that the experiment performs on the target resources. +These can be +injecting latency or throttling to API requests, completely blocking access to instances, etc. Actions define the type of fault, parameters for the fault injection, and the targets affected. -**3. Targets**: Targets are the AWS resources on which the experiment actions will be applied. To make things even more fine-grained, a specific operation +**3. +Targets**: Targets are the AWS resources on which the experiment actions will be applied. +To make things even more fine-grained, a specific operation of the service can be targeted. -**4. Stop Conditions**: Stop conditions are criteria that, when met, will automatically stop the experiment. +**4. +Stop Conditions**: Stop conditions are criteria that, when met, will automatically stop the experiment. -**5. IAM Roles and Permissions**: To run experiments, AWS FIS requires specific IAM roles and permissions. These are necessary for AWS FIS to +**5. +IAM Roles and Permissions**: To run experiments, AWS FIS requires specific IAM roles and permissions. +These are necessary for AWS FIS to perform actions on your behalf, like injecting faults into your resources. -**6. Experiment Execution**: When you start an experiment, AWS FIS executes the actions defined in the experiment template against the specified targets, -adhering to any defined stop conditions. The execution process is logged, and detailed information about the experiment's progress and outcome is +**6. +Experiment Execution**: When you start an experiment, AWS FIS executes the actions defined in the experiment template against the specified targets, +adhering to any defined stop conditions. +The execution process is logged, and detailed information about the experiment's progress and outcome is provided. - ### Getting started with FIS #### Service Unavailability @@ -179,7 +192,7 @@ This failure will happen 100% of the times the method is called. "actionId": "localstack:generic:api-error", "parameters": { "service": "dynamodb", - "operation": "GetItem", + "operation": "GetItem", "percentage": "100", "exception": "Service Unavailable", "errorCode": "503" @@ -314,13 +327,13 @@ $ awslocal dynamodb put-item --table-name Students --region us-east-1 --item '{ Finally, the experiment can be stopped using the experiment's ID with the following command: ```bash -$ awslocal fis stop-experiment --id 1a01327a-79d5-4202-8132-e56e55c9391b +awslocal fis stop-experiment --id 1a01327a-79d5-4202-8132-e56e55c9391b ``` - #### Region Unavailability -This sort of experiment involves disabling entire regions to simulate regional outages and failovers. Let's see what that would look like, +This sort of experiment involves disabling entire regions to simulate regional outages and failovers. +Let's see what that would look like, in a separate file, `regional-experiment.json`: ```bash @@ -348,6 +361,7 @@ in a separate file, `regional-experiment.json`: "roleArn": "arn:aws:iam:000000000000:role/ExperimentRole" } ``` + This template defines actions to simulate internal server errors (HTTP 503) in both `us-east-1` and `us-west-2` regions, without specific stop conditions. 
These outages will affect all the resources within the regions. @@ -484,7 +498,6 @@ $ awslocal dynamodb create-table \ {{< /command >}} - ```bash awslocal dynamodb put-item --table-name Students --region eu-central-1 --item '{ "id": {"S": "1111"}, @@ -500,12 +513,13 @@ awslocal dynamodb put-item --table-name Students --region eu-central-1 --item '{ Just as with the earlier experiment, this one should be stopped by running the following command: ```bash -$ awslocal fis stop-experiment --id e49283c1-c2e0-492b-b69f-9fbd710bc1e3 +awslocal fis stop-experiment --id e49283c1-c2e0-492b-b69f-9fbd710bc1e3 ``` #### Service Latency -Let's now add some latency to our DynamoDB API calls. First the definition of a new experiment template in another file, `latency-experiment.json`: +Let's now add some latency to our DynamoDB API calls. +First the definition of a new experiment template in another file, `latency-experiment.json`: ```bash { @@ -581,21 +595,23 @@ $ awslocal fis start-experiment --experiment-template-id 1f6e0ce8-57ed-4987-a7e5 {{< /command >}} -This FIS experiment introduces a delay of 5 seconds to all DynamoDB API calls within the `us-east-1` region. Tables located in the `eu-central-1` region, -or any other service, remain unaffected. To extend the latency effect to a regional level, the specific service constraint can be omitted, +This FIS experiment introduces a delay of 5 seconds to all DynamoDB API calls within the `us-east-1` region. +Tables located in the `eu-central-1` region, +or any other service, remain unaffected. +To extend the latency effect to a regional level, the specific service constraint can be omitted, thereby applying the latency to all resources within the selected region. As always, remember to stop your experiment, so it does not cause unexpected issues down the line: ```bash -$ awslocal fis stop-experiment --id dd598567-56e6-4d00-9ef5-15c7e90e7851 +awslocal fis stop-experiment --id dd598567-56e6-4d00-9ef5-15c7e90e7851 ``` Remember to replace the IDs with your own corresponding values. #### Experiment overview -If you want to keep track of all your experiments and make sure nothing is running in the background to hinder any other work, you can get an overview by using +If you want to keep track of all your experiments and make sure nothing is running in the background to hinder any other work, you can get an overview by using the command: {{< command >}} @@ -641,8 +657,3 @@ $ awslocal fis list-experiments {{< /command >}} For extra information or limitations of the LocalStack FIS service, please refer to the dedicated service [documentation]({{< ref "user-guide/aws/fis" >}}). - - - - - diff --git a/content/en/user-guide/chaos-engineering/outages-extension/index.md b/content/en/user-guide/chaos-engineering/outages-extension/index.md index 31cb0faa22..4a6a18c107 100644 --- a/content/en/user-guide/chaos-engineering/outages-extension/index.md +++ b/content/en/user-guide/chaos-engineering/outages-extension/index.md @@ -9,14 +9,16 @@ tags: ["Enterprise plan"] ## Introduction The [LocalStack Outages Extension](https://pypi.org/project/localstack-extension-outages/) allows you to mimic outages across any AWS region or service. -By integrating the Outages Extension using the [LocalStack Extension mechanism](https://docs.localstack.cloud/user-guide/extensions/), you can assess -your infrastructure's robustness. Intentionally triggering service outages and monitoring the system's response in situations -where the infrastructure is compromised offers a powerful way to test. 
This strategy helps gauge the effectiveness of the system's +By integrating the Outages Extension using the [LocalStack Extension mechanism](https://docs.localstack.cloud/user-guide/extensions/), you can assess +your infrastructure's robustness. +Intentionally triggering service outages and monitoring the system's response in situations +where the infrastructure is compromised offers a powerful way to test. +This strategy helps gauge the effectiveness of the system's deployment procedures and its resilience against infrastructure disruptions, which is a key element of chaos engineering. - {{< callout >}} -Outages Extension is currently available as part of the **LocalStack Enterprise** plan. If you'd like to try it out, please [contact us](https://www.localstack.cloud/demo) to request access. +Outages Extension is currently available as part of the **LocalStack Enterprise** plan. +If you'd like to try it out, please [contact us](https://www.localstack.cloud/demo) to request access. {{< /callout >}} ### Prerequisites @@ -27,10 +29,10 @@ The general prerequisites for this guide are: - [Docker](https://docs.docker.com/get-docker/) and [Docker Compose](https://docs.docker.com/compose/install/) - [Python](https://www.python.org/downloads/) - ### Installing the extension -To install the LocalStack Outages Extension, first set up your LocalStack Auth Token in your environment. Once the token is configured, use the command below to install the extension: +To install the LocalStack Outages Extension, first set up your LocalStack Auth Token in your environment. +Once the token is configured, use the command below to install the extension: {{< command >}} $ localstack auth set-token @@ -43,8 +45,9 @@ Follow our [Managing Extensions documentation](https://docs.localstack.cloud/use ### Configuration -The extension is set up through an API endpoint, where the setup involves specifying a set of rules that are applied in order. -Each rule includes two key pieces of information: the service name and its region. You have the option to use the `*` wildcard +The extension is set up through an API endpoint, where the setup involves specifying a set of rules that are applied in order. +Each rule includes two key pieces of information: the service name and its region. +You have the option to use the `*` wildcard for flexibility in either attribute. To initiate an outage for specific service/region combinations, you can make a POST request as described below: @@ -69,7 +72,6 @@ curl --location --request POST 'http://outages.localhost.localstack.cloud:4566/o ]' {{< /command >}} - Once activated, any API requests to the impacted services and regions will result in an HTTP 503 Service Unavailable error. In the given example, the services and regions affected include: @@ -96,7 +98,8 @@ make_bucket: test-bucket
{{< /command >}}

-Outages may be stopped by using empty list in the configuration. The following request will clear the current configuration:
+Outages may be stopped by using an empty list in the configuration.
+The following request will clear the current configuration:

{{< command >}}
curl --location --request POST 'http://outages.localhost.localstack.cloud:4566/outages' \
diff --git a/content/en/user-guide/chaos-engineering/special-configs/index.md b/content/en/user-guide/chaos-engineering/special-configs/index.md
index ddf13b4c58..1138896103 100644
--- a/content/en/user-guide/chaos-engineering/special-configs/index.md
+++ b/content/en/user-guide/chaos-engineering/special-configs/index.md
@@ -8,13 +8,18 @@ tags: ["Enterprise plan"]

## Introduction

-LocalStack allows users to inject intentional errors, particularly in Kinesis and DynamoDB. You can introduce controlled chaos into your development environment enhance to enhance service resilience. By configuring environment variables, you can simulate disruptions. This simple setup helps improve the response mechanisms of these key AWS services, ensuring robust architecture under challenging conditions with minimal initial configuration.
+LocalStack allows users to inject intentional errors, particularly in Kinesis and DynamoDB.
+You can introduce controlled chaos into your development environment to enhance service resilience.
+By configuring environment variables, you can simulate disruptions.
+This simple setup helps improve the response mechanisms of these key AWS services, ensuring robust architecture under challenging conditions with minimal initial configuration.

-This guide demonstrates the `DYNAMODB_ERROR_PROBABILITY` and `KINESIS_ERROR_PROBABILITY` configuration flags. The guide assumes basic knowledge of the AWS CLI and our [`awslocal`](https://github.com/localstack/awscli-local) wrapper script.
+This guide demonstrates the `DYNAMODB_ERROR_PROBABILITY` and `KINESIS_ERROR_PROBABILITY` configuration flags.
+The guide assumes basic knowledge of the AWS CLI and our [`awslocal`](https://github.com/localstack/awscli-local) wrapper script.

## Kinesis Error Probability

-The `KINESIS_ERROR_PROBABILITY` setting allows users to introduce `ProvisionedThroughputExceededException` errors randomly into Kinesis API responses. The value for this setting ranges from 0.0 (default) to 1.0.
+The `KINESIS_ERROR_PROBABILITY` setting allows users to introduce `ProvisionedThroughputExceededException` errors randomly into Kinesis API responses.
+The value for this setting ranges from 0.0 (default) to 1.0.

To demonstrate, set up LocalStack with `KINESIS_ERROR_PROBABILITY` at 0.5, indicating a 50% chance of receiving a `ProvisionedThroughputExceededException` from Kinesis.

@@ -22,7 +27,8 @@ To demonstrate, set up LocalStack with `KINESIS_ERROR_PROBABILITY` at 0.5, indic
$ KINESIS_ERROR_PROBABILITY=0.5 localstack start
{{< /command >}}

-Next, create a Kinesis stream using the AWS CLI with the [`CreateStream`](https://docs.aws.amazon.com/kinesis/latest/APIReference/API_CreateStream.html) API. For example, to create a stream named "ProductsStream" with one shard, use:
+Next, create a Kinesis stream using the AWS CLI with the [`CreateStream`](https://docs.aws.amazon.com/kinesis/latest/APIReference/API_CreateStream.html) API.
+For example, to create a stream named "ProductsStream" with one shard, use: {{< command >}} $ awslocal kinesis create-stream \ @@ -39,7 +45,8 @@ $ awslocal kinesis put-record \ --data "eyJwcm9kdWN0SWQiOiIxMjMiLCJwcm9kdWN0TmFtZSI6IlN1cGVyV2lkZ2V0IiwicHJvZHVjdFByaWNlIjoiMTk5Ljk5In0=" {{< /command >}} -After performing similar operations repeatedly, you can check the logs to verify that the configuration is working as intended. Remember, records will only be added during successful calls. +After performing similar operations repeatedly, you can check the logs to verify that the configuration is working as intended. +Remember, records will only be added during successful calls. ```bash 2023-11-09T23:33:49.867 INFO --- [ asgi_gw_0] localstack.request.aws : AWS kinesis.CreateStream => 200 @@ -54,7 +61,8 @@ After performing similar operations repeatedly, you can check the logs to verify ## DynamoDB Error Probability -The `DYNAMODB_ERROR_PROBABILITY` setting, similar to the Kinesis configuration, allows for random `ProvisionedThroughputExceededException` responses from the DynamoDB service. It also accepts a decimal value between 0.0 (default) and 1.0. +The `DYNAMODB_ERROR_PROBABILITY` setting, similar to the Kinesis configuration, allows for random `ProvisionedThroughputExceededException` responses from the DynamoDB service. +It also accepts a decimal value between 0.0 (default) and 1.0. To start LocalStack with a high error probability for DynamoDB, set `DYNAMODB_ERROR_PROBABILITY` to 0.8: @@ -62,7 +70,8 @@ To start LocalStack with a high error probability for DynamoDB, set `DYNAMODB_ER $ DYNAMODB_ERROR_PROBABILITY=0.8 localstack start {{< /command >}} -Next, create a DynamoDB table using the AWS CLI with the [`CreateTable`](https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_CreateTable.html) API. For example, to create a table named "Products" with a primary key of "ProductId", use: +Next, create a DynamoDB table using the AWS CLI with the [`CreateTable`](https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_CreateTable.html) API. +For example, to create a table named "Products" with a primary key of "ProductId", use: ```bash $ awslocal dynamodb create-table \ @@ -72,7 +81,8 @@ $ awslocal dynamodb create-table \ --provisioned-throughput ReadCapacityUnits=1,WriteCapacityUnits=1 ``` -You can add items to the table using the [`PutItem`](https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_PutItem.html) API. For example, to add a product with an ID of "123", a name of "SuperWidget", and a price of "199.99", use: +You can add items to the table using the [`PutItem`](https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_PutItem.html) API. +For example, to add a product with an ID of "123", a name of "SuperWidget", and a price of "199.99", use: ```bash awslocal dynamodb put-item \ diff --git a/content/en/user-guide/chaos-engineering/web-application-dashboard/index.md b/content/en/user-guide/chaos-engineering/web-application-dashboard/index.md index 9623562c3a..37a7589ffc 100644 --- a/content/en/user-guide/chaos-engineering/web-application-dashboard/index.md +++ b/content/en/user-guide/chaos-engineering/web-application-dashboard/index.md @@ -14,17 +14,19 @@ You can find this **Pro** feature in the web app by navigating to [**app.localst ## Web Application FIS Dashboard -LocalStack Web Application provides a dashboard for conducting FIS experiments in user stacks. 
This control panel offers various FIS experiment options, which includes: +LocalStack Web Application provides a dashboard for conducting FIS experiments in user stacks. +This control panel offers various FIS experiment options, which includes: -- **500 Internal Error**: This experiment randomly terminates incoming requests, returning an `Internal Server Error` with a response code of 500. -- **Service Unavailable**: This test causes all calls to specified services to receive a 503 `Service Unavailable` response. -- **AWS Region Unavailable**: This experiment simulates regional outages and failovers by disabling entire AWS regions. -- **Latency**: This test introduces specified latency to every API call, useful for simulating network latency or degraded network performance. +- **500 Internal Error**: This experiment randomly terminates incoming requests, returning an `Internal Server Error` with a response code of 500. +- **Service Unavailable**: This test causes all calls to specified services to receive a 503 `Service Unavailable` response. +- **AWS Region Unavailable**: This experiment simulates regional outages and failovers by disabling entire AWS regions. +- **Latency**: This test introduces specified latency to every API call, useful for simulating network latency or degraded network performance. {{< figure src="fis-dashboard.png" width="900" >}} - This LocalStack dashboard is not just an easy-to-use testing tool, it's a foundation for building reusable Fault Injection -Simulation (FIS) templates. By defining experiments using this interface, you create a set of -customizable templates that can be seamlessly integrated into any future automation workflows. It's a time-saving -feature, ensuring consistent and efficient testing across various stages of your application's development lifecycle. \ No newline at end of file +Simulation (FIS) templates. +By defining experiments using this interface, you create a set of +customizable templates that can be seamlessly integrated into any future automation workflows. +It's a time-saving +feature, ensuring consistent and efficient testing across various stages of your application's development lifecycle. diff --git a/content/en/user-guide/ci/_index.md b/content/en/user-guide/ci/_index.md index e1d9ab6b5c..fd382d6964 100644 --- a/content/en/user-guide/ci/_index.md +++ b/content/en/user-guide/ci/_index.md @@ -9,24 +9,36 @@ cascade: slug: ci --- -LocalStack enables organizations to automate their application testing and integration process through DevOps practices, such as continuous integration (CI). To meet your organizational needs, LocalStack lets you move away from complicated AWS testing and staging environments by enabling a key component of testing and delivering cloud-native applications. +LocalStack enables organizations to automate their application testing and integration process through DevOps practices, such as continuous integration (CI). +To meet your organizational needs, LocalStack lets you move away from complicated AWS testing and staging environments by enabling a key component of testing and delivering cloud-native applications. -You can easily integrate LocalStack with your existing CI platform. We provide native plugins for CircleCI and a generic driver for any other CI platform you might use. This enables you to incorporate LocalStack's local AWS cloud emulation in your CI pipelines, use advanced features like Cloud Pods and CI analytics, and run your test & integration suite before pushing to production. 
+You can easily integrate LocalStack with your existing CI platform. +We provide native plugins for CircleCI and a generic driver for any other CI platform you might use. +This enables you to incorporate LocalStack's local AWS cloud emulation in your CI pipelines, use advanced features like Cloud Pods and CI analytics, and run your test & integration suite before pushing to production. ## Hypothetical CI workflow -Let's assume that your team has an automated CI workflow into which you want to integrate end-to-end cloud testing with LocalStack. As an example, consider the following pipeline, which represents part of a simple CI workflow: +Let's assume that your team has an automated CI workflow into which you want to integrate end-to-end cloud testing with LocalStack. +As an example, consider the following pipeline, which represents part of a simple CI workflow: {{< figure src="localstack-in-ci.svg" alt="An example CI/CD workflow using LocalStack" width="90%">}} -The CI build is triggered by pushing code to a version control repository, like GitHub. The CI runner starts LocalStack and executes the test suite. You can also use the same Infrastructure-as-Code (IaC) configuration that you use to set up AWS in your production environment to set up LocalStack in the CI environment. You can also pre-seed state into the local AWS services (e.g., DynamoDB entries or S3 files) provided by LocalStack in your CI environment via [Cloud Pods]({{< ref "user-guide/state-management/cloud-pods" >}}). +The CI build is triggered by pushing code to a version control repository, like GitHub. +The CI runner starts LocalStack and executes the test suite. +You can also use the same Infrastructure-as-Code (IaC) configuration that you use to set up AWS in your production environment to set up LocalStack in the CI environment. +You can also pre-seed state into the local AWS services (e.g., DynamoDB entries or S3 files) provided by LocalStack in your CI environment via [Cloud Pods]({{< ref "user-guide/state-management/cloud-pods" >}}). -After a successful test run, you can execute the more expensive AWS CodeBuild pipeline for deploying your application. You can enrich the test reports created by your testing framework with traces and analytics generated inside LocalStack. +After a successful test run, you can execute the more expensive AWS CodeBuild pipeline for deploying your application. +You can enrich the test reports created by your testing framework with traces and analytics generated inside LocalStack. ## CI integrations -The steps required for the integration differ slightly depending on your preferred CI platform. Please refer to the relevant sections of the [CI keys settings page](https://app.localstack.cloud/workspace/ci-keys) in the [LocalStack Web app](https://app.localstack.cloud). +The steps required for the integration differ slightly depending on your preferred CI platform. +Please refer to the relevant sections of the [CI keys settings page](https://app.localstack.cloud/workspace/ci-keys) in the [LocalStack Web app](https://app.localstack.cloud). ## CI images -LocalStack Docker images can be used in your CI environment by adding a CI Key. The images are available on [Docker Hub](https://hub.docker.com/r/localstack/localstack/tags), and comprehensive documentation is available on our [Docker images](https://docs.localstack.cloud/references/docker-images/) documentation. Community users can use the `localstack/localstack` image, while licensed users can use the `localstack/localstack-pro` image. 
For Big Data jobs that require services such as EMR, Athena, and Glue, we provide a mono-container that uses the `localstack/localstack-pro:2.0.2-bigdata` image, which bakes in the required dependencies, such as Hadoop, Hive, Presto, into the LocalStack image. +LocalStack Docker images can be used in your CI environment by adding a CI Key. +The images are available on [Docker Hub](https://hub.docker.com/r/localstack/localstack/tags), and comprehensive documentation is available on our [Docker images](https://docs.localstack.cloud/references/docker-images/) documentation. +Community users can use the `localstack/localstack` image, while licensed users can use the `localstack/localstack-pro` image. +For Big Data jobs that require services such as EMR, Athena, and Glue, we provide a mono-container that uses the `localstack/localstack-pro:2.0.2-bigdata` image, which bakes in the required dependencies, such as Hadoop, Hive, Presto, into the LocalStack image. diff --git a/content/en/user-guide/ci/bitbucket/index.md b/content/en/user-guide/ci/bitbucket/index.md index 2af584b58e..fa27546a49 100644 --- a/content/en/user-guide/ci/bitbucket/index.md +++ b/content/en/user-guide/ci/bitbucket/index.md @@ -8,7 +8,8 @@ description: > ## Introduction -[BitBucket Pipeline](https://bitbucket.org/product/features/pipelines) is a CI/CD tool that allows you to build, test, and deploy your code directly from BitBucket. This guide will show you how to use LocalStack in BitBucket Pipelines. +[BitBucket Pipeline](https://bitbucket.org/product/features/pipelines) is a CI/CD tool that allows you to build, test, and deploy your code directly from BitBucket. +This guide will show you how to use LocalStack in BitBucket Pipelines. ## Setting up the BitBucket Pipeline @@ -54,7 +55,8 @@ pipelines: ## Configuring a CI key -You can enable LocalStack Pro by using the `localstack/localstack-pro` image and adding your CI key to the project's environment variables. The LocalStack container will automatically pick it up and activate the Pro features. +You can enable LocalStack Pro by using the `localstack/localstack-pro` image and adding your CI key to the project's environment variables. +The LocalStack container will automatically pick it up and activate the Pro features. To add a CI key to your BitBucket Pipeline: @@ -80,8 +82,10 @@ pipelines: - docker run -d --rm -p 4566:4566 -p 4510-4559:4510-4559 -e LOCALSTACK_AUTH_TOKEN=${LOCALSTACK_AUTH_TOKEN:?} -e DEBUG=1 -e LS_LOG=trace -e DOCKER_SOCK=tcp://${BITBUCKET_DOCKER_HOST_INTERNAL}:2375 -e DOCKER_HOST=tcp://${BITBUCKET_DOCKER_HOST_INTERNAL}:2375 --name localstack-main localstack/localstack-pro ... ``` + ## Current Limitations ### Mounting Volumes -BitBucket Pipelines does not support mounting volumes, so you cannot mount a volume to the LocalStack container. This limitation prevents you from mounting the Docker Socket to the LocalStack container, which is required to create compute resources, such as Lambda functions or ECS tasks. +BitBucket Pipelines does not support mounting volumes, so you cannot mount a volume to the LocalStack container. +This limitation prevents you from mounting the Docker Socket to the LocalStack container, which is required to create compute resources, such as Lambda functions or ECS tasks. 
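When LocalStack is started in a CI step with a plain `docker run`, as in the BitBucket snippet above, there is no `localstack wait` equivalent to block until the emulator is ready; polling the `/_localstack/health` endpoint achieves the same effect. The loop below is only a sketch — the roughly 60-second budget and the placement as an extra script step are assumptions, not part of the documented pipeline:

```bash
# Wait for LocalStack to become ready before running tests.
# Assumes the container exposes port 4566 on localhost, as in the docker run command above.
for i in $(seq 1 30); do
  if curl --silent --fail http://localhost:4566/_localstack/health > /dev/null; then
    echo "LocalStack is ready"
    break
  fi
  echo "Still waiting for LocalStack ($i)..."
  sleep 2
done
```

Once the health endpoint responds, subsequent `awslocal` or `aws --endpoint-url` calls in the same step can assume the emulator is up.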
diff --git a/content/en/user-guide/ci/circle-ci/index.md b/content/en/user-guide/ci/circle-ci/index.md index d77945be4c..871e22ee43 100644 --- a/content/en/user-guide/ci/circle-ci/index.md +++ b/content/en/user-guide/ci/circle-ci/index.md @@ -12,12 +12,12 @@ description: > [CircleCI](https://circleci.com) is a continuous integration and continuous delivery (CI/CD) platform which uses a configuration file (usually named `.circleci/config.yml`) to define the build, test, and deployment workflows. LocalStack supports CircleCI out of the box and can be easily integrated into your pipeline to run your tests against a local cloud emulator. - ## Snippets ### Start up LocalStack #### Default + ```yaml version: '2.1' orbs: @@ -36,6 +36,7 @@ workflows: ``` #### Async + ```yaml version: '2.1' orbs: @@ -54,11 +55,13 @@ workflows: ``` ### Configuration + To configure LocalStack use the `environment` key on the job level or a shell command, where the latter takes higher precedence. Read more about the [configuration options](/references/configuration/) of LocalStack. #### Job level + ```yaml ... jobs: @@ -72,6 +75,7 @@ jobs: ``` #### Shell command + ```yaml ... jobs: @@ -87,7 +91,7 @@ jobs: ### Configuring a CI key To enable LocalStack Pro+, you need to add your LocalStack CI key to the project's environment variables. -The LocalStack container will automatically pick it up and activate the licensed features. +The LocalStack container will automatically pick it up and activate the licensed features. Go to the [CI Key Page](https://app.localstack.cloud/workspace/ci-keys) page and copy your CI key. To add the CI key to your CircleCI project, follow these steps: @@ -103,6 +107,7 @@ To add the CI key to your CircleCI project, follow these steps: After the above steps, just start up LocalStack using our official orb as usual. ### Dump LocalStack logs + ```yaml ... jobs: @@ -127,11 +132,13 @@ To be able to use any of the below samples, you must [set a valid CI key](#confi _Note: For best result we recommend to use a combination of the below techniques and you should familiarise yourself with CircleCI's data persistance approach, see their [official documentation](https://circleci.com/docs/persist-data/)._ #### Cloud Pods + Cloud Pods providing an easy solution to persist LocalStack's state, even between workflows or projects. Find more information about [Cloud Pods](/user-guide/state-management/cloud-pods/). ##### Multiple projects + Update or create the Cloud Pod in it's own project (ie in a separate Infrastructure as Code repo), this would create a base Cloud Pod, which you can use in the future without any configuration or deployment. _Note: If there is a previously created Cloud Pod which doesn't need updating this step can be skipped._ @@ -187,6 +194,7 @@ workflows: ``` ##### Same project + To use a dynamically updated Cloud Pod in multiple workflows but in the same project, you must eliminate the race conditions between the update workflow and the others. Before you are able to use any stored artifacts in your pipeline, you must provide either a valid [project API token](https://circleci.com/docs/managing-api-tokens/#creating-a-project-api-token) or a [personal API token](https://circleci.com/docs/managing-api-tokens/#creating-a-personal-api-token) to CircleCI. @@ -266,9 +274,11 @@ workflows: ``` #### Ephemeral Instance (Preview) + Find out more about [Ephemeral Instances](/user-guide/cloud-sandbox/). 
-##### Same job +##### Same job + ```yaml orbs: localstack: localstack/platform@2.2 @@ -295,6 +305,7 @@ workflows: ``` ##### Multiple jobs + ```yaml ... jobs: @@ -340,6 +351,7 @@ workflows: ``` #### Workspace + This strategy persist LocalStack's state between jobs for the current workflow. ```yaml @@ -379,9 +391,11 @@ jobs: - localstack-save-state - localstack-load-state ``` + More information about Localstack's [state import/export](/user-guide/state-management/export-import-state). #### Cache + To preserve state between workflow runs, you can take leverage of CircleCI's caching too. This strategy will persist LocalStack's state for every workflow re-runs, but not for different workflows. @@ -431,4 +445,5 @@ workflows: - localstack-do-work ... ``` + More information about [state management](/user-guide/state-management/export-import-state). diff --git a/content/en/user-guide/ci/codebuild/index.md b/content/en/user-guide/ci/codebuild/index.md index 595bd77318..1c4b228ea8 100644 --- a/content/en/user-guide/ci/codebuild/index.md +++ b/content/en/user-guide/ci/codebuild/index.md @@ -12,7 +12,6 @@ description: Use LocalStack in CodeBuild CodeBuild allows you to define your build project, set the source code location, and handles the building and testing, while supporting various programming languages, build tools, and runtime environments. LocalStack supports CodeBuild out of the box and can be easily integrated into your pipeline to run your tests against a cloud emulator. - ## Snippets CodeBuild has the capability to use LocalStack's GitHub Action. @@ -28,7 +27,7 @@ phases: pre_build: commands: - pip3 install localstack awscli - - docker pull public.ecr.aws/localstack/localstack:latest + - docker pull public.ecr.aws/localstack/localstack:latest - localstack start -d - localstack wait -t 30 ``` @@ -50,12 +49,12 @@ phases: install-awslocal: 'true' ``` - ### Configuration Get know more about the LocalStack [config options](/references/configuration/). #### Native Runner + ```yml version: 0.2 @@ -89,7 +88,6 @@ phases: ... ``` - ### Configuring a CI key To enable LocalStack Pro features, you need to add your LocalStack CI API key to the project's environment variables. @@ -114,7 +112,7 @@ phases: pre_build: commands: - pip3 install localstack awscli - - docker pull public.ecr.aws/localstack/localstack-pro:latest + - docker pull public.ecr.aws/localstack/localstack-pro:latest ... ``` @@ -135,8 +133,8 @@ phases: ... ``` - ### Dump LocalStack logs + ```yaml ... artifacts: @@ -163,7 +161,6 @@ artifact: - localstack.log ``` - ### Store LocalStack state #### Cloud Pods @@ -171,6 +168,7 @@ artifact: Find more information about cloud pods [here](/user-guide/state-management/cloud-pods/). ##### Native Runner + ```yml ... phases: @@ -211,6 +209,7 @@ phases: ``` #### Ephemeral Instances (Preview) + ```yml ... phases: @@ -253,7 +252,9 @@ artifact: files: - ls-state-pod.zip ``` + Alternatively save as a secondary artifact: + ```yml ... artifact: @@ -264,6 +265,7 @@ artifact: - ls-state-pod.zip ... ``` + To use previously stored artifacts as inputs, set them as a source in the project. #### Cache @@ -271,6 +273,7 @@ To use previously stored artifacts as inputs, set them as a source in the projec Additional information about [state export and import](/user-guide/state-management/export-import-state/). ##### Native Runner + ```yml ... phases: @@ -287,6 +290,7 @@ cache: ``` ##### GitHub Actions Runner + ```yml ... 
phases: @@ -301,14 +305,16 @@ cache: - 'ls-state-pod.zip' ``` - ## Current Limitations -- We recommend using the `public.ecr.aws/localstack/localstack:latest` image to start LocalStack, instead of the `localstack/localstack:latest` image. LocalStack mirrors the Docker Hub image to the public ECR repository. +- We recommend using the `public.ecr.aws/localstack/localstack:latest` image to start LocalStack, instead of the `localstack/localstack:latest` image. + LocalStack mirrors the Docker Hub image to the public ECR repository. You can use the Docker Hub image as well, though you may run into the following error: + ```bash toomanyrequests: You have reached your pull rate limit. You may increase the limit by authenticating and upgrading: https://www.docker.com/increase-rate-limit ``` + To resolve this use your Docker Hub account credentials to pull the image. - LocalStack depends on the Docker socket to emulate your infrastructure. To enable it, update your project by ticking **Environment > Additional Configuration > Privileged > Enable this flag if you want to build Docker Images or want your builds to get elevated privileges**. diff --git a/content/en/user-guide/ci/github-actions/index.md b/content/en/user-guide/ci/github-actions/index.md index b45cd1f031..cfcd82a94d 100644 --- a/content/en/user-guide/ci/github-actions/index.md +++ b/content/en/user-guide/ci/github-actions/index.md @@ -19,9 +19,11 @@ This page contains easily customisable snippets to show you how to manage LocalS image-tag: 'latest' install-awslocal: 'true' ``` + ### Configuration -To set LocalStack configuration options, you can use the `configuration` input parameter. For example, to set the `DEBUG` configuration option, you can use the following configuration: +To set LocalStack configuration options, you can use the `configuration` input parameter. +For example, to set the `DEBUG` configuration option, you can use the following configuration: ```yml - name: Start LocalStack @@ -36,12 +38,14 @@ You can add extra configuration options by separating them with a comma. ### Configure a CI key -To enable LocalStack Pro+, you need to add your LocalStack CI API key to the project's environment variables. The LocalStack container will automatically pick it up and activate the licensed features. +To enable LocalStack Pro+, you need to add your LocalStack CI API key to the project's environment variables. +The LocalStack container will automatically pick it up and activate the licensed features. -Go to the [CI Key Page](https://app.localstack.cloud/workspace/ci-keys) page and copy your CI key. To add the CI key to your GitHub project, follow these steps: +Go to the [CI Key Page](https://app.localstack.cloud/workspace/ci-keys) page and copy your CI key. +To add the CI key to your GitHub project, follow these steps: - Navigate to your repository **Settings > Secrets** and press **New repository secret**. -- Enter `LOCALSTACK_API_KEY` as the name of the secret and paste your CI key as the value. +- Enter `LOCALSTACK_API_KEY` as the name of the secret and paste your CI key as the value. Click **Add secret** to save your secret. Adding the LocalStack CI key as secret in GitHub @@ -62,6 +66,7 @@ Additionally, you need to modify your GitHub Action workflow to use the `localst ``` ### Dump Localstack logs + ```yaml - name: Show localstack logs run: | @@ -73,6 +78,7 @@ Additionally, you need to modify your GitHub Action workflow to use the `localst You can preserve your AWS infrastructure with Localstack in various ways. 
#### Cloud Pods + ```yaml ... # Localstack is up and running already @@ -103,6 +109,7 @@ Find more information about cloud pods [here](/user-guide/state-management/cloud Our Github Action contains the prebuilt functionality to spin up an ephemeral instance. First you need to deploy the preview: + ```yaml name: Create PR Preview @@ -137,6 +144,7 @@ jobs: Find out more about ephemeral instances [here](/user-guide/cloud-sandbox/). #### Artifact + ```yaml ... - name: Start LocalStack and Load State diff --git a/content/en/user-guide/ci/gitlab-ci/index.md b/content/en/user-guide/ci/gitlab-ci/index.md index 750d8f95bd..45eb949ae6 100644 --- a/content/en/user-guide/ci/gitlab-ci/index.md +++ b/content/en/user-guide/ci/gitlab-ci/index.md @@ -10,6 +10,7 @@ This page contains easily customisable snippets to show you how to manage LocalS ## Snippets ### Start up Localstack + {{< callout "tip" >}} While working with a Docker-in-Docker (`dind`) setup, the Docker runner requires `privileged` mode. You must always use `privileged = true` in your GitLab CI's `config.toml` file while setting up LocalStack in GitLab CI runners. @@ -24,6 +25,7 @@ HOSTNAME_EXTERNAL: localhost.localstack.cloud.
#### Service + ```yaml ... variables: @@ -41,6 +43,7 @@ services: ``` #### Container + ```yaml image: docker:latest @@ -73,6 +76,7 @@ job: ``` ### Configure a CI key + You can easily enable LocalStack Pro by using the `localstack/localstack-pro` image and adding your CI key to the repository's environment variables. Go to your project's **Settings > CI/CD** and expand the **Variables** section. Select the **Add Variable** button and fill in the necessary details. @@ -91,10 +95,12 @@ services: alias: localstack ... ``` + You can check the logs of the LocalStack container to see if the activation was successful. If the CI key activation fails, LocalStack container will exit with an error code. ### Dump Localstack logs + ```yaml ... job: @@ -104,6 +110,7 @@ job: - localstack logs | tee localstack.log ... ``` + In case of the service setup `LOCALSTACK_HOST` will be `localstack:4566`. ### Store Localstack state @@ -111,6 +118,7 @@ In case of the service setup `LOCALSTACK_HOST` will be `localstack:4566`. You can preserve your AWS infrastructure with Localstack in various ways. #### Artifact + ```yaml ... job: @@ -125,9 +133,11 @@ job: - $CI_PROJECT_DIR/ls-state-pod.zip ... ``` + More info about Localstack's state export and import [here](/user-guide/state-management/export-import-state/). #### Cache + ```yaml ... job: @@ -146,9 +156,11 @@ job: - $CI_PROJECT_DIR/ls-state-pod.zip ... ``` + Additional information about state export and import [here](/user-guide/state-management/export-import-state/). #### Cloud Pod + ```yaml ... job: @@ -159,6 +171,7 @@ job: - localstack pod save ... ``` + Find more information about cloud pods [here](/user-guide/state-management/cloud-pods/). #### Ephemeral Instance (Preview) diff --git a/content/en/user-guide/ci/travis-ci/index.md b/content/en/user-guide/ci/travis-ci/index.md index d04d267473..8ee4bfe9b6 100644 --- a/content/en/user-guide/ci/travis-ci/index.md +++ b/content/en/user-guide/ci/travis-ci/index.md @@ -12,7 +12,8 @@ This guide shows how to start and use LocalStack in your Travis CI jobs. When you want to integrate LocalStack into your job configuration, you just have to execute the following steps: - Install the LocalStack CLI (and maybe also `awslocal`). - Make sure your LocalStack docker image is up-to-date by pulling the latest version. -- Use the LocalStack CLI to start LocalStack. Make sure to use the `-d` flag to start the LocalStack docker container in detached mode. +- Use the LocalStack CLI to start LocalStack. + Make sure to use the `-d` flag to start the LocalStack docker container in detached mode. - Wait for the container to report that it is up and running. The following example Travis CI job config (`.travis.yaml`) executes these steps, creates a new S3 bucket, and prints a nice message in the end: @@ -47,7 +48,8 @@ script: ## Configuring a CI key -You can easily enable LocalStack Pro by using the `localstack/localstack-pro` image and adding your CI key to the project's environment variables. The LocalStack CLI will automatically pick it up and activate the Pro features. +You can easily enable LocalStack Pro by using the `localstack/localstack-pro` image and adding your CI key to the project's environment variables. +The LocalStack CLI will automatically pick it up and activate the Pro features. 
Just go to the project settings in Travis CI (`More options` → `Settings`), scroll down to the `Environment Variables` section, and add your CI key:

diff --git a/content/en/user-guide/cloud-sandbox/_index.md b/content/en/user-guide/cloud-sandbox/_index.md
index d0d0ab1c8d..f30ea8fff7 100644
--- a/content/en/user-guide/cloud-sandbox/_index.md
+++ b/content/en/user-guide/cloud-sandbox/_index.md
@@ -10,7 +10,8 @@ cascade:

## Introduction

-LocalStack Cloud Sandbox allow you to run an LocalStack instance in the cloud. LocalStack Cloud Sandbox encompasses the following features:
+LocalStack Cloud Sandbox allows you to run a LocalStack instance in the cloud.
+LocalStack Cloud Sandbox encompasses the following features:

- Deploy LocalStack as an ephemeral instance for dev&test loops without running it locally.
- Create easy previews by enabling preview-per-PR type workflows for application change.
diff --git a/content/en/user-guide/cloud-sandbox/application-previews/index.md b/content/en/user-guide/cloud-sandbox/application-previews/index.md
index f99b752316..54322ee68d 100644
--- a/content/en/user-guide/cloud-sandbox/application-previews/index.md
+++ b/content/en/user-guide/cloud-sandbox/application-previews/index.md
@@ -7,7 +7,9 @@ description: Create an Application Preview to deploy your application changes in

## Introduction

-Application Preview allows you to generate an preview environment from GitHub Pull Request (PR) builds. You can use Application Preview to temporarily deploy your AWS-powered application to a LocalStack Ephemeral Instance and preview your application changes. Currently, the Application Preview are only supported for GitHub repositories using GitHub Actions.
+Application Preview allows you to generate a preview environment from GitHub Pull Request (PR) builds.
+You can use Application Preview to temporarily deploy your AWS-powered application to a LocalStack Ephemeral Instance and preview your application changes.
+Currently, Application Preview is only supported for GitHub repositories using GitHub Actions.

{{< callout >}}
Application Preview is currently available on invite-only preview.
@@ -16,9 +18,11 @@ If you'd like to try it out, please [contact us](https://www.localstack.cloud/de

## Getting started

-This guide is designed for users new to Application Preview and assumes basic knowledge of GitHub Actions. We will configure a CI pipeline that runs on pull requests using GitHub Actions.
+This guide is designed for users new to Application Preview and assumes basic knowledge of GitHub Actions.
+We will configure a CI pipeline that runs on pull requests using GitHub Actions.

-To get started with a ready-to-use template, you can fork the [`bref-localstack-sample`](https://github.com/localstack-samples/bref-localstack-sample) repository. The sample application deploys a serverless PHP application using Bref and the Serverless Framework.
+To get started with a ready-to-use template, you can fork the [`bref-localstack-sample`](https://github.com/localstack-samples/bref-localstack-sample) repository.
+The sample application deploys a serverless PHP application using Bref and the Serverless Framework.

### Prerequisites

@@ -29,7 +33,10 @@ To get started with a ready-to-use template, you can fork the [`bref-localstack-

To create an Application Preview, you can use the [`LocalStack/setup-localstack/ephemeral/startup` action](https://github.com/localstack/setup-localstack).

-The sample repository has been configured to use the workflow described above. 
For your custom repository, create a new file named `ci-pipeline.yml` in the `.github/workflows` directory. This file will contain the CI pipeline that runs on every pull request. This pipeline deploys the application to a LocalStack Ephemeral Instance. +The sample repository has been configured to use the workflow described above. +For your custom repository, create a new file named `ci-pipeline.yml` in the `.github/workflows` directory. +This file will contain the CI pipeline that runs on every pull request. +This pipeline deploys the application to a LocalStack Ephemeral Instance. The workflow file to create the Application Preview looks like this: @@ -69,13 +76,17 @@ jobs: echo "Open URL: $AWS_ENDPOINT_URL/restapis/$apiId/dev/_user_request_/" ``` -You will also need to configure the `LOCALSTACK_API_KEY` as a repository secret. You can find the API key on the [LocalStack Web Application](https://app.localstack.cloud/account/apikeys). The `GITHUB_TOKEN` is automatically created by GitHub and you can use it without any additional configuration. +You will also need to configure the `LOCALSTACK_API_KEY` as a repository secret. +You can find the API key on the [LocalStack Web Application](https://app.localstack.cloud/account/apikeys). +The `GITHUB_TOKEN` is automatically created by GitHub and you can use it without any additional configuration. ### Attach the Preview URL You can now attach the Preview URL to the pull request by using the [`LocalStack/setup-localstack/finish` action](https://github.com/localstack/setup-localstack). -The sample repository has been configured to use the workflow described above. For your custom repository, create a new file named `ci-finalize.yml` in the `.github/workflows` directory. This file contains the CI pipeline that attaches a comment to the pull request with the Preview URL of the deployed application. +The sample repository has been configured to use the workflow described above. +For your custom repository, create a new file named `ci-finalize.yml` in the `.github/workflows` directory. +This file contains the CI pipeline that attaches a comment to the pull request with the Preview URL of the deployed application. The workflow file to attach the Preview URL looks like this: @@ -101,8 +112,11 @@ jobs: ### Open a Pull Request -Once your changes are in your repository, open a new pull request. GitHub will receive the request and trigger your workflow. You can track the workflow's status and logs in the **Checks** section of the pull request. +Once your changes are in your repository, open a new pull request. +GitHub will receive the request and trigger your workflow. +You can track the workflow's status and logs in the **Checks** section of the pull request. -After a short delay, the workflow will update the pull request with the URL of your preview environment. Just click on it to see the changes in real-time. +After a short delay, the workflow will update the pull request with the URL of your preview environment. +Just click on it to see the changes in real-time. Each time the branch is updated, the same workflow will automatically refresh the preview environment. 
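After the preview workflow has run, the endpoint URL it prints (the `AWS_ENDPOINT_URL` used in the job above) can also be used from a local terminal to inspect the deployed preview. The commands below are only a sketch: the exported URL is a placeholder you copy from the workflow output, and they assume the sample application exposes an API Gateway REST API, as in the snippet above.

```bash
# Placeholder: replace with the preview endpoint URL printed by the workflow.
export AWS_ENDPOINT_URL="https://<your-preview-endpoint>"

# List the REST APIs deployed into the preview instance to find the API ID.
aws --endpoint-url "$AWS_ENDPOINT_URL" apigateway get-rest-apis

# Call the deployed stage through the _user_request_ path, mirroring the URL echoed by the workflow.
curl "$AWS_ENDPOINT_URL/restapis/<api-id>/dev/_user_request_/"
```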
diff --git a/content/en/user-guide/cloud-sandbox/ephemeral-instance/index.md b/content/en/user-guide/cloud-sandbox/ephemeral-instance/index.md
index 9be1a76e25..3cf509b653 100644
--- a/content/en/user-guide/cloud-sandbox/ephemeral-instance/index.md
+++ b/content/en/user-guide/cloud-sandbox/ephemeral-instance/index.md
@@ -7,7 +7,8 @@ description: Create an Ephemeral Instance in the cloud using the LocalStack Web

## Introduction

-LocalStack Ephemeral Instance allows you to run an LocalStack instance in the cloud. You can interact with these remote instances via the LocalStack Web Application, or by configuring your integrations and developer tools with the endpoint URL of the remote instance.
+LocalStack Ephemeral Instance allows you to run a LocalStack instance in the cloud.
+You can interact with these remote instances via the LocalStack Web Application, or by configuring your integrations and developer tools with the endpoint URL of the remote instance.

{{< callout >}}
Ephemeral Instance is available on invite-only preview.
@@ -16,15 +17,18 @@ If you'd like to try it out, please [contact us](https://www.localstack.cloud/de

## Getting started

-This guide is designed for users new to Ephemeral Instance and assumes basic knowledge of the LocalStack Web Application. In this guide, we will create an Ephemeral Instance and interact with it via the LocalStack Web Application and the AWS CLI.
+This guide is designed for users new to Ephemeral Instance and assumes basic knowledge of the LocalStack Web Application.
+In this guide, we will create an Ephemeral Instance and interact with it via the LocalStack Web Application and the AWS CLI.

### Create a new Ephemeral Instance

-Navigate to the [**LocalStack Instance Management**](https://app.localstack.cloud/instances) and click on the **Ephemeral (Team)** tab. Click on the **Create Ephemeral Instance** button.
+Navigate to the [**LocalStack Instance Management**](https://app.localstack.cloud/instances) and click on the **Ephemeral (Team)** tab.
+Click on the **Create Ephemeral Instance** button.

Creating an Ephemeral Instance

-You will be able to specify the name of the new Epheemeral Instance. Click on **Create Instance** to create the new Ephemeral Instance.
+You will be able to specify the name of the new Ephemeral Instance.
+Click on **Create Instance** to create the new Ephemeral Instance.

### Interact with the Ephemeral Instance

@@ -43,7 +47,8 @@ You will also be able to access the following with your Ephemeral Instance:

### Access the Ephemeral Instance via AWS CLI

-You can access the Ephemeral Instance via the AWS CLI by configuring the AWS CLI with the endpoint URL of the Ephemeral Instance. You can find the endpoint URL of the Ephemeral Instance in the **LocalStack Instance Management** page.
+You can access the Ephemeral Instance via the AWS CLI by configuring the AWS CLI with the endpoint URL of the Ephemeral Instance.
+You can find the endpoint URL of the Ephemeral Instance in the **LocalStack Instance Management** page.

To create an S3 bucket in the Ephemeral Instance, run the following command:

{{< command >}}
$ aws --endpoint-url= s3 mb s3://
{{< /command >}}

-You can replace `` with the endpoint URL of the Ephemeral Instance and `` with the name of the S3 bucket you want to create. 
To query the list of S3 buckets in the Ephemeral Instance, run the following command:
+You can replace `` with the endpoint URL of the Ephemeral Instance and `` with the name of the S3 bucket you want to create.
+To query the list of S3 buckets in the Ephemeral Instance, run the following command:

{{< command >}}
$ aws --endpoint-url= s3 ls
{{< /command >}}

-You can further use integrations, such as [CDK](https://docs.localstack.cloud/user-guide/integrations/aws-cdk/), [SAM CLI](https://docs.localstack.cloud/user-guide/integrations/aws-sam/), and [Terraform](https://docs.localstack.cloud/user-guide/integrations/terraform/), to interact with the Ephemeral Instance. In these integrations, you can change the `LOCALSTACK_HOSTNAME` environment variable to the endpoint URL of the Ephemeral Instance.
+You can further use integrations, such as [CDK](https://docs.localstack.cloud/user-guide/integrations/aws-cdk/), [SAM CLI](https://docs.localstack.cloud/user-guide/integrations/aws-sam/), and [Terraform](https://docs.localstack.cloud/user-guide/integrations/terraform/), to interact with the Ephemeral Instance.
+In these integrations, you can change the `LOCALSTACK_HOSTNAME` environment variable to the endpoint URL of the Ephemeral Instance.

### Remove the Ephemeral Instance

-You can delete the Ephemeral Instance by clicking on the **Remove** button in the **LocalStack Instance Management** page. These instances are automatically terminated after 90 minutes, in case you don`t remove them manually.
+You can delete the Ephemeral Instance by clicking on the **Remove** button in the **LocalStack Instance Management** page.
+These instances are automatically terminated after 90 minutes, in case you don't remove them manually.

## Load Cloud Pod into an Ephmeral Instance

-You can load a Cloud Pod into an Ephemeral Instance to seed your pre-existing cloud resources into a freshly created Ephemeral Instance. You can further use AWS CLI or other integrations to interact with these resources, along with using other features of the LocalStack Web Application.
+You can load a Cloud Pod into an Ephemeral Instance to seed your pre-existing cloud resources into a freshly created Ephemeral Instance.
+You can further use AWS CLI or other integrations to interact with these resources, along with using other features of the LocalStack Web Application.

-To load a pre-defined Cloud Pod, navigate to the **Cloud Pods Details** for the specific Cloud Pod and click on the **Browse Version** button. Clicking on the button will open a modal where you will be asked to confirm that this will start a new LocalStack instance and load the selected Cloud Pod version.
+To load a pre-defined Cloud Pod, navigate to the **Cloud Pods Details** for the specific Cloud Pod and click on the **Browse Version** button.
+Clicking on the button will open a modal where you will be asked to confirm that this will start a new LocalStack instance and load the selected Cloud Pod version.

Cloud Pod Details page

-Click **Continue** to start the Ephemeral Instance and load the Cloud Pod. You will be able to see the Ephemeral Instance on the sidebar. You can copy the endpoint URL of the Ephemeral Instance and use it to manage your cloud resources. +Click **Continue** to start the Ephemeral Instance and load the Cloud Pod. +You will be able to see the Ephemeral Instance on the sidebar. +You can copy the endpoint URL of the Ephemeral Instance and use it to manage your cloud resources. diff --git a/content/en/user-guide/extensions/_index.md b/content/en/user-guide/extensions/_index.md index 359425ddb8..f0d569a26c 100644 --- a/content/en/user-guide/extensions/_index.md +++ b/content/en/user-guide/extensions/_index.md @@ -12,9 +12,11 @@ slug: extensions {{< figure src="https://user-images.githubusercontent.com/3996682/184503940-c30bfcac-e049-4ee4-b905-207b340111d1.png" >}} LocalStack Extensions allow developers to extend and customize LocalStack. -Extensions are a feature of our paid offering. LocalStack Extensions enable you to start custom services with LocalStack in the same container, while leveraging the existing features in the ecosystem. +Extensions are a feature of our paid offering. +LocalStack Extensions enable you to start custom services with LocalStack in the same container, while leveraging the existing features in the ecosystem. -Developers can add new services, extend existing services, and even add custom functionality. The Extensions API allows developers to easily plug in their own custom logic and services into the LocalStack container. +Developers can add new services, extend existing services, and even add custom functionality. +The Extensions API allows developers to easily plug in their own custom logic and services into the LocalStack container. You can use LocalStack Extensions to: @@ -22,7 +24,8 @@ You can use LocalStack Extensions to: - Instrumenting AWS requests with additional information before they reach your Lambdas. - Logging AWS API calls to custom data backends. -The officially supported [LocalStack Extensions]({{< ref "user-guide/extensions/official-extensions" >}}) can be discovered on our [Extension Library](https://app.localstack.cloud/extensions/library). To install and use extensions, you need an active LocalStack license. +The officially supported [LocalStack Extensions]({{< ref "user-guide/extensions/official-extensions" >}}) can be discovered on our [Extension Library](https://app.localstack.cloud/extensions/library). +To install and use extensions, you need an active LocalStack license. {{< callout >}} The feature and the API are currently in a preview stage and may be subject to change. diff --git a/content/en/user-guide/extensions/developing-extensions/index.md b/content/en/user-guide/extensions/developing-extensions/index.md index eef69b53f0..4a26dcec89 100644 --- a/content/en/user-guide/extensions/developing-extensions/index.md +++ b/content/en/user-guide/extensions/developing-extensions/index.md @@ -98,10 +98,9 @@ class ReadyAnnouncerExtension(Extension): def on_platform_ready(self): LOG.setLevel(logging.INFO) - LOG.info("my plugin is loaded and localstack is ready to roll!") + LOG.info("my plugin is loaded and localstack is ready to roll!") ``` - {{< callout >}} A note on importing LocalStack modules: since extensions run in the same Python process as the LocalStack runtime, you can also import other LocalStack modules outside the `localstack.extensions.api` module, and work with them. 
@@ -112,7 +111,8 @@ Your extension may break in unexpected ways, and we cannot provide support for i
## Packaging extensions

Your extensions needs to be packaged as a Python distribution with a
-`setup.cfg` or `setup.py` config. LocalStack uses the
+`setup.cfg` or `setup.py` config.
+LocalStack uses the
[Plux](https://github.com/localstack/plux) code loading framework to load your
code from a Python [entry point](https://packaging.python.org/en/latest/specifications/entry-points/).
You can either use Plux to discover the entrypoints from your code when
@@ -141,10 +141,10 @@ localstack.extensions =
```

The entry point group is the Plux namespace `locastack.extensions`, and the
-entry point name is the plugin name `my_ready_announcer`. The object
+entry point name is the plugin name `my_ready_announcer`.
+The object
reference points to the plugin class.

-
## Using the extensions developer CLI

The extensions CLI has a set of developer commands that allow you to create new extensions, and toggle local dev mode for extensions.
@@ -184,10 +184,9 @@ github_username [janedoe]:
version [0.1.0]:
{{< / command >}}

-
This will create a new Python project with the following layout:

-```
+```bash
my-localstack-extension
├── Makefile
├── my_localstack_extension
@@ -208,15 +207,16 @@ To start LocalStack with the extension in dev mode, first enable it by running:
$ localstack extensions dev enable ./my-localstack-extension
{{< / command >}}

-
Then, start LocalStack with `EXTENSION_DEV_MODE=1`

{{< command >}}
-$ EXTENSION_DEV_MODE=1 LOCALSTACK_AUTH_TOKEN=... localstack start
+$ EXTENSION_DEV_MODE=1 LOCALSTACK_AUTH_TOKEN=... localstack start
{{< / command >}}

In the LocalStack logs you should then see something like:
-```
+
+```bash
==================================================
👷 LocalStack extension developer mode enabled 🏗
- mounting extension /opt/code/extensions/my-localstack-extension
@@ -230,7 +230,7 @@ Now, when you make changes to your extensions, you just need to restart LocalSta

Once your extension is ready to be used, release it on a public GitHub repository.
To make your extension easily installable for everyone generate an extension badge for your extension on this page.
-The resulting badge should look like this .
+The resulting badge should look like this Extension badge.
You can create a one-click installer for your extension using our [Extension Installer](https://app.localstack.cloud/extensions/remote).

{{< figure src="extension-installer.png" >}}
diff --git a/content/en/user-guide/extensions/extensions-library/index.md b/content/en/user-guide/extensions/extensions-library/index.md
index 5f29eb9792..05b6aa8de9 100644
--- a/content/en/user-guide/extensions/extensions-library/index.md
+++ b/content/en/user-guide/extensions/extensions-library/index.md
@@ -9,7 +9,8 @@ aliases:

## Introduction

-LocalStack extensions allows you to extend and customize LocalStack. A LocalStack extension is a Python application that runs together with LocalStack within the LocalStack container.
+LocalStack extensions allow you to extend and customize LocalStack.
+A LocalStack extension is a Python application that runs together with LocalStack within the LocalStack container.

LocalStack extensions are available to licensed users, and the list of available extensions can be found in the [Extensions Library](https://app.localstack.cloud/extensions/library).
@@ -17,12 +18,16 @@ LocalStack extensions are available to licensed users, and the list of available ## Installing an Extension -To install an extension using the LocalStack Extensions Library, you can navigate to the [**app.localstack.cloud/extensions/library**](https://app.localstack.cloud/extensions/library) and click on the **Go to Instance** button to open the list of available instances. If you are running your LocalStack instance locally, you can click on the **Default** option. +To install an extension using the LocalStack Extensions Library, you can navigate to the [**app.localstack.cloud/extensions/library**](https://app.localstack.cloud/extensions/library) and click on the **Go to Instance** button to open the list of available instances. +If you are running your LocalStack instance locally, you can click on the **Default** option. -You will be redirected to the LocalStack instance page, where you can directly click the **Install** button to install the Extension. The installation process will take a few seconds, and **will restart your LocalStack instance**. Click **Continue** to proceed. +You will be redirected to the LocalStack instance page, where you can directly click the **Install** button to install the Extension. +The installation process will take a few seconds, and **will restart your LocalStack instance**. +Click **Continue** to proceed. ## Managing Extensions -You can further manage the installed extensions by navigating to the **Extensions** tab in the LocalStack Instance page. You can remove an Extension by clicking the **Remove** button. +You can further manage the installed extensions by navigating to the **Extensions** tab in the LocalStack Instance page. +You can remove an Extension by clicking the **Remove** button. Installed LocalStack Extensions Library diff --git a/content/en/user-guide/extensions/getting-started/index.md b/content/en/user-guide/extensions/getting-started/index.md index bfc8e3153f..44e3b7c21a 100644 --- a/content/en/user-guide/extensions/getting-started/index.md +++ b/content/en/user-guide/extensions/getting-started/index.md @@ -9,7 +9,9 @@ tags: ["Pro image"] ## Introduction -MailHog is an open source email testing tool for developers. It provides a simple SMTP server and web interface that allows developers to easily catch and inspect emails sent from their application during development. In this guide, you will install and use the [official MailHog extension for LocalStack](https://github.com/localstack/localstack-extensions/tree/main/mailhog) and send an email through SES, while inspecting it in MailHog. +MailHog is an open source email testing tool for developers. +It provides a simple SMTP server and web interface that allows developers to easily catch and inspect emails sent from their application during development. +In this guide, you will install and use the [official MailHog extension for LocalStack](https://github.com/localstack/localstack-extensions/tree/main/mailhog) and send an email through SES, while inspecting it in MailHog. ## Prerequisites @@ -18,11 +20,13 @@ MailHog is an open source email testing tool for developers. It provides a simpl ## Installation -To get started, start your LocalStack instance with your `LOCALSTACK_API_KEY`. Access our [Extension Manager](https://app.localstack.cloud/inst/default/extensions/manage), and click the **Install** button for the MailHog extension. +To get started, start your LocalStack instance with your `LOCALSTACK_API_KEY`. 
+Access our [Extension Manager](https://app.localstack.cloud/inst/default/extensions/manage), and click the **Install** button for the MailHog extension. {{< figure src="install-extensions.png" alt="Extensions Manager" width="800">}} -You'll receive a confirmation prompt indicating that LocalStack container will restart, after which the extension will become accessible. Check your LocalStack logs for MailHog extension output, where you should see relevant logging information: +You'll receive a confirmation prompt indicating that LocalStack container will restart, after which the extension will become accessible. +Check your LocalStack logs for MailHog extension output, where you should see relevant logging information: ```bash 2023-10-11T19:10:54.708 INFO --- [ MainThread] l.extensions.platform : loaded 1 extensions @@ -35,18 +39,22 @@ You'll receive a confirmation prompt indicating that LocalStack container will r ## Usage -MailHog enables you to conduct end-to-end testing of applications that utilize SES (Simple Email Service) for sending emails. To test this, let's use the AWS CLI to send an email. +MailHog enables you to conduct end-to-end testing of applications that utilize SES (Simple Email Service) for sending emails. +To test this, let's use the AWS CLI to send an email. ### Send an Email -You can use the [`VerifyEmailIdentity`](https://docs.aws.amazon.com/cli/latest/reference/ses/verify-email-identity.html) API to verify an email address with SES. This is a required step before you can send emails from SES. Run the following command to verify an email address: +You can use the [`VerifyEmailIdentity`](https://docs.aws.amazon.com/cli/latest/reference/ses/verify-email-identity.html) API to verify an email address with SES. +This is a required step before you can send emails from SES. +Run the following command to verify an email address: {{< command >}} $ aws --endpoint-url=http://localhost:4566 \ ses verify-email-identity --email-address user1@yourdomain.com {{< /command >}} -You can further send an email using the [`SendEmail`](https://docs.aws.amazon.com/cli/latest/reference/ses/send-email.html) API. Run the following command to send an email: +You can further send an email using the [`SendEmail`](https://docs.aws.amazon.com/cli/latest/reference/ses/send-email.html) API. +Run the following command to send an email: {{< command >}} $ aws --endpoint-url=http://localhost:4566 \ @@ -58,13 +66,15 @@ $ aws --endpoint-url=http://localhost:4566 \ ### Navigate to Extension UI -Navigate in your browser to the [MailHog UI in LocalStack](http://mailhog.localhost.localstack.cloud:4566/). You should see the email you sent in the MailHog UI. +Navigate in your browser to the [MailHog UI in LocalStack](http://mailhog.localhost.localstack.cloud:4566/). +You should see the email you sent in the MailHog UI. {{< figure src="mailhog.png" alt="Mailhog UI" width="800">}} ## Next steps -- Explore our collection of official extensions, along with a growing ecosystem of third-party extensions, in our [Extensions Library](https://app.localstack.cloud/extensions/library). -- Learn about the various methods for extension management and automating their installation when using LocalStack in a CI environment. Get detailed insights from our [Managing Extensions]({{< ref "managing-extensions" >}}) guide. -- Want to create your own extensions? Dive into our guide on [Developing Extensions]({{< ref "developing-extensions" >}}) for step-by-step instructions. 
- +- Explore our collection of official extensions, along with a growing ecosystem of third-party extensions, in our [Extensions Library](https://app.localstack.cloud/extensions/library). +- Learn about the various methods for extension management and automating their installation when using LocalStack in a CI environment. + Get detailed insights from our [Managing Extensions]({{< ref "managing-extensions" >}}) guide. +- Want to create your own extensions? + Dive into our guide on [Developing Extensions]({{< ref "developing-extensions" >}}) for step-by-step instructions. diff --git a/content/en/user-guide/extensions/managing-extensions/index.md b/content/en/user-guide/extensions/managing-extensions/index.md index e738e1cfbc..8c6ccf25fa 100644 --- a/content/en/user-guide/extensions/managing-extensions/index.md +++ b/content/en/user-guide/extensions/managing-extensions/index.md @@ -26,7 +26,6 @@ It re-starts the process inside the running container, not the container itself. However, you may lose LocalStack state if you do not use persistence. {{}} - {{< figure src="extensions-manager.png" >}} ## Using the extensions CLI @@ -53,7 +52,8 @@ list List installed extension uninstall Remove a LocalStack extension {{< / command >}} -To install an extension, specify the name of the `pip` dependency that contains the extension. For example, for the official Stripe extension, you can either use the package distributed on PyPI: +To install an extension, specify the name of the `pip` dependency that contains the extension. +For example, for the official Stripe extension, you can either use the package distributed on PyPI: {{< command >}} $ localstack extensions install localstack-extension-httpbin @@ -75,7 +75,9 @@ pip install file://./my-extensions/dist/my-extension-0.0.1.dev0.tar.gz ### Specify the `LOCALSTACK_VOLUME_DIR` -Extensions should be installed in the `LOCALSTACK_VOLUME_DIR`. The default directory on your host is currently `~/.cache/localstack`. If you decide to mount a different directory to `/var/lib/localstack` in your docker-compose file, as shown below, you must specify the `LOCALSTACK_VOLUME_DIR` before installing extensions. +Extensions should be installed in the `LOCALSTACK_VOLUME_DIR`. +The default directory on your host is currently `~/.cache/localstack`. +If you decide to mount a different directory to `/var/lib/localstack` in your docker-compose file, as shown below, you must specify the `LOCALSTACK_VOLUME_DIR` before installing extensions. ```yaml volumes: @@ -104,6 +106,7 @@ The value is a comma-separated list of extensions directives that can also be sp If you want to use the `file://` directive, the distribution file needs to be mounted into the container. 
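Once a distribution file has been mounted into the container (for example through the volume shown in the compose snippet that follows), the CLI flavour of a `file://` install might look roughly like this; the tarball name is taken from the example above and the `./volume` host directory is purely illustrative:

```bash
# Copy the built extension into the host directory that is mounted to /var/lib/localstack.
cp my-extensions/dist/my-extension-0.0.1.dev0.tar.gz ./volume/

# Install it using the path as seen from inside the container.
localstack extensions install "file:///var/lib/localstack/my-extension-0.0.1.dev0.tar.gz"
```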
In a docker-compose file, this would look something like: + ```yaml version: "3.8" @@ -133,18 +136,23 @@ Since LocalStack extensions are essentially just Python pip packages, the `exten An example project could look something like this: * `extensions.txt` - ``` + + ```text localstack-extension-mailhog git+https://github.com/localstack/localstack-extensions/#egg=localstack-extension-aws-replicator&subdirectory=aws-replicator ``` -* Project layout: + +* Project layout: + ```console extension-install ├── conf.d │ └── extensions.txt └── docker-compose.yml ``` + * `docker-compose.yaml` + ```yaml version: "3.8" @@ -161,7 +169,10 @@ When LocalStack starts up, you should see it tries to install the extensions and ## Extension Management within LocalStack -Extensions in LocalStack are Python distributions that operate within their dedicated virtual environment, residing in the [LocalStack Volume]({{< ref "filesystem" >}}). This involves the creation of a"variable packages folder `/var/lib/localstack/lib`," where the volume management system establishes both an `extensions` folder and a virtual environment named `python_venv`. Within this environment, all extensions and their dependencies are managed. LocalStack integrates its virtual environment, ensuring the resolution of all transitive dependencies associated with extensions. +Extensions in LocalStack are Python distributions that operate within their dedicated virtual environment, residing in the [LocalStack Volume]({{< ref "filesystem" >}}). +This involves the creation of a"variable packages folder `/var/lib/localstack/lib`," where the volume management system establishes both an `extensions` folder and a virtual environment named `python_venv`. +Within this environment, all extensions and their dependencies are managed. +LocalStack integrates its virtual environment, ensuring the resolution of all transitive dependencies associated with extensions. Here's an example what the default LocalStack volume looks like after installing the MailHog extension: diff --git a/content/en/user-guide/integrations/architect/index.md b/content/en/user-guide/integrations/architect/index.md index a63567a395..d17ab4c78f 100644 --- a/content/en/user-guide/integrations/architect/index.md +++ b/content/en/user-guide/integrations/architect/index.md @@ -16,6 +16,7 @@ If you are adapting an existing configuration, you might be able to skip certain ## Example ### Setup + To use Architect in conjunction with LocalStack, simply install the `arclocal` command (sources can be found [here](https://github.com/localstack/architect-local)). {{< command >}} $ npm install -g architect-local @architect/architect aws-sdk @@ -37,7 +38,8 @@ $ arclocal init ### Deployment -Now you need to start LocalStack. After LocalStack has started you can deploy your Architect setup via: +Now you need to start LocalStack. 
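A quick way to do that for a throwaway run (a sketch; any of the usual startup methods works just as well):

```bash
# Start LocalStack in the background and block until it reports as ready.
localstack start -d
localstack wait -t 30

# Optionally confirm the gateway is reachable before deploying.
curl -s http://localhost:4566/_localstack/health
```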
+After LocalStack has started you can deploy your Architect setup via: {{< command >}} $ arclocal deploy {{< / command >}} diff --git a/content/en/user-guide/integrations/aws-cdk/index.md b/content/en/user-guide/integrations/aws-cdk/index.md index 9da321edb2..68dc2c9da3 100644 --- a/content/en/user-guide/integrations/aws-cdk/index.md +++ b/content/en/user-guide/integrations/aws-cdk/index.md @@ -46,7 +46,6 @@ The following environment variables can be configured: * `AWS_ENDPOINT_URL`: The endpoint URL (i.e., protocol, host, and port) to connect to LocalStack (default: `http://localhost:4566`) * `LAMBDA_MOUNT_CODE`: Whether to use local Lambda code mounting (via setting `hot-reload` S3 bucket name) - ### Example Make sure that LocalStack is installed and successfully started with the required services before running the example @@ -87,7 +86,9 @@ $ awslocal sns list-topics ### Updating CDK stacks -Updating CDK stacks may result in deployment failures and inconsistent state within LocalStack. It is advisable to prioritize re-creating (deleting and re-deploying) over updating stacks. Our focus for this year will be on resolving issues related to the `UPDATE` support, and continuous improvements can be anticipated in this area throughout 2024. +Updating CDK stacks may result in deployment failures and inconsistent state within LocalStack. +It is advisable to prioritize re-creating (deleting and re-deploying) over updating stacks. +Our focus for this year will be on resolving issues related to the `UPDATE` support, and continuous improvements can be anticipated in this area throughout 2024. ### Stacks with validated certificates @@ -97,15 +98,15 @@ When this lambda is executed locally from the `/tmp` folder, the package can not ## Other resources -- [Hot-reloading Lambda functions with CDK]({{< ref "user-guide/lambda-tools/hot-reloading#aws-cloud-development-kit-cdk-configuration" >}}) +* [Hot-reloading Lambda functions with CDK]({{< ref "user-guide/lambda-tools/hot-reloading#aws-cloud-development-kit-cdk-configuration" >}}) ## External resources -- [aws-cdk-local](https://github.com/localstack/aws-cdk-local) -- [AWS CDK API reference](https://docs.aws.amazon.com/cdk/api/latest/docs/aws-construct-library.html) -- [AWS CDK Developer Guide](https://docs.aws.amazon.com/cdk/latest/guide/home.html) +* [aws-cdk-local](https://github.com/localstack/aws-cdk-local) +* [AWS CDK API reference](https://docs.aws.amazon.com/cdk/api/latest/docs/aws-construct-library.html) +* [AWS CDK Developer Guide](https://docs.aws.amazon.com/cdk/latest/guide/home.html) ## Community resources -- https://blog.dennisokeeffe.com/blog/2021-08-07-using-the-aws-cdk-with-localstack-and-aws-cdk-local -- https://www.youtube.com/watch?v=3_sqr0G9zb0 +* https://blog.dennisokeeffe.com/blog/2021-08-07-using-the-aws-cdk-with-localstack-and-aws-cdk-local +* https://www.youtube.com/watch?v=3_sqr0G9zb0 diff --git a/content/en/user-guide/integrations/aws-cli/index.md b/content/en/user-guide/integrations/aws-cli/index.md index 2423d8ab53..ae9bd29e44 100644 --- a/content/en/user-guide/integrations/aws-cli/index.md +++ b/content/en/user-guide/integrations/aws-cli/index.md @@ -7,7 +7,8 @@ description: > ## Introduction -The [AWS Command Line Interface (CLI)](https://aws.amazon.com/cli/) is a unified tool for creating and managing AWS services via a command line interface. All CLI commands applicable to services implemented within [LocalStack]({{< ref "references/coverage/" >}}) can be executed when operating against LocalStack. 
+The [AWS Command Line Interface (CLI)](https://aws.amazon.com/cli/) is a unified tool for creating and managing AWS services via a command line interface. +All CLI commands applicable to services implemented within [LocalStack]({{< ref "references/coverage/" >}}) can be executed when operating against LocalStack. You can use the AWS CLI with LocalStack using either of the following approaches: @@ -24,12 +25,13 @@ $ pip install awscli You can configure the AWS CLI to redirect AWS API requests to LocalStack using two approaches: -- [Configuring an endpoint URL](#configuring-an-endpoint-url) -- [Configuring a custom profile](#configuring-a-custom-profile) +* [Configuring an endpoint URL](#configuring-an-endpoint-url) +* [Configuring a custom profile](#configuring-a-custom-profile) ### Configuring an endpoint URL -You can use AWS CLI with an endpoint URL by configuring test environment variables and include the `--endpoint-url=` flag in your `aws` CLI commands. For example: +You can use AWS CLI with an endpoint URL by configuring test environment variables and include the `--endpoint-url=` flag in your `aws` CLI commands. +For example: {{< command >}} $ export AWS_ACCESS_KEY_ID="test" @@ -45,7 +47,8 @@ To enable the creation of pre-signed URLs for S3 buckets, please set both `AWS_A ### Configuring a custom profile -You can configure a custom profile to use with LocalStack. Add the following profile to your AWS configuration file (by default, this file is at `~/.aws/config`): +You can configure a custom profile to use with LocalStack. +Add the following profile to your AWS configuration file (by default, this file is at `~/.aws/config`): ```bash [profile localstack] @@ -86,7 +89,8 @@ $ pip install awscli-local[ver1] {{< / command >}} {{< callout "tip" >}} -The above command installs the most recent version of the underlying AWS CLI version 1 (`awscli`) package. If you would rather manage your own `awscli` version (e.g., `v1` or `v2`) and only install the wrapper script, you can use the following command: +The above command installs the most recent version of the underlying AWS CLI version 1 (`awscli`) package. +If you would rather manage your own `awscli` version (e.g., `v1` or `v2`) and only install the wrapper script, you can use the following command: {{< command >}} $ pip install awscli-local @@ -99,7 +103,8 @@ Automatic installation of AWS CLI version 2 is currently not supported yet (at t ### Usage -The `awslocal` command shares identical usage with the standard `aws` command. For comprehensive usage instructions, refer to the manual pages by running `awslocal help`. +The `awslocal` command shares identical usage with the standard `aws` command. +For comprehensive usage instructions, refer to the manual pages by running `awslocal help`. {{< command >}} awslocal kinesis list-streams @@ -118,8 +123,8 @@ awslocal kinesis list-streams Please note that there is a known limitation for using the `cloudformation package ...` command with the AWS CLI v2. The problem is that the AWS CLI v2 is [not available as a package on pypi.org](https://github.com/aws/aws-cli/issues/4947), but is instead shipped as a binary package that cannot be easily patched from `awslocal`. To work around this issue, you have 2 options: -- Downgrade to the v1 AWS CLI (this is the recommended approach) -- There is an unofficial way to install AWS CLI v2 from sources. +* Downgrade to the v1 AWS CLI (this is the recommended approach) +* There is an unofficial way to install AWS CLI v2 from sources. 
We do not recommend this, but it is technically possible. Also, you should install these libraries in a Python virtualenv, to avoid version clashes with other libraries on your system: @@ -130,10 +135,12 @@ $ pip install https://github.com/boto/botocore/archive/v2.zip https://github.com {{< / command >}} Please also note there is a known limitation for issuing requests using -`--no-sign-request` with the AWS CLI. LocalStack's routing mechanism depends on +`--no-sign-request` with the AWS CLI. +LocalStack's routing mechanism depends on the signature of each request to identify the correct service for the request. Thus, adding the flag `--no-sign-requests` provokes your request to reach the -wrong service. One possible way to address this is to use the `awslocal` CLI +wrong service. +One possible way to address this is to use the `awslocal` CLI instead of AWS CLI. ## AWS CLI v2 diff --git a/content/en/user-guide/integrations/aws-sam/index.md b/content/en/user-guide/integrations/aws-sam/index.md index d88b4552a7..736e0f6905 100644 --- a/content/en/user-guide/integrations/aws-sam/index.md +++ b/content/en/user-guide/integrations/aws-sam/index.md @@ -7,13 +7,14 @@ description: > ## Introduction -The AWS Serverless Application Model (SAM) is an open-source framework for developing serverless applications. It uses a simplified syntax to define functions, APIs, databases, and event source mappings. +The AWS Serverless Application Model (SAM) is an open-source framework for developing serverless applications. +It uses a simplified syntax to define functions, APIs, databases, and event source mappings. When you deploy, SAM converts its syntax into AWS CloudFormation syntax, helping you create serverless applications more quickly. -LocalStack can work with SAM using the AWS SAM CLI for LocalStack. This CLI comes in the form of a `samlocal` wrapper script, which lets you deploy SAM applications on LocalStack. +LocalStack can work with SAM using the AWS SAM CLI for LocalStack. +This CLI comes in the form of a `samlocal` wrapper script, which lets you deploy SAM applications on LocalStack. This guide explains how to set up local AWS resources using the `samlocal` wrapper script. - ## `samlocal` wrapper script `samlocal` is a wrapper for the `sam` command line interface, facilitating the use of SAM framework with LocalStack. @@ -35,7 +36,8 @@ You can initialize a new SAM project using the following command: $ samlocal init {{< / command >}} -Select `1` to create a new SAM application using an AWS Quick Start template. The SAM CLI will ask you for the project name and the runtime for the Lambda function. +Select `1` to create a new SAM application using an AWS Quick Start template. +The SAM CLI will ask you for the project name and the runtime for the Lambda function. For this example, select `1` for the Hello World example. Choose the Python runtime and `zip` for the packaging type. @@ -66,7 +68,6 @@ The `samlocal` wrapper will package and deploy the application to LocalStack. To debug your Lambda functions in VS Code while using the SAM CLI's `sam local` command alongside other services provided by LocalStack, set up a launch configuration in the `.vscode/launch.json` file. Insert the following settings into the file: - ```json { "type": "aws-sam", @@ -96,4 +97,5 @@ Insert the following settings into the file: } ``` -The `dockerNetwork` property is essential as it allows the LocalStack container to use the `sam invoke` commands within the same network as the LocalStack container itself. 
Adjust the Lambda function handler and environment variables as needed. +The `dockerNetwork` property is essential as it allows the LocalStack container to use the `sam invoke` commands within the same network as the LocalStack container itself. +Adjust the Lambda function handler and environment variables as needed. diff --git a/content/en/user-guide/integrations/chalice/index.md b/content/en/user-guide/integrations/chalice/index.md index 3f2538e2b3..8107012f9c 100644 --- a/content/en/user-guide/integrations/chalice/index.md +++ b/content/en/user-guide/integrations/chalice/index.md @@ -5,9 +5,12 @@ description: > Use AWS Chalice with LocalStack --- -[AWS Chalice](https://aws.github.io/chalice/) is a serverless micro framework used to develop and deploy your serverless applications on AWS resources. Chalice provides integrated functionality with most of the AWS Toolings like S3 Storage, Simple Queue Service, API Gateway and more. It offers a handy CLI interface that allows you to easily create, develop & deploy your serverless applications. +[AWS Chalice](https://aws.github.io/chalice/) is a serverless micro framework used to develop and deploy your serverless applications on AWS resources. +Chalice provides integrated functionality with most of the AWS Toolings like S3 Storage, Simple Queue Service, API Gateway and more. +It offers a handy CLI interface that allows you to easily create, develop & deploy your serverless applications. -LocalStack offers an [AWS Chalice client](https://github.com/localstack/chalice-local) that allows you to interact with your Chalice applications locally. Using LocalStack, you can kick-start your development process, create a new Chalice application, and test it application locally. +LocalStack offers an [AWS Chalice client](https://github.com/localstack/chalice-local) that allows you to interact with your Chalice applications locally. +Using LocalStack, you can kick-start your development process, create a new Chalice application, and test it application locally. ## Creating a new Chalice project @@ -29,7 +32,8 @@ You can now create a new Chalice project by running: $ chalice-local new-project {{< / command >}} -You will be prompted with an interactive menu where you can choose the name of your project and the project type. In this example, we are using `localstack-test` as the project name and `REST API` as the project type: +You will be prompted with an interactive menu where you can choose the name of your project and the project type. +In this example, we are using `localstack-test` as the project name and `REST API` as the project type: ```sh ___ _ _ _ _ ___ ___ ___ @@ -71,7 +75,10 @@ tree 2 directories, 6 files ``` -The `app.py` is our main API file. It has only one Route that would assign the URL of the application to the function. The decorators here primarily "wrap" functions here which makes it easy to write Code Logic by breaking them down into separate routes. For now, our Application is serving only a JSON Message which is `{'hello': 'world'}`. +The `app.py` is our main API file. +It has only one Route that would assign the URL of the application to the function. +The decorators here primarily "wrap" functions here which makes it easy to write Code Logic by breaking them down into separate routes. +For now, our Application is serving only a JSON Message which is `{'hello': 'world'}`. 
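Before deploying anything, you can exercise that route on Chalice's purely local development server; a small sketch, assuming the `chalice-local` wrapper forwards the standard `local` subcommand unchanged:

```bash
# Serve the app locally (Chalice's dev server defaults to port 8000) ...
cd localstack-test
chalice-local local --port 8000 &

# ... then hit the index route; it should answer with {"hello": "world"}.
sleep 2
curl -s http://localhost:8000/
```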
## Testing the Chalice API @@ -101,8 +108,8 @@ Creating IAM role: localstack-test-dev Creating lambda function: localstack-test-dev Creating Rest API Resources deployed: - - Lambda ARN: arn:aws:lambda:us-east-1:000000000000:function:localstack-test-dev - - Rest API URL: https://y5iuni004m.execute-api.us-east-1.amazonaws.com/api/ +- Lambda ARN: arn:aws:lambda:us-east-1:000000000000:function:localstack-test-dev +- Rest API URL: https://y5iuni004m.execute-api.us-east-1.amazonaws.com/api/ {{< / command >}} We now have our Chalice Application deployed on a Lambda Amazon Resource Name (ARN) along with a REST API URL. diff --git a/content/en/user-guide/integrations/cloud-custodian/index.md b/content/en/user-guide/integrations/cloud-custodian/index.md index e964793c18..38430bca20 100644 --- a/content/en/user-guide/integrations/cloud-custodian/index.md +++ b/content/en/user-guide/integrations/cloud-custodian/index.md @@ -7,15 +7,18 @@ description: > ## Introduction -Cloud Custodian is an open-source rules engine and cloud management tool designed to help organizations maintain security and compliance across their cloud environments. Cloud Custodian's YAML DSL allows definition of rules to filter and tag resources, and then apply actions to those resources. +Cloud Custodian is an open-source rules engine and cloud management tool designed to help organizations maintain security and compliance across their cloud environments. +Cloud Custodian's YAML DSL allows definition of rules to filter and tag resources, and then apply actions to those resources. -Cloud Custodian can be used to manage local AWS resources in LocalStack, resembling the live AWS environment, allowing you to test and validate your security policies locally. You can use Cloud Custodian with LocalStack by just specifying the Cloud Custodian package to use the LocalStack profile configured with your AWS CLI. +Cloud Custodian can be used to manage local AWS resources in LocalStack, resembling the live AWS environment, allowing you to test and validate your security policies locally. +You can use Cloud Custodian with LocalStack by just specifying the Cloud Custodian package to use the LocalStack profile configured with your AWS CLI. ## Getting started This guide is designed for users who are new to Cloud Custodian and assumes basic knowledge of the AWS CLI and our [`awslocal`](https://github.com/localstack/awscli-local) wrapper script. -Start your LocalStack container using your preferred method. We will demonstrate how you can spin up an EC2 instance and tag it with the key `Custodian`, and then use Cloud Custodian to stop the instance. +Start your LocalStack container using your preferred method. +We will demonstrate how you can spin up an EC2 instance and tag it with the key `Custodian`, and then use Cloud Custodian to stop the instance. ### Install Cloud Custodian @@ -29,7 +32,9 @@ After installing Cloud Custodian, you can configure a [custom LocalStack profile ### Create an EC2 instance -You can create an EC2 instance using the `awslocal` wrapper script. You can use the [`RunInstances`](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RunInstances.html) API to create an EC2 instance. The following example creates an EC2 instance with the tag `Custodian` (any value): +You can create an EC2 instance using the `awslocal` wrapper script. +You can use the [`RunInstances`](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RunInstances.html) API to create an EC2 instance. 
+The following example creates an EC2 instance with the tag `Custodian` (any value):
 {{< command >}}
 $ awslocal ec2 run-instances \
@@ -51,7 +56,8 @@ You can navigate to the LocalStack logs to verify that the EC2 instance was crea
 ### Create a Cloud Custodian policy
-You can now create a Cloud Custodian policy to stop the EC2 instances with the tag `Custodian`. Create a file named `custodian.yml` and add the following content:
+You can now create a Cloud Custodian policy to stop the EC2 instances with the tag `Custodian`.
+Create a file named `custodian.yml` and add the following content:
 ```yaml
 policies:
@@ -67,8 +73,10 @@ The above policy specifies the following:
 - `name`: The name of the policy.
 - `resource`: The AWS resource to apply the policy to.
-- `filters`: The filters to apply to the resource. In this case, the filter is `tag:Custodian` and the value is `present`.
-- `actions`: The actions to apply to the resource. In this case, the action is `stop`.
+- `filters`: The filters to apply to the resource.
+  In this case, the filter is `tag:Custodian` and the value is `present`.
+- `actions`: The actions to apply to the resource.
+  In this case, the action is `stop`.
 ### Run the Cloud Custodian policy
@@ -76,7 +84,8 @@ You can now run the Cloud Custodian policy using the following command:
 {{< command >}}
 $ custodian run \
-  --output-dir=. custodian.yml \
+  --output-dir=. \
+  custodian.yml \
   --profile localstack
 {{< /command >}}
@@ -101,9 +110,11 @@ You can then navigate to the LocalStack logs to verify that the EC2 instance was
 ### Create CloudWatch metrics for Cloud Custodian
-Cloud Custodian creates CloudWatch metrics for each policy. These metrics show how many resources met the filters, the time taken to gather and filter those resources, and the time required to perform actions.
+Cloud Custodian creates CloudWatch metrics for each policy.
+These metrics show how many resources met the filters, the time taken to gather and filter those resources, and the time required to perform actions.
-Certain filters and actions might produce their own metrics. To activate metric output, you must set the `metrics` flag running Cloud Custodian.
+Certain filters and actions might produce their own metrics.
+To activate metric output, you must set the `metrics` flag when running Cloud Custodian.
 {{< command >}}
 $ custodian run -s . \
diff --git a/content/en/user-guide/integrations/copilot/index.md b/content/en/user-guide/integrations/copilot/index.md
index ae011c6640..25cdcc2180 100644
--- a/content/en/user-guide/integrations/copilot/index.md
+++ b/content/en/user-guide/integrations/copilot/index.md
@@ -21,20 +21,23 @@ Using `copilotlocal` instead of `copilot` in your command line therefore ensures
 {{< tab header="Linux AMD64" lang="bash">}}
 curl -Lo copilotlocal https://github.com/localstack/copilot-cli/raw/localstack-builds/build/linux-amd64/copilotlocal && chmod +x copilotlocal
 # if you want to have copilotlocal in your $PATH, move the executable e.g. to /usr/local/bin/
+
 sudo mv copilotlocal /usr/local/bin/
 {{< /tab >}}
 {{< tab header="Linux ARM64" lang="bash">}}
 curl -Lo copilotlocal https://github.com/localstack/copilot-cli/raw/localstack-builds/build/linux-arm64/copilotlocal && chmod +x copilotlocal
 # if you want to have copilotlocal in your $PATH, move the executable e.g. 
to /usr/local/bin/ + sudo mv copilotlocal /usr/local/bin/ {{< /tab >}} {{< tab header="Mac OS" lang="bash">}} curl -Lo copilotlocal https://github.com/localstack/copilot-cli/raw/localstack-builds/build/macos-darwin/copilotlocal && chmod +x copilotlocal # if you want to have copilotlocal in your $PATH, move the executable e.g. to /usr/local/bin/ + sudo mv copilotlocal /usr/local/bin/ {{< /tab >}} {{< tab header="Windows Powershell" lang="powershell">}} -Invoke-WebRequest -Uri https://github.com/localstack/copilot-cli/raw/localstack-builds/build/windows/copilotlocal.exe -OutFile copilotlocal.exe +Invoke-WebRequest -Uri https://github.com/localstack/copilot-cli/raw/localstack-builds/build/windows/copilotlocal.exe -OutFile copilotlocal.exe {{< /tab >}} {{< /tabpane >}} @@ -56,4 +59,4 @@ copilotlocal init ``` For more information about how to use the AWS Copilot CLI, checkout the [copilot documentation](https://aws.github.io/copilot-cli/docs/overview/). -Just remember to replace `copilot` with `copilotlocal`. \ No newline at end of file +Just remember to replace `copilot` with `copilotlocal`. diff --git a/content/en/user-guide/integrations/crossplane/index.md b/content/en/user-guide/integrations/crossplane/index.md index bdeab711d7..f5b7b2337d 100644 --- a/content/en/user-guide/integrations/crossplane/index.md +++ b/content/en/user-guide/integrations/crossplane/index.md @@ -20,6 +20,7 @@ Crossplane AWS provider supports a comprehensive set of some [900+ resource type In the following, we provide a step-by-step guide for installing Crossplane in a local test environment, and creating AWS resources (S3 bucket, SQS queue) in LocalStack via Crossplane. ### Prerequisites + * LocalStack running in local Docker * A local Kubernetes cluster: * We can use the [embedded Kubernetes cluster](https://docs.docker.com/desktop/kubernetes) that ships with modern versions of Docker Desktop (can be easily enabled in the Docker settings) @@ -35,7 +36,8 @@ $ helm repo update $ helm install crossplane crossplane-stable/crossplane --namespace crossplane-system --create-namespace {{}} -The installation may take a few minutes. In parallel, we can install the `crossplane` command-line extensions for `kubectl`: +The installation may take a few minutes. +In parallel, we can install the `crossplane` command-line extensions for `kubectl`: {{}} $ curl -sL https://raw.githubusercontent.com/crossplane/crossplane/master/install.sh | bash ... @@ -141,7 +143,8 @@ The endpoint `http://host.docker.internal:4566` in the listing above assumes tha {{< /callout >}} {{< callout >}} -The Crossplane AWS provider currently requires us to specify the list of `services` for which the local `endpoint` is used as the target URL. Please make sure to extend this list accordingly if you're working with additional LocalStack services. +The Crossplane AWS provider currently requires us to specify the list of `services` for which the local `endpoint` is used as the target URL. +Please make sure to extend this list accordingly if you're working with additional LocalStack services. {{< /callout >}} ### Deploying sample resources in LocalStack @@ -170,7 +173,8 @@ crossplane-test-bucket True True crossplane-test-bucket 30s {{}} -... and the bucket it should also be visible when querying the local S3 buckets in LocalStack via [`awslocal`](https://github.com/localstack/awscli-local): +... 
+and the bucket it should also be visible when querying the local S3 buckets in LocalStack via [`awslocal`](https://github.com/localstack/awscli-local): {{}} $ awslocal s3 ls 2023-09-03 15:18:47 crossplane-test-bucket @@ -199,7 +203,8 @@ crossplane-test-queue True True http://host.docker.internal:4566/000000 {{}} -... and the queue should be visible when listing the SQS queues in LocalStack: +... +and the queue should be visible when listing the SQS queues in LocalStack: {{}} $ awslocal sqs list-queues { @@ -214,7 +219,8 @@ $ awslocal sqs list-queues The Crossplane AWS provider is a great way to manage AWS resources, and by leveraging the `endpoint` configuration of the provider, we can seamlessly run resource deployments against LocalStack. -In this tutorial, we have provided an end-to-end walkthrough of how to provision two simple resources - an S3 bucket, and an SQS queue. Crossplane supports a vast range of additional AWS resource types, as well as advanced operations like updating, deleting, or composing resources. +In this tutorial, we have provided an end-to-end walkthrough of how to provision two simple resources - an S3 bucket, and an SQS queue. +Crossplane supports a vast range of additional AWS resource types, as well as advanced operations like updating, deleting, or composing resources. You can refer to the additional reading material to learn and explore more advanced features. ## Further Reading diff --git a/content/en/user-guide/integrations/eksctl/index.md b/content/en/user-guide/integrations/eksctl/index.md index 168715edb6..70febdf56e 100644 --- a/content/en/user-guide/integrations/eksctl/index.md +++ b/content/en/user-guide/integrations/eksctl/index.md @@ -6,15 +6,20 @@ description: Running `eksctl` on LocalStack to create EKS clusters ## Introduction -[eksctl](https://eksctl.io/) is a CLI tool for creating and managing EKS clusters, Amazon's managed Kubernetes service. LocalStack supports running `eksctl` on LocalStack to create EKS clusters locally. LocalStack's EKS spin up embedded Kubernetes clusters using [K3s](https://github.com/k3s-io/k3s) to allow you to use the EKS APIs in your local environment. +[eksctl](https://eksctl.io/) is a CLI tool for creating and managing EKS clusters, Amazon's managed Kubernetes service. +LocalStack supports running `eksctl` on LocalStack to create EKS clusters locally. +LocalStack's EKS spin up embedded Kubernetes clusters using [K3s](https://github.com/k3s-io/k3s) to allow you to use the EKS APIs in your local environment. {{< callout >}} -The support for `eksctl` is currently experimental and may not work in all cases. We are working on improving the support for `eksctl` in LocalStack. +The support for `eksctl` is currently experimental and may not work in all cases. +We are working on improving the support for `eksctl` in LocalStack. {{< /callout >}} ## Getting started -This guide is designed for users new to `eksctl` and running EKS clusters with LocalStack. Start LocalStack using your preferred method. We will demonstrate how you can create a local EKS cluster using `eksctl` and fetch the nodes in the cluster. +This guide is designed for users new to `eksctl` and running EKS clusters with LocalStack. +Start LocalStack using your preferred method. +We will demonstrate how you can create a local EKS cluster using `eksctl` and fetch the nodes in the cluster. 
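At a high level the whole flow comes down to a handful of commands; the sketch below uses a hypothetical cluster config file and the `localstack` AWS profile described in the AWS CLI integration guide, with the details covered in the sections that follow:

```bash
# Describe the cluster in a config file (name and region are arbitrary examples).
cat > cluster.yaml <<'EOF'
apiVersion: eksctl.io/v1alpha5
kind: ClusterConfig
metadata:
  name: local-cluster
  region: us-east-1
EOF

# Create the cluster against LocalStack, then list its nodes once it is up.
eksctl create cluster -f cluster.yaml --profile localstack
kubectl get nodes
```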
### Pre-requisites @@ -25,7 +30,8 @@ This guide is designed for users new to `eksctl` and running EKS clusters with L ### Create a cluster -To create a cluster, you can use the `eksctl create cluster` command. You can use the `--profile` flag to [specify the LocalStack profile](https://docs.localstack.cloud/user-guide/integrations/aws-cli/#configuring-a-custom-profile) to use for the cluster. +To create a cluster, you can use the `eksctl create cluster` command. +You can use the `--profile` flag to [specify the LocalStack profile](https://docs.localstack.cloud/user-guide/integrations/aws-cli/#configuring-a-custom-profile) to use for the cluster. Run the following command to create a cluster: diff --git a/content/en/user-guide/integrations/former2/index.md b/content/en/user-guide/integrations/former2/index.md index 2f10ed7123..ca71b9253f 100644 --- a/content/en/user-guide/integrations/former2/index.md +++ b/content/en/user-guide/integrations/former2/index.md @@ -7,25 +7,32 @@ description: > ## Introduction -[Former2](https://github.com/iann0036/former2) allows you to generate Infrastructure-as-Code (IaC) outputs using your pre-existing AWS resources. It uses the AWS JavaScript SDK to make relevant API calls, scans your infrastructure, and provides you with a resource list. You can then select the resources for which you want to generate IaC outputs. Former2 currently supports the following outputs: +[Former2](https://github.com/iann0036/former2) allows you to generate Infrastructure-as-Code (IaC) outputs using your pre-existing AWS resources. +It uses the AWS JavaScript SDK to make relevant API calls, scans your infrastructure, and provides you with a resource list. +You can then select the resources for which you want to generate IaC outputs. +Former2 currently supports the following outputs: -- [CloudFormation](https://aws.amazon.com/cloudformation/) -- [Terraform](https://www.terraform.io/) -- [Troposphere](https://github.com/cloudtools/troposphere) -- [CDK V1 (Cfn Primitives) & CDK V2 (Cfn Primitives)](https://docs.aws.amazon.com/cdk/latest/guide/getting_started.html) (TypeScript, Python, Java, C#) -- [CDK for Terraform](https://developer.hashicorp.com/terraform/cdktf) (TypeScript) -- [Pulumi](https://www.pulumi.com/docs/get-started/aws/) (TypeScript) -- [Diagrams](https://diagrams.mingrammer.com/) +- [CloudFormation](https://aws.amazon.com/cloudformation/) +- [Terraform](https://www.terraform.io/) +- [Troposphere](https://github.com/cloudtools/troposphere) +- [CDK V1 (Cfn Primitives) & CDK V2 (Cfn Primitives)](https://docs.aws.amazon.com/cdk/latest/guide/getting_started.html) (TypeScript, Python, Java, C#) +- [CDK for Terraform](https://developer.hashicorp.com/terraform/cdktf) (TypeScript) +- [Pulumi](https://www.pulumi.com/docs/get-started/aws/) (TypeScript) +- [Diagrams](https://diagrams.mingrammer.com/) -With Former2, you can scan the resources within your LocalStack instance and produce Infrastructure-as-Code (IaC) outputs. These outputs enable you to redeploy your resources while spinning a new LocalStack instance or deploy them to a live Amazon Web Services (AWS) environment. +With Former2, you can scan the resources within your LocalStack instance and produce Infrastructure-as-Code (IaC) outputs. +These outputs enable you to redeploy your resources while spinning a new LocalStack instance or deploy them to a live Amazon Web Services (AWS) environment. 
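Because Former2 talks to LocalStack from a browser helper extension, the container must allow that extension's origin; a minimal startup sketch using the Firefox origin listed in the next section (swap in the value for your browser):

```bash
# Allow the Former2 helper extension's origin before creating any resources.
EXTRA_CORS_ALLOWED_ORIGINS="moz-extension://853c673f-1bd8-4226-a5ff-f1473f7b3d90" \
  localstack start -d
```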
-## Getting started +## Getting started -This guide is designed for users new to Former2 and assumes basic knowledge of the AWS CLI and our [`awslocal`](https://github.com/localstack/awscli-local) wrapper script. We will demonstrate how you can create local AWS resources using LocalStack, and import a CloudFormation output via Former2. +This guide is designed for users new to Former2 and assumes basic knowledge of the AWS CLI and our [`awslocal`](https://github.com/localstack/awscli-local) wrapper script. +We will demonstrate how you can create local AWS resources using LocalStack, and import a CloudFormation output via Former2. ### Install Former2 -You can use the publicly hosted [Former2 Web Application](https://former2.com/) or a [self-hosted version](https://github.com/iann0036/former2/blob/master/HOSTING.md) to generate IaC outputs. For this guide, we will use the publicly hosted version. You would also need a Former2 Helper extension/add-on for your preferred web browser: +You can use the publicly hosted [Former2 Web Application](https://former2.com/) or a [self-hosted version](https://github.com/iann0036/former2/blob/master/HOSTING.md) to generate IaC outputs. +For this guide, we will use the publicly hosted version. +You would also need a Former2 Helper extension/add-on for your preferred web browser: - [Google Chrome](https://chrome.google.com/webstore/detail/former2-helper/fhejmeojlbhfhjndnkkleooeejklmigi) - [Mozilla Firefox](https://addons.mozilla.org/en-US/firefox/addon/former2-helper/) @@ -41,7 +48,8 @@ Start your LocalStack container using your preferred method with the following e - **Mozilla Firefox**: `EXTRA_CORS_ALLOWED_ORIGINS=moz-extension://853c673f-1bd8-4226-a5ff-f1473f7b3d90` - **Microsoft Edge**: `EXTRA_CORS_ALLOWED_ORIGINS=extension://okkjnfohglnomdbpimkcdkiojbeiedof` -You can create local AWS resources using the AWS CLI and the `awslocal` wrapper script. For example, you can create a new S3 bucket, SQS queue, and DynamoDB table using the following commands: +You can create local AWS resources using the AWS CLI and the `awslocal` wrapper script. +For example, you can create a new S3 bucket, SQS queue, and DynamoDB table using the following commands: {{< command >}} $ awslocal s3 mb s3://my-bucket @@ -88,18 +96,23 @@ $ awslocal dynamodb list-tables ### Configure Former2 -Navigate to the Former2 setup dashboard. Open the [**Credentials**](https://former2.com/#section-setup-credentials) tab and enter your IAM credentials. For LocalStack, you can just configure the `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` environment variables as `test` and `test`, respectively. +Navigate to the Former2 setup dashboard. +Open the [**Credentials**](https://former2.com/#section-setup-credentials) tab and enter your IAM credentials. +For LocalStack, you can just configure the `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` environment variables as `test` and `test`, respectively. Enter test credentials on Former2 Dashboard
-Click on [**Continue to Parameters**](https://former2.com/#section-setup-parameters) and include your own CloudFormation stack parameters by adding them below. Click on [**Continue to Settings**](https://former2.com/#section-setup-settings) and navigate to **Custom Endpoints**. Toggle the **Use LocalStack Endpoint** switch to enable the LocalStack endpoint URL (`http://localhost:4566`). Click on [**Go to Dashboard**](https://former2.com/#section-dashboard) to complete the setup. +Click on [**Continue to Parameters**](https://former2.com/#section-setup-parameters) and include your own CloudFormation stack parameters by adding them below. +Click on [**Continue to Settings**](https://former2.com/#section-setup-settings) and navigate to **Custom Endpoints**. +Toggle the **Use LocalStack Endpoint** switch to enable the LocalStack endpoint URL (`http://localhost:4566`). +Click on [**Go to Dashboard**](https://former2.com/#section-dashboard) to complete the setup. LocalStack endpoint toggle on Former2 Dashboard
- -You can now click on **Scan Account** button on the top-right corner of the dashboard to scan your LocalStack instance for resources. Once the scan is complete, you can select the resources you want to generate IaC outputs for. +You can now click on **Scan Account** button on the top-right corner of the dashboard to scan your LocalStack instance for resources. +Once the scan is complete, you can select the resources you want to generate IaC outputs for. ### Generate IaC output @@ -108,13 +121,10 @@ Navigate to [S3](https://former2.com/#section-storage-s3), [DynamoDB](https://fo S3 Console on Former2 Dashboard
- -You can select the resources you want to generate IaC outputs for and click on **Add Selected**. Finally, you can click on **Generate** on the top-left corner of the dashboard to generate the IaC outputs. - - +You can select the resources you want to generate IaC outputs for and click on **Add Selected**. +Finally, you can click on **Generate** on the top-left corner of the dashboard to generate the IaC outputs. CloudFormation Output on Former2 Dashboard
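To close the loop, the generated CloudFormation template can be fed straight back into a fresh LocalStack instance; a sketch, with `former2-output.yaml` standing in for whatever file name you saved the output under:

```bash
# Deploy the template exported from Former2 into LocalStack.
awslocal cloudformation deploy \
  --stack-name former2-import \
  --template-file former2-output.yaml

# Confirm the recreated resources, for example the S3 bucket from this guide.
awslocal s3 ls
```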
- You can also choose to generate the IaC outputs in a different format by clicking on the various options available on the left-hand side of the dashboard. diff --git a/content/en/user-guide/integrations/gitpod/index.md b/content/en/user-guide/integrations/gitpod/index.md index 202ceb69d7..2aa8d4f509 100644 --- a/content/en/user-guide/integrations/gitpod/index.md +++ b/content/en/user-guide/integrations/gitpod/index.md @@ -9,13 +9,20 @@ description: > ## Overview -Gitpod is an open-source platform that enables remote software development via ephemeral workspaces. It provides an automated setup with cloud-based, remote developer environments connected with a developer’s [editing experience of choice](https://www.gitpod.io/docs/references/ides-and-editors). Gitpod allow users to codify their developer environment as code. With projects codified, you can spin up a new workspace, start coding and throw away the workspace when they are done! +Gitpod is an open-source platform that enables remote software development via ephemeral workspaces. +It provides an automated setup with cloud-based, remote developer environments connected with a developer’s [editing experience of choice](https://www.gitpod.io/docs/references/ides-and-editors). +Gitpod allow users to codify their developer environment as code. +With projects codified, you can spin up a new workspace, start coding and throw away the workspace when they are done! ## LocalStack on GitPod -LocalStack allows you to set up a development environment with a cloud sandbox that can be used to test and develop cloud applications. Using GitPod's environment you can run a LocalStack container inside the runtime that allows to instantiate your application on a code editor of your choice. You can then conveniently deploy your cloud application assets into LocalStack's cloud sandbox, to then preview the results. +LocalStack allows you to set up a development environment with a cloud sandbox that can be used to test and develop cloud applications. +Using GitPod's environment you can run a LocalStack container inside the runtime that allows to instantiate your application on a code editor of your choice. +You can then conveniently deploy your cloud application assets into LocalStack's cloud sandbox, to then preview the results. -To configure LocalStack on GitPod, you would need to set up a `.gitpod.yml` on the root of your repository. The file configures your workspace and the environment that you would like to use. You can find more information on the [GitPod documentation](https://www.gitpod.io/docs/config-gitpod-file/). +To configure LocalStack on GitPod, you would need to set up a `.gitpod.yml` on the root of your repository. +The file configures your workspace and the environment that you would like to use. +You can find more information on the [GitPod documentation](https://www.gitpod.io/docs/config-gitpod-file/). ```yaml tasks: @@ -53,7 +60,9 @@ ports: onOpen: ignore ``` -If you are using GitHub, you can also use the [GitPod Prebuilds](https://www.gitpod.io/docs/prebuilds/) feature to automatically build your workspace. This will allow you to start your workspace faster and with all the dependencies already installed. Add the following to your `.gitpod.yml` file: +If you are using GitHub, you can also use the [GitPod Prebuilds](https://www.gitpod.io/docs/prebuilds/) feature to automatically build your workspace. +This will allow you to start your workspace faster and with all the dependencies already installed. 
+Add the following to your `.gitpod.yml` file: ```yaml github: @@ -74,6 +83,8 @@ github: addBadge: true ``` -After adding the configuration, you can start your development & testing by creating [your workspace in GitPod](https://www.gitpod.io/docs/getting-started/#start-your-first-workspace). Upon creation, you will be able to see the LocalStack container running in the background (you can use `localstack status` to check the status of the container). +After adding the configuration, you can start your development & testing by creating [your workspace in GitPod](https://www.gitpod.io/docs/getting-started/#start-your-first-workspace). +Upon creation, you will be able to see the LocalStack container running in the background (you can use `localstack status` to check the status of the container). -For a simple demonstration, check out the [LocalStack GitPod demo](https://github.com/Gitpod-Samples/localstack-gitpod-demo) repository. Check out our [in-depth walkthrough over the demo](https://localstack.cloud/blog/2022-09-26-localstack-x-gitpod-run-cloud-applications-with-localstack-and-gitpod/) on our blog! +For a simple demonstration, check out the [LocalStack GitPod demo](https://github.com/Gitpod-Samples/localstack-gitpod-demo) repository. +Check out our [in-depth walkthrough over the demo](https://localstack.cloud/blog/2022-09-26-localstack-x-gitpod-run-cloud-applications-with-localstack-and-gitpod/) on our blog! diff --git a/content/en/user-guide/integrations/kubernetes/index.md b/content/en/user-guide/integrations/kubernetes/index.md index 17295d3fdd..3e7374a1fa 100644 --- a/content/en/user-guide/integrations/kubernetes/index.md +++ b/content/en/user-guide/integrations/kubernetes/index.md @@ -6,10 +6,13 @@ description: Running LocalStack on Kubernetes ## Introduction -[Kubernetes](https://kubernetes.io) is an open-source container orchestration platform that simplifies the deployment, scaling, and management of containerized applications. LocalStack can be deployed on Kubernetes using the [LocalStack Helm chart](http://helm.localstack.cloud). +[Kubernetes](https://kubernetes.io) is an open-source container orchestration platform that simplifies the deployment, scaling, and management of containerized applications. +LocalStack can be deployed on Kubernetes using the [LocalStack Helm chart](http://helm.localstack.cloud). {{< callout "warning" >}} -Creating shared/hosted LocalStack instances may have some licensing implications. For example, a valid license might be necessary for each user who interacts with the instance. If you have any questions or uncertainties regarding the licensing implications, we encourage you to [contact us](https://localstack.cloud/contact) for further details. +Creating shared/hosted LocalStack instances may have some licensing implications. +For example, a valid license might be necessary for each user who interacts with the instance. +If you have any questions or uncertainties regarding the licensing implications, we encourage you to [contact us](https://localstack.cloud/contact) for further details. {{< /callout >}} ## Getting started @@ -27,7 +30,8 @@ For setting up Kubernetes refer to the Kubernetes [getting started guide](https ### Install Helm -Helm is a tool for managing Kubernetes charts. Charts are packages of pre-configured Kubernetes resources. +Helm is a tool for managing Kubernetes charts. +Charts are packages of pre-configured Kubernetes resources. 
To install Helm, refer to the [Helm install guide](https://github.com/helm/helm#install) and ensure that the `helm` binary is in the `PATH` of your shell. @@ -47,9 +51,9 @@ Please refer to the [Quick Start guide](https://helm.sh/docs/intro/quickstart/) Some useful Helm client commands are: -- View available charts: `helm search repo` -- Install a chart: `helm install localstack/` -- Upgrade your application: `helm upgrade` +- View available charts: `helm search repo` +- Install a chart: `helm install localstack/` +- Upgrade your application: `helm upgrade` ## LocalStack on Kubernetes (`l8k`) @@ -59,13 +63,13 @@ The [`localstack-on-k8s`](https://github.com/localstack/localstack-on-k8s) sampl This sample requires the following tools installed on your machine: -* Python 3.7+ -* [`awslocal`](https://github.com/localstack/awscli-local) -* [Docker](https://www.docker.com) -* [Git](https://git-scm.com) -* [Helm](https://helm.sh) -* [`kubectl`](https://kubernetes.io/docs/tasks/tools/#kubectl) -* [Serverless](https://www.npmjs.com/package/serverless) +- Python 3.7+ +- [`awslocal`](https://github.com/localstack/awscli-local) +- [Docker](https://www.docker.com) +- [Git](https://git-scm.com) +- [Helm](https://helm.sh) +- [`kubectl`](https://kubernetes.io/docs/tasks/tools/#kubectl) +- [Serverless](https://www.npmjs.com/package/serverless) ### Clone the sample repository @@ -124,7 +128,8 @@ NAME DESIRED CURRENT READY AGE replicaset.apps/localstack-6fd5b98f59 1 1 1 5m {{< /command >}} -The LocalStack instance should be available via the local ingress port `8081`. We can verify that the resources were successfully created by running a few `awslocal` commands against the local endpoint: +The LocalStack instance should be available via the local ingress port `8081`. +We can verify that the resources were successfully created by running a few `awslocal` commands against the local endpoint: {{< command >}} $ awslocal sqs --endpoint-url=http://localhost:8081 list-queues @@ -142,23 +147,31 @@ $ awslocal apigateway --endpoint-url=http://localhost:8081 get-rest-apis ... {{< /command >}} -We can then use a browser to open the [Web UI](http://localhost:8081/archive-bucket/index.html), which should have been deployed to an S3 bucket inside LocalStack. The Web UI can be used to interact with the sample application, send new requests to the backend, inspect the state of existing requests, etc. +We can then use a browser to open the [Web UI](http://localhost:8081/archive-bucket/index.html), which should have been deployed to an S3 bucket inside LocalStack. +The Web UI can be used to interact with the sample application, send new requests to the backend, inspect the state of existing requests, etc. ## Lambda on Kubernetes -LocalStack on Kubernetes can be used in conjunction with the [LocalStack Community image](https://hub.docker.com/r/localstack/localstack). However, specific features such as execution of Lambda functions as Kubernetes pods are only available in the [LocalStack Pro image](https://hub.docker.com/r/localstack/localstack-pro). To configure LocalStack Lambdas to use Kubernetes Pods, you need to configure values in the [LocalStack Helm Chart](https://github.com/localstack/helm-charts/blob/ce47b1590605901650ab788556bc871efbd78b8d/charts/localstack/values.yaml#L178-L208). +LocalStack on Kubernetes can be used in conjunction with the [LocalStack Community image](https://hub.docker.com/r/localstack/localstack). 
+However, specific features such as execution of Lambda functions as Kubernetes pods are only available in the [LocalStack Pro image](https://hub.docker.com/r/localstack/localstack-pro). +To configure LocalStack Lambdas to use Kubernetes Pods, you need to configure values in the [LocalStack Helm Chart](https://github.com/localstack/helm-charts/blob/ce47b1590605901650ab788556bc871efbd78b8d/charts/localstack/values.yaml#L178-L208). ### Scaling Lambda Execution -The Kubernetes Lambda Executor in LocalStack handles Lambda execution scaling by spawning new environments (running in pods) when no existing environment is available due to concurrent invocations. An environment shuts down if it remains inactive for 10 minutes, a duration customizable through the `LAMBDA_KEEPALIVE_MS` variable. All environments terminate when LocalStack stops running. +The Kubernetes Lambda Executor in LocalStack handles Lambda execution scaling by spawning new environments (running in pods) when no existing environment is available due to concurrent invocations. +An environment shuts down if it remains inactive for 10 minutes, a duration customizable through the `LAMBDA_KEEPALIVE_MS` variable. +All environments terminate when LocalStack stops running. ### Lambda Scheduling Strategy -For multiple Lambda functions, the executor schedules according to Kubernetes cluster defaults without specifying node affinity. Users can assign labels to lambda pods using the `LAMBDA_K8S_LABELS` variable (e.g., `LAMBDA_K8S_LABELS=key=value,key2=value2`). The [Helm Charts](https://github.com/localstack/helm-charts), facilitates such advanced configurations, ensuring flexibility in node affinity decisions. +For multiple Lambda functions, the executor schedules according to Kubernetes cluster defaults without specifying node affinity. +Users can assign labels to lambda pods using the `LAMBDA_K8S_LABELS` variable (e.g., `LAMBDA_K8S_LABELS=key=value,key2=value2`). +The [Helm Charts](https://github.com/localstack/helm-charts), facilitates such advanced configurations, ensuring flexibility in node affinity decisions. ### Lambda Limitations and Configuration -LocalStack enforces timeout configurations similar to AWS, using the `Timeout` function parameter. There are no intrinsic limits on the number of Lambdas, with configurable limits on concurrent executions set at 1000 by default (`LAMBDA_LIMITS_CONCURRENT_EXECUTIONS`). +LocalStack enforces timeout configurations similar to AWS, using the `Timeout` function parameter. +There are no intrinsic limits on the number of Lambdas, with configurable limits on concurrent executions set at 1000 by default (`LAMBDA_LIMITS_CONCURRENT_EXECUTIONS`). ### Custom DNS for Lambda on Kubernetes @@ -170,8 +183,11 @@ Users can customize Lambda runtime behavior by building custom images based on p ### Warm Start and Persistence -Lambda on Kubernetes supports Warm Start and Persistence. Persistence has to be configured for the LocalStack pod. The `/var/lib/localstack` directory has to be persisted over LocalStack runs, in a volume for example. +Lambda on Kubernetes supports Warm Start and Persistence. +Persistence has to be configured for the LocalStack pod. +The `/var/lib/localstack` directory has to be persisted over LocalStack runs, in a volume for example. ### Debugging Lambda on Kubernetes -Debugging is currently not supported. Lambda hot-reloading will not function, as the bind mounting into pods cannot be done at runtime. +Debugging is currently not supported. 
+Lambda hot-reloading will not function, as the bind mounting into pods cannot be done at runtime. diff --git a/content/en/user-guide/integrations/lambdatest-hyperexecute/index.md b/content/en/user-guide/integrations/lambdatest-hyperexecute/index.md index 603851449d..d7bef8be8b 100644 --- a/content/en/user-guide/integrations/lambdatest-hyperexecute/index.md +++ b/content/en/user-guide/integrations/lambdatest-hyperexecute/index.md @@ -4,10 +4,14 @@ linktitle: LambdaTest HyperExecute description: Executing LocalStack tests on LambdaTest's HyperExecute --- -[HyperExecute](https://www.lambdatest.com/hyperexecute) is a test orchestration platform designed to optimize the execution of automated tests in the cloud. It supports a wide range of testing frameworks and integrates seamlessly with CI/CD pipelines, such as GitHub Actions. You can use HyperExecute to run your LocalStack tests on your local machine or in the CI pipeline using a single configuration file. +[HyperExecute](https://www.lambdatest.com/hyperexecute) is a test orchestration platform designed to optimize the execution of automated tests in the cloud. +It supports a wide range of testing frameworks and integrates seamlessly with CI/CD pipelines, such as GitHub Actions. +You can use HyperExecute to run your LocalStack tests on your local machine or in the CI pipeline using a single configuration file. {{< callout >}} -LambdaTest provides specialized runners for LocalStack. The default runners don't provide a Docker socket, which is required for LocalStack to work properly. If you want to use LocalStack with HyperExecute, you need to get in touch with the LambdaTest team to get access to the specialized runners. +LambdaTest provides specialized runners for LocalStack. +The default runners don't provide a Docker socket, which is required for LocalStack to work properly. +If you want to use LocalStack with HyperExecute, you need to get in touch with the LambdaTest team to get access to the specialized runners. {{< /callout >}} ## Getting started @@ -44,7 +48,8 @@ pre: The above minimal configuration file starts LocalStack and creates an S3 bucket, SQS queue, and SNS topic. {{< callout >}} -To use the LocalStack Pro image, configure a LocalStack Auth Token by appending `LOCALSTACK_AUTH_TOKEN=${{ .secrets.LOCALSTACK_AUTH_TOKEN }}` to the `localstack start` command. Subsequently, you need to add your LocalStack Auth Token to your HyperExecute Portal as a secret. +To use the LocalStack Pro image, configure a LocalStack Auth Token by appending `LOCALSTACK_AUTH_TOKEN=${{ .secrets.LOCALSTACK_AUTH_TOKEN }}` to the `localstack start` command. +Subsequently, you need to add your LocalStack Auth Token to your HyperExecute Portal as a secret. {{< /callout >}} ### Enabling test execution on HyperExecute @@ -70,21 +75,26 @@ sourcePayload: accessToken: ${{ .secrets.PAT }} ``` -Before running the tests, add your Personal Access Token (PAT) to your HyperExecute Portal as a secret. In this minimal configuration, you will set up our [`Serverless image resizer`](https://github.com/localstack-samples/sample-serverless-image-resizer-s3-lambda) application and run the tests using `pytest`. The `bin/deploy.sh` script is responsible for deploying the application to LocalStack. HyperExecute will automatically detect the tests and run them in parallel. +Before running the tests, add your Personal Access Token (PAT) to your HyperExecute Portal as a secret. 
+In this minimal configuration, you will set up our [`Serverless image resizer`](https://github.com/localstack-samples/sample-serverless-image-resizer-s3-lambda) application and run the tests using `pytest`. +The `bin/deploy.sh` script is responsible for deploying the application to LocalStack. +HyperExecute will automatically detect the tests and run them in parallel. ### Running the tests locally You can run the tests locally using the following command: {{< command >}} -$ hyperexecute --user '' --key '' --config he.yaml +$ hyperexecute --user '' --key '' --config he.yaml {{< /command >}} -Swap `` and `` with your HyperExecute username and access key. You can find your access key in the HyperExecute Portal. +Swap `` and `` with your HyperExecute username and access key. +You can find your access key in the HyperExecute Portal. ### Running the tests in the CI pipeline -In this example, we will use GitHub Actions to run the tests in the CI pipeline. To do so, you need to add the following content to your GitHub Actions workflow file in `.github/workflows/main.yml`: +In this example, we will use GitHub Actions to run the tests in the CI pipeline. +To do so, you need to add the following content to your GitHub Actions workflow file in `.github/workflows/main.yml`: ```yaml name: Running tests on HyperExecute @@ -111,4 +121,6 @@ jobs: --config he.yaml ``` -Add your username and access key to your GitHub repository secrets. You can find your access key in the HyperExecute Portal. If you are using the LocalStack Pro image, you need to add your LocalStack Auth Token to your GitHub repository secrets. +Add your username and access key to your GitHub repository secrets. +You can find your access key in the HyperExecute Portal. +If you are using the LocalStack Pro image, you need to add your LocalStack Auth Token to your GitHub repository secrets. diff --git a/content/en/user-guide/integrations/openshift/index.md b/content/en/user-guide/integrations/openshift/index.md index 43a2963c67..e562004c5e 100644 --- a/content/en/user-guide/integrations/openshift/index.md +++ b/content/en/user-guide/integrations/openshift/index.md @@ -7,34 +7,47 @@ description: > ## Introduction -OpenShift is a container orchestration platform as a service designed to simplify the deployment, scaling, and management of containerized applications. Built on Kubernetes, OpenShift provides a comprehensive set of tools and features that facilitate the orchestration, automation, and monitoring of containerized workloads. +OpenShift is a container orchestration platform as a service designed to simplify the deployment, scaling, and management of containerized applications. +Built on Kubernetes, OpenShift provides a comprehensive set of tools and features that facilitate the orchestration, automation, and monitoring of containerized workloads. -With OpenShift, you can deploy LocalStack on a managed Kubernetes cluster, as a cloud sandbox that emulates various AWS services & APIs. This guide demonstrates how you can deploy LocalStack on OpenShift using Devfile. You can use the deployed LocalStack container to create AWS resources that you can use for local development and testing purposes. +With OpenShift, you can deploy LocalStack on a managed Kubernetes cluster, as a cloud sandbox that emulates various AWS services & APIs. +This guide demonstrates how you can deploy LocalStack on OpenShift using Devfile. +You can use the deployed LocalStack container to create AWS resources that you can use for local development and testing purposes. 
{{< callout "warning" >}} -Creating shared/hosted LocalStack instances may have some licensing implications. For example, a valid license might be necessary for each user who interacts with the instance. If you have any questions or uncertainties regarding the licensing implications, we encourage you to [contact us](https://localstack.cloud/contact) for further details. +Creating shared/hosted LocalStack instances may have some licensing implications. +For example, a valid license might be necessary for each user who interacts with the instance. +If you have any questions or uncertainties regarding the licensing implications, we encourage you to [contact us](https://localstack.cloud/contact) for further details. {{< /callout >}} {{< callout >}} -LocalStack on OpenShift can be used in conjunction with the [LocalStack Community image](https://hub.docker.com/r/localstack/localstack). However, specific features such as execution of Lambda functions as OpenShift pods and other container workloads is only available in the [LocalStack Pro image](https://hub.docker.com/r/localstack/localstack-pro). +LocalStack on OpenShift can be used in conjunction with the [LocalStack Community image](https://hub.docker.com/r/localstack/localstack). +However, specific features such as execution of Lambda functions as OpenShift pods and other container workloads is only available in the [LocalStack Pro image](https://hub.docker.com/r/localstack/localstack-pro). {{< /callout >}} ## Getting started -This guide is designed for users new to LocalStack and assumes basic knowledge of the AWS CLI and our [`awslocal`](https://github.com/localstack/awscli-local) wrapper script. As a general prerequisite, you should have access to the [OpenShift Web Console](https://docs.openshift.com/container-platform/4.14/web_console/web-console-overview.html). +This guide is designed for users new to LocalStack and assumes basic knowledge of the AWS CLI and our [`awslocal`](https://github.com/localstack/awscli-local) wrapper script. +As a general prerequisite, you should have access to the [OpenShift Web Console](https://docs.openshift.com/container-platform/4.14/web_console/web-console-overview.html). -We will demonstrate how you can create local AWS resources using LocalStack using the AWS CLI. Instead of running LocalStack locally, you will deploy it on OpenShift and use the exposed endpoint to interact with the LocalStack container. +We will demonstrate how you can create local AWS resources using LocalStack using the AWS CLI. +Instead of running LocalStack locally, you will deploy it on OpenShift and use the exposed endpoint to interact with the LocalStack container. ### Setting up LocalStack on OpenShift -You can deploy LocalStack via the **Developer** perspective in the OpenShift Web Console. Navigate to the **+Add** view to deploy LocalStack using a Devfile. +You can deploy LocalStack via the **Developer** perspective in the OpenShift Web Console. +Navigate to the **+Add** view to deploy LocalStack using a Devfile. OpenShift Developer perspective

-To deploy LocalStack on OpenShift, click on **Import from Git** in the **Git Repository** tile. In the Git section, enter the following Git repository URL to import the Devfile and Helm charts which contains the configuration for LocalStack: [**https://github.com/localstack/localstack-dev-spaces**](https://github.com/localstack/localstack-dev-spaces).
+To deploy LocalStack on OpenShift, click on **Import from Git** in the **Git Repository** tile.
+In the Git section, enter the following Git repository URL to import the Devfile and Helm charts, which contain the configuration for LocalStack: [**https://github.com/localstack/localstack-dev-spaces**](https://github.com/localstack/localstack-dev-spaces).
-OpenShift Web Console will automatically detect the Devfile and display the import strategy. A unique application name will be generated to the application grouping to label your resources. A unique name will also be provided to the component that will be used to name associated resources. You can edit these values if you want.
+The OpenShift Web Console will automatically detect the Devfile and display the import strategy.
+A unique application name will be generated for the application grouping to label your resources.
+A unique name will also be provided to the component that will be used to name associated resources.
+You can edit these values if you want.
Click on **Create** to deploy LocalStack on OpenShift.
@@ -45,7 +58,9 @@ You can see the build status of the LocalStack deployment in the **Topology** vi
OpenShift Topology view

-After successful deployment, you can see the **localstack-dev-spaces** pod in the **Topology** view. Click on the pod to view the details. You will be able to see the following details: +After successful deployment, you can see the **localstack-dev-spaces** pod in the **Topology** view. +Click on the pod to view the details. +You will be able to see the following details: - Running pods along with the status and logs. - Builds for your existing pods and an option to create new builds. @@ -57,7 +72,9 @@ After successful deployment, you can see the **localstack-dev-spaces** pod in th ### Creating AWS resources on OpenShift -Click on the **localstack-dev-spaces** pod to view the details. You will be able to see the exposed route for the LocalStack container. Copy the route URL and use it to interact with the LocalStack container. +Click on the **localstack-dev-spaces** pod to view the details. +You will be able to see the exposed route for the LocalStack container. +Copy the route URL and use it to interact with the LocalStack container. To create an S3 bucket and an SQS queue, run the following commands: @@ -67,14 +84,19 @@ $ awslocal s3 mb s3://my-bucket $ awslocal sqs create-queue --queue-name my-queue {{< /command >}} -In the above commands, replace `` with the route URL of the LocalStack container. The `AWS_ENDPOINT_URL` environment variable is used to specify the endpoint URL of the LocalStack container. +In the above commands, replace `` with the route URL of the LocalStack container. +The `AWS_ENDPOINT_URL` environment variable is used to specify the endpoint URL of the LocalStack container. {{< callout >}} -By default, the endpoint URL for `awslocal` is `http://localhost:4566`. Since we are running LocalStack on OpenShift, we need to specify the route URL of the LocalStack container. You can swap `awslocal` with the AWS CLI, by specifying the additional `--endpoint-url` parameter. +By default, the endpoint URL for `awslocal` is `http://localhost:4566`. +Since we are running LocalStack on OpenShift, we need to specify the route URL of the LocalStack container. +You can swap `awslocal` with the AWS CLI, by specifying the additional `--endpoint-url` parameter. {{< /callout >}} -You can further use integrations, such as [CDK](https://docs.localstack.cloud/user-guide/integrations/aws-cdk/), [SAM CLI](https://docs.localstack.cloud/user-guide/integrations/aws-sam/), and [Terraform](https://docs.localstack.cloud/user-guide/integrations/terraform/), to interact with the Ephemeral Instance. In these integrations, you can change the `AWS_ENDPOINT_URL` environment variable to the endpoint URL of the Ephemeral Instance. +You can further use integrations, such as [CDK](https://docs.localstack.cloud/user-guide/integrations/aws-cdk/), [SAM CLI](https://docs.localstack.cloud/user-guide/integrations/aws-sam/), and [Terraform](https://docs.localstack.cloud/user-guide/integrations/terraform/), to interact with the Ephemeral Instance. +In these integrations, you can change the `AWS_ENDPOINT_URL` environment variable to the endpoint URL of the Ephemeral Instance. ### Deleting the LocalStack deployment -To delete the LocalStack deployment, click on the **localstack-dev-spaces** pod in the **Topology** view. Click on the **Actions** menu and select **Delete Deployment**. +To delete the LocalStack deployment, click on the **localstack-dev-spaces** pod in the **Topology** view. +Click on the **Actions** menu and select **Delete Deployment**. 
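+As a final note, the endpoint configuration described in the previous sections carries over from the AWS CLI and IaC integrations to plain SDK code as well.
+The following is a minimal boto3 sketch, assuming the route URL has been exported as `AWS_ENDPOINT_URL` beforehand; the client call is only an illustration:
+
+```python
+import os
+import boto3
+
+# AWS_ENDPOINT_URL should hold the route URL copied from the OpenShift Web Console.
+sqs = boto3.client(
+    "sqs",
+    endpoint_url=os.environ["AWS_ENDPOINT_URL"],
+    region_name="us-east-1",
+    aws_access_key_id="test",
+    aws_secret_access_key="test",
+)
+
+# List the queues created earlier, e.g. the `my-queue` queue from this guide.
+print(sqs.list_queues().get("QueueUrls", []))
+```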
diff --git a/content/en/user-guide/integrations/pulumi/index.md b/content/en/user-guide/integrations/pulumi/index.md index 649f03fd9e..5fb2d22493 100644 --- a/content/en/user-guide/integrations/pulumi/index.md +++ b/content/en/user-guide/integrations/pulumi/index.md @@ -7,9 +7,11 @@ description: > ## Introduction -Pulumi's SDK for infrastructure-as-code allows you to create, deploy, and manage AWS containers, serverless functions, and other infrastructure using popular programming languages. It supports a range of cloud providers, including AWS, Azure, Google Cloud, and Kubernetes. +Pulumi's SDK for infrastructure-as-code allows you to create, deploy, and manage AWS containers, serverless functions, and other infrastructure using popular programming languages. +It supports a range of cloud providers, including AWS, Azure, Google Cloud, and Kubernetes. -LocalStack can integrate with Pulumi through the Pulumi configuration environment. There are two main methods to configure Pulumi for use with LocalStack: +LocalStack can integrate with Pulumi through the Pulumi configuration environment. +There are two main methods to configure Pulumi for use with LocalStack: - Using the `pulumilocal` wrapper script which automatically configures service endpoints. - Manually setting up the service endpoints in your Pulumi configuration, which requires ongoing maintenance. @@ -18,9 +20,11 @@ This guide will show you how to set up local AWS resources using both the `pulum ## `pulumilocal` wrapper script -`pulumilocal` is a wrapper for the `pulumi` command line interface, facilitating the use of Pulumi with LocalStack. When executing deployment commands like `pulumilocal ["up", "destroy", "preview", "cancel"]`, the script configures the Pulumi settings for LocalStack and runs the specified Pulumi command. +`pulumilocal` is a wrapper for the `pulumi` command line interface, facilitating the use of Pulumi with LocalStack. +When executing deployment commands like `pulumilocal ["up", "destroy", "preview", "cancel"]`, the script configures the Pulumi settings for LocalStack and runs the specified Pulumi command. -The endpoints are set to point to the LocalStack API (`http://localhost:4566`). This setup simplifies the deployment of Pulumi stacks against LocalStack. +The endpoints are set to point to the LocalStack API (`http://localhost:4566`). +This setup simplifies the deployment of Pulumi stacks against LocalStack. ### Configure the Local Backend @@ -74,7 +78,8 @@ Create and select the `lsdev` stack with: $ pulumilocal stack select -c lsdev --cwd myproj {{< / command >}} -If you've just run the `new typescript` command, the stack is already selected. Deploy it with: +If you've just run the `new typescript` command, the stack is already selected. +Deploy it with: {{< command >}} $ pulumilocal up --cwd myproj @@ -91,7 +96,8 @@ $ pulumilocal up --cwd myproj ## Manual configuration -Alternatively, you can manually configure local service endpoints and credentials. The following section will provide detailed steps for this manual configuration, assuming you have [Pulumi](https://www.pulumi.com/docs/install/) installed. +Alternatively, you can manually configure local service endpoints and credentials. +The following section will provide detailed steps for this manual configuration, assuming you have [Pulumi](https://www.pulumi.com/docs/install/) installed. 
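+If your Pulumi program is written in Python, these endpoint and credential overrides can also be applied on an explicit provider resource instead of the stack configuration file.
+The following is a minimal, hedged sketch; the argument names follow recent `pulumi_aws` releases and may differ for the provider version you use:
+
+```python
+import pulumi
+import pulumi_aws as aws
+
+# Explicit provider that points AWS API calls at the LocalStack edge endpoint.
+localstack = aws.Provider(
+    "localstack",
+    region="us-east-1",
+    access_key="test",
+    secret_key="test",
+    skip_credentials_validation=True,
+    skip_requesting_account_id=True,
+    endpoints=[aws.ProviderEndpointArgs(s3="http://localhost:4566", sqs="http://localhost:4566")],
+)
+
+# Resources have to opt in to the explicit provider to be deployed against LocalStack.
+bucket = aws.s3.Bucket("test-bucket", opts=pulumi.ResourceOptions(provider=localstack))
+```
+
+The steps below show the equivalent setup through the stack configuration file, which works for every Pulumi language runtime.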
### Create a new Pulumi stack @@ -141,7 +147,8 @@ $ tree -L 1 ### Configure the stack -Modify your stack configuration in `Pulumi.dev.yaml` to include endpoints for AWS services pointing to `http://localhost:4566`. However, these endpoints may change depending on the AWS plugin version you are using. +Modify your stack configuration in `Pulumi.dev.yaml` to include endpoints for AWS services pointing to `http://localhost:4566`. +However, these endpoints may change depending on the AWS plugin version you are using. ```yaml config: diff --git a/content/en/user-guide/integrations/quarkus/index.md b/content/en/user-guide/integrations/quarkus/index.md index 66af97b029..f92d867820 100644 --- a/content/en/user-guide/integrations/quarkus/index.md +++ b/content/en/user-guide/integrations/quarkus/index.md @@ -7,13 +7,16 @@ description: > ## Introduction -Quarkus is a Java framework optimized for cloud, serverless, and containerized environments. Quarkus leverages a Kubernetes Native Java stack tailored for GraalVM & OpenJDK HotSpot, which further builds on various Java libraries and standards. +Quarkus is a Java framework optimized for cloud, serverless, and containerized environments. +Quarkus leverages a Kubernetes Native Java stack tailored for GraalVM & OpenJDK HotSpot, which further builds on various Java libraries and standards. -Localstack is supported by Quarkus as a Dev service for Amazon Services. Quarkus Amazon Services automatically starts a LocalStack container in development mode and when running tests, and the extension client is configured automatically. +Localstack is supported by Quarkus as a Dev service for Amazon Services. +Quarkus Amazon Services automatically starts a LocalStack container in development mode and when running tests, and the extension client is configured automatically. ## Getting started -In this guide, we will demonstrate how you can create a service client for creating and managing Lambdas on LocalStack. The Lambda extension is based on [AWS Java SDK 2.x](https://docs.aws.amazon.com/sdk-for-java/v2/developer-guide/welcome.html). +In this guide, we will demonstrate how you can create a service client for creating and managing Lambdas on LocalStack. +The Lambda extension is based on [AWS Java SDK 2.x](https://docs.aws.amazon.com/sdk-for-java/v2/developer-guide/welcome.html). ### Prerequisites @@ -40,7 +43,9 @@ The above command generates a Maven project structure with imports for RESTEasy ### Configure Lambda Client -Both Lambda clients (sync and async) can be configured through the `application.properties` file, which should be located in the `src/main/resources` directory. Additionally, ensure that a suitable implementation of the sync client is added to the `classpath`. By default, the extension employs the URL connection HTTP client, so it's necessary to include a URL connection client dependency in the `pom.xml` file: +Both Lambda clients (sync and async) can be configured through the `application.properties` file, which should be located in the `src/main/resources` directory. +Additionally, ensure that a suitable implementation of the sync client is added to the `classpath`. +By default, the extension employs the URL connection HTTP client, so it's necessary to include a URL connection client dependency in the `pom.xml` file: ```xml @@ -99,9 +104,9 @@ $ ./mvnw clean package -Dnative. 
{{< callout >}} Dev Services for Amazon Services is automatically enabled for each extension added to the `pom.xml`, except in the following scenarios: -- When `quarkus.devservices.enabled` is set to false. -- When `devservices.enabled` is set to false per extension (e.g., `quarkus.s3.devservices.enabled=false`). -- When the `endpoint-override` is configured (e.g., `quarkus.s3.endpoint-override=http://localhost:4566`). +- When `quarkus.devservices.enabled` is set to false. +- When `devservices.enabled` is set to false per extension (e.g., `quarkus.s3.devservices.enabled=false`). +- When the `endpoint-override` is configured (e.g., `quarkus.s3.endpoint-override=http://localhost:4566`). {{< /callout >}} ## Supported extensions @@ -118,7 +123,8 @@ Dev Services for Amazon Services is automatically enabled for each extension add ## Configuration -The following configuration properties are fixed at build time. All the other configuration properties can be overridden at runtime. +The following configuration properties are fixed at build time. +All the other configuration properties can be overridden at runtime. | Property | Type | Default | |----------------------------------------------------------|------------------------|--------------------------------------| @@ -133,14 +139,26 @@ The following configuration properties are fixed at build time. All the other co | `quarkus.aws.devservices.localstack.additional-services."additional-services".container-properties` | `Map` | | {{< callout >}} -- If `quarkus.aws.devservices.localstack.additional-services."additional-services".enabled` is set to `true` and the endpoint-override is not configured, LocalStack will be started and utilized instead of the provided configuration. For all services excluding Cognito, LocalStack will function as the core cloud emulator. In the case of Cognito, the emulation/mocking will be done by Moto. -- The `quarkus.aws.devservices.localstack.additional-services."additional-services".shared` indicates whether the LocalStack container managed by Dev Services is shared. In shared mode, Quarkus utilizes label-based service discovery, specifically the `quarkus-dev-service-localstack` label, to identify running containers. If a matching container is found, it is used. Otherwise, Dev Services initiates a new container. It's important to note that sharing is not supported for the Cognito extension. -- In `quarkus.aws.devservices.localstack.additional-services."additional-services".service-name`, the value of the `quarkus-dev-service-localstack` label is attached to the initiated container. In dev mode, when the shared flag is true, Dev Services checks for a container with the `quarkus-dev-service-localstack` label set to the configured value before starting a new one. If found, it utilizes the existing container. Otherwise, it creates a new container with the `quarkus-dev-service-localstack` label set to the specified value. In test mode, Dev Services groups services with the same service-name value into a single container instance. This property is useful when there's a requirement for multiple shared LocalStack instances. +- If `quarkus.aws.devservices.localstack.additional-services."additional-services".enabled` is set to `true` and the endpoint-override is not configured, LocalStack will be started and utilized instead of the provided configuration. + For all services excluding Cognito, LocalStack will function as the core cloud emulator. + In the case of Cognito, the emulation/mocking will be done by Moto. 
+- The `quarkus.aws.devservices.localstack.additional-services."additional-services".shared` indicates whether the LocalStack container managed by Dev Services is shared. + In shared mode, Quarkus utilizes label-based service discovery, specifically the `quarkus-dev-service-localstack` label, to identify running containers. + If a matching container is found, it is used. + Otherwise, Dev Services initiates a new container. + It's important to note that sharing is not supported for the Cognito extension. +- In `quarkus.aws.devservices.localstack.additional-services."additional-services".service-name`, the value of the `quarkus-dev-service-localstack` label is attached to the initiated container. + In dev mode, when the shared flag is true, Dev Services checks for a container with the `quarkus-dev-service-localstack` label set to the configured value before starting a new one. + If found, it utilizes the existing container. + Otherwise, it creates a new container with the `quarkus-dev-service-localstack` label set to the specified value. + In test mode, Dev Services groups services with the same service-name value into a single container instance. + This property is useful when there's a requirement for multiple shared LocalStack instances. {{< /callout >}} ### Specific configuration -Dev Services can support specific configurations passed to the LocalStack container. These configurations can be globally applied to all containers or specified individually per service. +Dev Services can support specific configurations passed to the LocalStack container. +These configurations can be globally applied to all containers or specified individually per service. ```bash quarkus.aws.devservices.localstack.image-name=localstack/localstack:3.0.3 diff --git a/content/en/user-guide/integrations/sdks/_index.md b/content/en/user-guide/integrations/sdks/_index.md index ed2ce96158..b49ddd741f 100644 --- a/content/en/user-guide/integrations/sdks/_index.md +++ b/content/en/user-guide/integrations/sdks/_index.md @@ -17,7 +17,8 @@ This lets you develop and test your applications locally without connecting to t To connect to LocalStack services using AWS SDKs, you can use one of the following methods: -- **Manual configuration:** Manually configure the SDK to connect to LocalStack services by setting the endpoint URL to `http://localhost:4566` or `localhost.localstack.cloud:4566`. +- **Manual configuration:** Manually configure the SDK to connect to LocalStack services by setting the endpoint URL to `http://localhost:4566` or `localhost.localstack.cloud:4566`. This can also be specified using a [profile or an environment variable](https://docs.aws.amazon.com/sdkref/latest/guide/feature-ss-endpoints.html). -- **Transparent endpoint injection (recommended):** Connect to LocalStack services without modifying your application code. -Transparent endpoint injection uses the integrated DNS server to resolve AWS API calls to target LocalStack. Refer to the [Transparent Endpoint Injection]({{< ref "user-guide/tools/transparent-endpoint-injection" >}}) guide for more information. +- **Transparent endpoint injection (recommended):** Connect to LocalStack services without modifying your application code. +Transparent endpoint injection uses the integrated DNS server to resolve AWS API calls to target LocalStack. + Refer to the [Transparent Endpoint Injection]({{< ref "user-guide/tools/transparent-endpoint-injection" >}}) guide for more information. 
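+For the manual configuration option, the setup boils down to passing the LocalStack endpoint when the client is created.
+A minimal boto3 sketch of this idea follows (the listing call is just an example; other SDKs expose an equivalent endpoint setting):
+
+```python
+import boto3
+
+# Manual configuration: point the client at the LocalStack edge endpoint.
+s3 = boto3.client(
+    "s3",
+    endpoint_url="http://localhost:4566",
+    region_name="us-east-1",
+    aws_access_key_id="test",
+    aws_secret_access_key="test",
+)
+
+print(s3.list_buckets()["Buckets"])
+```
+
+The language-specific guides in this section show the same setup for each SDK in more detail.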
diff --git a/content/en/user-guide/integrations/sdks/dotnet/index.md b/content/en/user-guide/integrations/sdks/dotnet/index.md index 25e8815989..c1ce21b905 100644 --- a/content/en/user-guide/integrations/sdks/dotnet/index.md +++ b/content/en/user-guide/integrations/sdks/dotnet/index.md @@ -50,7 +50,8 @@ var s3client = new AmazonS3Client(config); ``` {{< callout >}} -In case of issues resolving this DNS record, we can fallback to in combination with the provider setting `ForcePathStyle = true`. The S3 service endpoint is slightly different from the other service endpoints, because AWS is deprecating path-style based access for hosting buckets. +In case of issues resolving this DNS record, we can fallback to in combination with the provider setting `ForcePathStyle = true`. +The S3 service endpoint is slightly different from the other service endpoints, because AWS is deprecating path-style based access for hosting buckets. {{< /callout >}} ```csharp @@ -65,17 +66,20 @@ var s3client = new AmazonS3Client(config); ## Alternative: Using LocalStack.NET -If you're working with .NET and LocalStack, you have a few options. In addition to the AWS SDK for .NET, there's an alternative client library, `LocalStack.NET`, which facilitates integration with LocalStack. +If you're working with .NET and LocalStack, you have a few options. +In addition to the AWS SDK for .NET, there's an alternative client library, `LocalStack.NET`, which facilitates integration with LocalStack. ### Overview -`LocalStack.NET` is a .NET client library developed to simplify the connection between .NET applications and LocalStack. It wraps around the AWS SDK for .NET and offers an alternative setup for creating LocalStack clients. +`LocalStack.NET` is a .NET client library developed to simplify the connection between .NET applications and LocalStack. +It wraps around the AWS SDK for .NET and offers an alternative setup for creating LocalStack clients. **LocalStack.NET Documentation:** Comprehensive guide and examples [here](https://github.com/localstack-dotnet/localstack-dotnet-client). ### How it Works -Instead of manually setting the endpoint configurations when initializing a client, `LocalStack.NET` offers methods that handle these details. The library aims to reduce the boilerplate required to set up LocalStack clients in .NET. +Instead of manually setting the endpoint configurations when initializing a client, `LocalStack.NET` offers methods that handle these details. +The library aims to reduce the boilerplate required to set up LocalStack clients in .NET. ### Example Usage @@ -117,9 +121,10 @@ var amazonS3Client = session.CreateClientByImplementation(); - **Adaptable Environment Transition:** Switching between LocalStack and actual AWS services can be achieved with minimal configuration changes when leveraging `LocalStack.NET`. - **Versatile .NET Compatibility:** Supports a broad spectrum of .NET versions, from .NET Framework 4.6.1 and .NET Standard 2.0, up to recent .NET iterations such as .NET 7.0. -### Considerations: +### Considerations -- Both the standard AWS SDK method and `LocalStack.NET` provide ways to integrate with LocalStack using .NET. The choice depends on developer preferences and specific project needs. +- Both the standard AWS SDK method and `LocalStack.NET` provide ways to integrate with LocalStack using .NET. + The choice depends on developer preferences and specific project needs. 
- `LocalStack.NET` works alongside the AWS SDK, using it as a base and providing a more focused API for LocalStack interactions. ## Resources diff --git a/content/en/user-guide/integrations/sdks/go/index.md b/content/en/user-guide/integrations/sdks/go/index.md index a7e97359d1..9dbf2063a9 100644 --- a/content/en/user-guide/integrations/sdks/go/index.md +++ b/content/en/user-guide/integrations/sdks/go/index.md @@ -80,8 +80,6 @@ func main() { }{{< /tab >}} {{< /tabpane >}} - - ## Resources * [localstack-aws-sdk-examples for Go](https://github.com/localstack/localstack-aws-sdk-examples/tree/main/go) diff --git a/content/en/user-guide/integrations/sdks/java/index.md b/content/en/user-guide/integrations/sdks/java/index.md index 6f52021df2..b9db2f89c8 100644 --- a/content/en/user-guide/integrations/sdks/java/index.md +++ b/content/en/user-guide/integrations/sdks/java/index.md @@ -9,10 +9,13 @@ aliases: ## Overview -The AWS SDK for Java provides a Java API for AWS services. Using the SDK, your Java application can interact -with LocalStack services the same way it does with Amazon services. Support for new services is regularly added to -the SDK. For a list of the supported services and their API versions that are -included with each release of the SDK, view the [release notes](https://github.com/aws/aws-sdk-java#release-notes) +The AWS SDK for Java provides a Java API for AWS services. +Using the SDK, your Java application can interact +with LocalStack services the same way it does with Amazon services. +Support for new services is regularly added to +the SDK. +For a list of the supported services and their API versions that are +included with each release of the SDK, view the [release notes](https://github.com/aws/aws-sdk-java#release-notes) for the version that you’re working with. The Java SDK currently supports two major versions: @@ -22,17 +25,17 @@ The Java SDK currently supports two major versions: ## Examples -Full examples for both SDK versions can be found in the -[example repository](https://github.com/localstack/localstack-aws-sdk-examples/tree/main/java). This includes proper +Full examples for both SDK versions can be found in the +[example repository](https://github.com/localstack/localstack-aws-sdk-examples/tree/main/java). +This includes proper exception handling and all the necessary Maven dependencies. The scripts to create the AWS services on LocalStack can be found under the `src/main/resources` folder -of each module in the repository. +of each module in the repository. ### S3 Service Below you'll find an example of how to create an S3 client with the endpoint configured for LocalStack. -The client can be used to upload a file to an existing bucket and then retrieve it. - +The client can be used to upload a file to an existing bucket and then retrieve it. #### Configuring the S3 Client @@ -44,7 +47,6 @@ The client can be used to upload a file to an existing bucket and then retrieve final String ACCESS_KEY = "test"; final String SECRET_KEY = "test"; - // S3 Client with configured credentials, endpoint directing to LocalStack and desired region. AmazonS3 s3Client = AmazonS3ClientBuilder.standard() .withCredentials(new AWSStaticCredentialsProvider(credentials)) @@ -138,17 +140,21 @@ ResponseInputStream response = s3Client.getObject(getObjectRe ### DynamoDB Service -Another interesting case is interacting with the DynamoDB service. Here we can see code snippets of -a DynamoDB client inserting an entity of type `Person` into a table with the same name. 
Once the object is in +Another interesting case is interacting with the DynamoDB service. +Here we can see code snippets of +a DynamoDB client inserting an entity of type `Person` into a table with the same name. +Once the object is in the database, we would like to retrieve it as well. -Just like the example before, the scripts to create the AWS services on LocalStack can be found under +Just like the example before, the scripts to create the AWS services on LocalStack can be found under the `src/main/resources` folder of each module in the repository. -Pay particular attention to the handling of the data model in the v2 example. As part of improvements, some -boilerplate code can be abstracted with the help of specific annotations, which help label the Java bean, the -partition key and even specify converters for certain data types. +Pay particular attention to the handling of the data model in the v2 example. +As part of improvements, some +boilerplate code can be abstracted with the help of specific annotations, which help label the Java bean, the +partition key and even specify converters for certain data types. Unfortunately, the enhanced mapping in v2 does not support Date type, but a handwritten converter is enough to -cater to the application's needs. The full list of supported converters can be found +cater to the application's needs. +The full list of supported converters can be found [here](https://sdk.amazonaws.com/java/api/latest/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/package-summary.html). #### Configuring the DynamoDB Client @@ -180,7 +186,6 @@ private static AmazonDynamoDB dynamoDBClient = AmazonDynamoDBClientBuilder.stand final String ACCESS_KEY = "test"; final String SECRET_KEY = "test"; - // Creating the AWS Credentials provider, using the above access and secret keys. AwsCredentialsProvider credentials = StaticCredentialsProvider.create( AwsBasicCredentials.create(ACCESS_KEY, SECRET_KEY)); @@ -204,7 +209,6 @@ DynamoDbEnhancedClient enhancedClient = DynamoDbEnhancedClient.builder() {{< /tab >}} {{< /tabpane >}} - #### Interacting with DynamoDB {{< tabpane lang="java" >}} @@ -282,4 +286,3 @@ Person person = table.getItem(Key.builder().partitionValue(personId).build()); * [Official repository of the AWS SDK for Java (v1)](https://github.com/aws/aws-sdk-java) * [Official repository of the AWS SDK for Java (v2)](https://github.com/aws/aws-sdk-java-v2) * [localstack-aws-sdk-examples for Java](https://github.com/localstack/localstack-aws-sdk-examples/tree/main/java) - diff --git a/content/en/user-guide/integrations/sdks/javascript/index.md b/content/en/user-guide/integrations/sdks/javascript/index.md index a35ea4381a..73d2cc8d12 100644 --- a/content/en/user-guide/integrations/sdks/javascript/index.md +++ b/content/en/user-guide/integrations/sdks/javascript/index.md @@ -47,7 +47,7 @@ lambda.listFunctions({}, (err, data) => { // You can read the S3 documentation to learn more about the different endpoints. const s3 = new AWS.S3({ endpoint: 'http://s3.localhost.localstack.cloud:4566', - s3ForcePathStyle: true, // If you want to use virtual host addressing of buckets, you can remove `s3ForcePathStyle: true`. + s3ForcePathStyle: true, // If you want to use virtual host addressing of buckets, you can remove `s3ForcePathStyle: true`. 
accessKeyId: 'test', secretAccessKey: 'test', region: 'us-east-1', @@ -87,18 +87,17 @@ lambda.send(new ListFunctionsCommand({})) .then((data) => console.log(data)) .catch((error) => console.error(error)); - // By default, @aws-sdk/client-s3 will using virtual host addressing: // -> http://.s3.localhost.localstack.cloud:4566/ // To allow those requests to be directed to LocalStack, you need to set a specific endpoint. // If this is not possible, you can set the special S3 configuration flag to use path -// addressing instead: +// addressing instead: // -> http://s3.localhost.localstack.cloud:4566// -// You can read the S3 documentation to learn more about the different endpoints. +// You can read the S3 documentation to learn more about the different endpoints. const s3 = new S3Client({ region: 'us-east-1', - forcePathStyle: true, // If you want to use virtual host addressing of buckets, you can remove `forcePathStyle: true`. + forcePathStyle: true, // If you want to use virtual host addressing of buckets, you can remove `forcePathStyle: true`. endpoint: 'http://s3.localhost.localstack.cloud:4566', credentials: { accessKeyId: 'test', @@ -111,15 +110,15 @@ s3.send(new ListBucketsCommand({})) .then((data) => console.log(data)) .catch((error) => console.error(error)); - {{< /tab >}} {{< /tabpane >}} {{< callout >}} -In case of issues resolving S3 DNS record, we can fallback to `http://localhost:4566` in combination with the provider setting `forcePathStyle: true` (see the specific way of setting this parameter for each SDK above). The S3 service endpoint is slightly different from the other service endpoints, because AWS is deprecating path-style based access for hosting buckets. See [S3 documentation]({{< ref "user-guide/aws/s3" >}}) about endpoints. +In case of issues resolving S3 DNS record, we can fallback to `http://localhost:4566` in combination with the provider setting `forcePathStyle: true` (see the specific way of setting this parameter for each SDK above). +The S3 service endpoint is slightly different from the other service endpoints, because AWS is deprecating path-style based access for hosting buckets. +See [S3 documentation]({{< ref "user-guide/aws/s3" >}}) about endpoints. {{< /callout >}} - ## Resources * [AWS SDK for JavaScript](https://aws.amazon.com/sdk-for-javascript/) diff --git a/content/en/user-guide/integrations/sdks/php/index.md b/content/en/user-guide/integrations/sdks/php/index.md index 2314b4da6a..fdd8375c35 100644 --- a/content/en/user-guide/integrations/sdks/php/index.md +++ b/content/en/user-guide/integrations/sdks/php/index.md @@ -32,7 +32,6 @@ $s3 = new Aws\S3\S3Client([ A full example can be found [in our samples repository](https://github.com/localstack/localstack-aws-sdk-examples/tree/main/php). - ## Resources * [localstack-aws-sdk-examples for PHP](https://github.com/localstack/localstack-aws-sdk-examples/tree/main/php) diff --git a/content/en/user-guide/integrations/sdks/python/index.md b/content/en/user-guide/integrations/sdks/python/index.md index e31231a32a..e47324da87 100644 --- a/content/en/user-guide/integrations/sdks/python/index.md +++ b/content/en/user-guide/integrations/sdks/python/index.md @@ -9,7 +9,8 @@ aliases: [Boto3](https://github.com/boto/boto3) is the Amazon Web Services (AWS) Software Development Kit (SDK) for Python, which allows Python developers to write software that makes use of AWS services. -You can easily create a `boto3` client that interacts with your LocalStack instance. 
The example below creates a `boto3` client that lists all available Lambda functions: +You can easily create a `boto3` client that interacts with your LocalStack instance. +The example below creates a `boto3` client that lists all available Lambda functions: ```python3 import boto3 @@ -37,11 +38,13 @@ client = boto3.client("lambda") ``` Alternatively, if you prefer to (or need to) set the endpoints directly, you can use the environment variable `AWS_ENDPOINT_URL`, which is available when executing user code (e.g., Lambda functions) in LocalStack: + ```python3 import os client = boto3.client("lambda", endpoint_url=os.getenv("AWS_ENDPOINT_URL")) ... ``` -### Further Material: +### Further Material + * [localstack-python-client](https://github.com/localstack/localstack-python-client): small Python library with additional utils for interacting with LocalStack diff --git a/content/en/user-guide/integrations/sdks/ruby/index.md b/content/en/user-guide/integrations/sdks/ruby/index.md index 2d14e23b7f..1a8d518bcf 100644 --- a/content/en/user-guide/integrations/sdks/ruby/index.md +++ b/content/en/user-guide/integrations/sdks/ruby/index.md @@ -74,13 +74,15 @@ run_demo if $PROGRAM_NAME == __FILE__ You can run the example by saving it to a file, for example `localstack.rb`, and then running it with: {{< command >}} -$ ruby ./localstack.rb +$ ruby ./localstack.rb Created bucket doc-example-bucket-b911f85f-4dd3-4668-a32e-3f69aa4e37dc. Your bucket's region is: us-east-2 {{< /command >}} {{< callout >}} -The endpoint we configure for the S3 and virtual host bucket is `http://s3.localhost.localstack.cloud`. In case of issues resolving the DNS record, we can fall back to `http://localhost:4566` in combination with the provider setting `force_path_style: true`. The S3 service endpoint differs slightly from the other service endpoints because AWS deprecates path-style-based access for hosting buckets. +The endpoint we configure for the S3 and virtual host bucket is `http://s3.localhost.localstack.cloud`. +In case of issues resolving the DNS record, we can fall back to `http://localhost:4566` in combination with the provider setting `force_path_style: true`. +The S3 service endpoint differs slightly from the other service endpoints because AWS deprecates path-style-based access for hosting buckets. {{< /callout >}} For alternative AWS services, you can use the following configuration: diff --git a/content/en/user-guide/integrations/serverless-framework/index.md b/content/en/user-guide/integrations/serverless-framework/index.md index 57183eb11c..6416b1f615 100644 --- a/content/en/user-guide/integrations/serverless-framework/index.md +++ b/content/en/user-guide/integrations/serverless-framework/index.md @@ -10,7 +10,7 @@ description: > ## Overview This guide explains how to integrate LocalStack with the [Serverless Framework](https://www.serverless.com/). -Although it probably requires a few code changes, integrating LocalStack with the Serverless Framework is fairly straightforward. +Although it probably requires a few code changes, integrating LocalStack with the Serverless Framework is fairly straightforward. In particular, the setup consists of the following two steps. @@ -24,8 +24,8 @@ This guide assumes that you have the following tools installed. 
* LocalStack ([Install](https://docs.localstack.cloud/get-started/#installation))
* Serverless ([Install](https://www.serverless.com/framework/docs/getting-started/))
-It also assumes that you already have a Serverless app set up consisting of a couple of Lambda functions and a `serverless.yml` file similar to the following. An example Serverless app integrated with LocalStack can be found here: Simple REST API using the Serverless Framework and LocalStack
-
+It also assumes that you already have a Serverless app set up consisting of a couple of Lambda functions and a `serverless.yml` file similar to the following.
+An example Serverless app integrated with LocalStack can be found here: Simple REST API using the Serverless Framework and LocalStack
```yaml
service: my-service
@@ -66,12 +66,14 @@ resources:
```
## Install and configure Serverless-LocalStack Plugin
+
To install the plugin, execute the following command in the root of your project.
{{< command >}}
$ npm install -D serverless-localstack
{{< / command >}}
Next, set up the plugin by adding the following properties to `serverless.yml`.
+
```yaml
...
@@ -84,30 +86,35 @@ custom:
- local
```
-This sets up Serverless to use the LocalStack plugin but only for the stage "local".
+This sets up Serverless to use the LocalStack plugin but only for the stage "local".
Next, you need to make minor adjustments to your function code in order to make your application work no matter if it is deployed on AWS or LocalStack.
## Adjust AWS endpoints in Lambda functions
+
You are likely using an AWS SDK (such as [Boto3](https://github.com/boto/boto3) for Python) in your Lambda functions to interact with other AWS services such as DynamoDB.
For example, in Python, your code to set up a connection to DynamoDB may look like this:
+
```python
...
dynamodb = boto3.resource('dynamodb')
...
```
-By default, this call attempts to create a connection via the usual AWS endpoints. However, when running services in LocalStack, we need to make sure, our applications creates a connection via the LocalStack endpoint instead.
+By default, this call attempts to create a connection via the usual AWS endpoints.
+However, when running services in LocalStack, we need to make sure our application creates a connection via the LocalStack endpoint instead.
-Usually, all of LocalStack's services are available via a specific port on localhost (e.g. `localhost:4566`). However, this endpoint only works when accessing LocalStack from outside its Docker runtime.
+Usually, all of LocalStack's services are available via a specific port on localhost (e.g. `localhost:4566`).
+However, this endpoint only works when accessing LocalStack from outside its Docker runtime.
-Since the Lambda functions execute within the LocalStack Docker container, Lambda functions cannot access other services via the usual localhost endpoint.
+Since the Lambda functions execute within the LocalStack Docker container, Lambda functions cannot access other services via the usual localhost endpoint.
Instead, LocalStack provides a special environment variable `AWS_ENDPOINT_URL` which contains the internal endpoint of the LocalStack services from within its runtime environment.
Hence, you need to configure the Lambda functions to use the `AWS_ENDPOINT_URL` endpoint when accessing other AWS services in LocalStack.
-In Python, this may look something like. The code detects if it is running in LocalStack by checking if the `AWS_ENDPOINT_URL` variable exists and then configures the endpoint URL accordingly.
+In Python, this may look something like. +The code detects if it is running in LocalStack by checking if the `AWS_ENDPOINT_URL` variable exists and then configures the endpoint URL accordingly. ```python ... @@ -121,6 +128,7 @@ else: In LocalStack Pro, no code changes are required using our [Transparent Endpoint Injection]({{< ref "user-guide/tools/transparent-endpoint-injection" >}}). ## Deploying to LocalStack + You can now deploy your Serverless service to LocalStack. First, start LocalStack by running @@ -167,8 +175,7 @@ layers: None ``` -Use the displayed endpoint `http://localhost:4566/restapis/XXXXXXXXXX/local/_user_request_/my/custom/endpoint` to make requests to the deployed service. - +Use the displayed endpoint `http://localhost:4566/restapis/XXXXXXXXXX/local/_user_request_/my/custom/endpoint` to make requests to the deployed service. ## Advanced topics diff --git a/content/en/user-guide/integrations/spring-cloud-function/index.md b/content/en/user-guide/integrations/spring-cloud-function/index.md index 7cf5b5b21f..3dea5975f7 100644 --- a/content/en/user-guide/integrations/spring-cloud-function/index.md +++ b/content/en/user-guide/integrations/spring-cloud-function/index.md @@ -25,21 +25,21 @@ The primary language for the application is Kotlin powered by [Gradle](https://gradle.org) build tool, but the described concepts would work for any other JVM setup. -* [Limitations](#limitations) +* [Current Limitations](#current-limitations) * [Setting up an Application](#setting-up-an-application) - * [Starting a new Project](#starting-a-new-project) - * [Project Settings](#project-settings) - * [Configure Log4J2 for AWS Lambda](#configure-log4j2-for-aws-lambda) - * [Configure Spring Cloud Function for Rest API](#configure-spring-cloud-function-for-rest-api) - * [Define an Application class](#define-an-application-class) - * [Configure Jackson](#configure-jackson) - * [Define Logging Utility](#define-logging-utility) - * [Add Request/Response utilities](#add-requestresponse-utilities) - * [Creating a sample Model / DTO](#creating-a-sample-model--dto) - * [Creating Rest API endpoints](#creating-rest-api-endpoints) - * [Cold Start and Warmup (PRO)](#cold-start-and-warmup-pro) - * [Creating other lambda Handlers](#creating-other-lambda-handlers) -* [Setting up Deployment](#settings-up-deployment) + * [Starting a new Project](#starting-a-new-project) + * [Project Settings](#project-settings) + * [Configure Log4J2 for AWS Lambda](#configure-log4j2-for-aws-lambda) + * [Configure Spring Cloud Function for Rest API](#configure-spring-cloud-function-for-rest-api) + * [Define an Application class](#define-an-application-class) + * [Configure Jackson](#configure-jackson) + * [Define Logging Utility](#define-logging-utility) + * [Add Request/Response utilities](#add-requestresponse-utilities) + * [Creating a sample Model / DTO](#creating-a-sample-model--dto) + * [Creating Rest API endpoints](#creating-rest-api-endpoints) + * [Cold Start and Warmup (PRO)](#cold-start-and-warmup-pro) + * [Creating other lambda Handlers](#creating-other-lambda-handlers) +* [Setting up Deployment](#setting-up-deployment) * [Testing, Debugging and Hot Reloading](#testing-debugging-and-hot-reloading) * [Useful links](#useful-links) @@ -243,6 +243,7 @@ spring.cloud.function.scan.packages=org.localstack.sampleproject.api Once configured, you can use `FunctionInvoker` as a handler for your Rest API lambda function. It will automatically pick up the configuration we have just set. 
+
```java
org.springframework.cloud.function.adapter.aws.FunctionInvoker::handleRequest
```
@@ -428,13 +429,11 @@ import org.localstack.sampleproject.util.buildJsonResponse
import org.springframework.context.annotation.Bean
import org.springframework.stereotype.Component
-
private val SAMPLE_RESPONSE = mutableListOf(
    SampleModel(id = 1, name = "Sample #1"),
    SampleModel(id = 2, name = "Sample #2"),
)
-
@Component
class SampleApi(private val objectMapper: ObjectMapper) {
@@ -480,7 +479,6 @@ import org.localstack.sampleproject.util.buildJsonResponse
import org.springframework.context.annotation.Bean
import org.springframework.stereotype.Component
-
@Component
class ScheduleApi(private val objectMapper: ObjectMapper) {
@@ -587,8 +585,8 @@ package:
  artifact: build/libs/localstack-sampleproject-all.jar
plugins:
-  - serverless-localstack
-  - serverless-deployment-bucket
+  - serverless-localstack
+  - serverless-deployment-bucket
custom:
  localstack:
diff --git a/content/en/user-guide/integrations/terraform/index.md
index 76630894ca..dc047a45b2 100644
--- a/content/en/user-guide/integrations/terraform/index.md
+++ b/content/en/user-guide/integrations/terraform/index.md
@@ -9,22 +9,29 @@ aliases:
## Introduction
-[Terraform](https://terraform.io/) is an Infrastructure-as-Code (IaC) framework developed by HashiCorp. It enables users to define and provision infrastructure using a high-level configuration language. Terraform uses HashiCorp Configuration Language (HCL) as its configuration syntax. HCL is a domain-specific language designed for writing configurations that define infrastructure elements and their relationships.
+[Terraform](https://terraform.io/) is an Infrastructure-as-Code (IaC) framework developed by HashiCorp.
+It enables users to define and provision infrastructure using a high-level configuration language.
+Terraform uses HashiCorp Configuration Language (HCL) as its configuration syntax.
+HCL is a domain-specific language designed for writing configurations that define infrastructure elements and their relationships.
-LocalStack supports Terraform via the [AWS provider](https://registry.terraform.io/providers/hashicorp/aws/latest/docs) through [custom service endpoints](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/guides/custom-service-endpoints#localstack). You can configure Terraform to use LocalStack in two ways:
+LocalStack supports Terraform via the [AWS provider](https://registry.terraform.io/providers/hashicorp/aws/latest/docs) through [custom service endpoints](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/guides/custom-service-endpoints#localstack).
+You can configure Terraform to use LocalStack in two ways:
-- Using the [`tflocal` wrapper script](https://github.com/localstack/terraform-local) to automatically configure the service endpoints for you.
-- Manually configuring the service endpoints in your Terraform configuration with additional maintenance.
+- Using the [`tflocal` wrapper script](https://github.com/localstack/terraform-local) to automatically configure the service endpoints for you.
+- Manually configuring the service endpoints in your Terraform configuration with additional maintenance.
In this guide, we will demonstrate how you can create local AWS resources using Terraform and LocalStack, by using the `tflocal` wrapper script and a manual configuration example.
## `tflocal` wrapper script
-`tflocal` is a small wrapper script to run Terraform against LocalStack.
`tflocal` script uses the [Terraform Override mechanism](https://www.terraform.io/language/files/override) and creates a temporary file `localstack_providers_override.tf` to configure the endpoints for the AWS `provider` section. The endpoints for all services are configured to point to the LocalStack API (`http://localhost:4566` by default). It allows you to easily deploy your unmodified Terraform scripts against LocalStack. +`tflocal` is a small wrapper script to run Terraform against LocalStack. `tflocal` script uses the [Terraform Override mechanism](https://www.terraform.io/language/files/override) and creates a temporary file `localstack_providers_override.tf` to configure the endpoints for the AWS `provider` section. +The endpoints for all services are configured to point to the LocalStack API (`http://localhost:4566` by default). +It allows you to easily deploy your unmodified Terraform scripts against LocalStack. ### Create a Terraform configuration -Create a new file named `main.tf` and add a minimal S3 bucket configuration to it. The following contents should be added in the `main.tf` file: +Create a new file named `main.tf` and add a minimal S3 bucket configuration to it. +The following contents should be added in the `main.tf` file: ```hcl resource "aws_s3_bucket" "test-bucket" { @@ -52,7 +59,8 @@ Usage: terraform [global options] [args] ### Deploy the Terraform configuration -Start your LocalStack container using your preferred method. Initialize Terraform using the following command: +Start your LocalStack container using your preferred method. +Initialize Terraform using the following command: {{< command >}} $ tflocal init @@ -81,21 +89,23 @@ $ tflocal apply {{< callout >}} While using `CUSTOMIZE_ACCESS_KEY`, following cases are taking precedence over each other from top to bottom: -1. If the `AWS_ACCESS_KEY_ID` environment variable is set. -2. If `access_key` is configured in the Terraform AWS provider. -3. If the `AWS_PROFILE` environment variable is set and properly configured. -4. If the `AWS_DEFAULT_PROFILE` environment variable is set and configured. -5. If credentials for the `default` profile are configured. -6. If none of the above settings are present, it falls back to using the default `AWS_ACCESS_KEY_ID` mock value. +1. If the `AWS_ACCESS_KEY_ID` environment variable is set. +2. If `access_key` is configured in the Terraform AWS provider. +3. If the `AWS_PROFILE` environment variable is set and properly configured. +4. If the `AWS_DEFAULT_PROFILE` environment variable is set and configured. +5. If credentials for the `default` profile are configured. +6. If none of the above settings are present, it falls back to using the default `AWS_ACCESS_KEY_ID` mock value. {{< /callout >}} ## Manual Configuration -Instead of using the `tflocal` script, you have the option to manually configure the local service endpoints and credentials. The following sections will provide detailed steps for this manual configuration. +Instead of using the `tflocal` script, you have the option to manually configure the local service endpoints and credentials. +The following sections will provide detailed steps for this manual configuration. ### General Configuration -To begin, you need to define mock credentials for the AWS provider. Specify the following in your `main.tf` file: +To begin, you need to define mock credentials for the AWS provider. 
+Specify the following in your `main.tf` file: ```hcl provider "aws" { @@ -129,7 +139,8 @@ provider "aws" { ### Services -Furthermore, it's necessary to configure the individual services to use LocalStack. For S3, this configuration resembles the following snippet, where we've chosen to use the virtual hosted-style endpoint: +Furthermore, it's necessary to configure the individual services to use LocalStack. +For S3, this configuration resembles the following snippet, where we've chosen to use the virtual hosted-style endpoint: ```hcl endpoints { @@ -138,7 +149,8 @@ Furthermore, it's necessary to configure the individual services to use LocalSta ``` {{< callout >}} -If there are any difficulties resolving this DNS record, you can utilize `http://localhost:4566` as a fallback option in combination with setting `s3_use_path_style = true` in the provider. It's worth noting that the S3 service endpoint differs slightly from the other service endpoints due to AWS deprecating path-style based access for hosting buckets. +If there are any difficulties resolving this DNS record, you can utilize `http://localhost:4566` as a fallback option in combination with setting `s3_use_path_style = true` in the provider. +It's worth noting that the S3 service endpoint differs slightly from the other service endpoints due to AWS deprecating path-style based access for hosting buckets. {{< /callout >}} ### Final Configuration @@ -169,7 +181,9 @@ resource "aws_s3_bucket" "test-bucket" { ### Endpoint Configuration -Here's a configuration example with additional service endpoints. Please note that these provider configurations may not be necessary if you use the `tflocal` script (as described above). You can save the following configuration in a file named `provider.tf` and include it in your Terraform configuration. +Here's a configuration example with additional service endpoints. +Please note that these provider configurations may not be necessary if you use the `tflocal` script (as described above). +You can save the following configuration in a file named `provider.tf` and include it in your Terraform configuration. ```hcl provider "aws" { @@ -219,22 +233,26 @@ output "is_localstack" { } ``` -It will detect whether the AWS account ID is `000000000000`, which is the default value for LocalStack. If you use a different account ID within LocalStack, you can customize the snippet accordingly. +It will detect whether the AWS account ID is `000000000000`, which is the default value for LocalStack. +If you use a different account ID within LocalStack, you can customize the snippet accordingly. {{< /callout >}} ## CDK for Terraform -Cloud Development Kit for Terraform (CDKTF) allows you to use general-purpose programming languages, such as TypeScript, Python, Java, and more, to create infrastructure declaratively. It allows you to create, update, and delete AWS infrastructure by leveraging a Terraform backend without manually configuring Terraform using HCL and [AWS Cloud Development Kit](https://aws.amazon.com/cdk/) to translate your code into infrastructure configuration files for Terraform. CDKTF supports every Terraform provider and module available on the [Terraform Registry](https://registry.terraform.io/). +Cloud Development Kit for Terraform (CDKTF) allows you to use general-purpose programming languages, such as TypeScript, Python, Java, and more, to create infrastructure declaratively. 
+It allows you to create, update, and delete AWS infrastructure by leveraging a Terraform backend without manually configuring Terraform using HCL and [AWS Cloud Development Kit](https://aws.amazon.com/cdk/) to translate your code into infrastructure configuration files for Terraform. +CDKTF supports every Terraform provider and module available on the [Terraform Registry](https://registry.terraform.io/). ### Configuration -To configure your existing CDKTF configuration to work with LocalStack, manually configure the local service endpoints and credentials. It includes: +To configure your existing CDKTF configuration to work with LocalStack, manually configure the local service endpoints and credentials. +It includes: - General configuration to specify mock credentials for the AWS provider (`region`, `access_key`, `secret_key`). - Request Management to avoid issues with routing and authentication, if needed. - Service configuration to point the individual services to LocalStack. -Here is a configuration example to use with Python & TypeScript CDKTF configurations: +Here is a configuration example to use with Python & TypeScript CDKTF configurations: {{< tabpane >}} {{< tab header="localstack_config.py" lang="py" >}} @@ -267,7 +285,7 @@ AWS_CONFIG = { "sts": "http://localhost:4566", } ], -} +} {{< /tab >}} {{< tab header="localstack-config.ts" lang="ts" >}} export const AWS_CONFIG = { @@ -299,7 +317,7 @@ export const AWS_CONFIG = { sts: "http://localhost:4566", }, ], -}; +}; {{< /tab >}} {{< /tabpane >}} @@ -324,19 +342,21 @@ new AwsProvider(this, "aws", AWS_CONFIG); ### Getting started -To get started with CDKTF on LocalStack, we will set up a simple stack to create some AWS resources. We will then deploy the stack to LocalStack, and verify that the resources have been created successfully. Before we start, make sure you have the following prerequisites: +To get started with CDKTF on LocalStack, we will set up a simple stack to create some AWS resources. +We will then deploy the stack to LocalStack, and verify that the resources have been created successfully. 
+Before we start, make sure you have the following prerequisites:
 
-* LocalStack
-* [`cdktf`](https://www.npmjs.com/package/cdktf)
+- LocalStack
+- [`cdktf`](https://www.npmjs.com/package/cdktf)
 
 For Python:
 
-* [`python`](https://www.python.org/downloads/)
-* [`pipenv`](https://pipenv.pypa.io/en/latest/installation.html#installing-pipenv)
+- [`python`](https://www.python.org/downloads/)
+- [`pipenv`](https://pipenv.pypa.io/en/latest/installation.html#installing-pipenv)
 
 For TypeScript:
 
-* [`tsc`](https://www.npmjs.com/package/typescript)
+- [`tsc`](https://www.npmjs.com/package/typescript)
 
 Create a new directory named `cdktf-localstack` and initialize a new CDKTF project using the following command:
 
@@ -388,16 +408,16 @@ Add the following code to import the AWS provider and create a new S3 bucket in
 
 {{< tabpane >}}
 {{< tab header="main.py" lang="py" >}}
 #!/usr/bin/env python
+
 from constructs import Construct
 from cdktf import App, TerraformStack
 from cdktf_cdktf_provider_aws.provider import AwsProvider
 from cdktf_cdktf_provider_aws.s3_bucket import S3Bucket
 
-
 class MyStack(TerraformStack):
     def __init__(self, scope: Construct, id: str):
         super().__init__(scope, id)
 
         AwsProvider(self, "aws",
                     region="us-east-1",
diff --git a/content/en/user-guide/integrations/testcontainers/index.md b/content/en/user-guide/integrations/testcontainers/index.md
index bbd14ec9ef..3c9fd935fc 100644
--- a/content/en/user-guide/integrations/testcontainers/index.md
+++ b/content/en/user-guide/integrations/testcontainers/index.md
@@ -141,7 +141,8 @@ const s3 = S3Client(awsConfig);
 
 ## Special Setup for using RDS
 
-Some services like RDS require additional setup so that the correct port is exposed and accessible for the tests. The reserved ports on LocalStack are between `4510-4559`, depending on your use case you might need to expose several ports using `witExposedPorts`.
+Some services like RDS require additional setup so that the correct port is exposed and accessible for the tests.
+The reserved ports on LocalStack are between `4510-4559`; depending on your use case, you might need to expose several ports using `withExposedPorts`.
 
 Check the [pro-sample on how to use RDS with Testcontainers for Java](https://github.com/localstack/localstack-pro-samples/tree/master/testcontainers-java-sample).
 
diff --git a/content/en/user-guide/lambda-tools/_index.md b/content/en/user-guide/lambda-tools/_index.md
index 7b14d5482b..0aed8bd439 100755
--- a/content/en/user-guide/lambda-tools/_index.md
+++ b/content/en/user-guide/lambda-tools/_index.md
@@ -9,7 +9,9 @@ aliases:
 - /user-guide/tools/lambda-tools/
 ---
 
-Lambda Tools by LocalStack offers a suite of utilities to streamline the development of Lambda functions on your local machine. LocalStack's Lambda emulation enables you to develop, deploy, and test your functions in a local environment, removing the need for deployment on AWS, while integrating with other AWS services. These tools aim to enhance the developer experience by providing quicker feedback cycles.
+Lambda Tools by LocalStack offers a suite of utilities to streamline the development of Lambda functions on your local machine.
+LocalStack's Lambda emulation enables you to develop, deploy, and test your functions in a local environment, removing the need for deployment on AWS, while integrating with other AWS services.
+These tools aim to enhance the developer experience by providing quicker feedback cycles.
With Lambda Tools, you can: diff --git a/content/en/user-guide/lambda-tools/debugging/index.md b/content/en/user-guide/lambda-tools/debugging/index.md index 3f98d3f2d9..be3000dd5a 100644 --- a/content/en/user-guide/lambda-tools/debugging/index.md +++ b/content/en/user-guide/lambda-tools/debugging/index.md @@ -20,11 +20,12 @@ More examples and tooling support for local Lambda debugging (including support * [Debugging Python lambdas](#debugging-python-lambdas) * [Debugging JVM lambdas](#debugging-jvm-lambdas) * [Debugging Node.js lambdas](#debugging-nodejs-lambdas) -* [Useful Links](#useful-links) +* [Resources](#resources) ## Debugging Python lambdas -Lambda functions debugging used to be a difficult task. LocalStack changes that +Lambda functions debugging used to be a difficult task. +LocalStack changes that with the same local code mounting functionality that also helps you to [iterate quickly over your function code]({{< ref "user-guide/lambda-tools" >}}). @@ -32,7 +33,6 @@ For a simple working example of this feature, you can refer to [our samples](https://github.com/localstack/localstack-pro-samples/tree/master/lambda-mounting-and-debugging). There, the necessary code fragments for enabling debugging are already present. - ### Debugging a Python Lambda in Visual Studio Code #### Configure LocalStack for VS Code remote Python debugging @@ -46,7 +46,8 @@ $ LAMBDA_DOCKER_FLAGS='-p 19891:19891' localstack start #### Preparing your code For providing the debug server, we use [`debugpy`](https://github.com/microsoft/debugpy) -inside the Lambda function code. In general, all you need is the following code +inside the Lambda function code. +In general, all you need is the following code fragment placed inside your handler code: ```python @@ -105,7 +106,8 @@ For attaching the debug server from Visual Studio Code, you need to add a run co } ``` -In the next step we create our function. In order to debug the function in Visual Studio Code, run the preconfigured remote debugger, which will wait about 15 seconds as defined above, and then invoke the function. +In the next step we create our function. +In order to debug the function in Visual Studio Code, run the preconfigured remote debugger, which will wait about 15 seconds as defined above, and then invoke the function. Make sure to set a breakpoint in the Lambda handler code first, which can then later be inspected. The screenshot below shows the triggered breakpoint with our `'Hello from LocalStack!'` in the variable inspection view: @@ -114,11 +116,12 @@ The screenshot below shows the triggered breakpoint with our `'Hello from LocalS #### Current Limitations -Due to the ports published by the lambda container for the debugger, you can currently only debug one Lambda at a time. Due to the port publishing, multiple concurrently running lambda environments are not supported. +Due to the ports published by the lambda container for the debugger, you can currently only debug one Lambda at a time. +Due to the port publishing, multiple concurrently running lambda environments are not supported. ### Debugging a Python Lambda in PyCharm Professional -Please be aware that [remote debugging in PyCharm](https://www.jetbrains.com/help/pycharm/remote-debugging-with-product.html) is only available in the Professional version. +Please be aware that [remote debugging in PyCharm](https://www.jetbrains.com/help/pycharm/remote-debugging-with-product.html) is only available in the Professional version. 
You do not need to change the `LAMBDA_DOCKER_FLAGS` when debugging with PyCharm Professional. @@ -126,7 +129,7 @@ You do not need to change the `LAMBDA_DOCKER_FLAGS` when debugging with PyCharm You can [follow the steps in the official docs](https://www.jetbrains.com/help/pycharm/remote-debugging-with-product.html#remote-debug-config), which will come down to: -* Create a debug configuration with the IDE host name `localhost` and the debug port `19891`. +* Create a debug configuration with the IDE host name `localhost` and the debug port `19891`. * Add path mapping with your project files on the host and map it to the remote directory `/var/task`. * Copy the `pip install` command, and make sure to install the correct `pydevd-pycharm` version for your PyCharm IDE. @@ -134,7 +137,8 @@ You can [follow the steps in the official docs](https://www.jetbrains.com/help/p #### Preparing your code -PyCharm provides its own debugging package, called `pydevd-pycharm`. Essentially, you will add the following code to your lambda: +PyCharm provides its own debugging package, called `pydevd-pycharm`. +Essentially, you will add the following code to your lambda: ```python import pydevd_pycharm @@ -142,7 +146,7 @@ pydevd_pycharm.settrace('host.docker.internal', port=19891, stdoutToServer=True, stderrToServer=True) ``` -The `host.docker.internal` is a [special DNS name by Docker](https://docs.docker.com/desktop/networking/#use-cases-and-workarounds-for-all-platforms) and will make sure that the lambda running in the docker can connect to PyCharm running on your Localhost. +The `host.docker.internal` is a [special DNS name by Docker](https://docs.docker.com/desktop/networking/#use-cases-and-workarounds-for-all-platforms) and will make sure that the lambda running in the docker can connect to PyCharm running on your Localhost. You can use the `wait_for_debug_client` and add it to your lambda (please adapt the path to your `venv` directory if necessary): @@ -160,14 +164,16 @@ def wait_for_debug_client(): stderrToServer=True) ``` -In the next step we create our function. In order to debug the function in PyCharm set a breakpoint in your function, run the Remote Debug configuration and then invoke the function. +In the next step we create our function. +In order to debug the function in PyCharm set a breakpoint in your function, run the Remote Debug configuration and then invoke the function. ![PyCharm Professional debugging](pycharm_lambda_debugging.png) ### Creating the Lambda function To create the Lambda function, you just need to take care of two things: -1. Deploy the function via an S3 Bucket. You need to use the magic variable `hot-reload` as the bucket name. +1. Deploy the function via an S3 Bucket. + You need to use the magic variable `hot-reload` as the bucket name. 2. Set the S3 key to the path of the directory your lambda function resides in. The handler is then referenced by the filename of your lambda code and the function in that code that should be invoked. @@ -264,8 +270,8 @@ Compared to the previous setup the "Wait Remote Debugger Server" run configurati For the Lambda function you will have to adjust the environment variable to `"_JAVA_OPTIONS": "-Xshare:off -agentlib:jdwp=transport=dt_socket,server=n,address=172.17.0.1:5050,suspend=y,onuncaught=n"`. Notice the `address=172.17.0.1:5050`. -Here we tell the Lambda function to connect to port 5050 on 172.17.0.1. When using Docker desktop you might have to set this to `address=host.docker.internal:5050` instead. 
- +Here we tell the Lambda function to connect to port 5050 on 172.17.0.1. +When using Docker desktop you might have to set this to `address=host.docker.internal:5050` instead. ### Configuring Visual Studio Code for remote JVM debugging @@ -329,7 +335,6 @@ services: - LAMBDA_DOCKER_FLAGS=-e NODE_OPTIONS=--inspect-brk=0.0.0.0:9229 -p 9229:9229 ``` - ### Configuring Visual Studio Code for remote Node.js debugging Add a new task by creating/modifying the `.vscode/tasks.json` file: @@ -369,6 +374,7 @@ then add the following configuration: ``` A simple example of a Node.js lambda, `myindex.js` could look like this: + ```js exports.handler = async (event) => { console.log(event); @@ -412,7 +418,6 @@ $ awslocal lambda invoke --function-name func1 \ {{% /tab %}} {{< /tabpane >}} - ## Resources * [Lambda Code Mounting and Debugging (Python)](https://github.com/localstack/localstack-pro-samples/tree/master/lambda-mounting-and-debugging) diff --git a/content/en/user-guide/lambda-tools/hot-reloading/index.md b/content/en/user-guide/lambda-tools/hot-reloading/index.md index a0e6ad792d..5152ce4b0a 100644 --- a/content/en/user-guide/lambda-tools/hot-reloading/index.md +++ b/content/en/user-guide/lambda-tools/hot-reloading/index.md @@ -65,7 +65,8 @@ $ chmod +x bin/watchman.sh {{< / command >}} Now configure your build tool to unzip the FatJar to some folder, which will be -then mounted to LocalStack. We are using `Gradle` build tool to unpack the +then mounted to LocalStack. +We are using `Gradle` build tool to unpack the `FatJar` into the `build/hot` folder: ```gradle @@ -84,7 +85,8 @@ $ bin/watchman.sh src "./gradlew buildHot" {{< / command >}} Please note that you still need to configure your deployment tool to use -local code mounting. Read the [Deployment Configuration Examples](#deployment-configuration-examples) +local code mounting. +Read the [Deployment Configuration Examples](#deployment-configuration-examples) for more information. ### Hot reloading for Python Lambdas @@ -92,7 +94,8 @@ for more information. We will show you how you can do this with a simple example function, taken directly from the [AWS Lambda developer guide](https://github.com/awsdocs/aws-doc-sdk-examples/blob/main/python/example_code/lambda/lambda_handler_basic.py). -You can check out that code, or use your own lambda functions to follow along. To use the example just do: +You can check out that code, or use your own lambda functions to follow along. +To use the example just do: {{< command >}} $ cd /tmp @@ -103,7 +106,8 @@ $ git clone git@github.com:awsdocs/aws-doc-sdk-examples.git To create the Lambda function, you just need to take care of two things: -1. Deploy via an S3 Bucket. You need to use the magic variable `hot-reload` as the bucket. +1. Deploy via an S3 Bucket. + You need to use the magic variable `hot-reload` as the bucket. 2. Set the S3 key to the path of the directory your lambda function resides in. The handler is then referenced by the filename of your lambda code and the function in that code that needs to be invoked. @@ -177,7 +181,8 @@ Cool! #### Usage with Virtualenv For [virtualenv](https://virtualenv.pypa.io)-driven projects, all dependencies should be made -available to the Python interpreter at runtime. There are different ways to achieve that, including: +available to the Python interpreter at runtime. 
+There are different ways to achieve that, including: * expanding the Python module search path in your Lambda handler * creating a watchman script to copy the libraries @@ -225,7 +230,8 @@ watch: .PHONY: build-hot watch ``` -To run the example above, run `make watch`. The script is copying the project module `PROJECT_MODULE_NAME` +To run the example above, run `make watch`. +The script is copying the project module `PROJECT_MODULE_NAME` along with all dependencies into the `build/hot` folder, which is then mounted into LocalStack's Lambda container. @@ -255,7 +261,8 @@ Install the the [@types/aws-lambda](https://www.npmjs.com/package/@types/aws-lam $ npm install -D @types/aws-lambda esbuild {{< / command >}} -Create a new file named `index.ts`. Add the following code to the new file: +Create a new file named `index.ts`. +Add the following code to the new file: ```ts import { Context, APIGatewayProxyResult, APIGatewayEvent } from 'aws-lambda'; @@ -292,8 +299,10 @@ $ npm run build To create the Lambda function, you need to take care of two things: -* Deploy via an S3 Bucket. You need to use the magic variable `hot-reload` as the bucket. -* Set the S3 key to the path of the directory your lambda function resides in. The handler is then referenced by the filename of your lambda code and the function in that code that needs to be invoked. +* Deploy via an S3 Bucket. + You need to use the magic variable `hot-reload` as the bucket. +* Set the S3 key to the path of the directory your lambda function resides in. + The handler is then referenced by the filename of your lambda code and the function in that code that needs to be invoked. Create the Lambda Function using the `awslocal` CLI: @@ -347,7 +356,9 @@ The `output.txt` file contains the following: The Lambda function is now mounted as a file in the executing container, hence any change that we save on the file will be there in an instant. -Change the `Hello World!` message to `Hello LocalStack!` and run `npm run build`. Trigger the Lambda once again. You will see the following in the `output.txt` file: +Change the `Hello World!` message to `Hello LocalStack!` and run `npm run build`. +Trigger the Lambda once again. +You will see the following in the `output.txt` file: ```sh {"statusCode":200,"body":"{\"message\":\"Hello LocalStack!\"}"} @@ -355,7 +366,8 @@ Change the `Hello World!` message to `Hello LocalStack!` and run `npm run build` #### Webpack -In this example, you can use our public [Webpack example](https://github.com/localstack-samples/localstack-pro-samples/tree/master/lambda-hot-reloading/lambda-typescript-webpack) to create a simple Lambda function using TypeScript and Webpack. To use the example, run the following commands: +In this example, you can use our public [Webpack example](https://github.com/localstack-samples/localstack-pro-samples/tree/master/lambda-hot-reloading/lambda-typescript-webpack) to create a simple Lambda function using TypeScript and Webpack. +To use the example, run the following commands: {{< command >}} $ cd /tmp @@ -437,6 +449,7 @@ custom: mountCode: true # or if you need to enable code mounting only for specific stages + custom: stages: local: @@ -553,9 +566,12 @@ EOF } resource "aws_lambda_function" "exampleFunctionOne" { - s3_bucket = var.STAGE == "local" ? "hot-reload" : null - s3_key = var.STAGE == "local" ? var.LAMBDA_MOUNT_CWD : null - filename = var.STAGE == "local" ? null : var.JAR_PATH + s3_bucket = var.STAGE == "local" ? +"hot-reload" : null + s3_key = var.STAGE == "local" ? 
+var.LAMBDA_MOUNT_CWD : null + filename = var.STAGE == "local" ? +null : var.JAR_PATH function_name = "ExampleFunctionOne" role = aws_iam_role.lambda-execution-role.arn handler = "org.localstack.sampleproject.api.LambdaApi" diff --git a/content/en/user-guide/lambda-tools/vscode-extension/index.md b/content/en/user-guide/lambda-tools/vscode-extension/index.md index 8b728e3f04..a97f2882cd 100644 --- a/content/en/user-guide/lambda-tools/vscode-extension/index.md +++ b/content/en/user-guide/lambda-tools/vscode-extension/index.md @@ -19,7 +19,9 @@ aliases: ## Getting Started -You can use a [sample project](https://github.com/joe4dev/lambda-python) to get started with the extension. The sample project contains a simple Lambda function and a SAM template. Clone the repository and open the project in VSCode. +You can use a [sample project](https://github.com/joe4dev/lambda-python) to get started with the extension. +The sample project contains a simple Lambda function and a SAM template. +Clone the repository and open the project in VSCode. {{< command >}} $ git clone https://github.com/joe4dev/lambda-python.git @@ -29,16 +31,14 @@ $ code . Install the [LocalStack VSCode Extension](https://marketplace.visualstudio.com/items?itemName=localstack.localstack) as recommended by the project. -You can now open the Python handler function under `app/hello_world.py`. Click the CodeLens **Deploy Lambda function**, select the `template.yaml`, and choose a stack name such as `my-stack`. - +You can now open the Python handler function under `app/hello_world.py`. +Click the CodeLens **Deploy Lambda function**, select the `template.yaml`, and choose a stack name such as `my-stack`. Deploying Lambda function via the VS Code Extension

- Click the CodeLens **Invoke Lambda function** and pick the stack name `my-stack` and the function `hello-world-function`. - Invoking Lambda function via the VS Code Extension
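If you want to double-check the two CodeLens steps above from a terminal, a short `awslocal` session along these lines should work (the function name is the `hello-world-function` picked above; the output path is only an example):

```bash
# list the functions deployed to LocalStack, then invoke the one created above
awslocal lambda list-functions --query 'Functions[].FunctionName'
awslocal lambda invoke --function-name hello-world-function /tmp/extension-invoke.json
cat /tmp/extension-invoke.json
```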
diff --git a/content/en/user-guide/security-testing/_index.md b/content/en/user-guide/security-testing/_index.md index 1d26d33996..ed1f6b1974 100644 --- a/content/en/user-guide/security-testing/_index.md +++ b/content/en/user-guide/security-testing/_index.md @@ -10,7 +10,8 @@ cascade: ## Introduction -Security Testing in LocalStack enables you to enforce your IAM permissions allowing you to test your security policies and create a more realistic environment that more closely resembles the real AWS. Security Testing in LocalStack encompasses the following features: +Security Testing in LocalStack enables you to enforce your IAM permissions allowing you to test your security policies and create a more realistic environment that more closely resembles the real AWS. +Security Testing in LocalStack encompasses the following features: - Enforce IAM policies & permissions in your setup to test your application security. - Retrieve IAM policy engine logs to gain visibility into the policy evaluation. diff --git a/content/en/user-guide/security-testing/explainable-iam/index.md b/content/en/user-guide/security-testing/explainable-iam/index.md index b7f915c77d..87016998f8 100644 --- a/content/en/user-guide/security-testing/explainable-iam/index.md +++ b/content/en/user-guide/security-testing/explainable-iam/index.md @@ -8,7 +8,8 @@ tags: ["Pro image"] ## Introduction -The IAM Policy Engine logs output related to failed policy evaluation directly to the LocalStack logs. You can enable `DEBUG=1` to gain visibility into these log messages, allowing you to identify the additional policies required for your request to succeed. +The IAM Policy Engine logs output related to failed policy evaluation directly to the LocalStack logs. +You can enable `DEBUG=1` to gain visibility into these log messages, allowing you to identify the additional policies required for your request to succeed. ## Getting started @@ -20,7 +21,8 @@ Start your LocalStack container with the `DEBUG=1` and `ENFORCE_IAM=1` environme $ DEBUG=1 ENFORCE_IAM=1 localstack start {{< /command >}} -In this guide, we will create a policy for creating Lambda functions by only allowing the `lambda:CreateFunction` permission. However we have not included the `iam:PassRole` permission, and we will use the Policy Engine's log to point out adding the necessary permission. +In this guide, we will create a policy for creating Lambda functions by only allowing the `lambda:CreateFunction` permission. +However we have not included the `iam:PassRole` permission, and we will use the Policy Engine's log to point out adding the necessary permission. ### Create a new user @@ -97,11 +99,14 @@ DEBUG:localstack_ext.services.iam.policy_engine.handler: 1 permissions have been DEBUG:localstack_ext.services.iam.policy_engine.handler: 1 permissions have been implicitly denied: ["Action 'iam:PassRole' for 'arn:aws:iam::000000000000:role/lambda-role'"] ``` -Upon examination, it becomes apparent that the action `iam:PassRole` is not allowed; rather, it is implicitly denied for your user concerning the resource `arn:aws:iam::000000000000:role/lambda-role`. This implies that there is no explicit deny statement in the relevant policies, but there is also no allow statement, resulting in the implicit denial of the action. You can incorporate this action into the policy. +Upon examination, it becomes apparent that the action `iam:PassRole` is not allowed; rather, it is implicitly denied for your user concerning the resource `arn:aws:iam::000000000000:role/lambda-role`. 
+This implies that there is no explicit deny statement in the relevant policies, but there is also no allow statement, resulting in the implicit denial of the action. +You can incorporate this action into the policy. ### Incorporate the action into the policy -For illustrative purposes, we will keep the example straightforward, using the same wildcard resource. Edit the `policy_1.json` file to include the `iam:PassRole` action: +For illustrative purposes, we will keep the example straightforward, using the same wildcard resource. +Edit the `policy_1.json` file to include the `iam:PassRole` action: ```json { @@ -117,10 +122,11 @@ For illustrative purposes, we will keep the example straightforward, using the s } ``` -Re-run the Lambda [`CreateFunction`](https://docs.aws.amazon.com/lambda/latest/dg/API_CreateFunction.html) API. You will notice that the request is now successful, and the function is created. +Re-run the Lambda [`CreateFunction`](https://docs.aws.amazon.com/lambda/latest/dg/API_CreateFunction.html) API. +You will notice that the request is now successful, and the function is created. ## Soft Mode -Enabling `IAM_SOFT_MODE=1` allows you to review the logs and assess whether your requests would have been denied or granted while executing your entire stack without disruptions. +Enabling `IAM_SOFT_MODE=1` allows you to review the logs and assess whether your requests would have been denied or granted while executing your entire stack without disruptions. Using this, you can avoid the need for redeployment to address each missing permission individually, streamlining the debugging process and enhancing the efficiency of your IAM configurations. diff --git a/content/en/user-guide/security-testing/iam-enforcement/index.md b/content/en/user-guide/security-testing/iam-enforcement/index.md index 3200e53ed4..f655b54c6b 100644 --- a/content/en/user-guide/security-testing/iam-enforcement/index.md +++ b/content/en/user-guide/security-testing/iam-enforcement/index.md @@ -8,7 +8,9 @@ tags: ["Pro image"] ## Introduction -IAM Policy Enforcement feature can be used to test your security policies and create a more realistic environment that more closely resembles real AWS. The environment configuration `ENFORCE_IAM=1` is required while starting LocalStack to enable this feature. Per default, IAM enforcement is disabled, and all APIs can be accessed without authentication. +IAM Policy Enforcement feature can be used to test your security policies and create a more realistic environment that more closely resembles real AWS. +The environment configuration `ENFORCE_IAM=1` is required while starting LocalStack to enable this feature. +Per default, IAM enforcement is disabled, and all APIs can be accessed without authentication. ## Getting started @@ -20,13 +22,15 @@ Start your LocalStack container with the `DEBUG=1` and `ENFORCE_IAM=1` environme $ DEBUG=1 ENFORCE_IAM=1 localstack start {{< /command >}} -We will demonstrate IAM Policy Enforcement, by creating a user and obtaining the access/secret keys. We will make an attempt to create a bucket using the user’s credentials, which inevitably fails due to insufficient permissions. +We will demonstrate IAM Policy Enforcement, by creating a user and obtaining the access/secret keys. +We will make an attempt to create a bucket using the user’s credentials, which inevitably fails due to insufficient permissions. 
Lastly, a policy is attached to the user, granting the necessary `s3:CreateBucket` permission, thereby enabling the successful creation of the bucket. ### Create a user -To follow this guide, open two separate terminal sessions: **Terminal 1** for the administrative IAM commands, which will utilize the default root IAM user, and **Terminal 2** for executing the commands under the test IAM user you are about to create. This way, we can demonstrate the differentiation in access permissions between the administrative and test users in real-time. +To follow this guide, open two separate terminal sessions: **Terminal 1** for the administrative IAM commands, which will utilize the default root IAM user, and **Terminal 2** for executing the commands under the test IAM user you are about to create. +This way, we can demonstrate the differentiation in access permissions between the administrative and test users in real-time. In **Terminal 1**, execute the following commands to create a `test` user and obtain the access/secret keys: @@ -59,7 +63,8 @@ $ awslocal iam create-access-key --user-name test ### Attempt to create a bucket -Navigate to **Terminal 2**, where we will configure the access keys for the user `test` in the environment. Once the access keys are set, you will attempt to create an S3 bucket using these credentials. +Navigate to **Terminal 2**, where we will configure the access keys for the user `test` in the environment. +Once the access keys are set, you will attempt to create an S3 bucket using these credentials. {{< command >}} $ export AWS_ACCESS_KEY_ID=LKIAQAAAAAAAHFR7QTN3 AWS_SECRET_ACCESS_KEY=EYUHpIol7bRJpKd/28c/LI2C4bbEnp82LJCRwXRV @@ -69,7 +74,8 @@ make_bucket failed: s3://mybucket An error occurred (AccessDeniedException) when
{{< / command >}} -As anticipated, the attempt to create the bucket fails with an `AccessDeniedException` error, confirming that user `test` lacks the necessary permissions for this action. You can view the LocalStack logs to validate the policy enforcement: +As anticipated, the attempt to create the bucket fails with an `AccessDeniedException` error, confirming that user `test` lacks the necessary permissions for this action. +You can view the LocalStack logs to validate the policy enforcement: ```bash 2023-11-03T12:21:10.971 INFO --- [ asgi_gw_1] l.s.i.p.handler : Request for service 's3' by principal 'arn:aws:iam::000000000000:user/test' for operation 'CreateBucket' denied. @@ -96,7 +102,8 @@ make_bucket: mybucket
{{< / command >}} -The bucket creation succeeds, confirming that the user `test` now has the necessary permissions to perform this action. You can view the LocalStack logs to validate the policy enforcement: +The bucket creation succeeds, confirming that the user `test` now has the necessary permissions to perform this action. +You can view the LocalStack logs to validate the policy enforcement: ```bash 2023-11-03T12:23:11.469 INFO --- [ asgi_gw_1] localstack.request.aws : AWS iam.CreatePolicy => 200 @@ -104,4 +111,5 @@ The bucket creation succeeds, confirming that the user `test` now has the necess 2023-11-03T12:23:22.795 INFO --- [ asgi_gw_2] localstack.request.aws : AWS s3.CreateBucket => 200 ``` -You can further use the IAM Policy Enforcement feature to test your Infrastructure as Code (IaC) deployments and ensure that your policies are correctly enforced. If the IAM policies are not correctly enforced, you will get an unsuccessful response from the API call, and the LocalStack logs will provide you with the necessary information to debug the issue. +You can further use the IAM Policy Enforcement feature to test your Infrastructure as Code (IaC) deployments and ensure that your policies are correctly enforced. +If the IAM policies are not correctly enforced, you will get an unsuccessful response from the API call, and the LocalStack logs will provide you with the necessary information to debug the issue. diff --git a/content/en/user-guide/security-testing/iam-policy-stream/index.md b/content/en/user-guide/security-testing/iam-policy-stream/index.md index a217427feb..44bfe492c2 100644 --- a/content/en/user-guide/security-testing/iam-policy-stream/index.md +++ b/content/en/user-guide/security-testing/iam-policy-stream/index.md @@ -8,19 +8,24 @@ tags: ["Pro image"] ## Introduction -The IAM Policy Stream generates a steady stream of policies along with their corresponding principals or resources. When a request is made, it initially displays the principal or resource to which the policy will be attached. This is typically a service resource for resource-based policies, or an IAM principal for other cases. Subsequently, it displays the suggested policy. This feature aids in identifying the correct permissions for cloud applications and can help spot logical errors, such as unexpected actions in a policy. +The IAM Policy Stream generates a steady stream of policies along with their corresponding principals or resources. +When a request is made, it initially displays the principal or resource to which the policy will be attached. +This is typically a service resource for resource-based policies, or an IAM principal for other cases. +Subsequently, it displays the suggested policy. +This feature aids in identifying the correct permissions for cloud applications and can help spot logical errors, such as unexpected actions in a policy. ## Getting started -This guide is designed for users who are new to the IAM Policy Stream. It assumes you have basic knowledge of the AWS CLI (and our [`awslocal`](https://github.com/localstack/awscli-local) wrapper script). +This guide is designed for users who are new to the IAM Policy Stream. +It assumes you have basic knowledge of the AWS CLI (and our [`awslocal`](https://github.com/localstack/awscli-local) wrapper script). ### Start your LocalStack container To experiment with the IAM Policy Stream, initiate LocalStack using these flags: -1. Enable debugging: `DEBUG=1` -2. Set your LocalStack API key: `LOCALSTACK_API_KEY=` -3. 
Set the IAM Soft Mode: `IAM_SOFT_MODE=1` +1. Enable debugging: `DEBUG=1` +2. Set your LocalStack API key: `LOCALSTACK_API_KEY=` +3. Set the IAM Soft Mode: `IAM_SOFT_MODE=1` You can execute the following command in your terminal to start your LocalStack container: @@ -38,13 +43,15 @@ $ localstack aws iam stream ### Create AWS Resources -In a separate terminal tab, we will create AWS resources to observe the necessary policies for them. In this example, we are creating an SNS topic using the following command: +In a separate terminal tab, we will create AWS resources to observe the necessary policies for them. +In this example, we are creating an SNS topic using the following command: {{< command >}} $ awslocal sns create-topic --name test-topic {{< /command >}} -In the other tab, the required policy will be generated. This policy can then be attached to an IAM role, enabling it to create the resource. +In the other tab, the required policy will be generated. +This policy can then be attached to an IAM role, enabling it to create the resource. ```bash Attached to identity: "arn:aws:iam::000000000000:root" @@ -65,15 +72,17 @@ Policy: ## Web Application -The LocalStack Web Application includes an IAM Policy Stream dashboard, which allows you to discover the necessary permissions for AWS API calls. The Web Application provides the following features: +The LocalStack Web Application includes an IAM Policy Stream dashboard, which allows you to discover the necessary permissions for AWS API calls. +The Web Application provides the following features: -1. Provides a live display of API calls and the specific policies each call generates. -2. Offers a real-time summary policy, merging all individual policies into one consolidated policy. -3. Includes a feature to activate or deactivate this functionality on-the-fly for performance tuning. -4. Presents an option to reset the stream, facilitating a clean slate to generate new policies. +1. Provides a live display of API calls and the specific policies each call generates. +2. Offers a real-time summary policy, merging all individual policies into one consolidated policy. +3. Includes a feature to activate or deactivate this functionality on-the-fly for performance tuning. +4. Presents an option to reset the stream, facilitating a clean slate to generate new policies. {{< callout "tip" >}} -You don't need to set additional configuration variables, such as `DEBUG=1` or `IAM_SOFT_MODE=1`, when using the IAM Policy Stream with Web Application. However, it won't enforce policies or print IAM-related logs in the LocalStack container. +You don't need to set additional configuration variables, such as `DEBUG=1` or `IAM_SOFT_MODE=1`, when using the IAM Policy Stream with Web Application. +However, it won't enforce policies or print IAM-related logs in the LocalStack container. {{< /callout >}} To use this feature, open the LocalStack Web Application in your browser, go to the IAM Policy Stream section, and click on **Enable** to view the **Summary Policy** and **Output**. diff --git a/content/en/user-guide/state-management/_index.md b/content/en/user-guide/state-management/_index.md index d955071504..187accbd06 100644 --- a/content/en/user-guide/state-management/_index.md +++ b/content/en/user-guide/state-management/_index.md @@ -8,7 +8,9 @@ aliases: - /user-guide/cloud-pods/ --- -State Management in LocalStack allows you to save and load the state of your LocalStack instance. 
LocalStack is ephemeral in nature, so when you stop and restart your LocalStack instance, all the data is lost. With State Management, you can save the state of your LocalStack instance and load it back when you restart your LocalStack instance. +State Management in LocalStack allows you to save and load the state of your LocalStack instance. +LocalStack is ephemeral in nature, so when you stop and restart your LocalStack instance, all the data is lost. +With State Management, you can save the state of your LocalStack instance and load it back when you restart your LocalStack instance. State Management in LocalStack encompasses the following features: @@ -18,11 +20,13 @@ State Management in LocalStack encompasses the following features: Anything that is inside a LocalStack container constitutes the "state.” The "state" can either be persisted on your local machine and be loaded at startup (persistence), exported anytime as a single local file (state export), or stored on the LocalStack platform (cloud pods). -The underlying mechanisms of all three solutions are similar, but the layout obviously differs. We can observe how using state -locally and Cloud Pods are very similar, as they look like `local` and `remote` versions of the same bundle. To get a better understanding of how +The underlying mechanisms of all three solutions are similar, but the layout obviously differs. +We can observe how using state +locally and Cloud Pods are very similar, as they look like `local` and `remote` versions of the same bundle. +To get a better understanding of how these three features differ, they can be illustrated as following: -The difference between persistence, local state and Cloud Pods. State Management is an essential feature that supports various use-cases, such as pre-seeding your fresh LocalStack instance with data, sharing your LocalStack instance's state with your team, fostering collaboration, and more. diff --git a/content/en/user-guide/state-management/cloud-pods/index.md b/content/en/user-guide/state-management/cloud-pods/index.md index dc517813f1..94a44c51c7 100644 --- a/content/en/user-guide/state-management/cloud-pods/index.md +++ b/content/en/user-guide/state-management/cloud-pods/index.md @@ -11,20 +11,25 @@ tags: ["Teams plan"] ## Introduction -Cloud pods are persistent state snapshots of your LocalStack instance that can easily be stored, versioned, shared, and restored. Cloud Pods can be used for various purposes, such as: +Cloud pods are persistent state snapshots of your LocalStack instance that can easily be stored, versioned, shared, and restored. +Cloud Pods can be used for various purposes, such as: -- Save and manage snapshots of active LocalStack instances. -- Share state snapshots with your team to debug collectively. -- Automate your testing pipelines by pre-seeding CI environments. -- Create reproducible development and testing environments locally. +- Save and manage snapshots of active LocalStack instances. +- Share state snapshots with your team to debug collectively. +- Automate your testing pipelines by pre-seeding CI environments. +- Create reproducible development and testing environments locally. Cloud Pods Web UI ## Installation -You can save and load the persistent state of Cloud Pods, you can use the [Cloud Pods command-line interface (CLI)]({{< ref "pods-cli" >}}). LocalStack provides a remote storage backend that can be used to store the state of your running application and share it with your team members. 
You can interact with the Cloud Pods over the storage backend via the LocalStack Web Application. +You can save and load the persistent state of Cloud Pods, you can use the [Cloud Pods command-line interface (CLI)]({{< ref "pods-cli" >}}). +LocalStack provides a remote storage backend that can be used to store the state of your running application and share it with your team members. +You can interact with the Cloud Pods over the storage backend via the LocalStack Web Application. -Cloud Pods CLI is included in the [LocalStack CLI installation](https://docs.localstack.cloud/getting-started/installation/#localstack-cli), so there's no need for additional installations to begin using it. If you're a licensed user, we suggest setting the `LOCALSTACK_AUTH_TOKEN` as an environment variable. This enables you to access the complete range of LocalStack Cloud Pods features. +Cloud Pods CLI is included in the [LocalStack CLI installation](https://docs.localstack.cloud/getting-started/installation/#localstack-cli), so there's no need for additional installations to begin using it. +If you're a licensed user, we suggest setting the `LOCALSTACK_AUTH_TOKEN` as an environment variable. +This enables you to access the complete range of LocalStack Cloud Pods features. You can access the Cloud Pods CLI by running the `pod` command from your terminal. @@ -49,18 +54,21 @@ Commands: {{< / command >}} {{< callout >}} -These Cloud Pods are securely stored within an AWS storage backend, where each user or organization is allocated a dedicated and isolated S3 bucket. The LocalStack Cloud Pods CLI utilizes secure S3 presigned URLs to directly interface with the S3 bucket, bypassing the need to transmit the state files through LocalStack Platform APIs. +These Cloud Pods are securely stored within an AWS storage backend, where each user or organization is allocated a dedicated and isolated S3 bucket. +The LocalStack Cloud Pods CLI utilizes secure S3 presigned URLs to directly interface with the S3 bucket, bypassing the need to transmit the state files through LocalStack Platform APIs. {{< /callout >}} ## Getting started This guide is designed for users new to Cloud Pods and assumes basic knowledge of the LocalStack CLI and our [`awslocal`](https://github.com/localstack/awscli-local) wrapper script. -Start your LocalStack container using your preferred method. We will demonstrate how you can save a snapshot of your active LocalStack instance into your LocalStack account, and pull it to a running instance. +Start your LocalStack container using your preferred method. +We will demonstrate how you can save a snapshot of your active LocalStack instance into your LocalStack account, and pull it to a running instance. ### Create AWS resources -You can use the `awslocal` CLI to create new AWS resources within your active LocalStack instance. For example, you can create an S3 bucket and add data to it using the `awslocal` CLI: +You can use the `awslocal` CLI to create new AWS resources within your active LocalStack instance. +For example, you can create an S3 bucket and add data to it using the `awslocal` CLI: {{< command >}} $ awslocal s3 mb s3://test @@ -71,7 +79,8 @@ $ awslocal s3 ls s3://test/ ### Save your Cloud Pod state -You can now save your Pod state using the `save` command, specifying the desired Cloud Pod name as the first argument. 
This action will save the pod and register it with the LocalStack Web Application: +You can now save your Pod state using the `save` command, specifying the desired Cloud Pod name as the first argument. +This action will save the pod and register it with the LocalStack Web Application: {{< command >}} $ localstack pod save s3-test @@ -129,7 +138,8 @@ $ localstack pod versions s3-test ### Pull your Pod state -On a separate machine, start LocalStack while ensuring the auth token is properly configured. Then, retrieve the previously created Cloud Pod by employing the `load` command, specifying the Cloud Pod name as the first argument: +On a separate machine, start LocalStack while ensuring the auth token is properly configured. +Then, retrieve the previously created Cloud Pod by employing the `load` command, specifying the Cloud Pod name as the first argument: {{< command >}} $ localstack pod load s3-test @@ -180,7 +190,8 @@ $ localstack state inspect --format json {{< / command >}} -For comprehensive instructions, navigate to our [Command-Line Interface (CLI) Guide]({{< ref "pods-cli" >}}). To access your Cloud Pods through the LocalStack Web Application, navigate to the [Cloud Pods browser](https://app.localstack.cloud/pods). +For comprehensive instructions, navigate to our [Command-Line Interface (CLI) Guide]({{< ref "pods-cli" >}}). +To access your Cloud Pods through the LocalStack Web Application, navigate to the [Cloud Pods browser](https://app.localstack.cloud/pods). {{< callout >}} Permission on Cloud Pods are assigned at organization level. @@ -248,7 +259,10 @@ In addition to loading Cloud Pods through the Command-Line Interface (CLI) or th To automatically load a Cloud Pod at startup, utilize the `AUTO_LOAD_POD` [configuration variable](https://docs.localstack.cloud/references/configuration/). -`AUTO_LOAD_POD` can accept multiple Cloud Pod names separated by commas. To autoload multiple Cloud Pods, such as `foo-pod` and `bar-pod`, use: `AUTO_LOAD_POD=foo-pod,bar-pod`. The order of Cloud Pods in `AUTO_LOAD_POD` dictates their loading sequence. When autoloading multiple Cloud Pods, later pods might overwrite the state of earlier ones if they share the same service, account, and region. +`AUTO_LOAD_POD` can accept multiple Cloud Pod names separated by commas. +To autoload multiple Cloud Pods, such as `foo-pod` and `bar-pod`, use: `AUTO_LOAD_POD=foo-pod,bar-pod`. +The order of Cloud Pods in `AUTO_LOAD_POD` dictates their loading sequence. +When autoloading multiple Cloud Pods, later pods might overwrite the state of earlier ones if they share the same service, account, and region. {{< tabpane lang="bash" >}} {{< tab header="LocalStack CLI" lang="bash" >}} @@ -329,12 +343,14 @@ services: ## Remotes -A remote is the location where Cloud Pods are stored. By default, Cloud Pod artifacts are stored in the LocalStack platform. However, if your organization's data regulations or sovereignty requirements prohibit storing Cloud Pod assets in a remote storage infrastructure, you have the option to persist Cloud Pods in an on-premises storage location under your complete control. +A remote is the location where Cloud Pods are stored. +By default, Cloud Pod artifacts are stored in the LocalStack platform. +However, if your organization's data regulations or sovereignty requirements prohibit storing Cloud Pod assets in a remote storage infrastructure, you have the option to persist Cloud Pods in an on-premises storage location under your complete control. 
LocalStack provides two types of alternative remotes: -- S3 bucket remote storage. -- [ORAS](https://oras.land/) (OCI Registry as Storage) remote storage. +- S3 bucket remote storage. +- [ORAS](https://oras.land/) (OCI Registry as Storage) remote storage. Cloud Pods command-line interface (CLI) allows you to create, delete, and list remotes. @@ -357,7 +373,8 @@ Commands: ### S3 bucket remote storage -The S3 remote enables you to store Cloud Pod assets in an existing S3 bucket within an actual AWS account. The initial step is to export the necessary AWS credentials within the terminal session. +The S3 remote enables you to store Cloud Pod assets in an existing S3 bucket within an actual AWS account. +The initial step is to export the necessary AWS credentials within the terminal session. {{< callout >}} The Cloud Pods S3 remote is currently _only_ available when [installing the `localstack` CLI via `pip`](https://docs.localstack.cloud/getting-started/installation/#localstack-cli), and not for the binary CLI distribution. @@ -370,7 +387,8 @@ export AWS_SECRET_ACCESS_KEY=... A possible option is to obtain credentials via [AWS SSO CLI](https://github.com/synfinatic/aws-sso-cli). -Next, we establish a new remote specifically designed for an S3 bucket. By running the following command, we create a remote named `s3-storage-aws` responsible for storing Cloud Pod artifacts in an S3 bucket called `ls-pods-bucket-test`. +Next, we establish a new remote specifically designed for an S3 bucket. +By running the following command, we create a remote named `s3-storage-aws` responsible for storing Cloud Pod artifacts in an S3 bucket called `ls-pods-bucket-test`. The `access_key_id` and `secret_access_key` placeholders ensure the correct transmission of AWS credentials to the container. @@ -411,11 +429,13 @@ If you experience any difficulties, update your [LocalStack CLI](https://docs.lo ### ORAS remote storage -The ORAS remote enables users to store Cloud Pods in OCI-compatible registries like Docker Hub, Nexus, or ECS registries. ORAS stands for "OCI Registry as Service," and you can find additional information about this standard [on the official website](https://oras.land/). +The ORAS remote enables users to store Cloud Pods in OCI-compatible registries like Docker Hub, Nexus, or ECS registries. +ORAS stands for "OCI Registry as Service," and you can find additional information about this standard [on the official website](https://oras.land/). For example, let's illustrate how you can utilize Docker Hub to store and retrieve Cloud Pods. -To begin, you must configure the new remote using the LocalStack CLI. You'll need to export two essential environment variables, `ORAS_USERNAME` and `ORAS_PASSWORD`, which are necessary for authenticating with Docker Hub. +To begin, you must configure the new remote using the LocalStack CLI. +You'll need to export two essential environment variables, `ORAS_USERNAME` and `ORAS_PASSWORD`, which are necessary for authenticating with Docker Hub. ```bash export ORAS_USERNAME=docker_hub_id @@ -455,6 +475,7 @@ We advise to create a strong passphrase by using the `openssl` utility, e,g.: {{< command >}} $ openssl rand --base64 32 # 3X03eU5pgoejObUR+Y8I4QjbjeGEKjDcmVFd0FU5pCg= + {{< / command >}} Users should treat the generated passphrase as a secret and they are responsible for securely sharing it within the organization. 
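Since the passphrase has to be shared out of band and treated as a secret, one simple approach is to write it straight into a permission-restricted file instead of echoing it to the terminal; the file name below is only an example:

```bash
# generate the passphrase into a file that only the current user can read
openssl rand -base64 32 > cloud-pod-passphrase.txt
chmod 600 cloud-pod-passphrase.txt
```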
@@ -482,19 +503,25 @@ The process is the following: ### Limitations - Both browsing the Cloud Pod content via the UI and loading Cloud Pods into ephemeral instances are currently not supported for encrypted Cloud Pods. -- It is not possible to have both encrypted and non-encrypted versions for a Cloud Pod. Encryption is set at the moment of the creation and it cannot be changed. +- It is not possible to have both encrypted and non-encrypted versions for a Cloud Pod. + Encryption is set at the moment of the creation and it cannot be changed. ### Miscellaneous -Unless explicitly specified, all Cloud Pods commands default to targeting the LocalStack Platform as the storage remote. It's important to note that the CLI must be authenticated correctly with our Platform. +Unless explicitly specified, all Cloud Pods commands default to targeting the LocalStack Platform as the storage remote. +It's important to note that the CLI must be authenticated correctly with our Platform. -Custom remote configurations are stored within the [LocalStack volume directory](https://docs.localstack.cloud/references/filesystem/#localstack-volume-directory) and are managed by the LocalStack container. Consequently, when sharing Cloud Pods among your team using a custom remote, each team member must define the identical remote configuration. Once added, a remote persists even after LocalStack restarts. +Custom remote configurations are stored within the [LocalStack volume directory](https://docs.localstack.cloud/references/filesystem/#localstack-volume-directory) and are managed by the LocalStack container. +Consequently, when sharing Cloud Pods among your team using a custom remote, each team member must define the identical remote configuration. +Once added, a remote persists even after LocalStack restarts. ## Cloud Pods & Persistence -[Persistence]({{< ref "persistence" >}}) ensures that the service state persists across container restarts. You can enable persistence via a LocalStack config flag `PERSISTENCE=1` to restore your local resources, in case you’re stopping and re-starting the LocalStack instance on the same machine. +[Persistence]({{< ref "persistence" >}}) ensures that the service state persists across container restarts. +You can enable persistence via a LocalStack config flag `PERSISTENCE=1` to restore your local resources, in case you’re stopping and re-starting the LocalStack instance on the same machine. -In contrast, Cloud Pods provide more detailed control over your state. Rather than just restoring a state during LocalStack restarts, Cloud Pods enable you to capture snapshots of your local instance using the `save` command and inject these snapshots into a running instance using the `load` command, all without needing to perform a full restart. +In contrast, Cloud Pods provide more detailed control over your state. +Rather than just restoring a state during LocalStack restarts, Cloud Pods enable you to capture snapshots of your local instance using the `save` command and inject these snapshots into a running instance using the `load` command, all without needing to perform a full restart. ### Current Limitations @@ -505,8 +532,11 @@ We detect version miss-matches when using the `pod load` and prompt a confirmati {{< command >}} $ localstack pod load old-pod -This Cloud Pod was created with LocalStack 2.1.0. but you are running LocalStack 3.2.1. Cloud Pods might be incompatible across different LocalStack versions. 
-Loading a Cloud Pod with mismatching version might lead to a corrupted state of the emulator. Do you want to continue? [y/N]: +This Cloud Pod was created with LocalStack 2.1.0. +but you are running LocalStack 3.2.1. +Cloud Pods might be incompatible across different LocalStack versions. +Loading a Cloud Pod with mismatching version might lead to a corrupted state of the emulator. +Do you want to continue? [y/N]: {{< / command >}} @@ -523,10 +553,11 @@ When you try to save a Cloud Pod and see the error in LocalStack logs like this: localstack.cli.exceptions.CLIError: Failed to create Cloud Pod sample-pod ❌ - Unable to obtain auth token (code 401) - please log in again. ``` -It would be good to check if you have outdated authentication credentials (bearer token from a previous LocalStack login) in the `remotes.yaml` file for cloud pods. You have two options to fix this: +It would be good to check if you have outdated authentication credentials (bearer token from a previous LocalStack login) in the `remotes.yaml` file for cloud pods. +You have two options to fix this: -1. Run another `localstack auth login` command. -2. Find the `remotes.yaml` file in the `` directory on your machine and delete the file, or at least remove the `"default"` entry from it. +1. Run another `localstack auth login` command. +2. Find the `remotes.yaml` file in the `` directory on your machine and delete the file, or at least remove the `"default"` entry from it. Additionally, if there is a `~/.localstack/auth.json` file in your home directory, delete it as well if it still exists. @@ -538,10 +569,11 @@ When you try to save a Cloud Pod and see the `license.not_found` error in LocalS lsmulti-localstack | 2024-03-15T13:06:16.358 WARN --- [functhread31] l.p.remotes.remotes : Failed to register pod sample-pod: {"error": true, "message": "licensing.license.not_found"} ``` -To fix this, clear the LocalStack cache directory and restart the LocalStack instance before trying to save the Cloud Pod again. You can find the cache directories at: +To fix this, clear the LocalStack cache directory and restart the LocalStack instance before trying to save the Cloud Pod again. +You can find the cache directories at: -- `/Users/localstack/Library/Caches/localstack` -- `/Users/localstack/Library/Caches/localstack-cli` +- `/Users/localstack/Library/Caches/localstack` +- `/Users/localstack/Library/Caches/localstack-cli` Adjust the path based on your operating system. @@ -553,4 +585,6 @@ If you get an SSL certificate verification error while trying to save a Cloud Po An error occurred while checking remote management for pod "cloud-pod-product-app": "MyHTTPSConnectionPool(host='api.localstack.cloud', port=443): Max retries exceeded with url: /v1/cloudpods/cloud-pod-product-app (Caused by SSLError(SSLCertVerificationError(1, "[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: Hostname mismatch, certificate is not valid for 'api.localstack.cloud'. (_ssl.c:1006)")))" ``` -Check if your machine's clock is set incorrectly or if the certificate store is outdated. This error can also occur if you use `localstack` as `LOCALSTACK_HOST`. In this case, the DNS incorrectly resolves `api.localstack.cloud` to `localhost`, causing a certificate mismatch. +Check if your machine's clock is set incorrectly or if the certificate store is outdated. +This error can also occur if you use `localstack` as `LOCALSTACK_HOST`. +In this case, the DNS incorrectly resolves `api.localstack.cloud` to `localhost`, causing a certificate mismatch. 
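For the authentication-related issues above, the manual cleanup can be condensed into a few shell commands; this is only a sketch: the cache paths are the macOS locations mentioned in this section, and the `remotes.yaml` file sits in your LocalStack volume directory:

```bash
# re-authenticate and remove stale Cloud Pods credentials and caches (macOS paths; adjust for your OS)
localstack auth login
rm -f ~/.localstack/auth.json
rm -rf ~/Library/Caches/localstack ~/Library/Caches/localstack-cli
```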
diff --git a/content/en/user-guide/state-management/export-import-state/index.md b/content/en/user-guide/state-management/export-import-state/index.md index b1456059f7..462d0e2548 100644 --- a/content/en/user-guide/state-management/export-import-state/index.md +++ b/content/en/user-guide/state-management/export-import-state/index.md @@ -9,11 +9,13 @@ tags: ["Pro image"] ## Introduction -The Export/Import State feature enables you to export the state of your LocalStack instance into a file and import it into another LocalStack instance. This feature is useful when you want to save your LocalStack instance's state for later use. +The Export/Import State feature enables you to export the state of your LocalStack instance into a file and import it into another LocalStack instance. +This feature is useful when you want to save your LocalStack instance's state for later use. ## LocalStack CLI -The LocalStack CLI enables you to export your infrastructure state to a file and import it into another LocalStack instance. You can access the state management commands by running `localstack state` in your terminal. +The LocalStack CLI enables you to export your infrastructure state to a file and import it into another LocalStack instance. +You can access the state management commands by running `localstack state` in your terminal. {{< command >}} $ localstack state --help @@ -46,10 +48,15 @@ To export the state, you can run the following command: $ localstack state export {{< /command >}} -You can specify a file path to export the state to. If you do not specify a file path, the state will be exported to the current working directory into a file named `ls-state-export`. You can specify the following flags to customize the export: +You can specify a file path to export the state to. +If you do not specify a file path, the state will be exported to the current working directory into a file named `ls-state-export`. +You can specify the following flags to customize the export: -- `--services`: Specify the services to export. You can specify multiple services by separating them with a comma. If you do not specify any services, all services will be exported. -- `--format`: Specify the format of the exported state. For example, you can specify `json` to specify the save command output as JSON. +- `--services`: Specify the services to export. + You can specify multiple services by separating them with a comma. + If you do not specify any services, all services will be exported. +- `--format`: Specify the format of the exported state. + For example, you can specify `json` to specify the save command output as JSON. ### Import the State @@ -59,11 +66,13 @@ To import the state, you can run the following command: $ localstack state import {{< /command >}} -The `` argument is required and specifies the file path to import the state from. The file should be generated from a previous export. +The `` argument is required and specifies the file path to import the state from. +The file should be generated from a previous export. ## Web Application -The LocalStack Web Application enables you to export your infrastructure state to a file and import it into another LocalStack instance. The Local mode allows you to perform local exports and imports of your LocalStack instance's state. +The LocalStack Web Application enables you to export your infrastructure state to a file and import it into another LocalStack instance. +The Local mode allows you to perform local exports and imports of your LocalStack instance's state. 
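As a concrete illustration of the CLI flags described above, here is a minimal export/import round trip; the service list and file name are placeholders, not part of the original docs:

```bash
# Export only S3 and DynamoDB state into a file named my-export
# (omit --services to export everything; omit the path to write ./ls-state-export).
localstack state export my-export --services s3,dynamodb

# Later, or on another machine, load the exported file into a running instance.
localstack state import my-export
```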
LocalStack Export/Import State Local Mode @@ -73,7 +82,8 @@ To export the state, follow these steps: 1. Navigate to the **Local** tab within the [Export/Import State](https://app.localstack.cloud/inst/default/state) page. 2. Create AWS resources locally as needed. -3. Click on the **Export State** button. This action will initiate the download of a ZIP file. +3. Click on the **Export State** button. + This action will initiate the download of a ZIP file. The downloaded ZIP file contains your container state, which can be injected into another LocalStack instance for further use. @@ -82,6 +92,7 @@ The downloaded ZIP file contains your container state, which can be injected int To import the state, follow these steps: 1. Navigate to the **Local** tab within the [Export/Import State](https://app.localstack.cloud/inst/default/state) page. -2. Upload the ZIP file that contains your container state. This action will restore your previously loaded AWS resources. +2. Upload the ZIP file that contains your container state. + This action will restore your previously loaded AWS resources. To confirm the successful injection of the container state, visit the respective [Resource Browser](https://app.localstack.cloud/inst/default/resources) for the services and verify the resources. diff --git a/content/en/user-guide/state-management/launchpad/index.md index 5dbb9cd1f1..53265abe0a 100644 --- a/content/en/user-guide/state-management/launchpad/index.md +++ b/content/en/user-guide/state-management/launchpad/index.md @@ -15,10 +15,13 @@ You can visit [Cloud Pods launchpad](https://app.localstack.cloud/launchpad) to ![Cloud Pods Launchpad Link Generator](link-generator.png) -Enter a public URL to your pod Cloud Pod the first input field, then click **Generate Link**. You can copy the resulting link and share it with others. Additionally, you have the option to copy a markdown snippet for quickly adding a badge to your repository. +Enter the public URL of your Cloud Pod in the first input field, then click **Generate Link**. +You can copy the resulting link and share it with others. +Additionally, you have the option to copy a markdown snippet for quickly adding a badge to your repository. {{< callout "warning" >}} -The Launchpad accepts any URL as input and directly passes it to your LocalStack instance for Cloud Pod injection. As a result, this process may carry the risk of unintended side effects, as there is no validation applied to the URL provided to the launchpad. +The Launchpad accepts any URL as input and directly passes it to your LocalStack instance for Cloud Pod injection. +As a result, this process may carry the risk of unintended side effects, as there is no validation applied to the URL provided to the launchpad. Furthermore, it's essential to exercise caution and only use URLs that you trust when utilizing the launchpad, as the URL is displayed in the user interface. {{< /callout >}} @@ -33,9 +36,11 @@ To add a badge to your README that links to the Cloud Pod Launchpad, you can inc ![Cloud Pods Badge Demonstration](badge-demo.png) -You need to include the `url_of_your_pod` as the `url` query parameter in the URL. For instance, if your Cloud Pod is hosted within the same repository, simply use the URL that directs to the raw Cloud Pod file. +You need to include the `url_of_your_pod` as the `url` query parameter in the URL.
+For instance, if your Cloud Pod is hosted within the same repository, simply use the URL that directs to the raw Cloud Pod file. -Additionally, you can utilize the [Link Generator]({{< ref "#creating-your-sharable-link" >}}) to generate a shareable link for your pod and copy the corresponding markdown snippet for your repository. Check out a proper example on th Cloud Pod badge GitHub repository. +Additionally, you can utilize the [Link Generator]({{< ref "#creating-your-sharable-link" >}}) to generate a shareable link for your pod and copy the corresponding markdown snippet for your repository. +Check out a proper example on th Cloud Pod badge GitHub repository. ## Troubleshooting common problems @@ -47,7 +52,8 @@ If your LocalStack instance is not running, you will encounter the following err ![Cloud Pods Launchpad Error LocalStack not running](ls-not-running.png) -Start your LocalStack instance and attempt the operation once more. For detailed instructions on starting LocalStack, refer to the [Getting Started]({{< ref "getting-started" >}}) section. +Start your LocalStack instance and attempt the operation once more. +For detailed instructions on starting LocalStack, refer to the [Getting Started]({{< ref "getting-started" >}}) section. ### Failed to load metadata @@ -55,8 +61,11 @@ If the launchpad encounters difficulties while loading the metadata of your pod, ![Cloud Pods Launchpad Error failed to load metadata](metadata-load-failed.png) -This can occur when the pod is no longer accessible or if the URL provided is invalid. Review the URL and attempt the operation once more. +This can occur when the pod is no longer accessible or if the URL provided is invalid. +Review the URL and attempt the operation once more. ### Failed to inject pod -If the launchpad is unable to successfully inject the pod into your LocalStack instance, you will encounter a `Pod injection failed` message in the log. This issue may arise from various factors, such as version disparities. For further insights and information, please review your LocalStack logs. +If the launchpad is unable to successfully inject the pod into your LocalStack instance, you will encounter a `Pod injection failed` message in the log. +This issue may arise from various factors, such as version disparities. +For further insights and information, please review your LocalStack logs. diff --git a/content/en/user-guide/state-management/persistence/index.md b/content/en/user-guide/state-management/persistence/index.md index 5dff3abeaa..7f3a25518b 100644 --- a/content/en/user-guide/state-management/persistence/index.md +++ b/content/en/user-guide/state-management/persistence/index.md @@ -11,15 +11,20 @@ tags: ["Pro image"] ## Introduction -LocalStack's Persistence mechanism enables the saving and restoration of the entire LocalStack state, including all AWS resources and data, on your local machine. It functions as a "pause and resume" feature, allowing you to take a snapshot of your LocalStack instance and save this data to disk. This mechanism ensures a quick and efficient way to preserve and continue your work with AWS resources locally. +LocalStack's Persistence mechanism enables the saving and restoration of the entire LocalStack state, including all AWS resources and data, on your local machine. +It functions as a "pause and resume" feature, allowing you to take a snapshot of your LocalStack instance and save this data to disk. 
+This mechanism ensures a quick and efficient way to preserve and continue your work with AWS resources locally. ## Configuration -To start snapshot-based persistence, launch LocalStack with the configuration option `PERSISTENCE=1`. This setting instructs LocalStack to save all AWS resources and their respective application states into the LocalStack Volume Directory. Upon restarting LocalStack, you'll be able to resume your activities exactly where you left off. +To start snapshot-based persistence, launch LocalStack with the configuration option `PERSISTENCE=1`. +This setting instructs LocalStack to save all AWS resources and their respective application states into the LocalStack Volume Directory. +Upon restarting LocalStack, you'll be able to resume your activities exactly where you left off. {{< tabpane lang="bash" >}} {{< tab header="LocalStack CLI" lang="bash" >}} LOCALSTACK_AUTH_TOKEN=... PERSISTENCE=1 localstack start {{< /tab >}} {{< tab header="Docker Compose" lang="yaml" >}} ... @@ -53,11 +58,15 @@ There are four strategies that you can choose from that govern when these snapsh You can select a particular save strategy by setting `SNAPSHOT_SAVE_STRATEGY=`. * **`ON_REQUEST`**: On every AWS API call that potentially modifies the state of a service, LocalStack will save the state of that service. - This strategy minimizes the chance for data loss, but also has significant performance implications. The service has to be locked during snapshotting, meaning that any requests to the particular AWS service will be blocked until the snapshot is complete. In many cases this is just a few milliseconds, but can become significant in some services. + This strategy minimizes the chance for data loss, but also has significant performance implications. + The service has to be locked during snapshotting, meaning that any requests to the particular AWS service will be blocked until the snapshot is complete. + In many cases this is just a few milliseconds, but can become significant in some services. * **`ON_SHUTDOWN`**: The state of all services are saved during the shutdown phase of LocalStack. - This strategy has zero performance impact, but is not good when you want to minimize the chance for data loss. Should LocalStack for some reason not shut down properly or is terminated before it can finalize the snapshot, you may be left with an incomplete state on disk. + This strategy has zero performance impact, but is not good when you want to minimize the chance for data loss. + Should LocalStack for some reason not shut down properly or is terminated before it can finalize the snapshot, you may be left with an incomplete state on disk. * **`SCHEDULED`** (**default**): Saves at regular intervals the state of all the services that have been modified since the last snapshot. - By default, the flush interval is 15 seconds. It can be configured via the `SNAPSHOT_FLUSH_INTERVAL` configuration variable. + By default, the flush interval is 15 seconds. + It can be configured via the `SNAPSHOT_FLUSH_INTERVAL` configuration variable. This is a compromise between `ON_REQUEST` and `ON_SHUTDOWN` in terms of performance and reliability. * **`MANUAL`**: Turns off automatic snapshotting and gives you control through the internal state endpoints. @@ -65,8 +74,10 @@ You can select a particular save strategy by setting `SNAPSHOT_SAVE_STRATEGY=`. -* **`ON_REQUEST`**: (**default**) The state is loaded lazily when the service is requested.
This maintains LocalStack's lazy-loading behavior for AWS services. -* **`ON_STARTUP`**: The state of all services in the snapshot is restored when LocalStack starts up. This means that services that have stored state are also started on LocalStack start, which will increase the startup time, but also give you immediate feedback whether the state was restored correctly. +* **`ON_REQUEST`**: (**default**) The state is loaded lazily when the service is requested. + This maintains LocalStack's lazy-loading behavior for AWS services. +* **`ON_STARTUP`**: The state of all services in the snapshot is restored when LocalStack starts up. + This means that services that have stored state are also started on LocalStack start, which will increase the startup time, but also give you immediate feedback whether the state was restored correctly. * **`MANUAL`**: Turns off automatic loading of snapshots and gives you control through the internal state endpoints. ### Endpoints @@ -76,13 +87,15 @@ As mentioned, with the `MANUAL` save or load strategy you can trigger snapshotti * `POST /_localstack/state//save` take a snapshot the given service * `POST /_localstack/state//load` load the most recent snapshot of the given service -For example, a snapshot for a particular service (e.g., `s3`) can be triggered by running the following command. The service name refers to the AWS service code. +For example, a snapshot for a particular service (e.g., `s3`) can be triggered by running the following command. +The service name refers to the AWS service code. {{< command >}} $ curl -X POST http://localhost:4566/_localstack/state/s3/save {{< /command >}} -It is also possible to take and load a snapshot of all the services at once. We provide the following endpoints: +It is also possible to take and load a snapshot of all the services at once. +We provide the following endpoints: * `POST /_localstack/state/save` * `POST /_localstack/state/load` diff --git a/content/en/user-guide/state-management/pods-cli/index.md b/content/en/user-guide/state-management/pods-cli/index.md index 56dd1d8f2f..05a6efb737 100644 --- a/content/en/user-guide/state-management/pods-cli/index.md +++ b/content/en/user-guide/state-management/pods-cli/index.md @@ -57,11 +57,14 @@ Usage: pod save [OPTIONS] NAME [REMOTE] Save the current state of the LocalStack container in a Cloud Pod. A Cloud Pod can be registered and saved with different storage options, - called remotes. By default, Cloud Pods are hosted in the LocalStack - platform. However, users can decide to store their Cloud Pods in other + called remotes. + By default, Cloud Pods are hosted in the LocalStack + platform. + However, users can decide to store their Cloud Pods in other remotes, such as AWS S3 buckets or ORAS registries. - An optional message can be attached to any Cloud Pod. Furthermore, one + An optional message can be attached to any Cloud Pod. + Furthermore, one could decide to export only a subset of services with the optional --service option. @@ -82,7 +85,8 @@ Options: the Cloud Pod (all by default) --visibility [public|private] Set the visibility of the Cloud Pod [`public` - or `private`]. Does not create a new version + or `private`]. + Does not create a new version -f, --format [json] The formatting style for the save command output. 
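Tying the `MANUAL` save strategy together with the internal state endpoints documented in the persistence section above, a minimal sketch (the configuration variables are as described there; the `-d` detached flag is assumed to be available in your CLI version):

```bash
# Start LocalStack with persistence on and automatic snapshotting turned off.
PERSISTENCE=1 SNAPSHOT_SAVE_STRATEGY=MANUAL localstack start -d

# Explicitly snapshot the state of all services at once, and restore it later.
curl -X POST http://localhost:4566/_localstack/state/save
curl -X POST http://localhost:4566/_localstack/state/load
```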
@@ -98,8 +102,8 @@ To save and load the state locally, you can use the command in the `localstack s $ localstack pod save my-pod {{< / command >}} -The above command generates a new version of `my-pod` and uploads it on the LocalStack platform. -When pushing an already existing pod, a new version is created and subsequently uploaded to the platform. +The above command generates a new version of `my-pod` and uploads it on the LocalStack platform. +When pushing an already existing pod, a new version is created and subsequently uploaded to the platform. Users also have the option to select a specific subset of AWS services they want to include in the new Cloud Pod version using the `--services` option. @@ -109,7 +113,8 @@ Users who want to make a Cloud Pod accessible outside their organization can mar $ localstack pod save --name my-pod --visibility public {{< / command >}} -The above command does not create a new version and requires a version already registered with the platform. The CLI manual for the `save` command is as follows: +The above command does not create a new version and requires a version already registered with the platform. +The CLI manual for the `save` command is as follows: ### `load` @@ -122,14 +127,19 @@ Usage: pod load [OPTIONS] NAME [REMOTE] being the default one. Loading the state of a Cloud Pod into LocalStack might cause some - conflicts with the current state of the container. By default, LocalStack + conflicts with the current state of the container. + By default, LocalStack will attempt a best-effort merging strategy between the current state and - the one from the Cloud Pod. For a service X present in both the current + the one from the Cloud Pod. + For a service X present in both the current state and the Cloud Pod, we will attempt to merge states across different - accounts and regions. If the service X has a state for the same account + accounts and regions. + If the service X has a state for the same account and region both in the running container and the Cloud Pod, the latter - will be used. If a service Y is present in the running container but not - in the Cloud Pod, it will be left untouched. With `--merge overwrite`, the + will be used. + If a service Y is present in the running container but not + in the Cloud Pod, it will be left untouched. + With `--merge overwrite`, the state of the Cloud Pod will completely replace the state of the running container. @@ -139,17 +149,19 @@ Options: --merge [overwrite|merge] The merge strategy to adopt when loading the Cloud Pod - -y, --yes Automatic yes to prompts. Assume a positive + -y, --yes Automatic yes to prompts. + Assume a positive answer to all prompts and run non-interactively --help Show this message and exit. {{< / command >}} -The `load` command is the inverse operation of `save`. +The `load` command is the inverse operation of `save`. It retrieves the content of a previously stored Cloud Pod a remote (by default, theLocalStack platform) and injects it into the LocalStack container. ### `delete` + {{< command >}} Usage: pod delete [OPTIONS] NAME @@ -164,7 +176,7 @@ Options: {{< / command >}} -The `delete` command let users delete a Cloud Pod stored in the remote platform. +The `delete` command let users delete a Cloud Pod stored in the remote platform. The CLI manual for the `delete` command is as follows: ### `inspect` @@ -175,7 +187,8 @@ Usage: pod inspect [OPTIONS] NAME Inspect the contents of a Cloud Pod - This command shows the content of a Cloud Pod. 
By default, it starts a + This command shows the content of a Cloud Pod. + By default, it starts a curses interface which allows an interactive inspection of the contents in the Cloud Pod. @@ -188,7 +201,7 @@ Options: {{< / command >}} -The `inspect` command simply lets the user inspect the content of a Cloud Pod. +The `inspect` command simply lets the user inspect the content of a Cloud Pod. ### `list` @@ -201,7 +214,8 @@ Usage: pod list [OPTIONS] [REMOTE] With the --public flag, it lists the all the available public Cloud Pods. A public Cloud Pod is available across the boundary of a user one/or - organization. In other words, any public Cloud Pod can be injected by any + organization. + In other words, any public Cloud Pod can be injected by any other user holding a LocalStack Pro (or above) license. Options: @@ -213,7 +227,7 @@ Options: {{< / command >}} -The `list` command lists all of the available Cloud Pods. +The `list` command lists all of the available Cloud Pods. It shows all the pods available for a single user and its organization by default. ### `versions` @@ -224,7 +238,8 @@ Usage: pod versions [OPTIONS] NAME List all available versions for a Cloud Pod - This command lists the versions available for a Cloud Pod. Each invocation + This command lists the versions available for a Cloud Pod. + Each invocation of the save command is going to create a new version for a named Cloud Pod, if a Pod with such name already does exist in the LocalStack platform. @@ -237,7 +252,7 @@ Options: {{< / command >}} -The `versions` command lists all the available versions of a Cloud Pod. +The `versions` command lists all the available versions of a Cloud Pod. The CLI manual for the `version` command is as follows: ### `remote` @@ -271,7 +286,8 @@ Usage: pod remote add [OPTIONS] NAME URL Add a new remote for Cloud Pods. - A remote is the place where your Cloud Pods are stored. By default, Cloud + A remote is the place where your Cloud Pods are stored. + By default, Cloud Pods are store in the LocalStack platform. Options: @@ -293,6 +309,7 @@ Options: {{< / command >}} #### `remote list` + {{< command >}} Usage: pod remote list [OPTIONS] @@ -313,6 +330,7 @@ In addition to the commands in the `pod` group, we also offer a simple alternati The `state` group offers two commands to export and import the state of the LocalStack container to/from a zip file from the host machine. ## `state` syntax + {{< command >}} Usage: state [OPTIONS] COMMAND [ARGS]... @@ -342,8 +360,10 @@ Commands: Usage: state export [OPTIONS] [DESTINATION] Save the current state of the LocalStack container to a file on the local - disk. This file can be restored at any point in time using the `localstack - state import` command. Please be aware that this might not be possible + disk. + This file can be restored at any point in time using the `localstack + state import` command. + Please be aware that this might not be possible when importing the state with a different version of LocalStack. If you are looking for a managed solution to handle the state of your @@ -351,7 +371,8 @@ Usage: state export [OPTIONS] [DESTINATION] https://docs.localstack.cloud/user-guide/tools/cloud-pods/ Use the DESTINATION argument to specify an absolute path for the exported - file or a filename in current working directory. If no destination is + file or a filename in current working directory. + If no destination is specified, a file named `ls-state-export` will be saved in the current working directory. 
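Looking back at the `pod load` help shown earlier, a short hypothetical invocation that loads a pod non-interactively and replaces the container state instead of merging (the pod name is a placeholder):

```bash
# --merge overwrite replaces the running container's state with the pod's state;
# --yes skips confirmation prompts, e.g. the version-mismatch prompt shown earlier.
localstack pod load my-pod --merge overwrite --yes
```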
@@ -359,11 +380,13 @@ Usage: state export [OPTIONS] [DESTINATION] localstack state export my-state localstack state export /home/johndoe/my-state - You can also specify a subset of services to export. By default, the state + You can also specify a subset of services to export. + By default, the state of all running services is exported. Options: - -s, --services TEXT Comma-delimited list of services to reset. By default, + -s, --services TEXT Comma-delimited list of services to reset. +By default, the state of all running services is exported. -f, --format [json] The formatting style for the save command output. @@ -377,9 +400,11 @@ Options: Usage: state import [OPTIONS] SOURCE - Load the state of LocalStack from a file into the running container. The + Load the state of LocalStack from a file into the running container. + The SOURCE file must have been generated from a previous `localstack state - export` command. Please be aware that it might not be possible to import a + export` command. + Please be aware that it might not be possible to import a state generated from a different version of LocalStack. Examples: @@ -400,15 +425,19 @@ Usage: state reset [OPTIONS] Reset the service states of the current LocalStack runtime. This command invokes a reset of services in the currently running - LocalStack container. By default, all services are rest. The `services` + LocalStack container. + By default, all services are rest. + The `services` options allows to select a subset of services which should be reset. This command tries to automatically discover the running LocalStack - instance. If LocalStack has not been started with `localstack start` (and + instance. + If LocalStack has not been started with `localstack start` (and is not automatically discoverable), please set `LOCALSTACK_HOST`. Options: - -s, --services TEXT Comma-delimited list of services to reset. By default, + -s, --services TEXT Comma-delimited list of services to reset. +By default, the state of all running services is reset. --help Show this message and exit. diff --git a/content/en/user-guide/state-management/support/index.md b/content/en/user-guide/state-management/support/index.md index 0c345dcc96..0007a22d2b 100644 --- a/content/en/user-guide/state-management/support/index.md +++ b/content/en/user-guide/state-management/support/index.md @@ -16,4 +16,3 @@ tags: ["Pro Image"] To test persistence, we use an approach similar to snapshot parity test: we first record API responses from LocalStack, we then reset and restore the snapshotted state, and finally verify that the same API responses matches with the initial ones. - diff --git a/content/en/user-guide/tools/_index.md b/content/en/user-guide/tools/_index.md index 3ab96c7b15..b432f70f17 100755 --- a/content/en/user-guide/tools/_index.md +++ b/content/en/user-guide/tools/_index.md @@ -18,4 +18,5 @@ With LocalStack Cloud Developer Tools you can: * hot-swap your Lambda code changes instantly * debug Lambda executions directly from your IDE * inject LocalStack service endpoints automatically into your application -* ... and much more! +* ... + and much more! 
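For the `state reset` command documented above, a brief illustrative example (the service list is a placeholder):

```bash
# Reset only S3 and SQS in the running container; omit --services to reset everything.
localstack state reset --services s3,sqs
```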
diff --git a/content/en/user-guide/tools/dns-server/index.md b/content/en/user-guide/tools/dns-server/index.md index 43a0d1f103..a15a5555e4 100644 --- a/content/en/user-guide/tools/dns-server/index.md +++ b/content/en/user-guide/tools/dns-server/index.md @@ -22,7 +22,6 @@ On your host machine, `localhost.localstack.cloud` and any subdomains such as `m unless your router has [DNS rebind protection]({{< ref "dns-server#dns-rebind-protection" >}}) enabled. {{< / callout >}} - ### Fallback DNS server If you want to use another upstream DNS resolver than your default system DNS resolver or Google DNS (`8.8.8.8` fallback if detection fails), @@ -49,11 +48,10 @@ For example, `https://123456789012.dkr.ecr.us-west-2.amazonaws.com` will be forw This can be used for hybrid setups, where certain API calls (e.g., ECR, Lambda) target AWS, whereas other services will target LocalStack. The regex pattern follows Python flavored-regex and can be tested at [regex101.com](https://regex101.com/r/OzIsQa/1). -[The regex101 link is maintained by Joel Scheuner (requires linking to GitHub or Google account). It redirects to the main page if the saved example would not work.]: # {{< callout "warning" >}} -Use this configuration with caution because we generally do not recommend connecting to real AWS from within LocalStack. +Use this configuration with caution because we generally do not recommend connecting to real AWS from within LocalStack. {{< /callout >}} ### DNS Server bind address @@ -68,7 +66,6 @@ DNS_ADDRESS=0 We do not recommend disabling the DNS server since this disables resolving `localhost.localstack.cloud` to the LocalStack container. {{< /callout >}} - ### LocalStack endpoints If you operate behind an enterprise proxy and wish to customize the domain name returned by LocalStack services (e.g., SQS queue URL), @@ -77,7 +74,6 @@ check out the [Configuration]({{< ref "configuration#core" >}}) `LOCALSTACK_HOST If you wish to customize internal LocalStack DNS routing of `localhost.localstack.cloud`, refer to the instructions in the [Route53 documentation]({{< ref "user-guide/aws/route53#customizing-internal-endpoint-resolution" >}}). - ## DNS rebind protection If you rely on your local network's DNS, your router/DNS server might block requests due to the DNS Rebind Protection. @@ -101,11 +97,14 @@ $ dig test.localhost.localstack.cloud ;; OPT PSEUDOSECTION: ; EDNS: version: 0, flags:; udp: 65494 ;; QUESTION SECTION: -;test.localhost.localstack.cloud. IN A +;test.localhost.localstack.cloud. +IN A ;; ANSWER SECTION: -test.localhost.localstack.cloud. 10786 IN CNAME localhost.localstack.cloud. -localhost.localstack.cloud. 389 IN A 127.0.0.1 +test.localhost.localstack.cloud. +10786 IN CNAME localhost.localstack.cloud. +localhost.localstack.cloud. +389 IN A 127.0.0.1 ;; Query time: 16 msec ;; SERVER: 127.0.0.53#53(127.0.0.53) @@ -116,7 +115,6 @@ localhost.localstack.cloud. 389 IN A 127.0.0.1 If the DNS resolves the subdomain to your localhost (127.0.0.1), your setup is working. If not, please check the configuration of your router / DNS if the Rebind Protection is active or [enable the LocalStack DNS on your system]({{< ref "dns-server#system-dns-configuration" >}}). - ## System DNS configuration If you wish to use the DNS server on your host system, you need to expose the LocalStack DNS server and configure your operating system. @@ -129,7 +127,7 @@ Remember to save the default configuration and restore it after testing. 1. 
Expose the LocalStack DNS server: - a) Since version 3.5, the LocalStack CLI does not publish port `53` anymore by default. + a) Since version 3.5, the LocalStack CLI does not publish port `53` anymore by default. Use the CLI flag `--host-dns` to expose the port on the host. b) For Docker Compose, add the following port mappings to your `docker-compose.yml`: @@ -142,6 +140,7 @@ Remember to save the default configuration and restore it after testing. {{< callout >}} If port 53 is already bound, `docker-compose up` fails with the error: + ```plain Error response from daemon: Ports are not available: exposing port UDP 127.0.0.1:53 -> 0.0.0.0:0: command failed ``` @@ -222,13 +221,15 @@ If you want to perform this action manually, please do the following steps: 1. Configure the DNS resolver for the bridge network: {{< command >}} - # resolvectl dns +# resolvectl dns + {{< / command >}} 3. Set the DNS route to route only the above mentioned domain names (and subdomains) to LocalStack: {{< command >}} - # resolvectl domain ~amazonaws.com ~aws.amazon.com ~cloudfront.net ~localhost.localstack.cloud +# resolvectl domain ~amazonaws.com ~aws.amazon.com ~cloudfront.net ~localhost.localstack.cloud + {{< / command >}} In both cases, you can use `resolvectl query s3.amazonaws.com` or `resolvectl query example.com` to check which interface your DNS request is routed through, to confirm only the above mentioned domains (and its subdomains) are routed to LocalStack. diff --git a/content/en/user-guide/tools/localstack-desktop/index.md b/content/en/user-guide/tools/localstack-desktop/index.md index 5f4bf85755..5501d50cc1 100644 --- a/content/en/user-guide/tools/localstack-desktop/index.md +++ b/content/en/user-guide/tools/localstack-desktop/index.md @@ -7,10 +7,12 @@ aliases: - /user-guide/tools/cockpit/ --- -LocalStack Desktop is a desktop client that allows users to easily control and interact with their LocalStack instance. Using LocalStack Desktop, users can start and stop their LocalStack instance with a single click, create a new container, view logs, interact with LocalStack container via cli and use our resource browser. +LocalStack Desktop is a desktop client that allows users to easily control and interact with their LocalStack instance. +Using LocalStack Desktop, users can start and stop their LocalStack instance with a single click, create a new container, view logs, interact with LocalStack container via cli and use our resource browser. {{< callout >}} -LocalStack Desktop replaces the previous LocalStack Cockpit application. Cockpit isn't available or maintained anymore and we recommend you to use LocalStack Desktop instead. +LocalStack Desktop replaces the previous LocalStack Cockpit application. +Cockpit isn't available or maintained anymore and we recommend you to use LocalStack Desktop instead. {{< /callout >}} LocalStack Desktop @@ -18,15 +20,17 @@ LocalStack Desktop replaces the previous LocalStack Cockpit application. Cockpit ## Installation You can download LocalStack Desktop from our [web application](https://app.localstack.cloud/download). -To install LocalStack Desktop, **Docker** is the only prerequisite. +To install LocalStack Desktop, **Docker** is the only prerequisite. ## Features -LocalStack Desktop helps users to interact with their LocalStack instance with a simple and intuitive UI. Some of the features of LocalStack Desktop includes the ability to: Control LocalStack, Interact with LocalStack, get LocalStack insights and use the Resource browser. 
+LocalStack Desktop helps users to interact with their LocalStack instance with a simple and intuitive UI. +Some of the features of LocalStack Desktop include the ability to: Control LocalStack, Interact with LocalStack, get LocalStack insights and use the Resource browser. ### Control LocalStack -Using our Desktop application you will be able to start, stop, delete and create new containers with just a click. It also allows to set up a custom URL if you are using LocalStack outside of Docker or in Kubernetes. +Using our Desktop application you will be able to start, stop, delete and create new containers with just a click. +It also allows you to set up a custom URL if you are using LocalStack outside of Docker or in Kubernetes.

LocalStack Desktop container creation @@ -38,14 +42,13 @@ You can run commands within the LocalStack container by using our CLI LocalStack Desktop cli interaction - ### LocalStack Insights -LocalStack Desktop provides quick access to your LocalStack logs for instant insights. See what's happening in details from the Logs tab. +LocalStack Desktop provides quick access to your LocalStack logs for instant insights. +See what's happening in details from the Logs tab. LocalStack Desktop Logs tab - ### Resource browser You can also create, modify, delete and read all of your resources from the Resource Browser tab, having the same experience that you would have using it in our [web application](https://app.localstack.cloud/inst/default/resources) diff --git a/content/en/user-guide/tools/localstack-docker-extension/index.md b/content/en/user-guide/tools/localstack-docker-extension/index.md index 09176a7c15..d6dc6a350c 100644 --- a/content/en/user-guide/tools/localstack-docker-extension/index.md +++ b/content/en/user-guide/tools/localstack-docker-extension/index.md @@ -10,20 +10,23 @@ aliases: ## Introduction -The LocalStack Extension for Docker Desktop enables developers working with LocalStack to operate their LocalStack container via Docker Desktop, including checking service status, container logs, and configuring profiles. To install the LocalStack Extension for Docker Desktop, you need to have [Docker Desktop installed on your machine](https://www.docker.com/products/docker-desktop). +The LocalStack Extension for Docker Desktop enables developers working with LocalStack to operate their LocalStack container via Docker Desktop, including checking service status, container logs, and configuring profiles. +To install the LocalStack Extension for Docker Desktop, you need to have [Docker Desktop installed on your machine](https://www.docker.com/products/docker-desktop). LocalStack Extension for Docker Desktop ## Installation -To utilize LocalStack's Docker Extension, it is necessary to have a recent version of Docker Desktop (v4.8 or higher) installed on the local machine. To enable the extension, access the **Extensions** tab and select the **Enable Docker Extensions** and **Show Docker Extensions system containers** option. +To utilize LocalStack's Docker Extension, it is necessary to have a recent version of Docker Desktop (v4.8 or higher) installed on the local machine. +To enable the extension, access the **Extensions** tab and select the **Enable Docker Extensions** and **Show Docker Extensions system containers** option. - +Enable Docker Extensions in the Preferences within the Extensions tab

-The LocalStack Extension for Docker Desktop has been validated and can be accessed on the Extensions Marketplace. To begin using it, navigate to the **Extensions Marketplace**, search for **LocalStack**, and click the **Install** button to proceed with the installation. +The LocalStack Extension for Docker Desktop has been validated and can be accessed on the Extensions Marketplace. +To begin using it, navigate to the **Extensions Marketplace**, search for **LocalStack**, and click the **Install** button to proceed with the installation. - +Discover the LocalStack Extension on the Docker Desktop Marketplace and install it!

An alternative method for installing the LocalStack's Extension for Docker Desktop is pulling the [public Docker image](https://hub.docker.com/r/localstack/localstack-docker-desktop) from Docker Hub and installing it! @@ -32,34 +35,41 @@ An alternative method for installing the LocalStack's Extension for Docker Deskt $ docker extension install localstack/localstack-docker-desktop:0.5.3 {{< /command >}} -After installation, you can access the LocalStack Extension for Docker Desktop from the **Extensions** tab. Upon the initial launch of the extension, a prompt to select a mount point for the LocalStack container will appear. Select your username from the drop-down menu. Furthermore, you can modify this setting later by navigating to the **Configurations** tab and choosing a different mount point. +After installation, you can access the LocalStack Extension for Docker Desktop from the **Extensions** tab. +Upon the initial launch of the extension, a prompt to select a mount point for the LocalStack container will appear. +Select your username from the drop-down menu. +Furthermore, you can modify this setting later by navigating to the **Configurations** tab and choosing a different mount point. - +Select the mount point upon the launch of LocalStack's Docker extension ## Features -LocalStack's Docker Extension helps users to manage their LocalStack container with a simple and intuitive user interface through Docker Desktop. The extension includes container management, configuration profile management, service status, and container logs! +LocalStack's Docker Extension helps users to manage their LocalStack container with a simple and intuitive user interface through Docker Desktop. +The extension includes container management, configuration profile management, service status, and container logs! ### Container management -You can start, stop, and restart LocalStack from the Docker Desktop. You can also see the current status of your LocalStack container and navigate to LocalStack Web Application. +You can start, stop, and restart LocalStack from the Docker Desktop. +You can also see the current status of your LocalStack container and navigate to LocalStack Web Application. - +Start and Stop your LocalStack container with a single click of a button with LocalStack's extension ### Container logs You can see the log information of the LocalStack container and all the available services and their status on the service page. - +Check the logs of your running LocalStack container through LocalStack's Docker extension ### Configuration management You can manage and use your profiles via configurations and create new configurations for your LocalStack container. - +Create your configuration profiles within LocalStack's Extension to affect the state of LocalStack ## Configure an Auth Token -To configure an Auth Token for the LocalStack Docker Extension, you need to create a new configuration profile. Navigate to the **Configurations** tab and click the **New +** button. Enter the configuration name and add the `LOCALSTACK_AUTH_TOKEN` environment variable with the desired value. +To configure an Auth Token for the LocalStack Docker Extension, you need to create a new configuration profile. +Navigate to the **Configurations** tab and click the **New +** button. +Enter the configuration name and add the `LOCALSTACK_AUTH_TOKEN` environment variable with the desired value. 
To start the LocalStack Pro container with the Auth Token, select the configuration profile from the drop-down menu and click the **Start** button. diff --git a/content/en/user-guide/tools/localsurf/index.md b/content/en/user-guide/tools/localsurf/index.md index 8b6f42d6b6..d18a6b23d6 100644 --- a/content/en/user-guide/tools/localsurf/index.md +++ b/content/en/user-guide/tools/localsurf/index.md @@ -7,13 +7,17 @@ description: > ## Introduction -LocalSurf is a Chrome browser plugin to repoint AWS service calls to [LocalStack](https://localstack.cloud/). While developing and testing AWS cloud Web applications locally with LocalStack, we need to make the browser connect to the local endpoint (`http://localhost:4566`) instead of the AWS production servers (`*.amazonaws.com`). LocalSurf enables you to use the production code without changes, and have the browser make requests to LocalStack instead of AWS directly by explicitly setting the [`endpoint` attribute](https://docs.aws.amazon.com/sdk-for-javascript/v2/developer-guide/specifying-endpoints.html) in the [AWS JavaScript SDK](https://docs.aws.amazon.com/AWSJavaScriptSDK/latest/AWS/Endpoint.html). +LocalSurf is a Chrome browser plugin to repoint AWS service calls to [LocalStack](https://localstack.cloud/). +While developing and testing AWS cloud Web applications locally with LocalStack, we need to make the browser connect to the local endpoint (`http://localhost:4566`) instead of the AWS production servers (`*.amazonaws.com`). + LocalSurf enables you to use the production code without changes, and have the browser make requests to LocalStack instead of AWS directly by explicitly setting the [`endpoint` attribute](https://docs.aws.amazon.com/sdk-for-javascript/v2/developer-guide/specifying-endpoints.html) in the [AWS JavaScript SDK](https://docs.aws.amazon.com/AWSJavaScriptSDK/latest/AWS/Endpoint.html). -> This plugin is experimental and still under active development. Please report any issues or feature requests on our [GitHub repository](https://github.com/localstack/local-surf). +> This plugin is experimental and still under active development. +> Please report any issues or feature requests on our [GitHub repository](https://github.com/localstack/local-surf). ## Installation -This extension is not yet available in the Chrome Web Store, but can be installed directly from source. Clone the repository on your local machine to get started: +This extension is not yet available in the Chrome Web Store, but can be installed directly from source. +Clone the repository on your local machine to get started: {{< command >}} $ git clone git@github.com:localstack/local-surf.git @@ -21,7 +25,8 @@ $ git clone git@github.com:localstack/local-surf.git Head over to `chrome://extensions/` in Chrome, then select `"Load unpacked"` and point to the directory where the source code files are stored on the disk. -Once installed, a new icon should appear in the Chrome extensions bar. When clicking on the icon, the plugin can be enabled/disabled by toggling the **Enable local mode** checkbox. +Once installed, a new icon should appear in the Chrome extensions bar. +When clicking on the icon, the plugin can be enabled/disabled by toggling the **Enable local mode** checkbox.

{{< img src="localsurf-extension.png" class="img-fluid shadow rounded" >}} @@ -29,7 +34,8 @@ Once installed, a new icon should appear in the Chrome extensions bar. When clic ## Usage -To illustrate how the plugin works, we use the AWS [Serverlesspresso](https://github.com/aws-samples/serverless-coffee-workshop) sample application. This app consists of various backend components (e.g., DynamoDB tables, Lambda functions, Cognito user pools, etc), as well as a hosted Web app user interface (UI) that can be used to interact with the backend components. +To illustrate how the plugin works, we use the AWS [Serverlesspresso](https://github.com/aws-samples/serverless-coffee-workshop) sample application. +This app consists of various backend components (e.g., DynamoDB tables, Lambda functions, Cognito user pools, etc), as well as a hosted Web app user interface (UI) that can be used to interact with the backend components. We can deploy the backend components to LocalStack directly, using the `samlocal` command line interface (CLI): @@ -68,4 +74,5 @@ This sample demonstrates how we can take an existing Web application, without an ## Note -Use this extension at your own risk - it is provided on an as-is basis, **without** warranties or conditions of **any** kind. In particular, it is your obligation to ensure that the use of this extension is compliant with the user license agreement and the terms & conditions of Amazon Web Services (AWS) and their services. +Use this extension at your own risk - it is provided on an as-is basis, **without** warranties or conditions of **any** kind. +In particular, it is your obligation to ensure that the use of this extension is compliant with the user license agreement and the terms & conditions of Amazon Web Services (AWS) and their services. diff --git a/content/en/user-guide/tools/testing-utils/index.md b/content/en/user-guide/tools/testing-utils/index.md index e2333e8dc7..cf536c7b09 100644 --- a/content/en/user-guide/tools/testing-utils/index.md +++ b/content/en/user-guide/tools/testing-utils/index.md @@ -10,7 +10,8 @@ aliases: ## Introduction -LocalStack provides a set of tools to simplify application testing on LocalStack. These tools are available for Python and JVM (Java and Kotlin) and can be used to integrate with various unit testing frameworks and simplify the setup of AWS clients with LocalStack. +LocalStack provides a set of tools to simplify application testing on LocalStack. +These tools are available for Python and JVM (Java and Kotlin) and can be used to integrate with various unit testing frameworks and simplify the setup of AWS clients with LocalStack. 
## Python diff --git a/content/en/user-guide/tools/transparent-endpoint-injection/index.md b/content/en/user-guide/tools/transparent-endpoint-injection/index.md index 2ec72f87d9..dafb13dc74 100644 --- a/content/en/user-guide/tools/transparent-endpoint-injection/index.md +++ b/content/en/user-guide/tools/transparent-endpoint-injection/index.md @@ -22,7 +22,7 @@ For example, the AWS SDK client for Python called boto3 needs to be configured u client = boto3.client("lambda", endpoint_url=os.environ['AWS_ENDPOINT_URL']) ``` -For [supported AWS SDKs](https://docs.aws.amazon.com/sdkref/latest/guide/feature-ss-endpoints.html#ss-endpoints-sdk-compat) +For [supported AWS SDKs](https://docs.aws.amazon.com/sdkref/latest/guide/feature-ss-endpoints.html#ss-endpoints-sdk-compat) (including boto3 since [1.28.0](https://github.com/boto/boto3/blob/develop/CHANGELOG.rst#L892)), this configuration happens automatically without any custom code changes. @@ -52,7 +52,6 @@ Refer to the [DNS server configuration]({{< ref "dns-server#configuration" >}}) Use this configuration with caution because we generally do not recommend connecting to real AWS from within LocalStack. {{< /callout >}} - ## Self-signed certificates In LocalStack Pro and Lambda, Transparent Endpoint Injection automatically disables SSL certificate validation of the AWS SDK for the @@ -85,7 +84,9 @@ For Node.js, you can set this environment variable in your application, to allow process.env.NODE_TLS_REJECT_UNAUTHORIZED = "0" ``` -If you are using the Java AWS SDK v2 in Lambda, LocalStack will per default use bytecode instrumentation to disable certificate validation, so the endpoint injection can work. You can opt out of this behavior by setting `LAMBDA_DISABLE_JAVA_SDK_V2_CERTIFICATE_VALIDATION=0`. Opting out will lead to certificate errors when using the AWS SDK without manually overriding the endpoint url to point to LocalStack. +If you are using the Java AWS SDK v2 in Lambda, LocalStack will per default use bytecode instrumentation to disable certificate validation, so the endpoint injection can work. +You can opt out of this behavior by setting `LAMBDA_DISABLE_JAVA_SDK_V2_CERTIFICATE_VALIDATION=0`. +Opting out will lead to certificate errors when using the AWS SDK without manually overriding the endpoint url to point to LocalStack. {{< callout "warning" >}} Disabling SSL validation may have undesired side effects and security implications. diff --git a/content/en/user-guide/web-application/accounts/index.md b/content/en/user-guide/web-application/accounts/index.md index 87d9c6ce27..525f17e6ca 100644 --- a/content/en/user-guide/web-application/accounts/index.md +++ b/content/en/user-guide/web-application/accounts/index.md @@ -8,6 +8,7 @@ description: > A LocalStack account is required to access features in the web app, and to access any of our paid offerings. ## Creating an Account + Visit [**app.localstack.cloud/sign-up**](https://app.localstack.cloud/sign-up) to create a user account for LocalStack. You can sign up with your email address or one of our supported social identity providers (such as GitHub). @@ -15,8 +16,8 @@ The Terms and Conditions can be found at - ## Updating Account Info and Settings + To update your account info, password and settings select the corresponding entry from the sidebar under the 'Account' menu entry. A screenshot of the 'Account Information' page with the 'Account' section highlighted in the navigation sidebar on the left. 
diff --git a/content/en/user-guide/web-application/ci-analytics/index.md b/content/en/user-guide/web-application/ci-analytics/index.md index ff90040e3b..9bb5cb1bc2 100644 --- a/content/en/user-guide/web-application/ci-analytics/index.md +++ b/content/en/user-guide/web-application/ci-analytics/index.md @@ -10,7 +10,9 @@ aliases: ## Introduction -CI Analytics is a feature of LocalStack Web Application that allows users to get insights into their CI builds. CI Analytics unifies additional features, such as Cloud Pods & Stack Insights, and augments them with an integrated view of CI builds to provide users with insights and facilitate debugging. The features include: +CI Analytics is a feature of LocalStack Web Application that allows users to get insights into their CI builds. +CI Analytics unifies additional features, such as Cloud Pods & Stack Insights, and augments them with an integrated view of CI builds to provide users with insights and facilitate debugging. +The features include: - **CI Project Runs**: Offers a unified view of all CI builds for a particular project. - **Log Output**: Enables viewing of detailed logs for individual CI builds. @@ -27,50 +29,59 @@ If you'd like to try it out, please [contact us](https://www.localstack.cloud/de ## Key Concepts -- **CI Project**: It represents an application executing builds and tests in a Continuous Integration (CI) pipeline. This corresponds to a repository or CI workflow on platforms like GitHub or GitLab. -- **CI Run**: It refers to a single CI build within a CI project. It typically corresponds to a single instance of a LocalStack container. -- **CI Logs**: These are the log outputs from the LocalStack container during a CI run. -- **CI Cloud Pod**: A Cloud Pod that records the state of a project at the end of a CI run, or at specific checkpoints during the run. -- **CI Stack Insights**: This includes the Stack Insights session and associated data for a particular CI run. +- **CI Project**: It represents an application executing builds and tests in a Continuous Integration (CI) pipeline. + This corresponds to a repository or CI workflow on platforms like GitHub or GitLab. +- **CI Run**: It refers to a single CI build within a CI project. + It typically corresponds to a single instance of a LocalStack container. +- **CI Logs**: These are the log outputs from the LocalStack container during a CI run. +- **CI Cloud Pod**: A Cloud Pod that records the state of a project at the end of a CI run, or at specific checkpoints during the run. +- **CI Stack Insights**: This includes the Stack Insights session and associated data for a particular CI run. ## Getting started -This guide is designed for users new to CI Analytics and assumes basic knowledge of GitHub Actions and YAML. Though this guide uses GitHub Actions as an example, the steps are similar for other CI/CD tools. In this example, we will configure CI Analytics with an existing application that uses GitHub Actions. +This guide is designed for users new to CI Analytics and assumes basic knowledge of GitHub Actions and YAML. +Though this guide uses GitHub Actions as an example, the steps are similar for other CI/CD tools. +In this example, we will configure CI Analytics with an existing application that uses GitHub Actions. ### Prerequisites - A [LocalStack Account](https://app.localstack.cloud/) and a [LocalStack API key](https://docs.localstack.cloud/getting-started/api-key/). - A [GitHub Account](https://github.com/). 
-For practical testing, you can use an existing application that employs LocalStack for cloud infrastructure deployment on GitHub Actions. Alternatively, you can start by forking one of the [Developer Hub samples](https://docs.localstack.cloud/applications/). +For practical testing, you can use an existing application that employs LocalStack for cloud infrastructure deployment on GitHub Actions. +Alternatively, you can start by forking one of the [Developer Hub samples](https://docs.localstack.cloud/applications/). ### Create a CI project To create a new CI project: -1. Go to the [**CI Projects**](https://app.localstack.cloud/ci) section in the LocalStack Web Application. -2. Click on **Create New Project**. +1. Go to the [**CI Projects**](https://app.localstack.cloud/ci) section in the LocalStack Web Application. +2. Click on **Create New Project**. Create a new CI project

When creating a new CI project, fill in the following information: -- **Project Name**: Enter a name for your CI project. -- **Settings**: Adjust the default-enabled options as needed: - - **Store LocalStack Logs**: Check this to save logs from your LocalStack container. - - **Store LocalStack Traces**: Check this to save traces of your infrastructure tests. - - **Track the State of Your CI Run in a Cloud Pod**: Check this to monitor the state of your LocalStack resources during the CI run. -- **Cloud Pod Services**: Specify the AWS services for which you want to store infrastructure states. Select from the available options. -- **Historical CI Runs**: Set the number of past CI runs to retain (default is 100). +- **Project Name**: Enter a name for your CI project. +- **Settings**: Adjust the default-enabled options as needed: + - **Store LocalStack Logs**: Check this to save logs from your LocalStack container. + - **Store LocalStack Traces**: Check this to save traces of your infrastructure tests. + - **Track the State of Your CI Run in a Cloud Pod**: Check this to monitor the state of your LocalStack resources during the CI run. +- **Cloud Pod Services**: Specify the AWS services for which you want to store infrastructure states. + Select from the available options. +- **Historical CI Runs**: Set the number of past CI runs to retain (default is 100). -The **Cloud Pod Name** field will auto-fill with the name of your Cloud Pod, which identifies the resources created by LocalStack for your CI project. Click **Create** to finalize your CI project. +The **Cloud Pod Name** field will auto-fill with the name of your Cloud Pod, which identifies the resources created by LocalStack for your CI project. +Click **Create** to finalize your CI project. ### Configure the CI pipeline -Go to the GitHub Action workflow where you intend to monitor CI analytics for your application stack. If you haven't already set up a CI pipeline using LocalStack for infrastructure deployments and tests, follow the instructions in our [GitHub Actions documentation](https://docs.localstack.cloud/user-guide/ci/github-actions/). +Go to the GitHub Action workflow where you intend to monitor CI analytics for your application stack. +If you haven't already set up a CI pipeline using LocalStack for infrastructure deployments and tests, follow the instructions in our [GitHub Actions documentation](https://docs.localstack.cloud/user-guide/ci/github-actions/). -To link your CI pipeline to the project you created, use the `LS_CI_PROJECT` configuration variable. For instance, if your CI project is named `ls-platform-integration-tests`, include the line `LS_CI_PROJECT: ls-platform-integration-tests` in your GitHub Action workflow. +To link your CI pipeline to the project you created, use the `LS_CI_PROJECT` configuration variable. +For instance, if your CI project is named `ls-platform-integration-tests`, include the line `LS_CI_PROJECT: ls-platform-integration-tests` in your GitHub Action workflow. Here's an example of a basic workflow configuration: @@ -100,21 +111,29 @@ jobs: ### Configure the API key -The LocalStack CLI utilizes the `LOCALSTACK_API_KEY` for authentication with the LocalStack Platform. This process enables the storage of logs and request/response traces from your CI run in the LocalStack Web Application. For setting up an API key, refer our [documentation on configuring an API key](https://docs.localstack.cloud/user-guide/ci/github-actions/#configuring-an-api-key). 
+The LocalStack CLI utilizes the `LOCALSTACK_API_KEY` for authentication with the LocalStack Platform. +This process enables the storage of logs and request/response traces from your CI run in the LocalStack Web Application. +For setting up an API key, refer to our [documentation on configuring an API key](https://docs.localstack.cloud/user-guide/ci/github-actions/#configuring-an-api-key). ### Review the Analytics -You can now trigger the CI pipeline to run your application stack in a CI environment. Navigate to the [**CI Projects**](https://app.localstack.cloud/ci) dashboard on the LocalStack Web Application. Here, you'll find the CI project you created earlier. Click on the project to access its CI runs. +You can now trigger the CI pipeline to run your application stack in a CI environment. +Navigate to the [**CI Projects**](https://app.localstack.cloud/ci) dashboard on the LocalStack Web Application. +Here, you'll find the CI project you created earlier. +Click on the project to access its CI runs. CI Project Runs
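One way to kick off such a run is sketched below; this is only illustrative and assumes your workflow is triggered on `push` and that you use the GitHub CLI to follow it (neither is required by LocalStack).

```bash
# Trigger the pipeline with an empty commit (assumes the workflow runs on push to main)
git commit --allow-empty -m "Trigger LocalStack CI analytics run"
git push origin main

# Optionally follow the run from the terminal using the GitHub CLI
gh run watch
```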

-In the project's detail view, you can select a specific **Run ID** to examine its details. The CI run details page will display various information, including logs and other relevant data. +In the project's detail view, you can select a specific **Run ID** to examine its details. +The CI run details page will display various information, including logs and other relevant data. Logs being displayed on the CI Project run

-For in-depth analysis, you have the option to view **Request/Response Traces** for the CI run. These traces provide detailed insights into each request and response during the run. You can expand these traces to explore specific details and scroll down to review the traces for each test case. +For in-depth analysis, you have the option to view **Request/Response Traces** for the CI run. +These traces provide detailed insights into each request and response during the run. +You can expand these traces to explore specific details and scroll down to review the traces for each test case. Request/Response traces from the CI Project run

@@ -124,4 +143,5 @@ You can additionally scroll down to review the traces for each test case. Request/Response traces from the application tests

-Lastly, you can navigate to the top of the page to explore the **Cloud Pod** and **Stack Insights** related to the CI run. These features offer a broader view of the CI run's impact and performance. +Lastly, you can navigate to the top of the page to explore the **Cloud Pod** and **Stack Insights** related to the CI run. +These features offer a broader view of the CI run's impact and performance. diff --git a/content/en/user-guide/web-application/ci-keys/index.md b/content/en/user-guide/web-application/ci-keys/index.md index 7c9d4b0f71..cf69759292 100644 --- a/content/en/user-guide/web-application/ci-keys/index.md +++ b/content/en/user-guide/web-application/ci-keys/index.md @@ -10,18 +10,26 @@ aliases: LocalStack requires a **CI Key** for use in Continuous Integration (CI) or similar machine environments. Each instance startup in a CI or comparable environment consumes one CI token. -CI Keys are administered on the [CI Keys page](https://app.localstack.cloud/workspace/ci-keys) of the LocalStack Web Application. These keys are linked to specific CI pipelines or projects, rather than individual developers. +CI Keys are administered on the [CI Keys page](https://app.localstack.cloud/workspace/ci-keys) of the LocalStack Web Application. +These keys are linked to specific CI pipelines or projects, rather than individual developers. A screenshot of the LocalStack web app. The screenshot shows the page to manage CI keys -CI Keys are not meant for individual developers. To grant a developer access to LocalStack and its advanced features, assign a license to them on the [Users & Licenses page](https://app.localstack.cloud/workspace/members). +CI Keys are not meant for individual developers. +To grant a developer access to LocalStack and its advanced features, assign a license to them on the [Users & Licenses page](https://app.localstack.cloud/workspace/members). {{< callout >}} -We have recently introduced Auth Tokens to replace _developer_ API keys. However, this change does not affect **CI Keys**, which remain the sole method for activating a LocalStack instance in Continuous Integration (CI) or other automated test environments. +We have recently introduced Auth Tokens to replace _developer_ API keys. +However, this change does not affect **CI Keys**, which remain the sole method for activating a LocalStack instance in Continuous Integration (CI) or other automated test environments. {{< /callout >}} ## Managing CI keys -To create a new CI key, input a meaningful name in the provided field and click the 'Generate CI Key' button. For better management, it's advisable to use a distinct CI key for each project or CI pipeline. You can manage existing CI keys by renaming, rotating, or deleting them through the options available in the list. +To create a new CI key, input a meaningful name in the provided field and click the 'Generate CI Key' button. +For better management, it's advisable to use a distinct CI key for each project or CI pipeline. +You can manage existing CI keys by renaming, rotating, or deleting them through the options available in the list. -The top section of the CI page displays the usage of CI tokens for the current period. Each period lasts one month, and the token count resets at the beginning of a new period. The dates for the current period are indicated near the usage graph. While there's no limit to the number of CI keys a workspace can have, all the keys contribute to the same quota. +The top section of the CI page displays the usage of CI tokens for the current period. 
+Each period lasts one month, and the token count resets at the beginning of a new period. +The dates for the current period are indicated near the usage graph. +While there's no limit to the number of CI keys a workspace can have, all the keys contribute to the same quota. diff --git a/content/en/user-guide/web-application/instance-management/index.md b/content/en/user-guide/web-application/instance-management/index.md index 9a89aeecf6..f3382d9b41 100644 --- a/content/en/user-guide/web-application/instance-management/index.md +++ b/content/en/user-guide/web-application/instance-management/index.md @@ -8,24 +8,26 @@ aliases: ## Introduction -LocalStack Instance Management lets you view and manage your LocalStack instances while you build and test your cloud applications locally. You can access this feature through the [**LocalStack Instances**](https://app.localstack.cloud/instances) section in the sidebar of the LocalStack Web Application. +LocalStack Instance Management lets you view and manage your LocalStack instances while you build and test your cloud applications locally. +You can access this feature through the [**LocalStack Instances**](https://app.localstack.cloud/instances) section in the sidebar of the LocalStack Web Application. Instance Management offers these features: -- **Status**: Shows the status of the services running in the LocalStack container. -- **Resource Browser**: Lets you view and manage your local AWS resources. -- **State**: Allows you to export and import the state of your LocalStack instances. -- **Extensions**: Provides extra integrations to improve your LocalStack experience. +- **Status**: Shows the status of the services running in the LocalStack container. +- **Resource Browser**: Lets you view and manage your local AWS resources. +- **State**: Allows you to export and import the state of your LocalStack instances. +- **Extensions**: Provides extra integrations to improve your LocalStack experience. LocalStack Web Application's Instance Management page ## Instance Bookmark -Instance Bookmark lets users save references to instances without directly creating or managing them. To create an Instance Bookmark, do the following: +Instance Bookmark lets users save references to instances without directly creating or managing them. +To create an Instance Bookmark, do the following: -- Click on the **Add Bookmark** button on the Instance Management page. -- Enter a name for the bookmark, specify the endpoint, and add a description. -- Click on the **Save Bookmark** button. +- Click on the **Add Bookmark** button on the Instance Management page. +- Enter a name for the bookmark, specify the endpoint, and add a description. +- Click on the **Save Bookmark** button. Instance Bookmark @@ -33,12 +35,17 @@ Instance Bookmark lets users save references to instances without directly creat You can use the Instance Bookmark feature to connect the LocalStack Web Application to a LocalStack instance running on a different machine. -To connect the Web Application with your running LocalStack instance, you need to ensure the endpoint URL’s server SSL certificate corresponds to the hostname/IP address of the URL. This is necessary when the endpoint URL is set as something like `https://myhost:4566` or uses an IP address like `https://1.2.3.4:4566`. Sites with an `https://...` URL must use HTTPS for requests, and the SSL certificate must match the hostname (e.g., localhost.localstack.cloud). 
+To connect the Web Application with your running LocalStack instance, you need to ensure the endpoint URL’s server SSL certificate corresponds to the hostname/IP address of the URL. +This is necessary when the endpoint URL is set as something like `https://myhost:4566` or uses an IP address like `https://1.2.3.4:4566`. +Sites with an `https://...` URL must use HTTPS for requests, and the SSL certificate must match the hostname (e.g., localhost.localstack.cloud). -To address this, consider setting up a local TCP proxy server that listens on `127.0.0.1:4566` and forwards all requests to the endpoint where your LocalStack instance runs. In the Web user interface, you can keep the default setting, `https://localhost.localstack.cloud:4566`. Tools like [simpleproxy](https://manpages.ubuntu.com/manpages/trusty/man1/simpleproxy.1.html) or [proxy.py](https://github.com/abhinavsingh/proxy.py) can help set this up. +To address this, consider setting up a local TCP proxy server that listens on `127.0.0.1:4566` and forwards all requests to the endpoint where your LocalStack instance runs. +In the Web user interface, you can keep the default setting, `https://localhost.localstack.cloud:4566`. +Tools like [simpleproxy](https://manpages.ubuntu.com/manpages/trusty/man1/simpleproxy.1.html) or [proxy.py](https://github.com/abhinavsingh/proxy.py) can help set this up. Alternatively, you can direct `localhost.localstack.cloud` to your target machine's IP address by modifying the `/etc/hosts` file, which is useful if you’re using the LocalStack Web UI on a macOS or Linux-based machine. {{< callout >}} -To bind to a custom IP address and port, configure the ['GATEWAY_LISTEN' configuration variable](https://docs.localstack.cloud/references/configuration/#core). For troubleshooting, refer to the [network troubleshooting guide](https://docs.localstack.cloud/references/network-troubleshooting/). +To bind to a custom IP address and port, configure the ['GATEWAY_LISTEN' configuration variable](https://docs.localstack.cloud/references/configuration/#core). +For troubleshooting, refer to the [network troubleshooting guide](https://docs.localstack.cloud/references/network-troubleshooting/). {{< /callout >}} diff --git a/content/en/user-guide/web-application/resource-browser/index.md b/content/en/user-guide/web-application/resource-browser/index.md index 70ff40e898..098f2b0326 100644 --- a/content/en/user-guide/web-application/resource-browser/index.md +++ b/content/en/user-guide/web-application/resource-browser/index.md @@ -7,70 +7,76 @@ description: > ## Introduction -The LocalStack Resource Browser allow you to view, manage, and deploy AWS resources locally while building & testing their cloud applications locally. It provides an internal, integrated experience, similar to the AWS Management Console, to manage the ephemeral resources in a LocalStack container on your local machine. +The LocalStack Resource Browser allows you to view, manage, and deploy AWS resources locally while building and testing your cloud applications. +It provides an internal, integrated experience, similar to the AWS Management Console, to manage the ephemeral resources in a LocalStack container on your local machine. LocalStack Web Application's Resource Browsers outlining various local AWS services -The Resource Browser provide an experience similar to the AWS Management Console. However, the Resource Browser is not a replacement for the AWS Management Console and only replicate some of the features of the AWS Management Console.
We recommend using our [integrations](https://docs.localstack.cloud/user-guide/integrations/) to create your resources, with the Resource Browser being used for quick viewing and management of your resources. +The Resource Browser provides an experience similar to the AWS Management Console. +However, the Resource Browser is not a replacement for the AWS Management Console and only replicates some of its features. +We recommend using our [integrations](https://docs.localstack.cloud/user-guide/integrations/) to create your resources, with the Resource Browser being used for quick viewing and management of your resources. -The LocalStack Web Application connects to your LocalStack container and retrieves the information about your local resources directly via `localhost` without using the internet. None of the information is sent to the internet, or stored on any external servers maintained by LocalStack. +The LocalStack Web Application connects to your LocalStack container and retrieves the information about your local resources directly via `localhost` without using the internet. +None of the information is sent to the internet or stored on any external servers maintained by LocalStack. {{< callout "tip" >}} -An AWS region dropdown menu in the dashboard is located on the top right of the page. You can select your desired region to ensure that you can view your resources. If you cannot view resources that you have recently created, you should verify that you are checking the resources in the correct region. +An AWS region dropdown menu is located at the top right of the dashboard page. +You can select your desired region to ensure that you can view your resources. +If you cannot view resources that you have recently created, you should verify that you are checking the resources in the correct region.
{{< /callout >}} ## Supported services The Resource Browser supports the following AWS services: -| Resource Group | Service | +| Resource Group | Service | |------------------------------|-------------------------------------------------------------------------------------------------------| -| **App Integration** | [API Gateway](https://app.localstack.cloud/inst/default/resources/apigateway) | -| | [Amazon MQ](https://app.localstack.cloud/inst/default/resources/mq/brokers) | -| | [Amazon MWAA](https://app.localstack.cloud/inst/default/resources/mwaa/environments) | -| | [Amazon SNS](https://app.localstack.cloud/inst/default/resources/sns) | -| | [Amazon SQS](https://app.localstack.cloud/inst/default/resources/sqs) | -| | [Application Auto Scaling](https://app.localstack.cloud/inst/default/resources/application-autoscaling) | -| | [AWS Step Functions](https://app.localstack.cloud/inst/default/resources/stepfunctions) | -| **Compute** | [Amazon EC2](https://app.localstack.cloud/inst/default/resources/ec2) | -| | [Amazon ECS](https://app.localstack.cloud/inst/default/resources/ecs) | -| | [Amazon ECR](https://app.localstack.cloud/inst/default/resources/ecr/repositories) | -| | [Amazon EKS](https://app.localstack.cloud/inst/default/resources/eks/clusters) | -| | [AWS Lambda](https://app.localstack.cloud/inst/default/resources/lambda/functions) | +| **App Integration** | [API Gateway](https://app.localstack.cloud/inst/default/resources/apigateway) | +| | [Amazon MQ](https://app.localstack.cloud/inst/default/resources/mq/brokers) | +| | [Amazon MWAA](https://app.localstack.cloud/inst/default/resources/mwaa/environments) | +| | [Amazon SNS](https://app.localstack.cloud/inst/default/resources/sns) | +| | [Amazon SQS](https://app.localstack.cloud/inst/default/resources/sqs) | +| | [Application Auto Scaling](https://app.localstack.cloud/inst/default/resources/application-autoscaling) | +| | [AWS Step Functions](https://app.localstack.cloud/inst/default/resources/stepfunctions) | +| **Compute** | [Amazon EC2](https://app.localstack.cloud/inst/default/resources/ec2) | +| | [Amazon ECS](https://app.localstack.cloud/inst/default/resources/ecs) | +| | [Amazon ECR](https://app.localstack.cloud/inst/default/resources/ecr/repositories) | +| | [Amazon EKS](https://app.localstack.cloud/inst/default/resources/eks/clusters) | +| | [AWS Lambda](https://app.localstack.cloud/inst/default/resources/lambda/functions) | | **Management/Governance** | [AWS Account](https://app.localstack.cloud/inst/default/resources/account/contactinfo) | -| | [AWS CloudFormation](https://app.localstack.cloud/inst/default/resources/cloudformation) | -| | [Amazon CloudWatch](https://app.localstack.cloud/inst/default/resources/cloudwatch) | -| | [Amazon CloudTrail](https://app.localstack.cloud/inst/default/resources/cloudtrail/events) | -| | [Amazon EventBridge (CloudWatch Events)](https://app.localstack.cloud/inst/default/resources/events) | -| | [AWS Systems Manager (SSM)](https://app.localstack.cloud/inst/default/resources/ssm) | -| **Business Applications** | [Amazon SES](https://app.localstack.cloud/inst/default/resources/ses) | -| **Developer Tools** | [AWS AppConfig](https://app.localstack.cloud/inst/default/resources/appconfig/applications) | -| | [AWS CodeCommit](https://app.localstack.cloud/inst/default/resources/codecommit/repositories) | -| **Front-end Web & Mobile** | [AWS Amplify](https://app.localstack.cloud/inst/default/resources/amplify/apps) | -| | [AWS 
AppSync](https://app.localstack.cloud/inst/default/resources/appsync) | -| **Security Identity Compliance** | [AWS ACM (Certificate Manager)](https://app.localstack.cloud/inst/default/resources/acm/certificates) | -| | [Amazon Cognito Identity](https://app.localstack.cloud/inst/default/resources/cognito-idp) | -| | [AWS IAM (Identity and Access Management)](https://app.localstack.cloud/inst/default/resources/iam) | -| | [AWS Key Management Service (KMS)](https://app.localstack.cloud/inst/default/resources/kms) | -| | [AWS Secrets Manager](https://app.localstack.cloud/inst/default/resources/secretsmanager) | -| **Storage** | [Amazon S3](https://app.localstack.cloud/inst/default/resources/s3) | +| | [AWS CloudFormation](https://app.localstack.cloud/inst/default/resources/cloudformation) | +| | [Amazon CloudWatch](https://app.localstack.cloud/inst/default/resources/cloudwatch) | +| | [Amazon CloudTrail](https://app.localstack.cloud/inst/default/resources/cloudtrail/events) | +| | [Amazon EventBridge (CloudWatch Events)](https://app.localstack.cloud/inst/default/resources/events) | +| | [AWS Systems Manager (SSM)](https://app.localstack.cloud/inst/default/resources/ssm) | +| **Business Applications** | [Amazon SES](https://app.localstack.cloud/inst/default/resources/ses) | +| **Developer Tools** | [AWS AppConfig](https://app.localstack.cloud/inst/default/resources/appconfig/applications) | +| | [AWS CodeCommit](https://app.localstack.cloud/inst/default/resources/codecommit/repositories) | +| **Front-end Web & Mobile** | [AWS Amplify](https://app.localstack.cloud/inst/default/resources/amplify/apps) | +| | [AWS AppSync](https://app.localstack.cloud/inst/default/resources/appsync) | +| **Security Identity Compliance** | [AWS ACM (Certificate Manager)](https://app.localstack.cloud/inst/default/resources/acm/certificates) | +| | [Amazon Cognito Identity](https://app.localstack.cloud/inst/default/resources/cognito-idp) | +| | [AWS IAM (Identity and Access Management)](https://app.localstack.cloud/inst/default/resources/iam) | +| | [AWS Key Management Service (KMS)](https://app.localstack.cloud/inst/default/resources/kms) | +| | [AWS Secrets Manager](https://app.localstack.cloud/inst/default/resources/secretsmanager) | +| **Storage** | [Amazon S3](https://app.localstack.cloud/inst/default/resources/s3) | | | [AWS Backup](https://app.localstack.cloud/inst/default/resources/backup/plans) | -| **Machine Learning** | [Amazon SageMaker](https://app.localstack.cloud/inst/default/resources/sagemaker/models) | -| | [Amazon Transcribe](https://app.localstack.cloud/inst/default/resources/transcribe/transcriptionjobs) | -| **Database** | [Amazon DynamoDB](https://app.localstack.cloud/inst/default/resources/dynamodb) | -| | [Amazon RDS](https://app.localstack.cloud/inst/default/resources/rds) | -| | [Amazon ElastiCache](https://app.localstack.cloud/inst/default/resources/elasticache) | -| | [Amazon QLDB](https://app.localstack.cloud/inst/default/resources/qldb/ledgers) | -| | [Amazon DocumentDB](https://app.localstack.cloud/inst/default/resources/docdb/clusters) | +| **Machine Learning** | [Amazon SageMaker](https://app.localstack.cloud/inst/default/resources/sagemaker/models) | +| | [Amazon Transcribe](https://app.localstack.cloud/inst/default/resources/transcribe/transcriptionjobs) | +| **Database** | [Amazon DynamoDB](https://app.localstack.cloud/inst/default/resources/dynamodb) | +| | [Amazon RDS](https://app.localstack.cloud/inst/default/resources/rds) | +| | [Amazon 
ElastiCache](https://app.localstack.cloud/inst/default/resources/elasticache) | +| | [Amazon QLDB](https://app.localstack.cloud/inst/default/resources/qldb/ledgers) | +| | [Amazon DocumentDB](https://app.localstack.cloud/inst/default/resources/docdb/clusters) | | | [Amazon Neptune](https://app.localstack.cloud/inst/default/resources/neptune/clusters) | | | [Amazon Timestream](https://app.localstack.cloud/inst/default/resources/timestream-write) | -| **Analytics** | [Amazon Athena](https://app.localstack.cloud/inst/default/resources/athena/databases) | -| | [Amazon Kinesis](https://app.localstack.cloud/inst/default/resources/kinesis) | +| **Analytics** | [Amazon Athena](https://app.localstack.cloud/inst/default/resources/athena/databases) | +| | [Amazon Kinesis](https://app.localstack.cloud/inst/default/resources/kinesis) | | | [Amazon MSK (Managed Streaming for Kafka)](https://app.localstack.cloud/inst/default/resources/kafka) | -| | [AWS Glue](https://app.localstack.cloud/inst/default/resources/glue) | -| | [Amazon Route 53](https://app.localstack.cloud/inst/default/resources/route53) | -| | [Amazon CloudFront](https://app.localstack.cloud/inst/default/resources/cloudfront/distributions) | -| | [Amazon OpenSearch Service](https://app.localstack.cloud/inst/default/resources/opensearch/domains) | +| | [AWS Glue](https://app.localstack.cloud/inst/default/resources/glue) | +| | [Amazon Route 53](https://app.localstack.cloud/inst/default/resources/route53) | +| | [Amazon CloudFront](https://app.localstack.cloud/inst/default/resources/cloudfront/distributions) | +| | [Amazon OpenSearch Service](https://app.localstack.cloud/inst/default/resources/opensearch/domains) | | **Cloud Financial Management** | [AWS Cost Explorer](https://app.localstack.cloud/inst/default/resources/ce/costcategorydefinitions) | ## Troubleshooting diff --git a/content/en/user-guide/web-application/single-sign-on/_index.md b/content/en/user-guide/web-application/single-sign-on/_index.md index 31c69234cb..81be5d38df 100644 --- a/content/en/user-guide/web-application/single-sign-on/_index.md +++ b/content/en/user-guide/web-application/single-sign-on/_index.md @@ -15,16 +15,16 @@ In your profile settings, navigate to the Single Sign-on tab which will list exi Next, click the button to create a new identity provider (IdP), where you can choose between the two leading industry standards: -- OpenID Connect (OIDC): [openid.net/connect](https://openid.net/connect/) -- SAML: [saml.xml.org/saml-specifications](http://saml.xml.org/saml-specifications) +- OpenID Connect (OIDC): [openid.net/connect](https://openid.net/connect/) +- SAML: [saml.xml.org/saml-specifications](http://saml.xml.org/saml-specifications) ## Configuring SSO using OpenID Connect (OIDC) In the form illustrated below, you can then enter the main information for the new IdP (using OpenID Connect): -- Name of your identity provider -- Client ID, Client Secret, Attributes request method, OIDC issues, Authorize scopes, and more. - - You should be able to find these attributes in your OIDC IdP configuration. +- Name of your identity provider +- Client ID, Client Secret, Attributes request method, OIDC issues, Authorize scopes, and more. + - You should be able to find these attributes in your OIDC IdP configuration. 
Configuring SSO using OpenID Connect (OIDC) @@ -43,9 +43,9 @@ These attributes can be defined to automatically map attributes of user entities The following user attribute mappings can currently be configured: -- Email -- First name -- Last name +- Email +- First name +- Last name The Email should be configured to ensure correct functionality. @@ -55,17 +55,19 @@ The Email should be configured to ensure correct functionality. After configuring the base details for your Identity Provider (IdP), the following additional information can be copied from the UI: -- **Callback URL**: The Callback URL that you may need to configure in the settings of your IdP. -- **Identifier (Entity Id)**: The Identifier (Entity Id) that you may need to configure in the settings of your IdP. -- **Sign Up Portal URL**: This is the URL that can be shared with your users to start the SSO signup flow for the LocalStack Web Application. The format of this endpoint is `https://app.localstack.cloud/auth/sso//` +- **Callback URL**: The Callback URL that you may need to configure in the settings of your IdP. +- **Identifier (Entity Id)**: The Identifier (Entity Id) that you may need to configure in the settings of your IdP. +- **Sign Up Portal URL**: This is the URL that can be shared with your users to start the SSO signup flow for the LocalStack Web Application. + The format of this endpoint is `https://app.localstack.cloud/auth/sso//` Callback URL, Sign Up Portal URL, and Identifier (Entity Id) ## User Roles and Permissions For each new member that joins your org, you can specify user roles and permissions that should be assigned to them. -- **Default User Role**: The Role that should be assigned to users of your organization signing up via SSO. In most cases, this should be a Member. -- **Default User Permissions**: Use this to define which permissions should be assigned to users of your organization signing up via SSO. - - Tip: In order to enable self-serve licences (i.e., allowing your users to allocate themselves their own license), make sure to select the **Allow member to issue a license for themselves (or a legacy API key)** permission. +- **Default User Role**: The Role that should be assigned to users of your organization signing up via SSO. + In most cases, this should be a Member. +- **Default User Permissions**: Use this to define which permissions should be assigned to users of your organization signing up via SSO. + - Tip: In order to enable self-serve licences (i.e., allowing your users to allocate themselves their own license), make sure to select the **Allow member to issue a license for themselves (or a legacy API key)** permission. User Roles and Permissions diff --git a/content/en/user-guide/web-application/single-sign-on/azure-ad/index.md b/content/en/user-guide/web-application/single-sign-on/azure-ad/index.md index 8c31a004cf..8259402305 100644 --- a/content/en/user-guide/web-application/single-sign-on/azure-ad/index.md +++ b/content/en/user-guide/web-application/single-sign-on/azure-ad/index.md @@ -28,10 +28,10 @@ To configure SSO with an Azure AD Enterprise application, we provide a simple st 5. Navigate to our web application, or follow this
link, and: - * Create a new Identity provider - * Enter a name for you Identity provider, and choose SAML as the provider type. - * Select URL for the Metadata file and paste the link that you copied previously in step 4. - * For the attribute mapping, provide the following value for the Email attribute: `http://schemas.xmlsoap.org/ws/2005/05/identity/claims/name` - (This should match the Claim name of user.userprincipalname in your Attributes & Claims) - * Leave First name attribute and Last name attribute blank. +* Create a new Identity provider +* Enter a name for you Identity provider, and choose SAML as the provider type. +* Select URL for the Metadata file and paste the link that you copied previously in step 4. +* For the attribute mapping, provide the following value for the Email attribute: `http://schemas.xmlsoap.org/ws/2005/05/identity/claims/name` - (This should match the Claim name of user.userprincipalname in your Attributes & Claims) +* Leave First name attribute and Last name attribute blank. 6. Let your team members sign up to your LocalStack Organization via the Sign Up Portal Link. diff --git a/content/en/user-guide/web-application/stack-insights/index.md b/content/en/user-guide/web-application/stack-insights/index.md index a9aa3a9da2..646f6c69de 100644 --- a/content/en/user-guide/web-application/stack-insights/index.md +++ b/content/en/user-guide/web-application/stack-insights/index.md @@ -10,12 +10,14 @@ aliases: ## Introduction -LocalStack collects execution events to provide usage analytics and insights into development and testing. Stack Insights let users report AWS API usage telemetry to their LocalStack account. +LocalStack collects execution events to provide usage analytics and insights into development and testing. +Stack Insights let users report AWS API usage telemetry to their LocalStack account. Stack Insights show which APIs are used, which clients or integrations use specific services and API operations, and which services cause the most API errors. {{< callout "note" >}} -Your privacy matters to us! We only collect anonymized and sanitized data. +Your privacy matters to us! +We only collect anonymized and sanitized data. No sensitive information about your application is ever collected or exposed. The data is only used to provide you with insights into the usage of LocalStack and to help us improve the product. {{< /callout >}} @@ -26,7 +28,8 @@ The data is only used to provide you with insights into the usage of LocalStack

-To start using this feature, log in to your [LocalStack account](https://app.localstack.cloud/) and start a [LocalStack instance on your local machine]({{< ref "getting-started/auth-token" >}}). The system will start making your events accessible on the [Stack Insights dashboard](https://app.localstack.cloud/stacks). +To start using this feature, log in to your [LocalStack account](https://app.localstack.cloud/) and start a [LocalStack instance on your local machine]({{< ref "getting-started/auth-token" >}}). +The system will start making your events accessible on the [Stack Insights dashboard](https://app.localstack.cloud/stacks). Click on the Stack widget to see: @@ -45,6 +48,7 @@ Click on an individual stack for more details, such as: Stack insights are collected only if the session runs for less than 24 hours. View the list of events during the entire Stack lifetime, including: + - Service - Operation - Status code diff --git a/content/en/user-guide/web-application/users-licences/index.md b/content/en/user-guide/web-application/users-licences/index.md index c136acf346..afbd0f6c89 100644 --- a/content/en/user-guide/web-application/users-licences/index.md +++ b/content/en/user-guide/web-application/users-licences/index.md @@ -11,27 +11,33 @@ This page allows to manage users and assign licenses to them. Illustrative screenshot of the LocalStack web app showing the page 'Users & Licenses' ## Managing Users + ### Inviting Users to the Workspace + New and existing LocalStack users can be invited to a workspace in the 'Invite Users' section. To invite a user, provide the user's name and email address. If the invitee does not have a LocalStack account yet, an invitation to create an account will be sent to the provided email address. The user's name is used as placeholder for easier managing and will be replaced by the actual username once the account is created. {{< callout >}} -Administrators can invite users to a workspace, and can freely assign and unassign licenses or legacy API keys to users. LocalStack automatically assigns a license to the user who is making the purchase, which can be reassigned again with no restrictions. +Administrators can invite users to a workspace, and can freely assign and unassign licenses or legacy API keys to users. +LocalStack automatically assigns a license to the user who is making the purchase, which can be reassigned again with no restrictions. {{< /callout >}} ### Removing Users + A user can be removed from a workspace by clicking the user entry in the list to show the user's details. This users detail view also shows the 'Remove User from Workspace' button. Removed users can be shown by enabling the 'Show Removed' toggle on the top right hand corner of the 'Workspace Members' section. Removed users can be reinvited into a workspace with the 'Resend Invitation' button. ### Managing User Permissions + User permissions can be managed by clicking the user in the list. This will expand the users detailed settings where a predefined role or advanced permissions can be set. ## Managing Licenses + A license is required to use advanced features of LocalStack. Licenses are contained in subscriptions and plans. The section 'Licenses' lists the active plan/subscription in the workspace and also shows how many licenses (and legacy API keys) are currently in use. @@ -43,9 +49,8 @@ To unassign a user's license, again, find the user in the list and click the 'x' Changes to licenses take effect immediately and require no further action of the user. 
- - ## Moving from legacy API Keys to Licenses + In the past, access to LocalStack and advanced features was granted to individual developers by providing them with a (now legacy) API key. With the recent change, now the recommended way is to assign a 'license' to a user instead. @@ -64,6 +69,7 @@ The transition to auth tokens only affects _developer_ API keys. **CI keys** are {{< /callout >}} ### Migrating Users to Auth Tokens and Licenses + To migrate users from legacy API keys, assign a license to them in the 'Workspace Members' list. The list also shows the legacy API key that is currently assigned to them. If a user already has a legacy API key assigned, assigning a license to them will not consume an additional license. @@ -75,6 +81,6 @@ Once the license is assigned to the user, and the user set up their system to us A user can find their personal auth token either in the 'Auth Token' or in the 'Getting Started' section of the web app. ### Sunsetting legacy API keys + In this transition period we continue to support legacy API keys. We will gradually phase them out over the next months, helping customers to smoothly transition over to the new license management. - diff --git a/content/en/user-guide/web-application/workspaces/index.md b/content/en/user-guide/web-application/workspaces/index.md index c89946b561..abdff54da1 100644 --- a/content/en/user-guide/web-application/workspaces/index.md +++ b/content/en/user-guide/web-application/workspaces/index.md @@ -11,4 +11,3 @@ A workspace represents the base organizational unit in the web application. Users can be invited to join a workspace, and an admin can manage their license and permissions inside a workspace. A screenshot of the LocalStack web application. The section labeled 'Workspace' in the sidebar on the left is highlighted and shows sub menu entries like 'Workspace Info', 'Auth Token', Users & Licenses', 'Subscriptions'. 
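The `package.json` and `package-lock.json` hunks below register `markdownlint` and the custom `markdownlint-rule-max-one-sentence-per-line` rule as dev dependencies. For reference, a sketch of the command that would typically produce equivalent additions (not part of this patch):

```bash
# Install the linter library and the custom sentence-per-line rule as dev dependencies
npm install --save-dev "markdownlint@^0.34.0" "markdownlint-rule-max-one-sentence-per-line@^0.0.2"
```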
- diff --git a/package-lock.json b/package-lock.json index b7b5ec2826..a946fa313c 100644 --- a/package-lock.json +++ b/package-lock.json @@ -12,6 +12,8 @@ "autoprefixer": "^10.4.0", "dedent": "^1.5.1", "inquirer": "^8.2.6", + "markdownlint": "^0.34.0", + "markdownlint-rule-max-one-sentence-per-line": "^0.0.2", "postcss": "^8.3.7", "postcss-cli": "^9.0.2", "stylelint-config-standard": "^34.0.0" @@ -280,8 +282,7 @@ "version": "2.0.1", "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", - "dev": true, - "peer": true + "dev": true }, "node_modules/array-union": { "version": "3.0.1", @@ -881,6 +882,18 @@ "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", "dev": true }, + "node_modules/entities": { + "version": "4.5.0", + "resolved": "https://registry.npmjs.org/entities/-/entities-4.5.0.tgz", + "integrity": "sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw==", + "dev": true, + "engines": { + "node": ">=0.12" + }, + "funding": { + "url": "https://github.com/fb55/entities?sponsor=1" + } + }, "node_modules/error-ex": { "version": "1.3.2", "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.2.tgz", @@ -1660,6 +1673,15 @@ "dev": true, "peer": true }, + "node_modules/linkify-it": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/linkify-it/-/linkify-it-5.0.0.tgz", + "integrity": "sha512-5aHCbzQRADcdP+ATqnDuhhJ/MRIqDkZX5pyjFHRRysS8vZ5AbqGEoFIb6pYHPZ+L/OC2Lc+xT8uHVVR5CAK/wQ==", + "dev": true, + "dependencies": { + "uc.micro": "^2.0.0" + } + }, "node_modules/locate-path": { "version": "6.0.0", "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", @@ -1780,6 +1802,72 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/markdown-it": { + "version": "14.1.0", + "resolved": "https://registry.npmjs.org/markdown-it/-/markdown-it-14.1.0.tgz", + "integrity": "sha512-a54IwgWPaeBCAAsv13YgmALOF1elABB08FxO9i+r4VFk5Vl4pKokRPeX8u5TCgSsPi6ec1otfLjdOpVcgbpshg==", + "dev": true, + "dependencies": { + "argparse": "^2.0.1", + "entities": "^4.4.0", + "linkify-it": "^5.0.0", + "mdurl": "^2.0.0", + "punycode.js": "^2.3.1", + "uc.micro": "^2.1.0" + }, + "bin": { + "markdown-it": "bin/markdown-it.mjs" + } + }, + "node_modules/markdownlint": { + "version": "0.34.0", + "resolved": "https://registry.npmjs.org/markdownlint/-/markdownlint-0.34.0.tgz", + "integrity": "sha512-qwGyuyKwjkEMOJ10XN6OTKNOVYvOIi35RNvDLNxTof5s8UmyGHlCdpngRHoRGNvQVGuxO3BJ7uNSgdeX166WXw==", + "dev": true, + "dependencies": { + "markdown-it": "14.1.0", + "markdownlint-micromark": "0.1.9" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/DavidAnson" + } + }, + "node_modules/markdownlint-micromark": { + "version": "0.1.9", + "resolved": "https://registry.npmjs.org/markdownlint-micromark/-/markdownlint-micromark-0.1.9.tgz", + "integrity": "sha512-5hVs/DzAFa8XqYosbEAEg6ok6MF2smDj89ztn9pKkCtdKHVdPQuGMH7frFfYL9mLkvfFe4pTyAMffLbjf3/EyA==", + "dev": true, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/DavidAnson" + } + }, + "node_modules/markdownlint-rule-helpers": { + "version": "0.18.0", + "resolved": "https://registry.npmjs.org/markdownlint-rule-helpers/-/markdownlint-rule-helpers-0.18.0.tgz", + "integrity": 
"sha512-UEdWfsoLr8ylXxfh4fzY5P6lExN+7Un7LbfqDXPlq5VLwwEDFdcZ7EMXoaEKNzncBKG/KWrt2sVt7KiCJgPyMQ==", + "dev": true, + "engines": { + "node": ">=14.18.0" + } + }, + "node_modules/markdownlint-rule-max-one-sentence-per-line": { + "version": "0.0.2", + "resolved": "https://registry.npmjs.org/markdownlint-rule-max-one-sentence-per-line/-/markdownlint-rule-max-one-sentence-per-line-0.0.2.tgz", + "integrity": "sha512-31Zn2qDT5B3kKu7oMoYVeiXroWIoWYT3Y5vGhezsCwPNJV5ecunx62gquH9TO3ekgBRX0XmozYRS+yAPQNVRWg==", + "dev": true, + "dependencies": { + "markdownlint-rule-helpers": "~0.18.0" + }, + "engines": { + "node": ">=14.18.0" + } + }, "node_modules/mathml-tag-names": { "version": "2.1.3", "resolved": "https://registry.npmjs.org/mathml-tag-names/-/mathml-tag-names-2.1.3.tgz", @@ -1798,6 +1886,12 @@ "dev": true, "peer": true }, + "node_modules/mdurl": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdurl/-/mdurl-2.0.0.tgz", + "integrity": "sha512-Lf+9+2r+Tdp5wXDXC4PcIBjTDtq4UKjCPMQhKIuzpJNW0b96kVqSwW0bT7FhRSfmAiFYgP+SCRvdrDozfh0U5w==", + "dev": true + }, "node_modules/meow": { "version": "10.1.5", "resolved": "https://registry.npmjs.org/meow/-/meow-10.1.5.tgz", @@ -2370,6 +2464,15 @@ "node": ">=6" } }, + "node_modules/punycode.js": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/punycode.js/-/punycode.js-2.3.1.tgz", + "integrity": "sha512-uxFIHU0YlHYhDQtV4R9J6a52SLx28BCjT+4ieh7IGbgwVJWO+km431c4yRlREUAsAmt/uMjQUyQHNEPf0M39CA==", + "dev": true, + "engines": { + "node": ">=6" + } + }, "node_modules/queue-microtask": { "version": "1.2.3", "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", @@ -3064,6 +3167,12 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/uc.micro": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/uc.micro/-/uc.micro-2.1.0.tgz", + "integrity": "sha512-ARDJmphmdvUk6Glw7y9DQ2bFkKBHwQHLi2lsaH6PPmz/Ka9sFOBsBluozhDltWmnv9u/cF6Rt87znRTPV+yp/A==", + "dev": true + }, "node_modules/universalify": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.0.tgz", @@ -3413,8 +3522,7 @@ "version": "2.0.1", "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", - "dev": true, - "peer": true + "dev": true }, "array-union": { "version": "3.0.1", @@ -3819,6 +3927,12 @@ "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", "dev": true }, + "entities": { + "version": "4.5.0", + "resolved": "https://registry.npmjs.org/entities/-/entities-4.5.0.tgz", + "integrity": "sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw==", + "dev": true + }, "error-ex": { "version": "1.3.2", "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.2.tgz", @@ -4404,6 +4518,15 @@ "dev": true, "peer": true }, + "linkify-it": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/linkify-it/-/linkify-it-5.0.0.tgz", + "integrity": "sha512-5aHCbzQRADcdP+ATqnDuhhJ/MRIqDkZX5pyjFHRRysS8vZ5AbqGEoFIb6pYHPZ+L/OC2Lc+xT8uHVVR5CAK/wQ==", + "dev": true, + "requires": { + "uc.micro": "^2.0.0" + } + }, "locate-path": { "version": "6.0.0", "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", @@ -4487,6 +4610,51 @@ "dev": true, "peer": true }, + "markdown-it": { + "version": "14.1.0", + "resolved": 
"https://registry.npmjs.org/markdown-it/-/markdown-it-14.1.0.tgz", + "integrity": "sha512-a54IwgWPaeBCAAsv13YgmALOF1elABB08FxO9i+r4VFk5Vl4pKokRPeX8u5TCgSsPi6ec1otfLjdOpVcgbpshg==", + "dev": true, + "requires": { + "argparse": "^2.0.1", + "entities": "^4.4.0", + "linkify-it": "^5.0.0", + "mdurl": "^2.0.0", + "punycode.js": "^2.3.1", + "uc.micro": "^2.1.0" + } + }, + "markdownlint": { + "version": "0.34.0", + "resolved": "https://registry.npmjs.org/markdownlint/-/markdownlint-0.34.0.tgz", + "integrity": "sha512-qwGyuyKwjkEMOJ10XN6OTKNOVYvOIi35RNvDLNxTof5s8UmyGHlCdpngRHoRGNvQVGuxO3BJ7uNSgdeX166WXw==", + "dev": true, + "requires": { + "markdown-it": "14.1.0", + "markdownlint-micromark": "0.1.9" + } + }, + "markdownlint-micromark": { + "version": "0.1.9", + "resolved": "https://registry.npmjs.org/markdownlint-micromark/-/markdownlint-micromark-0.1.9.tgz", + "integrity": "sha512-5hVs/DzAFa8XqYosbEAEg6ok6MF2smDj89ztn9pKkCtdKHVdPQuGMH7frFfYL9mLkvfFe4pTyAMffLbjf3/EyA==", + "dev": true + }, + "markdownlint-rule-helpers": { + "version": "0.18.0", + "resolved": "https://registry.npmjs.org/markdownlint-rule-helpers/-/markdownlint-rule-helpers-0.18.0.tgz", + "integrity": "sha512-UEdWfsoLr8ylXxfh4fzY5P6lExN+7Un7LbfqDXPlq5VLwwEDFdcZ7EMXoaEKNzncBKG/KWrt2sVt7KiCJgPyMQ==", + "dev": true + }, + "markdownlint-rule-max-one-sentence-per-line": { + "version": "0.0.2", + "resolved": "https://registry.npmjs.org/markdownlint-rule-max-one-sentence-per-line/-/markdownlint-rule-max-one-sentence-per-line-0.0.2.tgz", + "integrity": "sha512-31Zn2qDT5B3kKu7oMoYVeiXroWIoWYT3Y5vGhezsCwPNJV5ecunx62gquH9TO3ekgBRX0XmozYRS+yAPQNVRWg==", + "dev": true, + "requires": { + "markdownlint-rule-helpers": "~0.18.0" + } + }, "mathml-tag-names": { "version": "2.1.3", "resolved": "https://registry.npmjs.org/mathml-tag-names/-/mathml-tag-names-2.1.3.tgz", @@ -4501,6 +4669,12 @@ "dev": true, "peer": true }, + "mdurl": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdurl/-/mdurl-2.0.0.tgz", + "integrity": "sha512-Lf+9+2r+Tdp5wXDXC4PcIBjTDtq4UKjCPMQhKIuzpJNW0b96kVqSwW0bT7FhRSfmAiFYgP+SCRvdrDozfh0U5w==", + "dev": true + }, "meow": { "version": "10.1.5", "resolved": "https://registry.npmjs.org/meow/-/meow-10.1.5.tgz", @@ -4884,6 +5058,12 @@ "dev": true, "peer": true }, + "punycode.js": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/punycode.js/-/punycode.js-2.3.1.tgz", + "integrity": "sha512-uxFIHU0YlHYhDQtV4R9J6a52SLx28BCjT+4ieh7IGbgwVJWO+km431c4yRlREUAsAmt/uMjQUyQHNEPf0M39CA==", + "dev": true + }, "queue-microtask": { "version": "1.2.3", "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", @@ -5382,6 +5562,12 @@ "dev": true, "peer": true }, + "uc.micro": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/uc.micro/-/uc.micro-2.1.0.tgz", + "integrity": "sha512-ARDJmphmdvUk6Glw7y9DQ2bFkKBHwQHLi2lsaH6PPmz/Ka9sFOBsBluozhDltWmnv9u/cF6Rt87znRTPV+yp/A==", + "dev": true + }, "universalify": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.0.tgz", diff --git a/package.json b/package.json index 30bda0f78c..0e91869b52 100644 --- a/package.json +++ b/package.json @@ -22,6 +22,8 @@ "autoprefixer": "^10.4.0", "dedent": "^1.5.1", "inquirer": "^8.2.6", + "markdownlint": "^0.34.0", + "markdownlint-rule-max-one-sentence-per-line": "^0.0.2", "postcss": "^8.3.7", "postcss-cli": "^9.0.2", "stylelint-config-standard": "^34.0.0"