Commit 9906b2d

AWS adding tags (confluent specific only) #6615
1 parent 026afac commit 9906b2d

26 files changed: +231 -25 lines changed

ccloud/connect-centralized-license/redshift.sh

Lines changed: 1 addition & 1 deletion
@@ -57,7 +57,7 @@ set -e

 log "Create AWS Redshift cluster"
 # https://docs.aws.amazon.com/redshift/latest/mgmt/getting-started-cli.html
-aws redshift create-cluster --cluster-identifier $CLUSTER_NAME --master-username masteruser --master-user-password myPassword1 --node-type dc2.large --cluster-type single-node --publicly-accessible
+aws redshift create-cluster --cluster-identifier $CLUSTER_NAME --master-username masteruser --master-user-password myPassword1 --node-type dc2.large --cluster-type single-node --publicly-accessible --tags "cflt_managed_by=user,cflt_managed_id=$USER"

 # Verify AWS Redshift cluster has started within MAX_WAIT seconds
 MAX_WAIT=480
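Once the cluster is up, the tag pair added above can be read back; a minimal sketch (not part of the commit), reusing the script's `$CLUSTER_NAME`:

```bash
# Read back the tags the cluster was created with
aws redshift describe-clusters \
  --cluster-identifier $CLUSTER_NAME \
  --query 'Clusters[0].Tags'
```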

ccloud/fm-aws-dynamodb-cdc-source/fully-managed-dynamodb-cdc-source.sh

Lines changed: 2 additions & 1 deletion
@@ -30,7 +30,8 @@ aws dynamodb create-table \
 --attribute-definitions AttributeName=first_name,AttributeType=S AttributeName=last_name,AttributeType=S \
 --key-schema AttributeName=first_name,KeyType=HASH AttributeName=last_name,KeyType=RANGE \
 --provisioned-throughput ReadCapacityUnits=5,WriteCapacityUnits=5 \
---endpoint-url https://dynamodb.$AWS_REGION.amazonaws.com
+--endpoint-url https://dynamodb.$AWS_REGION.amazonaws.com \
+--tags "cflt_managed_by=user,cflt_managed_id=$USER"

 log "Waiting for table to be created"
 while true
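DynamoDB attaches tags to the table's ARN rather than its name, so reading them back takes one extra step; a sketch (not part of the commit) that assumes the table name used above is held in a `$TABLE_NAME`-style variable, which sits outside this hunk:

```bash
# Resolve the table ARN, then list the tags attached at creation time
table_arn=$(aws dynamodb describe-table --table-name "$TABLE_NAME" \
  --region $AWS_REGION --query 'Table.TableArn' --output text)
aws dynamodb list-tags-of-resource --resource-arn "$table_arn" --region $AWS_REGION
```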

ccloud/fm-aws-kinesis-source/fully-managed-kinesis-source.sh

Lines changed: 1 addition & 1 deletion
@@ -25,7 +25,7 @@ set -e
 sleep 5

 log "Create a Kinesis stream $KINESIS_STREAM_NAME"
-aws kinesis create-stream --stream-name $KINESIS_STREAM_NAME --shard-count 1 --region $AWS_REGION
+aws kinesis create-stream --stream-name $KINESIS_STREAM_NAME --shard-count 1 --region $AWS_REGION --tags "cflt_managed_by=user,cflt_managed_id=$USER"

 function cleanup_cloud_resources {
 set +e
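The stream's tags can be verified in place; a sketch (not part of the commit) using the script's own variables:

```bash
# List the tags the stream was created with
aws kinesis list-tags-for-stream --stream-name $KINESIS_STREAM_NAME --region $AWS_REGION
```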

ccloud/fm-aws-redshift-sink/fully-managed-redshift-sink.sh

Lines changed: 1 addition & 1 deletion
@@ -52,7 +52,7 @@ set -e

 log "Create AWS Redshift cluster"
 # https://docs.aws.amazon.com/redshift/latest/mgmt/getting-started-cli.html
-aws redshift create-cluster --cluster-identifier $CLUSTER_NAME --master-username masteruser --master-user-password "$PASSWORD" --node-type dc2.large --cluster-type single-node --publicly-accessible
+aws redshift create-cluster --cluster-identifier $CLUSTER_NAME --master-username masteruser --master-user-password "$PASSWORD" --node-type dc2.large --cluster-type single-node --publicly-accessible --tags "cflt_managed_by=user,cflt_managed_id=$USER"

 function cleanup_cloud_resources {
 set +e

ccloud/fm-aws-sqs-source/fully-managed-sqs-source.sh

Lines changed: 2 additions & 2 deletions
@@ -17,7 +17,7 @@ set -e
 QUEUE_NAME=pgfm${USER}sqs${TAG}
 QUEUE_NAME=${QUEUE_NAME//[-._]/}

-QUEUE_URL_RAW=$(aws sqs create-queue --queue-name $QUEUE_NAME --region ${AWS_REGION} | jq .QueueUrl)
+QUEUE_URL_RAW=$(aws sqs create-queue --queue-name $QUEUE_NAME --region ${AWS_REGION} --tags "cflt_managed_by=user,cflt_managed_id=$USER" | jq .QueueUrl)
 AWS_ACCOUNT_NUMBER=$(echo "$QUEUE_URL_RAW" | cut -d "/" -f 4)
 # https://docs.amazonaws.cn/sdk-for-net/v3/developer-guide/how-to/sqs/QueueURL.html
 # https://{REGION_ENDPOINT}/queue.|api-domain|/{YOUR_ACCOUNT_NUMBER}/{YOUR_QUEUE_NAME}

@@ -35,7 +35,7 @@ fi
 set -e

 log "Create a FIFO queue $QUEUE_NAME in region ${AWS_REGION}"
-aws sqs create-queue --queue-name $QUEUE_NAME --region ${AWS_REGION}
+aws sqs create-queue --queue-name $QUEUE_NAME --region ${AWS_REGION} --tags "cflt_managed_by=user,cflt_managed_id=$USER"

 function cleanup_cloud_resources {
 set +e
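For SQS the tags hang off the queue URL; a sketch (not part of the commit) that resolves the URL by name rather than reusing the script's `$QUEUE_URL_RAW`:

```bash
# Resolve the queue URL, then list its tags
queue_url=$(aws sqs get-queue-url --queue-name $QUEUE_NAME --region ${AWS_REGION} \
  --query QueueUrl --output text)
aws sqs list-queue-tags --queue-url "$queue_url" --region ${AWS_REGION}
```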
Lines changed: 23 additions & 0 deletions
# Fully Managed Azure Cosmos DB V2 Source connector

## Objective

Quickly test the [Fully Managed Azure Cosmos DB V2 Source](https://docs.confluent.io/cloud/current/connectors/cc-azure-cosmos-source-v2.html) connector.

## Prerequisites

See [here](https://kafka-docker-playground.io/#/how-to-use?id=%f0%9f%8c%a4%ef%b8%8f-confluent-cloud-examples)

## How to run

Simply run:

```
$ just use <playground run> command
```

Note: if you have multiple [Azure subscriptions](https://github.com/MicrosoftDocs/azure-docs-cli/blob/main/docs-ref-conceptual/manage-azure-subscriptions-azure-cli.md#change-the-active-subscription), make sure to set the `AZURE_SUBSCRIPTION_NAME` environment variable so that the Azure resource group is created in the correct subscription (for Confluent support, the subscription is `COPS`).
Lines changed: 7 additions & 0 deletions
FROM python:3

ADD insert-data.py /

RUN pip install azure-cosmos

CMD sleep infinity
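Outside of compose, the same client image can be built and exercised by hand; a sketch (not part of the commit), assuming the four Cosmos DB variables are already exported in the calling shell:

```bash
# Build the client image from the directory containing the Dockerfile,
# then run the insert script once, passing the credentials through
docker build -t azure-cosmos-client .
docker run --rm \
  -e AZURE_COSMOSDB_DB_ENDPOINT_URI \
  -e AZURE_COSMOSDB_PRIMARY_CONNECTION_KEY \
  -e AZURE_COSMOSDB_DB_NAME \
  -e AZURE_COSMOSDB_CONTAINER_NAME \
  azure-cosmos-client python /insert-data.py
```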
Lines changed: 19 additions & 0 deletions
from azure.cosmos import CosmosClient

import os

url = os.environ['AZURE_COSMOSDB_DB_ENDPOINT_URI']
key = os.environ['AZURE_COSMOSDB_PRIMARY_CONNECTION_KEY']
database_name = os.environ['AZURE_COSMOSDB_DB_NAME']
container_name = os.environ['AZURE_COSMOSDB_CONTAINER_NAME']
client = CosmosClient(url, credential=key)
database = client.get_database_client(database_name)
container = database.get_container_client(container_name)

for i in range(1, 100):
    container.upsert_item({
        'id': 'item{0}'.format(i),
        'productName': 'Widget',
        'productModel': 'Model {0}'.format(i)
    })
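Because `upsert_item` keys on `id`, running this script a second time overwrites the same 99 items instead of failing; each overwrite still registers in the Cosmos DB change feed, which is why the main script below can run it twice and expect the source connector to emit additional records.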
Lines changed: 8 additions & 0 deletions
---
services:
  # https://pypi.org/project/azure-cosmos/
  azure-cosmos-client:
    build:
      context: ../../ccloud/fm-azure-cosmosdb-source/azure-cosmos-client
    hostname: azure-cosmos-client
    container_name: azure-cosmos-client
Lines changed: 129 additions & 0 deletions
#!/bin/bash
set -e

DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"
source ${DIR}/../../scripts/utils.sh

login_and_maybe_set_azure_subscription


bootstrap_ccloud_environment

set +e
playground topic delete --topic apparels
sleep 3
playground topic create --topic apparels --nb-partitions 1
set -e

# https://github.com/microsoft/kafka-connect-cosmosdb/blob/dev/doc/CosmosDB_Setup.md
AZURE_NAME=pgfm${USER}ck${GITHUB_RUN_NUMBER}${TAG}
AZURE_NAME=${AZURE_NAME//[-._]/}
if [ ${#AZURE_NAME} -gt 24 ]; then
  AZURE_NAME=${AZURE_NAME:0:24}
fi
AZURE_REGION=westeurope
AZURE_RESOURCE_GROUP=$AZURE_NAME
AZURE_COSMOSDB_SERVER_NAME=$AZURE_NAME
AZURE_COSMOSDB_DB_NAME=$AZURE_NAME
AZURE_COSMOSDB_CONTAINER_NAME=$AZURE_NAME

set +e
log "Delete Cosmos DB instance and resource group (it might fail)"
az cosmosdb delete -g $AZURE_RESOURCE_GROUP -n $AZURE_COSMOSDB_SERVER_NAME --yes
az group delete --name $AZURE_RESOURCE_GROUP --yes
set -e

log "Creating Azure Resource Group $AZURE_RESOURCE_GROUP"
az group create \
  --name $AZURE_RESOURCE_GROUP \
  --location $AZURE_REGION \
  --tags owner_email=$AZ_USER

sleep 10

log "Creating Cosmos DB server $AZURE_COSMOSDB_SERVER_NAME"
az cosmosdb create \
  --name $AZURE_COSMOSDB_SERVER_NAME \
  --resource-group $AZURE_RESOURCE_GROUP \
  --locations regionName=$AZURE_REGION

function cleanup_cloud_resources {
  set +e
  log "Delete Cosmos DB instance"
  check_if_continue
  az cosmosdb delete -g $AZURE_RESOURCE_GROUP -n $AZURE_COSMOSDB_SERVER_NAME --yes

  log "Deleting resource group $AZURE_RESOURCE_GROUP"
  check_if_continue
  az group delete --name $AZURE_RESOURCE_GROUP --yes --no-wait
}
trap cleanup_cloud_resources EXIT

log "Create the database"
az cosmosdb sql database create \
  --name $AZURE_COSMOSDB_DB_NAME \
  --resource-group $AZURE_RESOURCE_GROUP \
  --account-name $AZURE_COSMOSDB_SERVER_NAME \
  --throughput 400

log "Create the container"
az cosmosdb sql container create \
  --name $AZURE_COSMOSDB_CONTAINER_NAME \
  --resource-group $AZURE_RESOURCE_GROUP \
  --account-name $AZURE_COSMOSDB_SERVER_NAME \
  --database-name $AZURE_COSMOSDB_DB_NAME \
  --partition-key-path "/id"

# With the Azure Cosmos DB instance setup, you will need to get the Cosmos DB endpoint URI and primary connection key. These values will be used to setup the Cosmos DB Source and Sink connectors.
AZURE_COSMOSDB_DB_ENDPOINT_URI="https://${AZURE_COSMOSDB_DB_NAME}.documents.azure.com:443/"
log "Cosmos DB endpoint URI is $AZURE_COSMOSDB_DB_ENDPOINT_URI"

# get Cosmos DB primary connection key
AZURE_COSMOSDB_PRIMARY_CONNECTION_KEY=$(az cosmosdb keys list -n $AZURE_COSMOSDB_DB_NAME -g $AZURE_RESOURCE_GROUP --query primaryMasterKey -o tsv)

docker compose build
docker compose down -v --remove-orphans
docker compose up -d --quiet-pull

connector_name="CosmosDbSourceV2_$USER"
set +e
playground connector delete --connector $connector_name > /dev/null 2>&1
set -e

log "Creating fully managed connector"
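# Note: the topic map below pairs a Kafka topic with a Cosmos DB container
# using the "topic#container" convention of the Cosmos DB connectors; the
# container name here equals $AZURE_COSMOSDB_DB_NAME because both are
# derived from $AZURE_NAME.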
playground connector create-or-update --connector $connector_name << EOF
{
  "connector.class": "CosmosDbSourceV2",
  "name": "$connector_name",
  "kafka.auth.mode": "KAFKA_API_KEY",
  "kafka.api.key": "$CLOUD_KEY",
  "kafka.api.secret": "$CLOUD_SECRET",
  "input.data.format": "JSON",
  "azure.cosmos.account.endpoint": "$AZURE_COSMOSDB_DB_ENDPOINT_URI",
  "azure.cosmos.account.key": "$AZURE_COSMOSDB_PRIMARY_CONNECTION_KEY",
  "azure.cosmos.source.database.name": "$AZURE_COSMOSDB_DB_NAME",
  "azure.cosmos.source.containers.includeAll": "true",
  "azure.cosmos.source.containers.topicMap": "apparels#${AZURE_COSMOSDB_DB_NAME}",

  "output.data.format": "AVRO",
  "topics": "hotels",
  "tasks.max" : "1"
}
EOF
wait_for_ccloud_connector_up $connector_name 180

log "Send messages to Azure Cosmos DB"
docker exec -e AZURE_COSMOSDB_DB_ENDPOINT_URI="$AZURE_COSMOSDB_DB_ENDPOINT_URI" -e AZURE_COSMOSDB_PRIMARY_CONNECTION_KEY="$AZURE_COSMOSDB_PRIMARY_CONNECTION_KEY" -e AZURE_COSMOSDB_DB_NAME="$AZURE_COSMOSDB_DB_NAME" -e AZURE_COSMOSDB_CONTAINER_NAME="$AZURE_COSMOSDB_CONTAINER_NAME" azure-cosmos-client bash -c "python /insert-data.py"

sleep 30

log "Send again messages to Azure Cosmos DB"
docker exec -e AZURE_COSMOSDB_DB_ENDPOINT_URI="$AZURE_COSMOSDB_DB_ENDPOINT_URI" -e AZURE_COSMOSDB_PRIMARY_CONNECTION_KEY="$AZURE_COSMOSDB_PRIMARY_CONNECTION_KEY" -e AZURE_COSMOSDB_DB_NAME="$AZURE_COSMOSDB_DB_NAME" -e AZURE_COSMOSDB_CONTAINER_NAME="$AZURE_COSMOSDB_CONTAINER_NAME" azure-cosmos-client bash -c "python /insert-data.py"

log "Verifying topic apparels"
playground topic consume --topic apparels --min-expected-messages 2 --timeout 60

log "Do you want to delete the fully managed connector $connector_name ?"
check_if_continue

playground connector delete --connector $connector_name
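While the script is running (the EXIT trap tears everything down afterwards), the provisioned Cosmos DB objects can be inspected directly; a minimal sanity check (a sketch, not part of the commit) reusing the script's variable names:

```bash
# Confirm the SQL database and container actually exist
# before blaming the connector for an empty topic
az cosmosdb sql database show \
  --account-name $AZURE_COSMOSDB_SERVER_NAME \
  --resource-group $AZURE_RESOURCE_GROUP \
  --name $AZURE_COSMOSDB_DB_NAME
az cosmosdb sql container show \
  --account-name $AZURE_COSMOSDB_SERVER_NAME \
  --resource-group $AZURE_RESOURCE_GROUP \
  --database-name $AZURE_COSMOSDB_DB_NAME \
  --name $AZURE_COSMOSDB_CONTAINER_NAME
```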
