Commit 56b465b

Author: Victor Joukov
Release 0.1.2
Parent: 3b85ec2

28 files changed (+2225, -163 lines)

.gitignore

Lines changed: 1 addition & 0 deletions
@@ -39,3 +39,4 @@ creds.sh
 /*logs
 iam-policy.json
 clouseau*
+submit-and-wait-for-results.sh

Makefile

Lines changed: 50 additions & 14 deletions
@@ -33,7 +33,7 @@ VENV?=.env
 BLAST_USAGE_REPORT=false
 
 # Cluster/GCP configuration
-ELB_CLUSTER_NAME?=elasticblast-${USER}
+ELB_CLUSTER_NAME?=`make -s results2clustername`
 ELB_NUM_NODES?=1
 # FIXME: should this be made the default? Allow enabling via env. var.? Something else? EB-297
 ELB_USE_PREEMPTIBLE?=
@@ -194,6 +194,8 @@ ELB_TEST_TIMEOUT_BLASTP_NOPAL?=960 # Based on EB-718: 20% more than 13h:20m (i.e
 
 ELB_TEST_TIMEOUT_BLASTN_16S_CHICKEN_GUT_METAGENOME?=10300 # Based on EB-736: 20% more than 143h (i.e.: 8,580 mins)
 
+ELB_TEST_TIMEOUT_BLASTP_NR_SMALL_DARK_MATTER?=45
+
 #############################################################################
 # Real world, performance tests
 
@@ -328,7 +330,7 @@ aws_regression_blastn_non_default_params: elastic-blast
 	-ELB_CLUSTER_NAME=${ELB_CLUSTER_NAME} \
 	ELB_RESULTS=${ELB_RESULTS} \
 	tests/tc-bash-runner.sh tests/integration-test.sh share/etc/elb-aws-blastn-non-default-params.ini ${ELB_TEST_TIMEOUT_MANE_VS_PDBNT}
-	test $$(zcat batch_000.out.gz | wc -l) -eq 5
+	test $$(zcat batch_000-blastn-pdbnt.out.gz | wc -l) -eq 5
 
 .PHONY: aws_regression_pdbnt_vs_mane_single_node_sync
 aws_regression_pdbnt_vs_mane_single_node_sync: elastic-blast
@@ -343,12 +345,17 @@ aws_regression_pdbnt_vs_mane_optimal_instance_type: elastic-blast
 	-ELB_RESULTS_BUCKET=${ELB_RESULTS_BUCKET} \
 	tests/tc-bash-runner.sh tests/integration-test.sh share/etc/elb-aws-spot-optimal-instance-type-blastn-pdbnt.ini ${ELB_TEST_TIMEOUT_MANE_VS_PDBNT_OPTIMAL_INSTANCE_TYPE}
 
-.PHONY: aws_regression_nt_vs_hepatitis_multi_node_sync
-aws_regression_nt_vs_hepatitis_multi_node_sync: elastic-blast
-	#-ELB_CLUSTER_NAME=${ELB_CLUSTER_NAME} \
-	#ELB_RESULTS=${ELB_RESULTS} \
-	# tests/tc-bash-runner.sh tests/integration-test-synchronous.sh share/etc/elb-aws-blastn-nt-8-nodes.ini ${ELB_TEST_TIMEOUT_HEPATITIS_VS_NT}
-	true
+.PHONY: aws_regression_nt_vs_hepatitis_multi_node
+aws_regression_nt_vs_hepatitis_multi_node: elastic-blast
+	-ELB_CLUSTER_NAME=${ELB_CLUSTER_NAME} \
+	ELB_RESULTS=${ELB_RESULTS} \
+	tests/tc-bash-runner.sh tests/integration-test.sh share/etc/elb-aws-blastn-nt-8-nodes.ini ${ELB_TEST_TIMEOUT_HEPATITIS_VS_NT}
+
+.PHONY: aws_regression_blastp_nr_vs_small_dark_matter
+aws_regression_blastp_nr_vs_small_dark_matter: elastic-blast
+	-ELB_CLUSTER_NAME=${ELB_CLUSTER_NAME} \
+	ELB_RESULTS=${ELB_RESULTS} \
+	tests/tc-bash-runner.sh tests/integration-test.sh share/etc/elb-aws-blastp-nr-small-dark-matter.ini ${ELB_TEST_TIMEOUT_BLASTP_NR_SMALL_DARK_MATTER}
 
 .PHONY: aws_regression_blastp_pataa_vs_dark_matter_multi_node_sync
 aws_regression_blastp_pataa_vs_dark_matter_multi_node_sync: elastic-blast
@@ -362,9 +369,9 @@ aws_regression_blastn_taxid_filtering: elastic-blast
 	ELB_CLUSTER_NAME=${ELB_CLUSTER_NAME} \
 	ELB_RESULTS=${ELB_RESULTS} \
 	tests/tc-bash-runner.sh tests/integration-test.sh share/etc/elb-aws-blastn-taxidfiltering.ini ${ELB_TEST_TIMEOUT_MANE_VS_PDBNT}
-	test $$(zcat batch_000.out.gz | grep 246196 | wc -l) -gt 0
-	test $$(zcat batch_000.out.gz | grep 3562 | wc -l) -gt 0
-	test $$(zcat batch_000.out.gz | cut -f 13 | sort | uniq | wc -l) -eq 2
+	test $$(zcat batch_000-blastn-pdbnt.out.gz | grep 246196 | wc -l) -gt 0
+	test $$(zcat batch_000-blastn-pdbnt.out.gz | grep 3562 | wc -l) -gt 0
+	test $$(zcat batch_000-blastn-pdbnt.out.gz | cut -f 13 | sort | uniq | wc -l) -eq 2
 
 .PHONY: aws_regression_blastn_multi_file
 aws_regression_blastn_multi_file: elastic-blast
@@ -375,6 +382,14 @@ aws_regression_blastn_multi_file: elastic-blast
 	test $$(cat batch_003.fa | awk '/>/ {print substr($$L, 1, 11);}' | grep SRR5665119 | wc -l) -gt 0
 	test $$(cat batch_004.fa | awk '/>/ {print substr($$L, 1, 11);}' | grep RFQT0100 | wc -l) -gt 0
 
+.PHONY: aws_regression_failed_db_download
+aws_regression_failed_db_download: elastic-blast
+	ELB_CLUSTER_NAME=${ELB_CLUSTER_NAME} \
+	ELB_RESULTS=${ELB_RESULTS} \
+	tests/tc-bash-runner.sh tests/integration-test.sh share/etc/elb-aws-blastn-corrupted-db.ini ${ELB_TEST_TIMEOUT_MANE_VS_PDBNT} false
+	./elastic-blast run-summary --cfg share/etc/elb-aws-blastn-corrupted-db.ini -l batch-logs >/dev/null
+	test $$(aws batch describe-jobs --jobs $$(cat batch-logs | grep ^job | cut -f 2) | jq .jobs[0].attempts[].container.taskArn | wc -l) -gt 1
+
 .PHONY: regression_blastn_multi_file
 regression_blastn_multi_file: elastic-blast
 	ELB_CLUSTER_NAME=${ELB_CLUSTER_NAME} \
@@ -806,6 +821,19 @@ monitor:
 	-kubectl top pods --containers
 	kubectl top nodes
 
+# AWS Batch job management
+job_queue?=$(shell aws cloudformation describe-stacks --stack-name ${ELB_CLUSTER_NAME} --region ${ELB_AWS_REGION} --query "Stacks[0].Outputs[?OutputKey=='JobQueueName'].OutputValue" --output text)
+comp_env_name=$(shell aws cloudformation describe-stacks --stack-name ${ELB_CLUSTER_NAME} --region ${ELB_AWS_REGION} --query "Stacks[0].Outputs[?OutputKey=='ComputeEnvironmentName'].OutputValue" --output text)
+job_def=$(shell aws cloudformation describe-stacks --stack-name ${ELB_CLUSTER_NAME} --region ${ELB_AWS_REGION} --query "Stacks[0].Outputs[?OutputKey=='JobDefinitionName'].OutputValue" --output text)
+
+.PHONY: aws-monitor
+aws-monitor: AWS_PAGER=''
+aws-monitor:
+	for s in SUBMITTED PENDING RUNNABLE STARTING RUNNING SUCCEEDED FAILED; do \
+		echo "Checking $$s jobs"; \
+		aws batch list-jobs --region ${ELB_AWS_REGION} --job-queue ${job_queue} --job-status $$s; \
+	done
+
 progress:
 	for status in Pending Running Succeeded Failed; do \
 		echo -n "$$status "; \
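
A rough boto3 counterpart to the aws-monitor loop above (a standalone sketch with a placeholder queue name, not part of the commit), for environments where Python is handier than the AWS CLI:

import boto3

batch = boto3.client('batch')
JOB_QUEUE = 'elasticblast-demo-queue'  # placeholder for the Makefile's ${job_queue}

for status in ('SUBMITTED', 'PENDING', 'RUNNABLE', 'STARTING',
               'RUNNING', 'SUCCEEDED', 'FAILED'):
    print(f'Checking {status} jobs')
    # list_jobs pages its results, so iterate with the paginator
    for page in batch.get_paginator('list_jobs').paginate(jobQueue=JOB_QUEUE, jobStatus=status):
        for job in page['jobSummaryList']:
            print(' ', job['jobId'], job['jobName'])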
@@ -835,6 +863,13 @@ list_resources: init
 	-gcloud compute disks list
 	-gcloud compute instances list
 
+# https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-interruptions.html#finding-an-interrupted-Spot-Instance
+aws_list_interrupted_spot_instances: creds.sh
+	aws ec2 describe-instances \
+		--filters Name=instance-lifecycle,Values=spot Name=instance-state-name,Values=terminated,stopped Name=tag:billingcode,Values=elastic-blast Name=tag:Owner,Values=${USER} \
+		--query "Reservations[*].Instances[*].InstanceId"
+
+
 aws_list_resources: export AWS_PAGER=
 aws_list_resources: creds.sh
 	-aws cloudformation describe-stacks --stack-name ${ELB_CLUSTER_NAME}
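
The same query expressed with boto3 (a standalone sketch; the Owner tag value is a placeholder for ${USER}), following the AWS documentation linked above on finding interrupted Spot Instances:

import boto3

ec2 = boto3.client('ec2')
filters = [
    {'Name': 'instance-lifecycle', 'Values': ['spot']},
    {'Name': 'instance-state-name', 'Values': ['terminated', 'stopped']},
    {'Name': 'tag:billingcode', 'Values': ['elastic-blast']},
    {'Name': 'tag:Owner', 'Values': ['some-user']},  # placeholder owner
]
for page in ec2.get_paginator('describe_instances').paginate(Filters=filters):
    for reservation in page['Reservations']:
        for instance in reservation['Instances']:
            print(instance['InstanceId'])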
@@ -843,8 +878,9 @@ aws_list_resources: creds.sh
 	-aws batch describe-job-definitions --status ACTIVE --output json
 	-aws batch describe-compute-environments --output json
 
-aws_monitor: creds.sh
-	-source creds.sh && aws batch describe-jobs --jobs `aws s3 cp ${ELB_RESULTS}/metadata/job-ids - | jq -r .[] | tr '\n' ' ' `
+# 100 is the limit on number of arguments to --jobs
+aws_get_100_job_ids: creds.sh
+	-source creds.sh && aws batch describe-jobs --jobs `aws s3 cp ${ELB_RESULTS}/metadata/job-ids.json - | jq -r .[] | head -100 | tr '\n' ' '`
 
 ###############################################################################
 # AWS ElasticBLAST suport
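
The head -100 above works around the AWS Batch limit of 100 job IDs per describe-jobs call. A standalone sketch (with an assumed bucket name and key standing in for ${ELB_RESULTS}/metadata/job-ids.json) that pages through the whole list in chunks of 100 instead:

import json
import boto3

s3 = boto3.client('s3')
batch = boto3.client('batch')

# Placeholder location for the job-ids.json written under ${ELB_RESULTS}/metadata/
obj = s3.get_object(Bucket='my-elb-results-bucket', Key='metadata/job-ids.json')
job_ids = json.loads(obj['Body'].read())

# describe_jobs accepts at most 100 job IDs per call, so chunk the list
for i in range(0, len(job_ids), 100):
    for job in batch.describe_jobs(jobs=job_ids[i:i + 100])['jobs']:
        print(job['jobId'], job['status'])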
@@ -903,4 +939,4 @@ scrub-code: clouseau_venv
 
 .PHONY: results2clustername
 results2clustername:
-	./share/tools/results2clustername.sh ${ELB_RESULTS}
+	@./share/tools/results2clustername.sh ${ELB_RESULTS}

README.md

Lines changed: 10 additions & 3 deletions
@@ -1,12 +1,19 @@
 # ElasticBLAST
 
-The National Center for Biotechnology (NCBI), part of the National Library of
-Medicine, is making the source code for ElasticBLAST available on GitHub as an
+ElasticBLAST is a cloud-based tool to perform your BLAST searches faster and make you more effective.
+
+ElasticBLAST is ideal for users who have a large number (thousands or more) of queries to BLAST or who prefer to use cloud infrastructure for their searches. It can run BLAST searches that cannot be done on [NCBI WebBLAST](https://blast.ncbi.nlm.nih.gov) and runs them more quickly than stand-alone [BLAST+](https://www.ncbi.nlm.nih.gov/books/NBK279690/).
+
+ElasticBLAST speeds up your work by distributing your BLAST+ searches across multiple cloud instances. The ability to scale resources in this way allows larger numbers of queries to be searched in a shorter time than you could with BLAST+ on a single host.
+
+The National Center for Biotechnology Information ([NCBI](https://www.ncbi.nlm.nih.gov)), part of the National Library of
+Medicine at the NIH, developed and maintains ElasticBLAST.
+
+The NCBI is making the source code for ElasticBLAST available on GitHub as an
 Open Distribution to allow the user community to easily obtain and examine
 that code. GitHub also provides a means for users to report issues and
 suggest modifications through pull requests.
 
-
 The NCBI will use internal source code control as the repository of record and
 push regular releases of the ElasticBLAST
 source code to GitHub. The BLAST developers will work to ensure that

share/etc/elb-aws-blastn-nt-8-nodes.ini

Lines changed: 5 additions & 8 deletions
@@ -6,16 +6,13 @@ aws-region = us-east-1
 [cluster]
 machine-type = m5.8xlarge
 num-nodes = 8
-num-cpus = 30
-disk-type = gp2
-# minimal size to trigger faster IO speed for gp2
-pd-size = 334G
-use-preemptible = true
+num-cpus = 16
+# Try more modern and larger disk size to prevent running out of burst credits
+disk-type = gp3
+pd-size = 1000G
 
 [blast]
 program = blastn
 db = nt
-mem-request = 64G
-mem-limit = 110G
+mem-limit = 60G
 queries = s3://elasticblast-test/queries/hepatitis.fsa.gz
-batch-len = 2000000
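
Rough arithmetic behind that disk change (a back-of-the-envelope sketch, not from the commit): gp2 baseline IOPS scale with volume size and rely on burst credits below 1 TiB, while gp3 provides a flat 3000 IOPS / 125 MiB/s baseline at any size, so the larger gp3 volume no longer depends on credits:

# gp2: 3 IOPS per provisioned GiB (minimum 100), bursting to 3000 while credits last
def gp2_baseline_iops(size_gib: int) -> int:
    return max(100, 3 * size_gib)

print(gp2_baseline_iops(334))    # 1002 -> needs burst credits to sustain 3000 IOPS
print(gp2_baseline_iops(1000))   # 3000 -> at 1 TiB the baseline matches the burst ceiling

GP3_BASELINE_IOPS = 3000         # gp3: constant baseline, no burst-credit bookkeeping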

src/elb/base.py

Lines changed: 1 addition & 1 deletion
@@ -98,7 +98,7 @@ def __new__(cls, value):
         str_value = str(value)
         number_re = re.compile(r'^\d+[kKmMgGtT]$|^\d+.\d+[kKmMgGtT]$')
         if not number_re.match(str_value):
-            raise ValueError('Memory request or limit must be specifed by a number followed by a unit, for example 100m')
+            raise ValueError('Memory request or limit must be specified by a number followed by a unit, for example 100m')
         if float(str_value[:-1]) <= 0:
             raise ValueError('Memory request or limit must be larger than zero')
         return super(cls, cls).__new__(cls, str_value)
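
For context, a quick standalone check of how this pattern behaves (not part of the commit); note that the dot in the second alternative is unescaped, so it matches any character, not only a decimal point:

import re

number_re = re.compile(r'^\d+[kKmMgGtT]$|^\d+.\d+[kKmMgGtT]$')

for value in ('100m', '0.9G', '1.5T', '12x5G', '100', '-5G'):
    verdict = 'accepted' if number_re.match(value) else 'rejected'
    print(f'{value!r}: {verdict}')
# '100m', '0.9G', '1.5T' are accepted as intended; '12x5G' is also accepted
# because of the unescaped dot; '100' and '-5G' are rejected.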

src/elb/commands/run_summary.py

Lines changed: 11 additions & 5 deletions
@@ -42,8 +42,11 @@
 from elb.constants import CLUSTER_ERROR, ELB_AWS_JOB_IDS, ELB_AWS_QUERY_LENGTH, ELB_METADATA_DIR, PERMISSIONS_ERROR
 from elb.constants import ELB_LOG_DIR, CSP, ElbCommand
 
+# Artificial exit codes to differentiate failure modes
+# of AWS job.
 JOB_EXIT_CODE_UNINITIALIZED = -1
 JOB_EXIT_CODE_FAILED_WITH_NO_ATTEMPT = 100000
+JOB_EXIT_CODE_FAILED_WITH_NO_EXIT_CODE = 100001
 
 
 @dataclass
@@ -458,11 +461,14 @@ def _read_job_logs_aws(cfg, write_logs):
         if len(attempts) > 0:
             attempt = attempts[-1]
             container = attempt['container']
-            job_exit_code = container['exitCode']
-            created = job['createdAt'] / 1000
-            started = job['startedAt'] / 1000
-            stopped = job['stoppedAt'] / 1000
-            parameters = job['parameters']
+            if 'exitCode' in container:
+                job_exit_code = container['exitCode']
+                created = job['createdAt'] / 1000
+                started = job['startedAt'] / 1000
+                stopped = job['stoppedAt'] / 1000
+                parameters = job['parameters']
+            else:
+                job_exit_code = JOB_EXIT_CODE_FAILED_WITH_NO_EXIT_CODE
         else:
             # Signal that job failed without attempts
             job_exit_code = JOB_EXIT_CODE_FAILED_WITH_NO_ATTEMPT
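
For context, a minimal standalone sketch (hypothetical job ID, assuming boto3 credentials) of the AWS Batch response shape this guard handles: an attempt's container dict can lack 'exitCode' when the container never reported one, for example after a Spot interruption:

import boto3

batch = boto3.client('batch')
resp = batch.describe_jobs(jobs=['job-id-placeholder'])  # hypothetical job ID
for job in resp['jobs']:
    for attempt in job.get('attempts', []):
        container = attempt.get('container', {})
        # Mirror the guard above: fall back to a sentinel when no exit code was reported
        exit_code = container.get('exitCode', 100001)  # JOB_EXIT_CODE_FAILED_WITH_NO_EXIT_CODE
        print(job['jobId'], exit_code, container.get('reason', ''))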

src/elb/config.py

Lines changed: 1 addition & 4 deletions
@@ -35,7 +35,7 @@
 import getpass
 from hashlib import md5
 from .util import check_positive_int, get_query_batch_size
-from .util import get_blastdb_mem_requirements, ElbSupportedPrograms
+from .util import ElbSupportedPrograms
 from .util import validate_gcp_string, validate_gke_cluster_name
 from .util import validate_aws_region
 from .constants import APP_STATE, CFG_BLAST, CFG_BLAST_BATCH_LEN, CFG_BLAST_DB, CFG_BLAST_DB_MEM_MARGIN, CFG_BLAST_DB_SRC, CFG_BLAST_MEM_LIMIT, CFG_BLAST_MEM_REQUEST, CFG_BLAST_OPTIONS, CFG_BLAST_PROGRAM, CFG_BLAST_QUERY, CFG_BLAST_RESULTS, CFG_CLOUD_PROVIDER, CFG_CLUSTER, CFG_CLUSTER_BID_PERCENTAGE, CFG_CLUSTER_DISK_TYPE, CFG_CLUSTER_DRY_RUN, CFG_CLUSTER_EXP_USE_LOCAL_SSD, CFG_CLUSTER_MACHINE_TYPE, CFG_CLUSTER_MAX_NODES, CFG_CLUSTER_MIN_NODES, CFG_CLUSTER_NAME, CFG_CLUSTER_NUM_CPUS, CFG_CLUSTER_NUM_NODES, CFG_CLUSTER_PD_SIZE, CFG_CLUSTER_PROVISIONED_IOPS, CFG_CLUSTER_RUN_LABEL, CFG_CLUSTER_USE_PREEMPTIBLE, CFG_CP_AWS_REGION, CFG_CP_GCP_NETWORK, CFG_CP_GCP_PROJECT, CFG_CP_GCP_REGION, CFG_CP_GCP_SUBNETWORK, CFG_CP_GCP_ZONE, CFG_CP_NAME, CFG_TIMEOUTS, CFG_TIMEOUT_BLAST_K8S_JOB, CFG_TIMEOUT_INIT_PV
@@ -49,9 +49,6 @@
 from .util import UserReportError
 from .filehelper import parse_bucket_name_key
 from typing import List
-from .gcp_traits import get_machine_properties as gcp_get_machine_properties
-from .aws_traits import get_machine_properties as aws_get_machine_properties
-from .aws_traits import create_aws_config
 
 
 def _set_sections(cfg: configparser.ConfigParser) -> None:

src/elb/constants.py

Lines changed: 1 addition & 1 deletion
@@ -119,7 +119,7 @@ class ElbCommand(Enum):
 MOL_TYPE_NUCL = 'nucl'
 
 ELB_DFLT_AWS_REGION = 'us-east-1'
-ELB_DOCKER_IMAGE = 'ncbi/elb:0.0.20'
+ELB_DOCKER_IMAGE = 'ncbi/elb:0.0.24'
 ELB_DFLT_AWS_DISK_TYPE = 'gp2'
 # minimal size of gp2 disk which triggers fastest speed
 ELB_DFLT_AWS_PD_SIZE = '334G'

src/elb/templates/elastic-blast-cf.yaml

Lines changed: 89 additions & 1 deletion
@@ -511,13 +511,44 @@ Resources:
       ComputeEnvironmentOrder:
         - ComputeEnvironment: !Ref ComputeEnvironment
           Order: 1
+      Tags:
+        Project: BLAST
+        billingcode: elastic-blast
+        Owner: !Ref Owner
+        Name: !Join [-, [elasticblast, !Ref Owner, !Ref RandomToken]]
 
   Ec2LaunchTemplate:
     Type: AWS::EC2::LaunchTemplate
     Properties:
       LaunchTemplateName: !Join [-, [elasticblast, !Ref Owner, !Ref RandomToken]]
       LaunchTemplateData:
         EbsOptimized: true
+        UserData:
+          Fn::Base64: !Sub |
+            MIME-Version: 1.0
+            Content-Type: multipart/mixed; boundary="==MYBOUNDARY=="
+
+            --==MYBOUNDARY==
+            Content-Type: text/x-shellscript; charset="us-ascii"
+
+            #!/bin/bash -xe
+
+            yum install -y unzip
+            curl -s "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip"
+            unzip awscliv2.zip
+            ./aws/install
+            rm -f awscliv2.zip
+
+            # Set tags on attached volume
+            AWS_INSTANCE_ID=$(curl -s http://169.254.169.254/latest/meta-data/instance-id)
+            ROOT_VOLUME_IDS=$(aws ec2 describe-instances --region ${AWS::Region} --instance-id $AWS_INSTANCE_ID --output text --query Reservations[0].Instances[0].BlockDeviceMappings[0].Ebs.VolumeId)
+            aws ec2 create-tags --resources $ROOT_VOLUME_IDS --region ${AWS::Region} --tags Key=Name,Value=${AWS::StackName}
+            aws ec2 create-tags --resources $ROOT_VOLUME_IDS --region ${AWS::Region} --tags Key=Owner,Value=${Owner}
+            aws ec2 create-tags --resources $ROOT_VOLUME_IDS --region ${AWS::Region} --tags Key=billingcode,Value=elastic-blast
+            aws ec2 create-tags --resources $ROOT_VOLUME_IDS --region ${AWS::Region} --tags Key=Project,Value=BLAST
+            aws ec2 create-tags --resources $ROOT_VOLUME_IDS --region ${AWS::Region} --tags Key=Created-For-Instance-ID,Value=$AWS_INSTANCE_ID
+
+            --==MYBOUNDARY==
         BlockDeviceMappings:
           - DeviceName: /dev/xvda
             Ebs:
@@ -529,6 +560,15 @@ Resources:
               - ProvisionedIopsRequested
               - !Ref ProvisionedIops
               - !Ref AWS::NoValue
+        TagSpecifications:
+          - ResourceType: launch-template
+            Tags:
+              - Key: Project
+                Value: BLAST
+              - Key: Owner
+                Value: !Ref Owner
+              - Key: billingcode
+                Value: elastic-blast
 
   BlastSearchJobDefinition:
     Type: AWS::Batch::JobDefinition
@@ -582,9 +622,21 @@ Resources:
         # retry on infrastructure problems (including terminated spot instance)
         - OnStatusReason: Host EC2*
           Action: RETRY
-        # no retries for other reasons (like a non-zero exit code)
+        # retry on database-related errors
+        - OnExitCode: 2
+          Action: RETRY
+        # These may occur if burst credits on EBS are exhausted
+        - OnReason: DockerTimeoutError*
+          Action: RETRY
+        - OnReason: CannotInspectContainer*
+          Action: RETRY
         - OnReason: "*"
           Action: EXIT
+      Tags:
+        Project: BLAST
+        billingcode: elastic-blast
+        Owner: !Ref Owner
+        Name: !Join [-, [elasticblast, !Ref Owner, !Ref RandomToken]]
 
 
   # AWS advises to use region-specific IAM resource names. Otherwise using the
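
For reference, a boto3 sketch of an equivalent retry policy (assumed job definition name and container properties, not the project's code). AWS Batch evaluates evaluateOnExit rules in order, so the trailing catch-all onReason '*' with EXIT stops retries for anything the earlier rules did not match:

import boto3

batch = boto3.client('batch')
batch.register_job_definition(
    jobDefinitionName='elasticblast-retry-demo',  # hypothetical name
    type='container',
    containerProperties={'image': 'ncbi/elb:0.0.24', 'vcpus': 1, 'memory': 1024},
    retryStrategy={
        'attempts': 3,
        'evaluateOnExit': [
            # infrastructure problems, e.g. a reclaimed Spot instance
            {'onStatusReason': 'Host EC2*', 'action': 'RETRY'},
            # database-related errors (exit code 2)
            {'onExitCode': '2', 'action': 'RETRY'},
            # symptoms of exhausted EBS burst credits
            {'onReason': 'DockerTimeoutError*', 'action': 'RETRY'},
            {'onReason': 'CannotInspectContainer*', 'action': 'RETRY'},
            # anything else: fail without retrying
            {'onReason': '*', 'action': 'EXIT'},
        ],
    },
)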
@@ -614,6 +666,15 @@ Resources:
             Action: sts:AssumeRole
       ManagedPolicyArns:
         - arn:aws:iam::aws:policy/service-role/AmazonEC2ContainerServiceforEC2Role
+      Tags:
+        - Key: Name
+          Value: !Join [-, [elasticblast, !Ref Owner, !Ref RandomToken]]
+        - Key: Project
+          Value: BLAST
+        - Key: Owner
+          Value: !Ref Owner
+        - Key: billingcode
+          Value: elastic-blast
 
   CreatedBatchServiceRole:
     Type: AWS::IAM::Role
@@ -631,6 +692,15 @@ Resources:
             Action: sts:AssumeRole
       ManagedPolicyArns:
         - arn:aws:iam::aws:policy/service-role/AWSBatchServiceRole
+      Tags:
+        - Key: Name
+          Value: !Join [-, [elasticblast, !Ref Owner, !Ref RandomToken]]
+        - Key: Project
+          Value: BLAST
+        - Key: Owner
+          Value: !Ref Owner
+        - Key: billingcode
+          Value: elastic-blast
 
   CreatedBatchJobRole:
     Type: AWS::IAM::Role
@@ -647,6 +717,15 @@ Resources:
             Action: sts:AssumeRole
       ManagedPolicyArns:
         - arn:aws:iam::aws:policy/AmazonS3FullAccess
+      Tags:
+        - Key: Name
+          Value: !Join [-, [elasticblast, !Ref Owner, !Ref RandomToken]]
+        - Key: Project
+          Value: BLAST
+        - Key: Owner
+          Value: !Ref Owner
+        - Key: billingcode
+          Value: elastic-blast
 
   CreatedSpotFleetRole:
     Type: AWS::IAM::Role
Type: AWS::IAM::Role
@@ -664,6 +743,15 @@ Resources:
664743
Action: sts:AssumeRole
665744
ManagedPolicyArns:
666745
- arn:aws:iam::aws:policy/service-role/AmazonEC2SpotFleetTaggingRole
746+
Tags:
747+
- Key: Name
748+
Value: !Join [-, [elasticblast, !Ref Owner, !Ref RandomToken]]
749+
- Key: Project
750+
Value: BLAST
751+
- Key: Owner
752+
Value: !Ref Owner
753+
- Key: billingcode
754+
Value: elastic-blast
667755

668756
Outputs:
669757
JobQueueName:
