Coverage for cli / commands / stacks_cmd.py: 72%
498 statements
« prev ^ index » next coverage.py v7.13.5, created at 2026-04-30 21:47 +0000
1"""Stack deployment and management commands."""
3import sys
4from typing import Any
6import click
8from ..config import GCOConfig
9from ..output import get_output_formatter
11pass_config = click.make_pass_decorator(GCOConfig, ensure=True)
@click.group()
@pass_config
def stacks(config: Any) -> None:
    """Deploy and manage GCO CDK stacks."""
    # Container group only; each subcommand below carries the behavior.
@stacks.command("list")
@click.option("--refresh", is_flag=True, help="Force refresh from AWS")
@pass_config
def list_stacks(config: Any, refresh: Any) -> None:
    """List all GCO stacks (local CDK and deployed)."""
    from ..stacks import get_stack_manager

    formatter = get_output_formatter(config)
    try:
        stack_names = get_stack_manager(config).list_stacks()
        formatter.print_info("Available CDK stacks:")
        for name in stack_names:
            print(f" - {name}")
    except Exception as exc:
        formatter.print_error(f"Failed to list stacks: {exc}")
        sys.exit(1)
@stacks.command("synth")
@click.argument("stack_name", required=False)
# BUG FIX: the option was declared `is_flag=True, default=True`, which makes it
# impossible to turn quiet mode off (passing -q sets True, omitting it defaults
# to True). A boolean flag pair keeps -q/--quiet working and adds --no-quiet.
@click.option("--quiet/--no-quiet", "-q", default=True, help="Quiet output")
@pass_config
def synth_stack(config: Any, stack_name: Any, quiet: Any) -> None:
    """Synthesize CloudFormation templates."""
    from ..stacks import get_stack_manager

    formatter = get_output_formatter(config)
    try:
        manager = get_stack_manager(config)
        # synth() may return the synthesized template text; print it only when
        # there is something to show.
        output = manager.synth(stack_name, quiet=quiet)
        if output:
            print(output)
        formatter.print_success("CDK synthesis completed")
    except Exception as e:
        formatter.print_error(f"CDK synth failed: {e}")
        sys.exit(1)
@stacks.command("diff")
@click.argument("stack_name", required=False)
@pass_config
def diff_stack(config: Any, stack_name: Any) -> None:
    """Show differences between deployed and local stacks."""
    from ..stacks import get_stack_manager

    formatter = get_output_formatter(config)
    try:
        mgr = get_stack_manager(config)
        changes = mgr.diff(stack_name)
        # Empty diff output means local and deployed templates match.
        if not changes:
            formatter.print_success("No differences found")
        else:
            print(changes)
    except Exception as exc:
        formatter.print_error(f"CDK diff failed: {exc}")
        sys.exit(1)
@stacks.command("deploy")
@click.argument("stack_name")
@click.option("--yes", "-y", is_flag=True, help="Skip approval prompts")
@click.option("--outputs-file", "-o", help="Write outputs to file")
@click.option("--tag", "-t", multiple=True, help="Add tags (key=value)")
@pass_config
def deploy_stack(config: Any, stack_name: Any, yes: Any, outputs_file: Any, tag: Any) -> None:
    """Deploy a single CDK stack to AWS.

    For deploying all stacks in the correct order, use 'deploy-all'.

    Examples:
        gco stacks deploy gco-us-east-1
        gco stacks deploy gco-global -y
        gco stacks deploy gco-us-east-1 -t Environment=prod
    """
    from ..stacks import get_stack_manager

    formatter = get_output_formatter(config)

    # Repeated -t key=value options become a tag dict; entries without '=' are skipped.
    tags = dict(t.split("=", 1) for t in tag if "=" in t)

    try:
        mgr = get_stack_manager(config)
        formatter.print_info(f"Deploying {stack_name}...")
        ok = mgr.deploy(
            stack_name=stack_name,
            require_approval=not yes,
            outputs_file=outputs_file,
            tags=tags or None,
        )
        if not ok:
            formatter.print_error("Deployment failed")
            sys.exit(1)
        formatter.print_success("Deployment completed successfully")
    except Exception as exc:
        formatter.print_error(f"Deployment failed: {exc}")
        sys.exit(1)
@stacks.command("destroy")
@click.argument("stack_name")
@click.option("--yes", "-y", is_flag=True, help="Skip confirmation")
@pass_config
def destroy_stack(config: Any, stack_name: Any, yes: Any) -> None:
    """Destroy a single CDK stack.

    For destroying all stacks in the correct order, use 'destroy-all'.

    Examples:
        gco stacks destroy gco-us-east-1
        gco stacks destroy gco-us-east-1 -y
    """
    from ..stacks import get_stack_manager

    formatter = get_output_formatter(config)

    # Interactive confirmation unless -y was given; abort=True exits on "no".
    if not yes:
        click.confirm(f"Are you sure you want to destroy {stack_name}?", abort=True)

    try:
        mgr = get_stack_manager(config)
        formatter.print_info(f"Destroying {stack_name}...")
        if not mgr.destroy(stack_name=stack_name, force=yes):
            formatter.print_error("Destroy failed")
            sys.exit(1)
        formatter.print_success(f"Stack {stack_name} destroyed successfully")
    except Exception as exc:
        formatter.print_error(f"Destroy failed: {exc}")
        sys.exit(1)
@stacks.command("deploy-all")
@click.option("--yes", "-y", is_flag=True, help="Skip approval prompts")
@click.option("--outputs-file", "-o", help="Write outputs to file")
@click.option("--tag", "-t", multiple=True, help="Add tags (key=value)")
@click.option("--parallel", "-p", is_flag=True, help="Deploy regional stacks in parallel")
@click.option("--max-workers", "-w", default=4, help="Max parallel deployments (default: 4)")
@pass_config
def deploy_all_orchestrated(
    config: Any, yes: Any, outputs_file: Any, tag: Any, parallel: Any, max_workers: Any
) -> None:
    """Deploy all stacks in the correct order.

    Deploys in three phases:
    1. Global stacks (gco-global, gco-api-gateway)
    2. Regional stacks (gco-us-east-1, etc.) - can be parallelized
    3. Monitoring stack (gco-monitoring) - depends on regional stacks

    Use --parallel to deploy regional stacks concurrently, which can
    significantly reduce total deployment time when deploying to
    multiple regions.

    Examples:
        gco stacks deploy-all -y
        gco stacks deploy-all -y --parallel
        gco stacks deploy-all -y -p --max-workers 8
        gco stacks deploy-all -y -t Environment=prod
    """
    from ..stacks import get_stack_manager

    formatter = get_output_formatter(config)

    # Repeated -t key=value options become a tag dict; malformed entries are skipped.
    tags = dict(t.split("=", 1) for t in tag if "=" in t)

    try:
        mgr = get_stack_manager(config)
        all_stacks = mgr.list_stacks()
        formatter.print_info(f"Found {len(all_stacks)} stacks to deploy")
        if parallel:
            formatter.print_info(f"Parallel mode enabled (max workers: {max_workers})")

        # Per-stack progress callbacks passed to the orchestrator.
        def _announce(stack_name: str) -> None:
            formatter.print_info(f"Deploying {stack_name}...")

        def _report(stack_name: str, success: bool) -> None:
            if success:
                formatter.print_success(f" ✓ {stack_name} deployed")
            else:
                formatter.print_error(f" ✗ {stack_name} failed")

        ok, deployed, failed = mgr.deploy_orchestrated(
            require_approval=not yes,
            outputs_file=outputs_file,
            tags=tags or None,
            on_stack_start=_announce,
            on_stack_complete=_report,
            parallel=parallel,
            max_workers=max_workers,
        )

        formatter.print_info("")
        formatter.print_info(f"Deployed: {len(deployed)}/{len(all_stacks)} stacks")

        if not ok:
            formatter.print_error(f"Deployment failed. Failed stacks: {', '.join(failed)}")
            sys.exit(1)
        formatter.print_success("All stacks deployed successfully")
    except Exception as exc:
        formatter.print_error(f"Deployment failed: {exc}")
        sys.exit(1)
@stacks.command("destroy-all")
@click.option("--yes", "-y", is_flag=True, help="Skip confirmation")
@click.option("--parallel", "-p", is_flag=True, help="Destroy regional stacks in parallel")
@click.option("--max-workers", "-w", default=4, help="Max parallel destructions (default: 4)")
@pass_config
def destroy_all_orchestrated(config: Any, yes: Any, parallel: Any, max_workers: Any) -> None:
    """Destroy all stacks in the correct order.

    Destroys in three phases:
    1. Monitoring stack (gco-monitoring)
    2. Regional stacks (gco-us-east-1, etc.) - can be parallelized
    3. Global stacks (gco-api-gateway, gco-global)

    Automatically retries up to 3 times (with 30s waits) if any stacks fail,
    which handles transient issues like orphaned resources during teardown.

    Use --parallel to destroy regional stacks concurrently, which can
    significantly reduce total teardown time when destroying multiple
    regional stacks.

    Examples:
        gco stacks destroy-all -y
        gco stacks destroy-all -y --parallel
        gco stacks destroy-all -y -p --max-workers 8
    """
    import time

    from ..stacks import get_stack_destroy_order, get_stack_manager

    formatter = get_output_formatter(config)
    # Retry up to 3 times total. CloudFormation stack deletions can fail
    # transiently — e.g., EKS leaves behind a cluster security group that
    # blocks VPC deletion, but it gets cleaned up async. A 30-second wait
    # between attempts is usually enough for the orphaned resources to clear.
    max_attempts = 3

    try:
        manager = get_stack_manager(config)
        stacks = manager.list_stacks()
        # Ordered listing is only used for the confirmation display below;
        # the actual teardown ordering is handled by destroy_orchestrated().
        ordered = get_stack_destroy_order(stacks)

        if not yes:
            formatter.print_warning("This will destroy ALL GCO stacks:")
            for stack in ordered:
                formatter.print_info(f" - {stack}")
            # abort=True exits the command if the user answers "no".
            click.confirm("\nAre you sure you want to destroy all stacks?", abort=True)

        total_stacks = len(stacks)

        for attempt in range(1, max_attempts + 1):
            if attempt > 1:
                # Clean up EKS-managed security groups between retries.
                # After the first attempt, the EKS cluster is deleted but its
                # security group (eks-cluster-sg-*) may linger and block VPC deletion.
                formatter.print_info("Cleaning up orphaned EKS resources...")
                manager.cleanup_eks_security_groups()
                formatter.print_warning(
                    f"Attempt {attempt}/{max_attempts}: waiting 30 seconds before retrying..."
                )
                time.sleep(30)

            formatter.print_info(f"Destroying {len(stacks)} stacks...")
            if parallel:
                formatter.print_info(f"Parallel mode enabled (max workers: {max_workers})")

            # Progress callbacks invoked by the orchestrator per stack.
            def on_start(stack_name: str) -> None:
                formatter.print_info(f"Destroying {stack_name}...")

            def on_complete(stack_name: str, success: bool) -> None:
                if success:
                    formatter.print_success(f" ✓ {stack_name} destroyed")
                else:
                    formatter.print_error(f" ✗ {stack_name} failed")

            # success/failed from the LAST attempt flow out of the loop and
            # drive the summary below; `successful` is unused here.
            success, successful, failed = manager.destroy_orchestrated(
                force=True,
                on_stack_start=on_start,
                on_stack_complete=on_complete,
                parallel=parallel,
                max_workers=max_workers,
            )

            if success:
                break

            if attempt < max_attempts:
                formatter.print_warning(f"{len(failed)} stack(s) failed: {', '.join(failed)}")

        formatter.print_info("")
        formatter.print_info(f"Destroyed: {total_stacks - len(failed)}/{total_stacks} stacks")

        if success:
            formatter.print_success("All stacks destroyed successfully")
        else:
            formatter.print_error(f"Some stacks failed to destroy: {', '.join(failed)}")
            sys.exit(1)

    except Exception as e:
        formatter.print_error(f"Destroy failed: {e}")
        sys.exit(1)
@stacks.command("bootstrap")
@click.option("--account", "-a", help="AWS account ID")
@click.option("--region", "-r", required=True, help="AWS region")
@pass_config
def bootstrap_cdk(config: Any, account: Any, region: Any) -> None:
    """Bootstrap CDK in an AWS account/region.

    This is required before deploying stacks to a new account/region.

    Example:
        gco stacks bootstrap --region us-east-1
        gco stacks bootstrap -a 123456789012 -r eu-west-1
    """
    from ..stacks import get_stack_manager

    formatter = get_output_formatter(config)
    try:
        mgr = get_stack_manager(config)
        formatter.print_info(f"Bootstrapping CDK in {region}...")
        if not mgr.bootstrap(account=account, region=region):
            formatter.print_error("Bootstrap failed")
            sys.exit(1)
        formatter.print_success(f"CDK bootstrapped in {region}")
    except Exception as exc:
        formatter.print_error(f"Bootstrap failed: {exc}")
        sys.exit(1)
@stacks.command("status")
@click.argument("stack_name")
@click.option("--region", "-r", required=True, help="AWS region")
@pass_config
def stack_status(config: Any, stack_name: Any, region: Any) -> None:
    """Get detailed status of a deployed stack."""
    from ..stacks import get_stack_manager

    formatter = get_output_formatter(config)
    try:
        info = get_stack_manager(config).get_stack_status(stack_name, region)
        # A falsy result means the stack is not deployed in that region.
        if not info:
            formatter.print_error(f"Stack {stack_name} not found in {region}")
            sys.exit(1)
        formatter.print(info.to_dict())
    except Exception as exc:
        formatter.print_error(f"Failed to get stack status: {exc}")
        sys.exit(1)
@stacks.command("outputs")
@click.argument("stack_name")
@click.option("--region", "-r", required=True, help="AWS region")
@pass_config
def stack_outputs(config: Any, stack_name: Any, region: Any) -> None:
    """Get outputs from a deployed stack."""
    from ..stacks import get_stack_manager

    formatter = get_output_formatter(config)
    try:
        data = get_stack_manager(config).get_outputs(stack_name, region)
        # Missing outputs is only a warning here (exit code stays 0).
        if not data:
            formatter.print_warning(f"No outputs found for {stack_name}")
        else:
            formatter.print(data)
    except Exception as exc:
        formatter.print_error(f"Failed to get outputs: {exc}")
        sys.exit(1)
@stacks.command("access")
@click.option("--cluster", "-c", help="Cluster name (default: gco-{region})")
@click.option("--region", "-r", help="AWS region (default: first deployment region)")
@pass_config
def setup_access(config: Any, cluster: Any, region: Any) -> None:
    """Configure kubectl access to a GCO EKS cluster.

    Updates kubeconfig, creates an EKS access entry for your IAM principal,
    and associates the cluster admin policy. Handles assumed roles automatically.

    Examples:
        gco stacks access
        gco stacks access -r us-west-2
        gco stacks access -c my-cluster -r eu-west-1
    """
    import subprocess

    from ..config import _load_cdk_json

    formatter = get_output_formatter(config)

    # Determine region: first regional entry in cdk.json, falling back to the
    # configured default region, then "us-east-1".
    if not region:
        cdk_regions = _load_cdk_json()
        if cdk_regions and "regional" in cdk_regions:
            region = cdk_regions["regional"][0]
        else:
            region = config.default_region or "us-east-1"

    # Determine cluster name (convention: gco-<region>).
    if not cluster:
        cluster = f"gco-{region}"

    formatter.print_info(f"Setting up access to cluster: {cluster} in region: {region}")

    try:
        # Step 1: Update kubeconfig via the AWS CLI; check=True raises
        # CalledProcessError (handled at the bottom) on a non-zero exit.
        formatter.print_info("Updating kubeconfig...")
        subprocess.run(
            ["aws", "eks", "update-kubeconfig", "--name", cluster, "--region", region],
            check=True,
            capture_output=True,
            text=True,
        )

        # Step 2: Get IAM principal (caller's ARN) from STS.
        formatter.print_info("Getting your IAM principal...")
        result = subprocess.run(
            ["aws", "sts", "get-caller-identity", "--query", "Arn", "--output", "text"],
            check=True,
            capture_output=True,
            text=True,
        )
        principal_arn = result.stdout.strip()
        formatter.print_info(f"Principal: {principal_arn}")

        # Handle assumed roles — extract the role ARN from the assumed-role ARN.
        # Access entries are keyed on the underlying IAM role ARN, so rebuild
        # it as arn:aws:iam::<account>:role/<name> from the STS account id.
        if ":assumed-role/" in principal_arn:
            import re

            role_name = re.search(r":assumed-role/([^/]+)/", principal_arn)
            if role_name:
                account_result = subprocess.run(
                    [
                        "aws",
                        "sts",
                        "get-caller-identity",
                        "--query",
                        "Account",
                        "--output",
                        "text",
                    ],
                    check=True,
                    capture_output=True,
                    text=True,
                )
                account_id = account_result.stdout.strip()
                principal_arn = f"arn:aws:iam::{account_id}:role/{role_name.group(1)}"
                formatter.print_info(f"Using role ARN: {principal_arn}")

        # Step 3: Create access entry. Failure is treated as "probably already
        # exists" and ignored — this inner try keeps the flow best-effort.
        formatter.print_info("Creating EKS access entry...")
        try:
            subprocess.run(
                [
                    "aws",
                    "eks",
                    "create-access-entry",
                    "--cluster-name",
                    cluster,
                    "--region",
                    region,
                    "--principal-arn",
                    principal_arn,
                ],
                check=True,
                capture_output=True,
                text=True,
            )
        except subprocess.CalledProcessError:
            formatter.print_info("Access entry may already exist")

        # Step 4: Associate admin policy — same best-effort pattern as step 3.
        formatter.print_info("Associating cluster admin policy...")
        try:
            subprocess.run(
                [
                    "aws",
                    "eks",
                    "associate-access-policy",
                    "--cluster-name",
                    cluster,
                    "--region",
                    region,
                    "--principal-arn",
                    principal_arn,
                    "--policy-arn",
                    "arn:aws:eks::aws:cluster-access-policy/AmazonEKSClusterAdminPolicy",
                    "--access-scope",
                    "type=cluster",
                ],
                check=True,
                capture_output=True,
                text=True,
            )
        except subprocess.CalledProcessError:
            formatter.print_info("Policy may already be associated")

        # Step 5: Verify access with kubectl after letting IAM changes settle.
        formatter.print_info("Waiting for permissions to propagate...")
        import time

        time.sleep(10)

        # No check=True here: the return code is inspected manually below.
        result = subprocess.run(
            ["kubectl", "get", "nodes", "--request-timeout=10s"],
            capture_output=True,
            text=True,
        )
        if result.returncode == 0:
            # Count data rows only (skip the kubectl header line).
            node_count = len(
                [line for line in result.stdout.strip().split("\n")[1:] if line.strip()]
            )
            print(result.stdout)
            formatter.print_info(f"Access configured successfully. {node_count} node(s) ready.")
        else:
            # NOTE(review): this branch runs when kubectl exited NON-zero (the
            # call failed), yet the message reads as a successful connection
            # with zero nodes — confirm the intended semantics.
            formatter.print_warning(
                "kubectl connected but no nodes found (cluster may be scaling to zero)"
            )

    except subprocess.CalledProcessError as e:
        formatter.print_error(f"Command failed: {e.stderr or e.stdout or str(e)}")
        sys.exit(1)
    except FileNotFoundError as e:
        # Raised when the aws or kubectl binary is not on PATH.
        formatter.print_error(f"Required tool not found: {e}")
        sys.exit(1)
    except Exception as e:
        formatter.print_error(f"Failed to set up access: {e}")
        sys.exit(1)
@stacks.group("fsx")
@pass_config
def fsx_cmd(config: Any) -> None:
    """Manage FSx for Lustre configuration."""
    # Subgroup container; see the status/enable/disable subcommands.
@fsx_cmd.command("status")
@click.option("--region", "-r", help="Show config for specific region")
@pass_config
def fsx_status(config: Any, region: Any) -> None:
    """Show current FSx for Lustre configuration status."""
    from ..stacks import get_fsx_config

    formatter = get_output_formatter(config)
    try:
        cfg = get_fsx_config(region)
        header = f"FSx config for region: {region}" if region else "Global FSx config:"
        formatter.print_info(header)
        formatter.print(cfg)
    except Exception as exc:
        formatter.print_error(f"Failed to get FSx config: {exc}")
        sys.exit(1)
@fsx_cmd.command("enable")
@click.option("--region", "-r", help="Enable FSx for specific region only")
@click.option("--storage-capacity", "-s", default=1200, help="Storage capacity in GiB (min 1200)")
@click.option(
    "--deployment-type",
    "-d",
    type=click.Choice(["SCRATCH_1", "SCRATCH_2", "PERSISTENT_1", "PERSISTENT_2"]),
    default="SCRATCH_2",
    help="FSx deployment type",
)
@click.option("--throughput", "-t", default=200, help="Per-unit storage throughput (MB/s)")
@click.option("--compression", "-c", type=click.Choice(["LZ4", "NONE"]), default="LZ4")
@click.option("--import-path", help="S3 path for data import (s3://bucket/prefix)")
@click.option("--export-path", help="S3 path for data export (s3://bucket/prefix)")
@click.option("--yes", "-y", is_flag=True, help="Skip confirmation")
@pass_config
def fsx_enable(
    config: Any,
    region: Any,
    storage_capacity: Any,
    deployment_type: Any,
    throughput: Any,
    compression: Any,
    import_path: Any,
    export_path: Any,
    yes: Any,
) -> None:
    """Enable FSx for Lustre in the stack configuration.

    FSx for Lustre provides high-performance parallel file system storage
    ideal for ML training workloads requiring high throughput and low latency.

    Examples:
        gco stacks fsx enable
        gco stacks fsx enable --region us-east-1
        gco stacks fsx enable --storage-capacity 2400 --deployment-type PERSISTENT_2
        gco stacks fsx enable -r us-west-2 --import-path s3://my-bucket/training-data
    """
    from ..stacks import update_fsx_config

    formatter = get_output_formatter(config)

    # FSx for Lustre enforces a 1200 GiB minimum file system size.
    if storage_capacity < 1200:
        formatter.print_error("Storage capacity must be at least 1200 GiB")
        sys.exit(1)

    scope = f"region {region}" if region else "all regions (global)"

    if not yes:
        # Show the full configuration before asking for confirmation.
        formatter.print_info(f"FSx for Lustre configuration for {scope}:")
        formatter.print_info(f" Storage Capacity: {storage_capacity} GiB")
        formatter.print_info(f" Deployment Type: {deployment_type}")
        formatter.print_info(f" Throughput: {throughput} MB/s per TiB")
        formatter.print_info(f" Compression: {compression}")
        if import_path:
            formatter.print_info(f" Import Path: {import_path}")
        if export_path:
            formatter.print_info(f" Export Path: {export_path}")
        click.confirm(f"\nEnable FSx for Lustre for {scope}?", abort=True)

    try:
        settings = {
            "enabled": True,
            "storage_capacity_gib": storage_capacity,
            "deployment_type": deployment_type,
            "per_unit_storage_throughput": throughput,
            "data_compression_type": compression,
            "import_path": import_path,
            "export_path": export_path,
            # Auto-import only applies when an S3 import path is linked.
            "auto_import_policy": "NEW_CHANGED_DELETED" if import_path else None,
        }
        update_fsx_config(settings, region)
        formatter.print_success(f"FSx for Lustre enabled in cdk.json for {scope}")
        deploy_cmd = f"gco stacks deploy gco-{region}" if region else "gco stacks deploy"
        formatter.print_info(f"Run '{deploy_cmd}' to apply changes")
    except Exception as exc:
        formatter.print_error(f"Failed to enable FSx: {exc}")
        sys.exit(1)
@fsx_cmd.command("disable")
@click.option("--region", "-r", help="Disable FSx for specific region only")
@click.option("--yes", "-y", is_flag=True, help="Skip confirmation")
@pass_config
def fsx_disable(config: Any, region: Any, yes: Any) -> None:
    """Disable FSx for Lustre in the stack configuration.

    Note: This only updates the configuration. Run 'gco stacks deploy'
    to apply changes. Existing FSx file systems will be deleted.

    Examples:
        gco stacks fsx disable
        gco stacks fsx disable --region us-east-1
    """
    from ..stacks import update_fsx_config

    formatter = get_output_formatter(config)
    scope = f"region {region}" if region else "all regions (global)"

    if not yes:
        # Destructive on next deploy, so warn explicitly before confirming.
        formatter.print_warning(f"This will disable FSx for Lustre for {scope}.")
        formatter.print_warning("Existing FSx file systems will be deleted on next deploy.")
        click.confirm("Are you sure?", abort=True)

    try:
        update_fsx_config({"enabled": False}, region)
        formatter.print_success(f"FSx for Lustre disabled in cdk.json for {scope}")
        deploy_cmd = f"gco stacks deploy gco-{region}" if region else "gco stacks deploy"
        formatter.print_info(f"Run '{deploy_cmd}' to apply changes")
    except Exception as exc:
        formatter.print_error(f"Failed to disable FSx: {exc}")
        sys.exit(1)
751# =============================================================================
752# Valkey commands
753# =============================================================================
@stacks.group("valkey")
@pass_config
def valkey_cmd(config: Any) -> None:
    """Manage Valkey Serverless cache configuration."""
    # Subgroup container; see the status/enable/disable subcommands.
@valkey_cmd.command("status")
@pass_config
def valkey_status(config: Any) -> None:
    """Show current Valkey Serverless configuration status."""
    from ..stacks import get_valkey_config

    formatter = get_output_formatter(config)
    try:
        cfg = get_valkey_config()
        formatter.print_info("Valkey config:")
        formatter.print(cfg)
    except Exception as exc:
        formatter.print_error(f"Failed to get Valkey config: {exc}")
        sys.exit(1)
@valkey_cmd.command("enable")
@click.option("--max-storage", default=5, help="Max data storage in GB (default: 5)")
@click.option("--max-ecpu", default=5000, help="Max eCPU per second (default: 5000)")
@click.option("--snapshot-retention", default=1, help="Snapshot retention in days (default: 1)")
@click.option("--yes", "-y", is_flag=True, help="Skip confirmation")
@pass_config
def valkey_enable(
    config: Any,
    max_storage: Any,
    max_ecpu: Any,
    snapshot_retention: Any,
    yes: Any,
) -> None:
    """Enable Valkey Serverless cache in the stack configuration.

    Valkey provides a serverless key-value cache for prompt caching,
    feature stores, session state, and low-latency data access.

    Examples:
        gco stacks valkey enable
        gco stacks valkey enable --max-storage 10 --max-ecpu 10000
    """
    from ..stacks import update_valkey_config

    formatter = get_output_formatter(config)

    if not yes:
        # Echo the settings before the interactive confirmation prompt.
        formatter.print_info("Valkey Serverless configuration:")
        formatter.print_info(f" Max Data Storage: {max_storage} GB")
        formatter.print_info(f" Max eCPU/second: {max_ecpu}")
        formatter.print_info(f" Snapshot Retention: {snapshot_retention} days")
        click.confirm("\nEnable Valkey Serverless?", abort=True)

    try:
        update_valkey_config(
            {
                "enabled": True,
                "max_data_storage_gb": max_storage,
                "max_ecpu_per_second": max_ecpu,
                "snapshot_retention_limit": snapshot_retention,
            }
        )
        formatter.print_success("Valkey Serverless enabled in cdk.json")
        formatter.print_info("Run 'gco stacks deploy-all -y' to apply changes")
    except Exception as exc:
        formatter.print_error(f"Failed to enable Valkey: {exc}")
        sys.exit(1)
@valkey_cmd.command("disable")
@click.option("--yes", "-y", is_flag=True, help="Skip confirmation")
@pass_config
def valkey_disable(config: Any, yes: Any) -> None:
    """Disable Valkey Serverless cache in the stack configuration.

    Note: This only updates the configuration. Run 'gco stacks deploy-all -y'
    to apply changes. Existing Valkey caches will be deleted.

    Examples:
        gco stacks valkey disable
    """
    from ..stacks import update_valkey_config

    formatter = get_output_formatter(config)

    if not yes:
        # Destructive on next deploy, so warn explicitly before confirming.
        formatter.print_warning("This will disable Valkey Serverless.")
        formatter.print_warning("Existing Valkey caches will be deleted on next deploy.")
        click.confirm("Are you sure?", abort=True)

    try:
        update_valkey_config({"enabled": False})
        formatter.print_success("Valkey Serverless disabled in cdk.json")
        formatter.print_info("Run 'gco stacks deploy-all -y' to apply changes")
    except Exception as exc:
        formatter.print_error(f"Failed to disable Valkey: {exc}")
        sys.exit(1)
861# =============================================================================
862# Aurora pgvector commands
863# =============================================================================
@stacks.group("aurora")
@pass_config
def aurora_cmd(config: Any) -> None:
    """Manage Aurora PostgreSQL (pgvector) configuration."""
    # Subgroup container; see the status/enable/disable subcommands.
@aurora_cmd.command("status")
@pass_config
def aurora_status(config: Any) -> None:
    """Show current Aurora PostgreSQL (pgvector) configuration status."""
    from ..stacks import get_aurora_config

    formatter = get_output_formatter(config)
    try:
        cfg = get_aurora_config()
        formatter.print_info("Aurora pgvector config:")
        formatter.print(cfg)
    except Exception as exc:
        formatter.print_error(f"Failed to get Aurora config: {exc}")
        sys.exit(1)
@aurora_cmd.command("enable")
@click.option("--min-acu", default=0, help="Minimum ACU (0 = scale to zero, default: 0)")
@click.option("--max-acu", default=16, help="Maximum ACU (default: 16)")
@click.option("--backup-retention", default=7, help="Backup retention in days (default: 7)")
@click.option(
    "--deletion-protection/--no-deletion-protection",
    default=False,
    help="Enable deletion protection",
)
@click.option("--yes", "-y", is_flag=True, help="Skip confirmation")
@pass_config
def aurora_enable(
    config: Any,
    min_acu: Any,
    max_acu: Any,
    backup_retention: Any,
    deletion_protection: Any,
    yes: Any,
) -> None:
    """Enable Aurora PostgreSQL with pgvector in the stack configuration.

    Aurora Serverless v2 with pgvector provides vector similarity search
    for RAG applications, semantic search, and embedding storage.

    Examples:
        gco stacks aurora enable
        gco stacks aurora enable --min-acu 2 --max-acu 32 --deletion-protection
    """
    from ..stacks import update_aurora_config

    formatter = get_output_formatter(config)

    # Validate capacity bounds; the first failing check reports and exits.
    for invalid, message in (
        (min_acu < 0, "Minimum ACU must be >= 0"),
        (max_acu < 1, "Maximum ACU must be >= 1"),
        (max_acu < min_acu, "Maximum ACU must be >= minimum ACU"),
    ):
        if invalid:
            formatter.print_error(message)
            sys.exit(1)

    if not yes:
        # Echo the settings before the interactive confirmation prompt.
        formatter.print_info("Aurora pgvector configuration:")
        formatter.print_info(f" Min ACU: {min_acu} {'(scale to zero)' if min_acu == 0 else ''}")
        formatter.print_info(f" Max ACU: {max_acu}")
        formatter.print_info(f" Backup Retention: {backup_retention} days")
        formatter.print_info(f" Deletion Protection: {deletion_protection}")
        click.confirm("\nEnable Aurora pgvector?", abort=True)

    try:
        update_aurora_config(
            {
                "enabled": True,
                "min_acu": min_acu,
                "max_acu": max_acu,
                "backup_retention_days": backup_retention,
                "deletion_protection": deletion_protection,
            }
        )
        formatter.print_success("Aurora pgvector enabled in cdk.json")
        formatter.print_info("Run 'gco stacks deploy-all -y' to apply changes")
    except Exception as exc:
        formatter.print_error(f"Failed to enable Aurora: {exc}")
        sys.exit(1)
@aurora_cmd.command("disable")
@click.option("--yes", "-y", is_flag=True, help="Skip confirmation")
@pass_config
def aurora_disable(config: Any, yes: Any) -> None:
    """Disable Aurora PostgreSQL (pgvector) in the stack configuration.

    Note: This only updates the configuration. Run 'gco stacks deploy-all -y'
    to apply changes. Existing Aurora clusters will be deleted unless
    deletion protection is enabled.

    Examples:
        gco stacks aurora disable
    """
    from ..stacks import update_aurora_config

    formatter = get_output_formatter(config)

    if not yes:
        # Destructive on next deploy, so warn explicitly before confirming.
        formatter.print_warning("This will disable Aurora pgvector.")
        formatter.print_warning(
            "Existing Aurora clusters will be deleted on next deploy "
            "(unless deletion protection is enabled)."
        )
        click.confirm("Are you sure?", abort=True)

    try:
        update_aurora_config({"enabled": False})
        formatter.print_success("Aurora pgvector disabled in cdk.json")
        formatter.print_info("Run 'gco stacks deploy-all -y' to apply changes")
    except Exception as exc:
        formatter.print_error(f"Failed to disable Aurora: {exc}")
        sys.exit(1)