# Ruby on Rails Framework Summary
# The Complete Web Application Framework

rails:
  full_name: "Ruby on Rails"
  description: "Full-stack web application framework written in Ruby"
  creator: "David Heinemeier Hansson (DHH)"
  initial_release: "2004"
  latest_version: "Rails 7.1.3 (as of 2024)"
  license: "MIT License"
  philosophy: "Convention over Configuration, Don't Repeat Yourself (DRY)"

core_principles:
  convention_over_configuration:
    description: "Sensible defaults reduce decision fatigue and configuration"
    benefits: ["Faster development", "Consistent structure", "Less boilerplate"]
    examples: ["Database table naming", "File structure", "URL routing"]

  dont_repeat_yourself:
    description: "Every piece of knowledge should have single, unambiguous representation"
    benefits: ["Maintainable code", "Fewer bugs", "Easier refactoring"]
    examples: ["Helpers", "Partials", "Concerns"]

  restful_architecture:
    description: "Representational State Transfer as default pattern"
    benefits: ["Standardized APIs", "Predictable URLs", "Clean separation"]
    components: ["Resources", "HTTP verbs", "Standard actions"]

mvc_architecture:
  model:
    responsibility: "Business logic and data rules"
    components: ["ActiveRecord", "Validations", "Callbacks", "Associations"]
    location: "app/models/"

  view:
    responsibility: "Presentation layer"
    components: ["ERB templates", "Partials", "Helpers", "Layouts"]
    location: "app/views/"

  controller:
    responsibility: "Request handling and coordination"
    components: ["Actions", "Filters", "Strong Parameters", "Sessions"]
    location: "app/controllers/"

key_components:
  active_record:
    purpose: "Object-Relational Mapping (ORM) layer"
    features:
      - "Database abstraction"
      - "Migrations"
      - "Validations"
      - "Associations (has_many, belongs_to, etc.)"
      - "Query interface"
    databases_supported: ["PostgreSQL", "MySQL", "SQLite", "Oracle", "SQL Server"]
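    # Hedged sketch: associations, a validation, and the query interface
    # (Author/Book are illustrative models)
    example: |
      class Author < ApplicationRecord
        has_many :books
        validates :name, presence: true
      end

      class Book < ApplicationRecord
        belongs_to :author
      end

      # Query interface: chainable and lazily evaluated
      Author.where(active: true).includes(:books).order(:name)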

  action_controller:
    purpose: "Request handling and response generation"
    features:
      - "Routing"
      - "Parameters handling"
      - "Sessions and cookies"
      - "Filters (before_action, after_action)"
      - "Strong Parameters"

  action_view:
    purpose: "Template rendering and presentation"
    features:
      - "ERB (Embedded Ruby) templates"
      - "Form helpers"
      - "Asset pipeline"
      - "Partials and layouts"
    templating_engines: ["ERB", "Haml", "Slim"]

  action_mailer:
    purpose: "Email composition and delivery"
    features:
      - "Email templates"
      - "Attachments"
      - "Multiple delivery methods"
      - "Preview functionality"

  active_job:
    purpose: "Background job processing"
    features:
      - "Unified interface for job queues"
      - "Multiple backend support"
      - "Scheduled jobs"
    backends: ["Sidekiq", "Resque", "Delayed Job", "Que"]

  action_cable:
    purpose: "WebSocket integration for real-time features"
    features:
      - "Real-time updates"
      - "Channels"
      - "Connection management"
      - "Broadcasting"

  active_storage:
    purpose: "File uploads and cloud storage"
    features:
      - "Multiple storage services"
      - "Image processing"
      - "Direct uploads"
    supported_services: ["AWS S3", "Google Cloud", "Microsoft Azure", "Local"]

development_workflow:
  project_creation:
    command: "rails new project_name"
    options: ["--database=postgresql", "--skip-test", "--api", "--javascript=esbuild"]

  database_operations:
    - "rails db:create (Create database)"
    - "rails db:migrate (Run migrations)"
    - "rails db:seed (Seed data)"
    - "rails db:rollback (Rollback migration)"

  development_server:
    command: "rails server or rails s"
    default_port: 3000
    features: ["Auto-reload", "Development mode logging", "Error pages"]

  code_generation:
    - "rails generate model User name:string email:string"
    - "rails generate controller Users index show"
    - "rails generate migration AddAgeToUsers age:integer"
    - "rails generate scaffold Post title:string content:text"

file_structure:
  app/
    controllers/    # Controller classes
    models/         # Model classes
    views/          # View templates
    helpers/        # View helper methods
    assets/         # CSS, JavaScript, images
    jobs/           # Active Job classes
    mailers/        # Action Mailer classes
    channels/       # Action Cable channels

  config/
    routes.rb       # URL routing
    database.yml    # Database configuration
    environments/   # Environment-specific settings

  db/
    migrate/        # Database migration files
    schema.rb       # Current database schema
    seeds.rb        # Seed data

  public/           # Static files
  test/ or spec/    # Test files

popular_gems:
  authentication:
    - "devise (Full-featured authentication)"
    - "omniauth (OAuth authentication)"
    - "bcrypt (Password hashing)"

  authorization:
    - "pundit (Policy-based authorization)"
    - "cancancan (Ability-based authorization)"

  testing:
    - "rspec-rails (Testing framework)"
    - "factory_bot_rails (Test data factories)"
    - "capybara (Integration testing)"

  frontend:
    - "webpacker (Webpack integration)"
    - "importmap-rails (ES modules)"
    - "propshaft (CSS and assets)"

  development:
    - "pry-rails (Enhanced console)"
    - "bullet (N+1 query detection)"
    - "rubocop (Code linting)"

testing_framework:
  minitest:
    description: "Rails default testing framework"
    features: ["Unit tests", "Integration tests", "System tests"]

  rspec:
    description: "Popular alternative testing framework"
    structure:
      - "spec/models/ (Model tests)"
      - "spec/controllers/ (Controller tests)"
      - "spec/features/ (Feature tests)"
      - "spec/jobs/ (Job tests)"

  test_types:
    - "Unit tests (Models, helpers)"
    - "Functional tests (Controllers)"
    - "Integration tests (Multiple controllers)"
    - "System tests (Full browser testing)"

deployment_options:
  platform_as_a_service:
    - "Heroku (Most popular for Rails)"
    - "Render"
    - "Fly.io"
    - "Railway"

  vps_deployment:
    - "Capistrano (Automated deployment)"
    - "Docker containers"
    - "Manual server setup"

  cloud_platforms:
    - "AWS (Elastic Beanstalk, ECS)"
    - "Google Cloud Platform"
    - "Microsoft Azure"

performance_optimization:
  database:
    - "Eager loading (includes, preload)"
    - "Database indexing"
    - "Query optimization"
    - "Caching strategies"

  application:
    - "Russian doll caching"
    - "Background job processing"
    - "Asset compression"
    - "CDN integration"

  monitoring:
    - "New Relic"
    - "Skylight"
    - "Scout APM"

security_features:
  built_in_protections:
    - "CSRF protection"
    - "SQL injection prevention"
    - "XSS protection"
    - "Strong parameters"
    - "Secure headers"

  authentication_security:
    - "BCrypt password hashing"
    - "Session security"
    - "Remember me tokens"

  additional_measures:
    - "Content Security Policy"
    - "SSL enforcement"
    - "Security headers"

rails_7_features:
  asset_management: "Import maps by default; Propshaft offered as a lighter Sprockets alternative"
  javascript: "Hotwire (Turbo + Stimulus)"
  database: "Multiple databases per model"
  encryption: "Active Record encryption"
  async_processing: "Improved Active Job"

learning_resources:
  official: 
    - "Ruby on Rails Guides (guides.rubyonrails.org)"
    - "API Documentation (api.rubyonrails.org)"
    - "Rails Blog (weblog.rubyonrails.org)"

  community:
    - "Ruby on Rails Forum (discuss.rubyonrails.org)"
    - "Stack Overflow"
    - "Ruby on Rails Slack"

  tutorials:
    - "Ruby on Rails Tutorial by Michael Hartl"
    - "GoRails video tutorials"
    - "The Odin Project"

companies_using_rails:
  - "GitHub"
  - "Shopify"
  - "Basecamp"
  - "Airbnb"
  - "Twitch"
  - "SoundCloud"
  - "Hulu"
  - "Zendesk"

strengths:
  - "Rapid development"
  - "Strong conventions"
  - "Large ecosystem and community"
  - "Excellent documentation"
  - "Proven scalability"
  - "Security by default"

weaknesses:
  - "Performance overhead compared to lighter frameworks"
  - "Learning curve for Ruby"
  - "Less flexible than micro-frameworks"
  - "Memory usage can be high"

community:
  size: "Large and active"
  conferences: ["RailsConf", "RubyConf", "Brighton Ruby"]
  meetups: "Global Ruby and Rails meetups"
  contribution: "Open source with strong community involvement"

future_outlook:
  current_focus: "Modern web standards, performance, developer experience"
  adoption_trend: "Mature framework with steady enterprise adoption"
  innovation_areas: ["Real-time features", "API development", "Developer tools"]

# Rails Command Line Cheat Sheet

commands:
  project_management:
    - "rails new APP_NAME - Create new Rails application"
    - "rails server - Start development server"
    - "rails console - Interactive console"
    - "rails generate - Code generator"
    - "rails destroy - Remove generated code"

  database:
    - "rails db:create - Create database"
    - "rails db:migrate - Run migrations"
    - "rails db:rollback - Rollback last migration"
    - "rails db:seed - Load seed data"
    - "rails db:setup - Create and seed database"

  testing:
    - "rails test - Run all tests"
    - "rails test:system - Run system tests"
    - "rails test:controllers - Run controller tests"

  maintenance:
    - "rails routes - List all routes"
    - "rails logs - View application logs"
    - "rails credentials:edit - Edit encrypted credentials"
    - "rails assets:precompile - Precompile assets"

generators:
  model: "rails generate model ModelName field:type"
  controller: "rails generate controller ControllerName action1 action2"
  scaffold: "rails generate scaffold ModelName field:type"
  migration: "rails generate migration AddFieldToTable field:type"
  mailer: "rails generate mailer MailerName method1 method2"

This summary provides a comprehensive overview of Ruby on Rails, covering its philosophy, architecture, components, and ecosystem. Rails continues to be a popular choice for web development due to its productivity-focused approach and mature ecosystem.

# Ruby on Rails - Advanced Features & Latest Updates

rails_8_preview:
  expected_release: "Late 2024 / Early 2025"
  key_features:
    - "Solid Cache (new default cache store)"
    - "Active Record async queries"
    - "Improved Hotwire integration"
    - "Better developer experience"
    - "Enhanced security defaults"

advanced_patterns:
  service_objects:
    purpose: "Extract complex business logic from models/controllers"
    example: |
      class UserRegistration
        def initialize(user_params)
          @user_params = user_params
        end
        
        def call
          User.transaction do
            user = User.create!(@user_params)
            WelcomeEmailJob.perform_later(user.id)
            user
          end
        end
      end

  form_objects:
    purpose: "Handle complex forms spanning multiple models"
    gems: ["reform", "active_model_forms"]
    benefits: ["Validation separation", "Complex form handling", "Better testing"]

  query_objects:
    purpose: "Encapsulate complex database queries"
    example: |
      class ActiveUsersQuery
        def initialize(relation = User.all)
          @relation = relation
        end
        
        def call
          @relation.where(active: true)
                   .where('last_login_at > ?', 30.days.ago)
                   .order(:last_login_at)
        end
      end

  policy_objects:
    purpose: "Centralize authorization logic"
    gems: ["pundit"]
    example: |
      class PostPolicy
        attr_reader :user, :post
        
        def initialize(user, post)
          @user = user
          @post = post
        end
        
        def update?
          user.admin? || post.user == user
        end
      end

performance_advanced:
  database_optimization:
    - "Counter caches for frequent counts"
    - "Materialized views for complex queries"
    - "Database constraints and indexes"
    - "Connection pooling tuning"

  caching_strategies:
    russian_doll: "Nested fragment caching (sketch below)"
    low_level: "Rails.cache for arbitrary data"
    http_caching: "ETags and expiration headers"
    database: "Query result caching"

  background_processing:
    sidekiq: "Redis-backed, multi-threaded"
    good_job: "PostgreSQL-backed, runs inside the Rails process"
    solid_queue: "Database-backed, slated as the Rails 8 default"
    sucker_punch: "In-process, simple"

api_development:
  rails_api:
    creation: "rails new my_api --api"
    features_removed: ["Views", "Assets", "Helpers"]
    focused_on: "JSON responses, API authentication"

  popular_gems:
    serialization: ["fast_jsonapi", "blueprinter", "jsonapi-serializer"]
    authentication: ["jwt", "knock", "devise_token_auth"]
    documentation: ["rswag", "apipie-rails"]

  graphql:
    gems: ["graphql-ruby", "graphql-rails"]
    benefits: ["Strongly typed", "Single endpoint", "Client-specified queries"]

testing_advanced:
  test_types:
    contract_testing: "Pact for API contracts"
    performance_testing: "RSpec benchmarks"
    security_testing: "Brakeman for static analysis"
    mutation_testing: "Mutant for test coverage quality"

  factories:
    factory_bot: 
      features: ["Traits", "Associations", "Sequences"]
      best_practices: ["Build strategies", "Only necessary attributes"]

  test_doubles:
    rspec_mocks: "Method stubs and expectations"
    test_doubles: "Isolate components during testing"

deployment_advanced:
  docker:
    structure: "Multi-stage Dockerfile"
    benefits: ["Consistent environments", "Easy scaling", "CI/CD integration"]

  kubernetes:
    configuration: "Deployments, Services, Ingress"
    tools: ["kubectl", "helm", "kustomize"]

  monitoring:
    application: ["New Relic", "AppSignal", "Skylight"]
    infrastructure: ["Prometheus", "Grafana"]
    logging: ["Lograge", "Semantic logger"]

  zero_downtime:
    strategies: ["Blue-green deployment", "Canary releases", "Database migrations best practices"]

security_advanced:
  common_vulnerabilities:
    sql_injection: "Prevented by ActiveRecord, but careful with raw SQL"
    xss: "Automatic escaping in views, careful with html_safe"
    csrf: "Built-in protection, API apps need different handling"
    mass_assignment: "Strong parameters protection"

  advanced_measures:
    content_security_policy: "Configure in config/initializers"
    secure_headers: "gem 'secure_headers'"
    rate_limiting: "gem 'rack-attack'"
    security_headers: "HSTS, X-Frame-Options, etc."

  authentication_advanced:
    multi_factor: "gem 'rotp' for TOTP"
    biometric: "WebAuthn integration"
    session_management: "Secure session rotation"

frontend_integration:
  hotwire:
    turbo: "SPA-like experience without JavaScript"
    stimulus: "Minimal JavaScript framework"
    strada: "Mobile app integration (released 2023, now part of Hotwire Native)"

  import_maps:
    purpose: "ES modules without build step"
    configuration: "config/importmap.rb"
    benefits: ["Simpler deployment", "No Node.js required"]

  alternative_approaches:
    react_on_rails: "React integration"
    inertia_rails: "Share data with frontend frameworks"
    view_component: "Component-based views"

database_advanced:
  multiple_databases:
    setup: "database.yml configuration"
    use_cases: ["Read replicas", "Sharding", "Separate data domains"]

  advanced_postgresql:
    features: ["JSONB columns", "Full-text search", "Geospatial queries", "Materialized views"]
    gems: ["pg_search", "postgres_ext"]

  data_migrations:
    purpose: "Transform data safely"
    tools: ["Strong migrations gem", "Data migration patterns"]

scaling_strategies:
  application:
    horizontal: "Multiple application servers"
    vertical: "Larger server instances"
    microservices: "Extract bounded contexts"

  database:
    read_replicas: "Handle read traffic"
    partitioning: "Split large tables"
    caching_layers: "Redis, Memcached"

  background_jobs:
    queues: "Priority-based job processing"
    horizontal_scaling: "Multiple worker processes"

latest_ecosystem:
  new_gems:
    - "propshaft (Rails 7+ asset pipeline)"
    - "kamal (Deployment tool)"
    - "turbo_ready (Enhanced Hotwire)"
    - "fiesta (Structured logging)"

  tools:
    - "ruby-lsp (Modern Ruby language server)"
    - "debug.rb (New debugger)"
    - "lefthook (Git hooks management)"

  trends:
    - "Simpler frontend setups"
    - "Better developer experience"
    - "Enhanced performance"
    - "Stronger security defaults"

community_resources:
  newsletters:
    - "Ruby Weekly"
    - "Rails Weekly"
    - "This Week in Rails"

  podcasts:
    - "The Ruby on Rails Podcast"
    - "Remote Ruby"
    - "Bike Shed"

  youtube:
    - "Gorails"
    - "Drifting Ruby"
    - "RailsCasts (archived)"

learning_path:
  beginner:
    - "Ruby basics"
    - "Rails guides"
    - "Build CRUD apps"
    - "Basic testing"

  intermediate:
    - "Advanced ActiveRecord"
    - "Background jobs"
    - "API development"
    - "Performance optimization"

  advanced:
    - "Security deep dive"
    - "Scaling strategies"
    - "System design"
    - "Contributing to Rails"

  expert:
    - "Rails internals"
    - "Gem development"
    - "Database architecture"
    - "DevOps and deployment"

job_market:
  in_demand_skills:
    - "Rails 7+ experience"
    - "Hotwire/Stimulus"
    - "API design"
    - "Performance optimization"
    - "Testing (RSpec, Capybara)"
    - "DevOps (Docker, AWS, CI/CD)"

  salary_ranges:
    junior: "$70,000 - $90,000"
    mid_level: "$90,000 - $130,000"
    senior: "$130,000 - $180,000"
    staff_plus: "$180,000 - $250,000+"

  industries:
    - "Startups"
    - "E-commerce"
    - "SaaS companies"
    - "Fintech"
    - "Education technology"

# Rails 8 Preview Features (Expected)

solid_cache:
  description: "New default cache store using database tables"
  benefits: ["No external dependencies", "Database consistency", "Simpler deployment"]
  status: "Planned for Rails 8"

active_record_async:
  features:
    - "Async queries for non-blocking database operations"
    - "Background model processing"
    - "Improved connection handling"

hotwire_enhancements:
  turbo: "Better native app integration"
  stimulus: "Enhanced component lifecycle"
  new_features: ["Stream notifications", "Enhanced forms"]

developer_experience:
  improvements:
    - "Faster test suite runs"
    - "Better error messages"
    - "Enhanced debugging tools"
    - "Simplified configuration"

security:
  new_defaults:
    - "Stronger CSP defaults"
    - "Enhanced cookie security"
    - "Automatic security headers"

This advanced Rails summary covers the latest patterns, performance optimizations, security practices, and upcoming features. Rails continues to evolve with a focus on developer happiness, performance, and modern web development practices.

Added documentation and example scripts for Google Cloud Storage usage, including quickstart guides, merging data, and CORS configuration.

````markdown name=README.md
# storage.cloud — Google Cloud Storage (GCS) docs & quickstart

storage.cloud is a compact documentation and example repo with copy-pastable commands and small scripts for common Google Cloud Storage tasks:

- Authentication (gcloud ADC, service accounts, access tokens)
- Browser access vs programmatic access (storage.cloud.google.com vs API)
- Signed URLs and sharing
- CORS configuration for browser clients
- Practical patterns to merge many objects (CSV/text) in GCS
- Load patterns for BigQuery and recommendations for large datasets

Repository layout
- index.html — simple landing page / site overview
- docs/
  - quickstart.md — commands & short how-tos
  - merge-data.md — quick guide for combining files
  - signed-urls.md — signing approaches and examples
- examples/
  - merge_csv_gcs.py — small Python script to merge CSVs in a GCS prefix
- cors.json — CORS example
- LICENSE — suggested license (MIT)

Quickstart highlights
- Authenticate locally (ADC):
  gcloud auth application-default login

- Get an access token for Authorization header:
  gcloud auth print-access-token

- List buckets:
  gsutil ls gs://

- Copy to/from GCS:
  gsutil cp ./local-file gs://BUCKET/OBJECT
  gsutil cp gs://BUCKET/OBJECT ./local-file

- Create a signed URL:
  gsutil signurl -d 1h /path/to/key.json gs://BUCKET/OBJECT

Security reminders
- Use service accounts with least privilege in production.
- Signed URLs grant access to anyone with the URL until expiry.
- Avoid making sensitive objects public; prefer signed URLs or IAM roles.

Contributing
- Send PRs or issues with improvements.
- Keep examples minimal and targeted.
- Include tests or usage examples when adding scripts.

If you want, I can:
- Expand any doc into a full tutorial (step-by-step with screenshots).
- Add a shell script to compose >32 objects safely.
- Create a Dataflow/Beam starter pipeline for large-scale merges.

````

```html name=index.html
<!doctype html>
<html lang="en">
<head>
  <meta charset="utf-8"/>
  <title>storage.cloud — Google Cloud Storage quickstart</title>
  <meta name="viewport" content="width=device-width,initial-scale=1"/>
  <style>
    body { font-family: system-ui, -apple-system, Roboto, "Segoe UI", Arial; max-width: 980px; margin: 36px auto; line-height: 1.6; padding: 0 20px; color:#111 }
    pre{ background:#f6f8fa; padding:12px; overflow:auto; border-radius:6px; }
    header {border-bottom:1px solid #eee; margin-bottom:20px; padding-bottom:12px}
    h1{margin:0;padding:0}
    nav a{margin-right:12px;color:#1a73e8;text-decoration:none}
    small { color:#666 }
  </style>
</head>
<body>
  <header>
    <h1>storage.cloud</h1>
    <p><small>Quick reference and starter content for Google Cloud Storage (GCS)</small></p>
    <nav>
      <a href="docs/quickstart.md">Quickstart</a>
      <a href="docs/merge-data.md">Combine data</a>
      <a href="docs/signed-urls.md">Signed URLs</a>
      <a href="examples/merge_csv_gcs.py">Examples</a>
    </nav>
  </header>

  <main>
    <section>
      <h2>Overview</h2>
      <p>storage.cloud collects short guides, command snippets, and small reusable scripts for common GCS workflows:</p>
      <ul>
        <li>Authenticated access (gcloud, service accounts, access tokens)</li>
        <li>Signed URLs for temporary public access</li>
        <li>CORS configuration for browser-based apps</li>
        <li>Merging objects (CSV/text) and loading to BigQuery</li>
      </ul>
    </section>

    <section>
      <h2>Helpful links</h2>
      <ul>
        <li><a href="https://storage.cloud.google.com/">storage.cloud.google.com</a> — web UI (requires Google sign-in)</li>
        <li><a href="https://console.cloud.google.com/storage/browser">Cloud Console — Storage browser</a></li>
        <li><a href="https://cloud.google.com/storage/docs">GCS documentation</a></li>
      </ul>
    </section>

    <section>
      <h2>Get started</h2>
      <p>Open <code>docs/quickstart.md</code> for commands and brief examples you can run locally. For combining files, see <code>docs/merge-data.md</code> and <code>examples/merge_csv_gcs.py</code>.</p>
    </section>
  </main>
</body>
</html>
```

````markdown name=docs/quickstart.md
# Quickstart — Google Cloud Storage (GCS)

This page collects the most-used commands and short examples for getting started with GCS.

Prerequisites
- Install Google Cloud SDK (gcloud, gsutil)
- For programmatic examples, install the relevant client libraries (Python: google-cloud-storage)

Authentication
- Local developer (Application Default Credentials - ADC):
  gcloud auth application-default login

- Service account (recommended for server apps):
  1. Create:
     gcloud iam service-accounts create my-sa --display-name="My SA"
  2. Grant roles (example: objectViewer):
     gcloud projects add-iam-policy-binding PROJECT_ID \
       --member="serviceAccount:my-sa@PROJECT_ID.iam.gserviceaccount.com" \
       --role="roles/storage.objectViewer"
  3. (Optional) Create key for local use:
     gcloud iam service-accounts keys create key.json \
       --iam-account=my-sa@PROJECT_ID.iam.gserviceaccount.com

Access tokens
- Print an access token for HTTP Authorization:
  gcloud auth print-access-token
- Use as header: Authorization: Bearer <ACCESS_TOKEN>

Common gsutil commands
- List buckets:
  gsutil ls gs://

- List objects in a bucket/prefix:
  gsutil ls gs://BUCKET/PREFIX/

- Download object:
  gsutil cp gs://BUCKET/OBJECT ./local-file

- Upload file:
  gsutil cp ./local-file gs://BUCKET/OBJECT

- Make object public (use sparingly):
  gsutil acl ch -u AllUsers:R gs://BUCKET/OBJECT

Signed URLs
- Short way to create an expiring URL using a service account key:
  gsutil signurl -d 1h /path/to/key.json gs://BUCKET/OBJECT
- Notes:
  - V4 signed URLs are supported; maximum expiry is 7 days.
  - Signed URLs allow access without a Google account.

CORS (for browser clients)
- Example file: cors.json (in repo)
- Apply:
  gsutil cors set cors.json gs://BUCKET

BigQuery ingestion
- BigQuery accepts wildcards — you can load many CSVs without pre-merging:
  bq load --autodetect --source_format=CSV dataset.table gs://BUCKET/PATH/*.csv

Troubleshooting
- Permission denied: check IAM roles (roles/storage.objectViewer or a custom role).
- Invalid credentials: re-run `gcloud auth application-default login` or refresh service account keys.
- CORS errors: ensure bucket CORS includes your domain and required methods/headers.

Security recommendations
- Use service accounts with least privilege.
- Prefer IAM + uniform bucket-level access over ACLs.
- Do not embed long-lived keys in client-side code; use signed URLs for browser access.

Further reading
- GCS docs: https://cloud.google.com/storage/docs
- Signed URLs: https://cloud.google.com/storage/docs/access-control/signed-urls
- gsutil reference: https://cloud.google.com/storage/docs/gsutil
````

````markdown name=docs/merge-data.md
# Combine All Data

A short guide to combining files stored in Google Cloud Storage.

Before you start
- Make sure you have access to the bucket (roles/storage.objectViewer, or storage.objectAdmin for writes).
- For large datasets (GB/TB), consider Dataflow/Dataproc or loading directly into BigQuery.

Common options

1) Quick merge (small/medium files)
- If the data is small enough to fit in memory:
  gsutil cat gs://BUCKET/PATH/*.csv | gsutil cp - gs://BUCKET/PATH/combined.csv
- Risks: memory and network. Use this only for small sets.

2) gsutil compose (combine objects in GCS without downloading)
- Combines up to 32 objects per operation:
  gsutil compose gs://BUCKET/part1.csv gs://BUCKET/part2.csv gs://BUCKET/combined.csv
- For more than 32 objects, compose in stages (tree compose).
- Note: compose concatenates raw bytes; make sure each object ends with a newline and that you do not concatenate duplicate headers.

3) Load directly into BigQuery (recommended for analytics)
- BigQuery can read wildcard CSV paths:
  bq load --autodetect --source_format=CSV dataset.table gs://BUCKET/PATH/*.csv
- Advantages: scale, no pre-merging, schema handling.

4) Custom script (Python example)
- Advantages: full control (drop duplicate headers, normalize data).
- See `examples/merge_csv_gcs.py` for an example.

Example strategy for >32 objects using gsutil compose (pattern)
- Split the object list into groups of 32, compose each group into a temporary object, then compose the temporary results (repeat until a single final object remains).
- Or use a Dataflow pipeline for streaming reads and rewrites.

Best practices
- If the final output will be analyzed: prefer writing to BigQuery or Parquet (columnar) for cost and performance.
- Enable logging and object versioning if the data matters.
- Use customer-managed encryption keys (CMEK) if required.

Requesting a tailored script
- If you want one, provide:
  - Bucket name (for example: gs://my-bucket/data/)
  - Prefix and file type (.csv)
  - Approximate size (GB/TB)
  A suitable automated script (bash or Python) can then be generated for you.
````

````markdown name=docs/signed-urls.md
# Signed URLs — Quick reference

Signed URLs grant temporary access to an object without requiring the requester to have a Google account.

Key points
- Signed URLs include authentication in query parameters.
- Anyone with the URL can access the object until expiry.
- Maximum expiry for V4 signed URLs is 7 days.

Create with gsutil (using service account key file)
  gsutil signurl -d 1h /path/to/key.json gs://BUCKET/OBJECT

Create programmatically (Python example sketch)
- Use google-auth library to sign and generate a V4 URL, or use google-cloud-storage's blob.generate_signed_url method.
- Prefer service account credentials with least privilege.
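
A minimal sketch using the google-cloud-storage client's generate_signed_url
(BUCKET/OBJECT are placeholders; assumes credentials that can sign, e.g. a
service-account key via GOOGLE_APPLICATION_CREDENTIALS):

```python
from datetime import timedelta

from google.cloud import storage

def make_signed_url(bucket_name: str, object_name: str) -> str:
    client = storage.Client()
    blob = client.bucket(bucket_name).blob(object_name)
    # V4 signature, valid for 1 hour (7 days is the maximum)
    return blob.generate_signed_url(
        version="v4",
        expiration=timedelta(hours=1),
        method="GET",
    )

print(make_signed_url("BUCKET", "OBJECT"))
```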

Security tips
- Monitor signed-URL usage where possible (Cloud Storage logs).
- Shorter expiry reduces risk.
- Do not embed long-lived private keys in public repositories.

When to use
- Temporary downloads for users without Google accounts.
- Browser uploads (PUT) when combined with appropriate CORS settings.
````

```python name=examples/merge_csv_gcs.py
#!/usr/bin/env python3
"""
examples/merge_csv_gcs.py

Merge CSV files in a GCS prefix into one CSV while keeping only the first header.

Requirements:
  pip install google-cloud-storage

Usage:
  export GOOGLE_APPLICATION_CREDENTIALS="/path/to/key.json"
  python3 examples/merge_csv_gcs.py my-bucket data/prefix/ output/combined.csv
"""
import sys
import csv
from io import StringIO
from google.cloud import storage

def merge_csvs(bucket_name, prefix, output_blob_name):
    client = storage.Client()
    bucket = client.bucket(bucket_name)
    # Blob objects are not orderable; sort by object name for a stable merge order
    blobs = sorted((b for b in bucket.list_blobs(prefix=prefix) if b.name.endswith('.csv')), key=lambda b: b.name)

    if not blobs:
        print("No CSV files found with prefix:", prefix)
        return

    out_buf = StringIO()
    writer = None
    first = True

    for blob in blobs:
        print("Reading:", blob.name)
        # Use streaming download if files are large: download_as_text() may load entire object
        data = blob.download_as_text()
        reader = csv.reader(StringIO(data))
        header = next(reader, None)
        if first:
            writer = csv.writer(out_buf)
            if header:
                writer.writerow(header)
            for row in reader:
                writer.writerow(row)
            first = False
        else:
            # skip header for subsequent files
            for row in reader:
                writer.writerow(row)

    out_blob = bucket.blob(output_blob_name)
    out_blob.upload_from_string(out_buf.getvalue(), content_type='text/csv')
    print(f'Uploaded gs://{bucket_name}/{output_blob_name}')

if __name__ == "__main__":
    if len(sys.argv) != 4:
        print("Usage: merge_csv_gcs.py BUCKET PREFIX OUTPUT_BLOB")
        print("Example: merge_csv_gcs.py my-bucket data/ output/combined.csv")
        sys.exit(1)
    merge_csvs(sys.argv[1], sys.argv[2], sys.argv[3])
```

```json name=cors.json
[
  {
    "origin": ["https://example.com"],
    "method": ["GET", "HEAD", "PUT", "POST"],
    "responseHeader": ["Content-Type", "x-goog-meta-custom"],
    "maxAgeSeconds": 3600
  }
]
```

```text name=LICENSE
MIT License

Copyright (c) 2025

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction...
(You can replace with your full MIT text or preferred license.)
```

Would you like me to:
- generate a shell helper to compose >32 objects safely (tree compose), or
- produce a Dataflow/Beam starter for large merges, or
- create a translated Malay full-site version?

Tell me which follow-up and I will produce the exact script or pipeline.
Add storage.cloud documentation and example scripts

Updated README.md to version 5 with streamlined content, added tree-compose helper pattern, and improved clarity on common commands and best practices.

````markdown name=README.md
# storage.cloud — Google Cloud Storage docs & examples (v5)

A compact, practical collection of reference notes, copy‑paste commands, and small example scripts for working with Google Cloud Storage (GCS). This repo provides streamlined content, an included tree‑compose helper pattern for composing >32 objects, and improved clarity on common commands and best practices.

Status: v5 — 2025-11-06  
Maintainer: Sazwanismail

Table of contents
- About
- Repo layout
- Quickstart (install, auth)
- Common commands (concise)
- Sharing & signed URLs
- Merging strategies (small → large)
- Tree‑compose helper (pattern & usage)
- CORS & browser uploads
- Examples included
- Security & best practices
- Troubleshooting (quick)
- Contributing & license

About
storage.cloud is focused on fast onboarding and safe reuse: copy‑paste commands for local tasks, small example scripts to adapt, and pragmatic patterns for combining many objects and ingesting data into BigQuery.

Repository layout
- index.html — landing page
- docs/
  - quickstart.md
  - merge-data.md
  - signed-urls.md
- examples/
  - merge_csv_gcs.py
  - tree-compose.sh (pattern helper)
- cors.json
- LICENSE

Quickstart (minimum steps)
1. Install
   - Google Cloud SDK (gcloud, gsutil): https://cloud.google.com/sdk
   - Optional Python client:
     ```bash
     pip install --upgrade google-cloud-storage
     ```

2. Authenticate (developer / local)
   ```bash
   gcloud auth application-default login
   ```

3. Service account for servers (least privilege)
   ```bash
   gcloud iam service-accounts create my-sa --display-name="My SA"

   gcloud projects add-iam-policy-binding PROJECT_ID \
     --member="serviceAccount:my-sa@PROJECT_ID.iam.gserviceaccount.com" \
     --role="roles/storage.objectViewer"
   ```

   Optional local key (for testing):
   ```bash
   gcloud iam service-accounts keys create key.json \
     --iam-account=my-sa@PROJECT_ID.iam.gserviceaccount.com
   export GOOGLE_APPLICATION_CREDENTIALS="/path/to/key.json"
   ```

Common commands (concise)
- List buckets:
  ```bash
  gsutil ls gs://
  ```
- List objects:
  ```bash
  gsutil ls gs://BUCKET/PREFIX/
  ```
- Download / upload:
  ```bash
  gsutil cp gs://BUCKET/OBJECT ./local-file
  gsutil cp ./local-file gs://BUCKET/OBJECT
  ```
- Access token for HTTP:
  ```bash
  gcloud auth print-access-token
  # Authorization: Bearer <TOKEN>
  ```
- Make object public (use sparingly):
  ```bash
  gsutil acl ch -u AllUsers:R gs://BUCKET/OBJECT
  ```

Sharing & signed URLs
- Create a signed URL (gsutil + service account key):
  ```bash
  gsutil signurl -d 1h /path/to/key.json gs://BUCKET/OBJECT
  ```
Notes:
- V4 signed URLs support up to 7 days expiry.
- Anyone with the URL can access the object while it’s valid — treat as a secret.
- For programmatic signing, use google-cloud-storage or google-auth libraries (see docs/signed-urls.md).

Merging strategies — pick by dataset size
- Small / moderate (fits memory)
  ```bash
  gsutil cat gs://BUCKET/PATH/*.csv | gsutil cp - gs://BUCKET/PATH/combined.csv
  ```
  - Quick and simple. Watch memory & network.

- In-place compose (no download; up to 32 objects per compose)
  ```bash
  gsutil compose gs://BUCKET/part1.csv gs://BUCKET/part2.csv gs://BUCKET/combined.csv
  ```
  - Compose merges object bytes; ensure newline/header handling.

- Large-scale / analytics
  - Load directly to BigQuery (no pre-merge):
    ```bash
    bq load --autodetect --source_format=CSV dataset.table gs://BUCKET/PATH/*.csv
    ```
  - For heavy transforms/streaming merges use Dataflow (Apache Beam) or Dataproc (Spark).

Tree‑compose helper — safe pattern for >32 objects
- Problem: gsutil compose takes at most 32 sources. Use a tree (batch-and-reduce) approach:
  1. List objects under prefix.
  2. Break into batches of up to 32.
  3. Compose each batch into a temporary object.
  4. Repeat composing temporary objects until a single final object remains.
  5. Move/copy final temp object to the target name and clean up temps.

- Example helper: examples/tree-compose.sh (sketch)
  - The repo includes a tested version you can run. Key notes:
    - Handle headers (remove duplicate headers before composing, or use a script to write header once).
    - Test on a small subset first.
    - Use a distinct temporary prefix and optionally lifecycle rules to avoid orphaned temp objects.
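
A minimal Python sketch of the same batch-and-reduce pattern, using the
google-cloud-storage client's Blob.compose (the temp-object naming is
illustrative; as noted above, handle duplicate CSV headers before composing):

```python
from google.cloud import storage

def tree_compose(bucket_name: str, prefix: str, dest_name: str) -> None:
    client = storage.Client()
    bucket = client.bucket(bucket_name)
    layer = sorted(client.list_blobs(bucket, prefix=prefix), key=lambda b: b.name)
    if not layer:
        return
    round_num = 0
    while len(layer) > 1:
        next_layer = []
        for i in range(0, len(layer), 32):            # compose <= 32 sources at a time
            tmp = bucket.blob(f"{prefix}__tmp/{round_num}-{i}")
            tmp.compose(layer[i:i + 32])
            next_layer.append(tmp)
        layer, round_num = next_layer, round_num + 1
    bucket.copy_blob(layer[0], bucket, dest_name)     # final temp -> target name
    # Afterwards, delete everything under the temporary prefix
```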

CORS & browser uploads
- Example cors.json (included)
  ```json
  [
    {
      "origin": ["https://example.com"],
      "method": ["GET", "HEAD", "PUT", "POST"],
      "responseHeader": ["Content-Type", "x-goog-meta-custom"],
      "maxAgeSeconds": 3600
    }
  ]
  ```
- Apply:
  ```bash
  gsutil cors set cors.json gs://BUCKET
  ```
- For browser uploads with signed PUT URLs, ensure CORS allows the origin and headers.

Examples included
- examples/merge_csv_gcs.py — merge CSVs by prefix, keep only the first header (small/medium datasets).
- examples/tree-compose.sh — tree-compose helper to safely compose >32 objects.
- cors.json — CORS policy example.

Security & best practices (improved clarity)
- Use service accounts with least privilege; rotate credentials and avoid long-lived personal keys on servers.
- Prefer uniform bucket-level access + IAM roles over ACLs.
- Use signed URLs or short-lived tokens for browser access; never embed private keys in client code.
- Monitor access with Cloud Audit Logs; enable object versioning and retention where appropriate.
- For analytics, prefer columnar formats (Parquet/Avro) and BigQuery for cost/performance benefits.
- Consider CMEK if your organization requires customer-managed encryption keys.

Troubleshooting (quick)
- Permission denied: confirm IAM role (roles/storage.objectViewer for read).
- Invalid credentials: re-run `gcloud auth application-default login` or refresh service account keys.
- CORS issues: check bucket CORS includes your origin, methods, and headers.
- Large merges: avoid loading many files into RAM; use compose, streaming, or Dataflow.

Contributing
- PRs and issues welcome. When adding scripts, include:
  - Purpose and example usage
  - Required permissions and dependencies
  - Safety notes (memory/time limits)
- Keep examples minimal, tested, and documented.

License
- MIT by default. See LICENSE.

Need a ready-to-run script or pipeline?
Tell me which you want and I will produce it:
- Fully-tested tree-compose script (with header handling and safety checks)
- Dataflow (Apache Beam) starter pipeline for large merges
- Malay-localized README and docs
- Small GitHub Actions workflow to lint/test examples

Or provide your bucket name, prefix, file type, and approximate size and I'll generate a tailored script (bash or Python).
````
Revise README.md to v5 with enhancements and updates

# AI Workflow Planning System

![AI Planning](https://img.shields.io/badge/AI-Powered_Workflow_Planning-FF6B6B.svg)
![Multi-Tool](https://img.shields.io/badge/Multi--Tool_Integration-✓-2496ED.svg)
![Automation](https://img.shields.io/badge/Automation-✓-00C9FF.svg)

An intelligent workflow planning system that uses AI to analyze project requirements and generate optimized automation workflows across multiple development tools.

## 🚀 Quick Start

### Installation

```bash
# Clone the repository
git clone https://github.com/your-org/ai-workflow-planner.git
cd ai-workflow-planner

# Install dependencies
pip install -r requirements.txt

# Setup environment
cp .env.example .env
# Add your API keys to .env

# Start the system
python -m planner.main
```

### Basic Usage

```python
import asyncio

from ai_workflow_planner import WorkflowPlanner

async def main():
    # Initialize the planner
    planner = WorkflowPlanner(api_key="your-openai-key")

    # Plan a workflow for an iOS project (plan_workflow is a coroutine,
    # so it must be awaited inside an async function)
    workflow = await planner.plan_workflow(
        project_type="ios",
        tools=["xcode", "github_actions", "docker"],
        requirements="CI/CD with testing and deployment",
    )

    # Generate configurations
    return workflow.generate_configs()

configs = asyncio.run(main())
```

## 🏗️ System Architecture

```mermaid
graph TB
    A[Project Input] --> B[AI Analyzer]
    B --> C[Workflow Generator]
    C --> D[Tool Integrations]
    D --> E[GitHub Actions]
    D --> F[Xcode Build]
    D --> G[Docker]
    D --> H[Slack]
    E --> I[Configuration Files]
    F --> I
    G --> I
    H --> I
    I --> J[Execution Engine]
    J --> K[Monitoring]
    K --> B
```

## 🧠 Core AI Planning Engine

### 1. Project Analysis

```python
# planners/project_analyzer.py
class ProjectAnalyzer:
    def __init__(self, ai_client):
        self.ai_client = ai_client
        self.project_templates = ProjectTemplates()
    
    async def analyze_project(self, project_path: str) -> ProjectAnalysis:
        """Analyze project structure and requirements using AI"""
        
        # Scan project files
        project_structure = await self.scan_project_structure(project_path)
        
        # AI analysis prompt
        prompt = f"""
        Analyze this project structure and determine optimal workflow:
        
        Project Structure:
        {project_structure}
        
        Please provide:
        1. Recommended workflow stages
        2. Required tools and integrations
        3. Potential optimizations
        4. Security considerations
        """
        
        analysis = await self.ai_client.analyze(prompt)
        return self.parse_analysis(analysis)
```

### 2. Workflow Generation

```python
# generators/workflow_generator.py
class WorkflowGenerator:
    def __init__(self, analyzer: ProjectAnalyzer):
        self.analyzer = analyzer
        self.tool_registry = ToolRegistry()
    
    async def generate_workflow(self, project_config: dict) -> WorkflowPlan:
        """Generate complete workflow plan using AI"""
        
        # Get AI recommendations
        recommendations = await self.analyzer.get_recommendations(project_config)
        
        # Build workflow stages
        stages = []
        for stage_config in recommendations['stages']:
            stage = await self.build_stage(stage_config)
            stages.append(stage)
        
        # Optimize execution order
        optimized_stages = self.optimize_execution_order(stages)
        
        return WorkflowPlan(
            stages=optimized_stages,
            tools=recommendations['tools'],
            configs=await self.generate_configs(optimized_stages)
        )
```

### 3. Tool-Specific Configuration

```python
# integrations/github_actions.py
import yaml

class GitHubActionsIntegration:
    async def generate_workflow(self, plan: WorkflowPlan) -> str:
        """Generate GitHub Actions workflow from AI plan"""
        
        workflow = {
            "name": f"{plan.project_name} - AI Generated",
            "on": self.get_triggers(plan),
            "jobs": await self.generate_jobs(plan)
        }
        
        return yaml.dump(workflow)
    
    async def generate_jobs(self, plan: WorkflowPlan) -> dict:
        """Generate jobs based on AI-optimized plan"""
        jobs = {}
        
        for stage in plan.stages:
            jobs[stage.name] = {
                "runs-on": self.select_runner(stage),
                "steps": await self.generate_steps(stage),
                "needs": stage.dependencies
            }
        
        return jobs
```

## ⚙️ Configuration

### AI Model Configuration

```yaml
# config/ai_models.yaml
openai:
  model: "gpt-4"
  temperature: 0.3
  max_tokens: 4000
  retry_attempts: 3

anthropic:
  model: "claude-3-sonnet"
  max_tokens: 4000

local:
  model: "llama2"
  endpoint: "http://localhost:8080"
```

### Tool Registry

```yaml
# config/tools.yaml
github_actions:
  name: "GitHub Actions"
  type: "ci_cd"
  capabilities:
    - "build"
    - "test"
    - "deploy"
  templates:
    - "basic-ci"
    - "advanced-cd"

xcode:
  name: "Xcode Build"
  type: "build_tool"
  capabilities:
    - "compile"
    - "test"
    - "analyze"
  commands:
    - "xcodebuild"
    - "xcodebuild analyze"

docker:
  name: "Docker"
  type: "containerization"
  capabilities:
    - "build"
    - "push"
    - "scan"
```
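
The registry itself is not shown in this README; a minimal sketch of how it
might load `config/tools.yaml` (the `Tool` shape and method names here are
assumptions, not the project's actual API):

```python
# tools/registry.py (illustrative sketch)
from dataclasses import dataclass, field
from typing import Dict, List

import yaml

@dataclass
class Tool:
    name: str
    type: str
    capabilities: List[str] = field(default_factory=list)

class ToolRegistry:
    def __init__(self, config_path: str = "config/tools.yaml"):
        with open(config_path) as f:
            raw = yaml.safe_load(f) or {}
        # One Tool per top-level key (github_actions, xcode, docker, ...)
        self.tools: Dict[str, Tool] = {
            key: Tool(
                name=cfg.get("name", key),
                type=cfg.get("type", "unknown"),
                capabilities=cfg.get("capabilities", []),
            )
            for key, cfg in raw.items()
        }

    def with_capability(self, capability: str) -> List[Tool]:
        return [t for t in self.tools.values() if capability in t.capabilities]
```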

## 🎯 Usage Examples

### Example 1: iOS CI/CD Pipeline

```python
# examples/ios_workflow.py
async def create_ios_workflow():
    """Create AI-optimized iOS workflow"""
    
    planner = WorkflowPlanner()
    
    workflow = await planner.plan_workflow(
        project_type="ios",
        tools=["xcode", "github_actions", "fastlane", "slack"],
        requirements="""
        - Automated testing on PR
        - Build and analyze on main branch
        - Deploy to TestFlight on tags
        - Security scanning
        - Performance monitoring
        """
    )
    
    # Generate configurations
    configs = await workflow.generate_configs()
    
    # Save to files
    await configs.save("./generated-workflows/")
    
    return workflow
```

### Example 2: Multi-Service Container Platform

```python
# examples/container_platform.py
async def create_container_workflow():
    """Create workflow for container-based platform"""
    
    planner = WorkflowPlanner()
    
    workflow = await planner.plan_workflow(
        project_type="microservices",
        tools=["docker", "kubernetes", "github_actions", "argo_cd"],
        requirements="""
        - Build and push containers on commit
        - Security scanning and vulnerability checks
        - Automated deployment to staging
        - Canary deployment to production
        - Rollback on failure
        - Multi-region deployment
        """
    )
    
    return workflow
```

## 🔧 Tool Integrations

### GitHub Actions Generator

```python
# integrations/github_actions.py
from typing import Dict, List

import yaml

class GitHubActionsGenerator:
    async def generate_workflow_file(self, plan: WorkflowPlan) -> str:
        """Generate complete GitHub Actions workflow file"""
        
        template = {
            "name": f"AI-Generated: {plan.project_name}",
            "on": self._get_trigger_config(plan),
            "env": self._get_environment_vars(plan),
            "jobs": await self._generate_jobs(plan)
        }
        
        return yaml.dump(template)
    
    async def _generate_jobs(self, plan: WorkflowPlan) -> Dict:
        jobs = {}
        
        for stage in plan.stages:
            jobs[stage.name] = {
                "name": stage.description,
                "runs-on": self._select_runner(stage),
                "if": self._get_condition(stage),
                "steps": await self._generate_steps(stage),
                "needs": self._get_dependencies(stage)
            }
            
        return jobs
    
    async def _generate_steps(self, stage: WorkflowStage) -> List[Dict]:
        steps = []
        
        for action in stage.actions:
            step = {
                "name": action.description,
                "uses": action.tool_specific.get('action'),
                "with": action.parameters
            }
            steps.append(step)
            
        return steps
```

### Xcode Build Integration

```python
# integrations/xcode_build.py
from typing import List

class XcodeBuildIntegration:
    async def generate_build_scripts(self, plan: WorkflowPlan) -> List[str]:
        """Generate optimized Xcode build scripts"""
        
        scripts = []
        for build_step in plan.get_steps_by_type('xcode_build'):
            script = self._generate_build_script(build_step)
            scripts.append(script)
            
        return scripts
    
    def _generate_build_script(self, step: WorkflowStep) -> str:
        return f"""
        # AI-Generated Xcode Build Script
        set -eo pipefail
        
        echo "🚀 Starting AI-optimized build..."
        
        # Clean derived data
        rm -rf ~/Library/Developer/Xcode/DerivedData/*
        
        # Build project
        xcodebuild \
          -workspace {step.parameters.get('workspace')} \
          -scheme {step.parameters.get('scheme')} \
          -configuration {step.parameters.get('configuration', 'Debug')} \
          -destination 'platform=iOS Simulator,name=iPhone 15' \
          clean build
        
        # Run tests if specified
        if [ "{step.parameters.get('run_tests', False)}" = "True" ]; then
            xcodebuild test \
              -workspace {step.parameters.get('workspace')} \
              -scheme {step.parameters.get('scheme')} \
              -destination 'platform=iOS Simulator,name=iPhone 15'
        fi
        
        echo "✅ Build completed successfully"
        """
```

## 📊 AI-Powered Optimization

### Performance Optimization

```python
# optimizers/performance_optimizer.py
class PerformanceOptimizer:
    def __init__(self, ai_client):
        self.ai_client = ai_client
        self.metrics_collector = MetricsCollector()
    
    async def optimize_workflow(self, workflow: WorkflowPlan) -> WorkflowPlan:
        """Use AI to optimize workflow performance"""
        
        # Collect performance data
        metrics = await self.metrics_collector.collect(workflow)
        
        # AI optimization prompt
        prompt = f"""
        Optimize this workflow for better performance:
        
        Current Workflow:
        {workflow.to_json()}
        
        Performance Metrics:
        {metrics}
        
        Please suggest optimizations for:
        1. Parallel execution opportunities
        2. Caching strategies
        3. Resource allocation
        4. Dependency optimization
        
        Return optimized workflow in JSON format.
        """
        
        optimized_json = await self.ai_client.optimize(prompt)
        return WorkflowPlan.from_json(optimized_json)
```

### Cost Optimization

```python
# optimizers/cost_optimizer.py
class CostOptimizer:
    async def optimize_costs(self, workflow: WorkflowPlan) -> WorkflowPlan:
        """Optimize workflow for cost efficiency"""
        
        cost_analysis = await self.analyze_costs(workflow)
        
        prompt = f"""
        Optimize this workflow to reduce costs:
        
        Workflow: {workflow.to_json()}
        Cost Analysis: {cost_analysis}
        
        Suggest cost-saving changes while maintaining:
        - Functionality
        - Performance
        - Reliability
        
        Focus on:
        - Compute resource optimization
        - Storage efficiency
        - Network usage reduction
        """
        
        return await self.apply_optimizations(workflow, prompt)
```

## 🔄 Real-Time Adaptation

### Dynamic Workflow Adjustment

```python
# adapters/dynamic_adapter.py
class DynamicWorkflowAdapter:
    async def adapt_to_changes(self, workflow: WorkflowPlan, changes: dict) -> WorkflowPlan:
        """Adapt workflow based on real-time changes"""
        
        prompt = f"""
        Adapt this workflow to handle these changes:
        
        Original Workflow: {workflow.to_json()}
        
        Changes Detected:
        - {changes.get('description', 'Unknown changes')}
        
        Please provide an adapted workflow that:
        1. Maintains all original functionality
        2. Handles the new requirements/changes
        3. Maintains or improves performance
        """
        
        adapted_workflow = await self.ai_client.adapt(prompt)
        return WorkflowPlan.from_json(adapted_workflow)
```

## 📈 Monitoring & Analytics

### Workflow Analytics

```python
# analytics/workflow_analytics.py
class WorkflowAnalytics:
    def __init__(self):
        self.metrics_store = MetricsStore()
    
    async def generate_insights(self, workflow: WorkflowPlan) -> dict:
        """Generate AI-powered insights about workflow performance"""
        
        metrics = await self.metrics_store.get_workflow_metrics(workflow.id)
        
        prompt = f"""
        Analyze these workflow metrics and provide insights:
        
        Metrics: {metrics}
        
        Please provide:
        1. Performance bottlenecks
        2. Optimization opportunities
        3. Reliability concerns
        4. Cost-saving suggestions
        """
        
        insights = await self.ai_client.analyze(prompt)
        return self.parse_insights(insights)
```

## 🚀 Advanced Features

### Multi-Project Coordination

```python
# features/multi_project.py
from typing import List

class MultiProjectCoordinator:
    async def coordinate_workflows(self, projects: List[Project]) -> dict:
        """Coordinate workflows across multiple related projects"""
        
        project_configs = [p.to_json() for p in projects]
        
        prompt = f"""
        Coordinate workflows for these related projects:
        
        Projects: {project_configs}
        
        Create a coordinated workflow plan that:
        1. Handles dependencies between projects
        2. Optimizes build order
        3. Manages shared resources
        4. Coordinates deployments
        """
        
        coordination_plan = await self.ai_client.coordinate(prompt)
        return coordination_plan
```

### Security Hardening

```python
# features/security_hardener.py
class WorkflowSecurityHardener:
    async def harden_workflow(self, workflow: WorkflowPlan) -> WorkflowPlan:
        """Use AI to identify and fix security issues"""
        
        prompt = f"""
        Analyze this workflow for security issues:
        
        Workflow: {workflow.to_json()}
        
        Identify:
        1. Potential security vulnerabilities
        2. Secrets exposure risks
        3. Permission issues
        4. Compliance concerns
        
        Provide a hardened version of the workflow.
        """
        
        hardened_workflow = await self.ai_client.harden(prompt)
        return WorkflowPlan.from_json(hardened_workflow)
```

## 💡 Example Output

### Generated GitHub Actions Workflow

```yaml
# .github/workflows/ai-optimized-ci.yml
name: AI-Optimized iOS CI/CD

on:
  push:
    branches: [ main, develop ]
  pull_request:
    branches: [ main ]

env:
  PROJECT_NAME: MyApp
  SCHEME_NAME: MyApp

jobs:
  analyze:
    name: Static Analysis
    runs-on: macos-latest
    steps:
      - uses: actions/checkout@v4
      - name: Xcode Analyze
        run: |
          xcodebuild analyze \
            -workspace $PROJECT_NAME.xcworkspace \
            -scheme $SCHEME_NAME \
            -configuration Debug

  test:
    name: Run Tests
    runs-on: macos-latest
    needs: analyze
    steps:
      - uses: actions/checkout@v4
      - name: Run Unit Tests
        run: |
          xcodebuild test \
            -workspace $PROJECT_NAME.xcworkspace \
            -scheme $SCHEME_NAME \
            -destination 'platform=iOS Simulator,name=iPhone 15'

  build:
    name: Build App
    runs-on: macos-latest
    needs: test
    steps:
      - uses: actions/checkout@v4
      - name: Build Release
        run: |
          xcodebuild build \
            -workspace $PROJECT_NAME.xcworkspace \
            -scheme $SCHEME_NAME \
            -configuration Release
```

## 🔧 Deployment

### Docker Setup

```dockerfile
FROM python:3.9-slim

WORKDIR /app

COPY requirements.txt .
RUN pip install -r requirements.txt

COPY . .

EXPOSE 8000

CMD ["python", "-m", "planner.api"]
```

### Kubernetes Deployment

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: ai-workflow-planner
spec:
  replicas: 3
  selector:
    matchLabels:
      app: ai-workflow-planner
  template:
    metadata:
      labels:
        app: ai-workflow-planner
    spec:
      containers:
      - name: planner
        image: ai-workflow-planner:latest
        ports:
        - containerPort: 8000
        env:
        - name: OPENAI_API_KEY
          valueFrom:
            secretKeyRef:
              name: ai-secrets
              key: openai-api-key
```

---

<div align="center">

## 🧠 Start Planning with AI

[**Documentation**](docs/) • 
[**Examples**](examples/) • 
[**API Reference**](docs/api.md)

**AI-Powered Workflow Planning | Multi-Tool Integration | Real-Time Optimization**

</div>
# Node.js GKE Deployment Guide

![Google Cloud](https://img.shields.io/badge/Google_Cloud-4285F4?style=for-the-badge&logo=google-cloud&logoColor=white)
![Kubernetes](https://img.shields.io/badge/kubernetes-326CE5?style=for-the-badge&logo=kubernetes&logoColor=white)
![Node.js](https://img.shields.io/badge/Node.js-339933?style=for-the-badge&logo=nodedotjs&logoColor=white)
![Docker](https://img.shields.io/badge/Docker-2CA5E0?style=for-the-badge&logo=docker&logoColor=white)
![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg?style=for-the-badge)

A complete guide for deploying production-ready Node.js applications to Google Kubernetes Engine (GKE) with best practices for security, scalability, and monitoring.

## 📋 Table of Contents

- [Overview](#overview)
- [Architecture](#architecture)
- [Quick Start](#quick-start)
- [Prerequisites](#prerequisites)
- [Project Structure](#project-structure)
- [Local Development](#local-development)
- [GKE Deployment](#gke-deployment)
- [Monitoring & Scaling](#monitoring--scaling)
- [CI/CD Pipeline](#cicd-pipeline)
- [Troubleshooting](#troubleshooting)
- [Cleanup](#cleanup)
- [Best Practices](#best-practices-implemented)
- [License](#license)

## 🎯 Overview

This project demonstrates how to deploy a production-ready Node.js application to Google Kubernetes Engine with:

- ✅ **Security Best Practices** (non-root users, security contexts, minimal images)
- ✅ **Health Checks** (liveness, readiness, startup probes)
- ✅ **Auto-scaling** (Horizontal Pod Autoscaler)
- ✅ **Monitoring** (Cloud Monitoring, formerly Stackdriver; resource metrics)
- ✅ **CI/CD** (Cloud Build automation)
- ✅ **High Availability** (multi-replica deployment)
- ✅ **Zero-downtime Deployments** (rolling updates)

## 🏗 Architecture

```mermaid
graph TB
    A[User] --> B[GCP Load Balancer]
    B --> C[Node.js Service]
    C --> D[Pod 1]
    C --> E[Pod 2]
    C --> F[Pod 3]
    D --> G[Node.js App]
    E --> G
    F --> G
    H[HPA] --> C
    I[Cloud Build] --> J[Container Registry]
    J --> C
    K[Cloud Monitoring] --> C
```

## 🚀 Quick Start

### Prerequisites Checklist

- [ ] Google Cloud Account with billing enabled
- [ ] Google Cloud SDK installed
- [ ] Docker installed
- [ ] kubectl installed
- [ ] Node.js 18+ installed

### One-Command Deployment

```bash
# Clone the repository
git clone https://github.com/your-username/nodejs-gke-app.git
cd nodejs-gke-app

# Run the deployment script (update PROJECT_ID first)
./deploy.sh
```
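
`deploy.sh` itself is not reproduced in this guide; a minimal sketch of what it plausibly does, using the project ID, zone, and cluster name assumed throughout:

```bash
#!/usr/bin/env bash
set -euo pipefail

PROJECT_ID="YOUR_PROJECT_ID"              # replace with your GCP project ID
CLUSTER="nodejs-production-cluster"
ZONE="us-central1-a"
IMAGE="gcr.io/${PROJECT_ID}/nodejs-gke-app:latest"

# Build and push the application image
docker build -t "${IMAGE}" .
docker push "${IMAGE}"

# Point kubectl at the cluster and apply every manifest
gcloud container clusters get-credentials "${CLUSTER}" --zone "${ZONE}"
kubectl apply -f k8s/

# Block until the rollout completes
kubectl rollout status deployment/nodejs-app -n nodejs-production
```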

## ⚙️ Prerequisites

### 1. Install Required Tools

```bash
# Install Google Cloud SDK
curl https://sdk.cloud.google.com | bash
exec -l $SHELL

# Install kubectl
gcloud components install kubectl

# Install Docker
# On macOS:
brew install --cask docker

# On Ubuntu:
sudo apt-get update && sudo apt-get install -y docker.io

# Verify installations
gcloud --version
kubectl version --client
docker --version
```

### 2. Google Cloud Setup

```bash
# Authenticate with GCP
gcloud auth login

# Set your project
gcloud config set project YOUR_PROJECT_ID

# Enable required APIs
gcloud services enable \
  container.googleapis.com \
  containerregistry.googleapis.com \
  cloudbuild.googleapis.com \
  compute.googleapis.com
```

## 📁 Project Structure

```
nodejs-gke-app/
├── src/                    # Application source code
│   ├── app.js             # Main application file
│   ├── routes/            # API routes
│   │   ├── api.js
│   │   └── health.js
│   └── middleware/        # Express middleware
│       └── security.js
├── tests/                 # Test files
│   └── app.test.js
├── k8s/                   # Kubernetes manifests
│   ├── namespace.yaml
│   ├── deployment.yaml
│   ├── service.yaml
│   ├── hpa.yaml
│   └── configmap.yaml
├── Dockerfile             # Multi-stage Dockerfile
├── .dockerignore
├── cloudbuild.yaml        # CI/CD configuration
├── deploy.sh              # Deployment script
├── cleanup.sh             # Cleanup script
├── LICENSE                # MIT License file
└── package.json
```

## 💻 Local Development

### Run Application Locally

```bash
# Install dependencies
npm install

# Start development server
npm run dev

# Run tests
npm test

# Build Docker image locally
npm run docker:build

# Test Docker image locally
npm run docker:run
```

### Test Health Endpoints

```bash
curl http://localhost:8080/health
curl http://localhost:8080/health/ready
curl http://localhost:8080/health/live
```
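
These three endpoints map directly onto Kubernetes liveness, readiness, and startup probes. A minimal sketch of `src/routes/health.js`, assuming the router is mounted at the app root (`app.use(router)`); the real readiness check is likely more involved:

```javascript
// src/routes/health.js — minimal sketch; readiness logic is illustrative
const express = require('express');
const router = express.Router();

// Liveness: the process is up and the event loop is responsive
router.get('/health/live', (req, res) => res.status(200).json({ status: 'live' }));

// Readiness: safe to receive traffic (check downstream dependencies here)
router.get('/health/ready', (req, res) => res.status(200).json({ status: 'ready' }));

// General health summary
router.get('/health', (req, res) =>
  res.status(200).json({ status: 'ok', uptime: process.uptime() })
);

module.exports = router;
```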

## ☸️ GKE Deployment

### Step 1: Build and Push Docker Image

```bash
# Build the image
docker build -t nodejs-gke-app .

# Tag for GCR
docker tag nodejs-gke-app gcr.io/YOUR_PROJECT_ID/nodejs-gke-app:latest

# Push to Google Container Registry
docker push gcr.io/YOUR_PROJECT_ID/nodejs-gke-app:latest
```
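
The repository's Dockerfile is described as multi-stage; here is a sketch consistent with the security practices this guide lists (Alpine base, non-root user) — the actual file may differ:

```dockerfile
# Stage 1: install production dependencies only
FROM node:18-alpine AS deps
WORKDIR /app
COPY package*.json ./
RUN npm ci --omit=dev

# Stage 2: minimal runtime image running as the non-root "node" user
FROM node:18-alpine
WORKDIR /app
ENV NODE_ENV=production
COPY --from=deps /app/node_modules ./node_modules
COPY src/ ./src/
USER node
EXPOSE 8080
CMD ["node", "src/app.js"]
```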

### Step 2: Create GKE Cluster

```bash
# Create production cluster
gcloud container clusters create nodejs-production-cluster \
  --zone=us-central1-a \
  --num-nodes=2 \
  --machine-type=e2-medium \
  --enable-autoscaling \
  --min-nodes=1 \
  --max-nodes=5 \
  --enable-ip-alias

# Get cluster credentials
gcloud container clusters get-credentials nodejs-production-cluster \
  --zone us-central1-a
```

### Step 3: Deploy Application

```bash
# Create namespace
kubectl apply -f k8s/namespace.yaml

# Deploy application
kubectl apply -f k8s/configmap.yaml
kubectl apply -f k8s/deployment.yaml
kubectl apply -f k8s/service.yaml
kubectl apply -f k8s/hpa.yaml

# Wait for deployment
kubectl rollout status deployment/nodejs-app -n nodejs-production

# Get external IP
kubectl get service nodejs-app-service -n nodejs-production
```
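
For reference, a sketch of what `k8s/deployment.yaml` and `k8s/service.yaml` plausibly contain, reflecting the probes, security context, and resource limits described in this guide (concrete values are illustrative):

```yaml
# k8s/deployment.yaml (sketch — values are illustrative)
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nodejs-app
  namespace: nodejs-production
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nodejs-app
  template:
    metadata:
      labels:
        app: nodejs-app
    spec:
      securityContext:
        runAsNonRoot: true
      containers:
        - name: nodejs-app
          image: gcr.io/YOUR_PROJECT_ID/nodejs-gke-app:latest
          ports:
            - containerPort: 8080
          resources:
            requests: { cpu: 100m, memory: 128Mi }
            limits: { cpu: 500m, memory: 256Mi }
          livenessProbe:
            httpGet: { path: /health/live, port: 8080 }
          readinessProbe:
            httpGet: { path: /health/ready, port: 8080 }
---
# k8s/service.yaml (sketch)
apiVersion: v1
kind: Service
metadata:
  name: nodejs-app-service
  namespace: nodejs-production
spec:
  type: LoadBalancer
  selector:
    app: nodejs-app
  ports:
    - port: 80
      targetPort: 8080
```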

### Step 4: Verify Deployment

```bash
# Check all resources
kubectl get all -n nodejs-production

# View pods
kubectl get pods -n nodejs-production

# Check service details
kubectl describe service nodejs-app-service -n nodejs-production

# Test the application
EXTERNAL_IP=$(kubectl get service nodejs-app-service -n nodejs-production -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
curl http://$EXTERNAL_IP
curl http://$EXTERNAL_IP/health
```

## 📊 Monitoring & Scaling

### Application Monitoring

```bash
# View application logs
kubectl logs -n nodejs-production -l app=nodejs-app --tail=50

# Stream logs in real-time
kubectl logs -n nodejs-production -l app=nodejs-app -f

# View resource usage
kubectl top pods -n nodejs-production
kubectl top nodes

# Check HPA status
kubectl get hpa -n nodejs-production
```

### Auto-scaling

The application includes a Horizontal Pod Autoscaler (sketched below) configured to:
- Scale on CPU (70%) and memory (80%) utilization
- Keep a minimum of 2 and a maximum of 10 pods
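
A sketch of `k8s/hpa.yaml` matching those numbers, using the `autoscaling/v2` API:

```yaml
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: nodejs-app-hpa
  namespace: nodejs-production
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: nodejs-app
  minReplicas: 2
  maxReplicas: 10
  metrics:
    - type: Resource
      resource:
        name: cpu
        target: { type: Utilization, averageUtilization: 70 }
    - type: Resource
      resource:
        name: memory
        target: { type: Utilization, averageUtilization: 80 }
```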

### Manual Scaling

```bash
# Scale manually
kubectl scale deployment nodejs-app --replicas=5 -n nodejs-production

# Check current replicas
kubectl get deployment nodejs-app -n nodejs-production
```

## 🔄 CI/CD Pipeline

### Automated Deployment with Cloud Build

The project includes `cloudbuild.yaml` for automated CI/CD:

```yaml
# Build, push, and deploy automatically on git push
steps:
  - name: 'gcr.io/cloud-builders/docker'
    args: ['build', '-t', 'gcr.io/$PROJECT_ID/nodejs-gke-app:$COMMIT_SHA', '.']
  - name: 'gcr.io/cloud-builders/docker'
    args: ['push', 'gcr.io/$PROJECT_ID/nodejs-gke-app:$COMMIT_SHA']
  - name: 'gcr.io/cloud-builders/gke-deploy'
    args: ['run', '--filename=k8s/', '--image=gcr.io/$PROJECT_ID/nodejs-gke-app:$COMMIT_SHA']
```

### Trigger Cloud Build

```bash
# Submit build manually
gcloud builds submit --config cloudbuild.yaml
```

## 🐛 Troubleshooting

### Common Issues

1. **Image Pull Errors**
   ```bash
   # Check image exists in GCR
   gcloud container images list-tags gcr.io/YOUR_PROJECT_ID/nodejs-gke-app
   
   # Verify GCR permissions
   gcloud projects get-iam-policy YOUR_PROJECT_ID
   ```

2. **Pod CrashLoopBackOff**
   ```bash
   # Check pod logs
   kubectl logs -n nodejs-production <pod-name>
   
   # Describe pod for details
   kubectl describe pod -n nodejs-production <pod-name>
   ```

3. **Service Not Accessible**
   ```bash
   # Check service endpoints
   kubectl get endpoints nodejs-app-service -n nodejs-production
   
   # Check firewall rules
   gcloud compute firewall-rules list
   ```

### Debugging Commands

```bash
# Get detailed pod information
kubectl describe pod -n nodejs-production -l app=nodejs-app

# Check cluster events
kubectl get events -n nodejs-production --sort-by=.metadata.creationTimestamp

# Access pod shell
kubectl exec -n nodejs-production -it <pod-name> -- sh

# Check network connectivity
kubectl run -it --rm debug --image=busybox -n nodejs-production -- sh
```

## 🧹 Cleanup

### Remove All Resources

```bash
# Run cleanup script
./cleanup.sh

# Or manually remove resources
kubectl delete -f k8s/ --ignore-not-found=true
gcloud container clusters delete nodejs-production-cluster --zone=us-central1-a --quiet
gcloud container images delete gcr.io/YOUR_PROJECT_ID/nodejs-gke-app:latest --quiet
```

## 📝 Best Practices Implemented

### Security
- ✅ Non-root user in containers
- ✅ Read-only root filesystem
- ✅ Security contexts in pods
- ✅ Minimal base images (Alpine Linux)
- ✅ Regular security updates

### Reliability
- ✅ Multiple replicas for high availability
- ✅ Liveness, readiness, and startup probes
- ✅ Resource limits and requests
- ✅ Pod Disruption Budget (see the sketch after this list)
- ✅ Rolling update strategy
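
The project structure above does not list a `pdb.yaml`, so the Pod Disruption Budget presumably lives in one of the existing manifests; a minimal sketch if you want to add it separately:

```yaml
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: nodejs-app-pdb
  namespace: nodejs-production
spec:
  minAvailable: 1              # keep at least one pod up during voluntary disruptions
  selector:
    matchLabels:
      app: nodejs-app
```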

### Performance
- ✅ Horizontal Pod Autoscaler
- ✅ Proper resource sizing
- ✅ Compression middleware
- ✅ Rate limiting
- ✅ Connection pooling

### Monitoring
- ✅ Health check endpoints
- ✅ Structured logging
- ✅ Resource metrics
- ✅ Prometheus metrics ready
- ✅ Cloud Monitoring integration

## 📄 License

This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.

```
MIT License

Copyright (c) 2024 Node.js GKE Deployment Guide

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
```

## 🤝 Contributing

1. Fork the repository
2. Create a feature branch (`git checkout -b feature/amazing-feature`)
3. Commit your changes (`git commit -m 'Add some amazing feature'`)
4. Push to the branch (`git push origin feature/amazing-feature`)
5. Open a Pull Request

## 🙏 Acknowledgments

- Google Cloud Platform documentation
- Kubernetes community
- Node.js best practices community

---

**Note**: Remember to replace `YOUR_PROJECT_ID` with your actual Google Cloud Project ID in all commands and configuration files.

For support, please open an issue in the GitHub repository or contact the maintainers.