Compare commits

...

5 commits

Author SHA1 Message Date
b9d4623d63 chore: add CI/CD tooling and project configuration
- Add commit-message-generator skill for standardized commit messages
 - Add buildspec.yml for CodeBuild Docker image build process
 - Add build-artifact.sh script for creating deployment packages
 - Add .gitignore to exclude zip files from version control
2026-01-04 12:47:40 +09:00
5e9c27cbf7 feat: enhance Lambda function configuration and add outputs
- Add parameters for source bucket, repo URL, and branch
- Increase timeout to 300s and memory to 512MB
- Add ARM64 architecture support
- Add S3 GetObject and ListBucket permissions
- Use ImportValue for ECR repository ARN
- Add resource names for better identification
- Export API endpoint, Lambda ARN, and function name
2026-01-04 12:46:05 +09:00
639044388f feat: add ECR lifecycle policy and additional outputs
- Add lifecycle policy to keep only last 10 images
- Export repository ARN and name for cross-stack references
2026-01-04 12:45:08 +09:00
45c77bab2b feat: add CodePipeline for Lambda deployment automation
- Add CodeBuild project for building and pushing Docker images
- Add CodePipeline with S3 source and build stages
- Add EventBridge rule to trigger pipeline on S3 object creation
- Configure IAM roles and policies for pipeline execution
2026-01-04 12:44:31 +09:00
2ebcc5541e refactor: improve Lambda handler and update Go to 1.25.5
- Update Dockerfile: Go 1.25.5, fix build context paths
- Use git archive instead of zip to exclude .git directory
- Add context support for git/zip operations (timeout control)
- Optimize git clone (--depth 1, --single-branch)
- Improve error handling (%w wrapping) and logging consistency
- Add case-insensitive HTTP header lookup for webhooks
2026-01-04 10:02:12 +09:00
9 changed files with 453 additions and 51 deletions

View file

@ -0,0 +1,32 @@
---
name: commit-message-generator
description: Generate appropriate commit messages based on Git diffs
---
## Prerequisites
- This Skill retrieves Git diffs and suggests meaningful commit messages
- Message format should follow Conventional Commits
- Commit messages should be in English
- **Never perform Git commit or Git push**
## Steps
1. Run `git status` to check modified files
2. Retrieve diffs with `git diff` or `git diff --cached`
3. Analyze the diff content and determine if changes should be split into multiple commits
4. For each logical group of changes:
- List the target files
- Generate a message in English compliant with Conventional Commits
- Suggest the command: `git add <files> && git commit -m "<message>"`
5. If changes are extensive and should be split, provide:
- Rationale for the split
- Multiple commit suggestions with their respective target files and messages
## Commit Splitting Guidelines
- Split commits when changes span multiple logical concerns (e.g., feature + refactoring)
- Group related files that serve the same purpose
- Keep each commit focused on a single, atomic change
## Notes
- **This Skill must never execute `git commit` or `git push`**
- Only suggest commands; execution is entirely at the user's discretion
- Users must explicitly perform commits and pushes themselves

1
.gitignore vendored Normal file
View file

@ -0,0 +1 @@
*.zip

20
ci/buildspec.yml Normal file
View file

@ -0,0 +1,20 @@
# CodeBuild buildspec: builds the Lambda container image for ARM64 and
# pushes it to ECR with both a unique timestamp tag and the "latest" tag.
# Required environment variables (set by the CodeBuild project):
#   AWS_DEFAULT_REGION  - region used for the ECR login
#   ECR_REPOSITORY_URI  - full URI of the target ECR repository
version: 0.2

phases:
  pre_build:
    commands:
      - echo Logging in to Amazon ECR...
      - aws ecr get-login-password --region $AWS_DEFAULT_REGION | docker login --username AWS --password-stdin $ECR_REPOSITORY_URI
      # Unique, monotonically increasing tag so earlier images remain addressable.
      - IMAGE_TAG=$(date +%s)
      - echo "Image tag will be ${IMAGE_TAG}"
  build:
    commands:
      - echo Build started on `date`
      - echo Building Docker image for ARM64/Lambda...
      # Build context is the repository root; Dockerfile lives under docker/.
      - docker build --platform linux/arm64 -f docker/Dockerfile -t $ECR_REPOSITORY_URI:$IMAGE_TAG -t $ECR_REPOSITORY_URI:latest .
  post_build:
    commands:
      - echo Build completed on `date`
      - echo Pushing Docker images...
      - docker push $ECR_REPOSITORY_URI:$IMAGE_TAG
      - docker push $ECR_REPOSITORY_URI:latest
      - echo "Image pushed with tags ${IMAGE_TAG} and latest"

View file

@ -11,6 +11,7 @@ import (
"os" "os"
"os/exec" "os/exec"
"path/filepath" "path/filepath"
"strings"
"github.com/aws/aws-lambda-go/lambda" "github.com/aws/aws-lambda-go/lambda"
"github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/aws"
@ -34,6 +35,7 @@ type Response struct {
} }
var commandRunner = exec.Command var commandRunner = exec.Command
var commandRunnerContext = exec.CommandContext
// verifySignature computes an HMAC using the provided secret and compares it to the incoming signature. // verifySignature computes an HMAC using the provided secret and compares it to the incoming signature.
func verifySignature(secret, body, signatureHeader string) bool { func verifySignature(secret, body, signatureHeader string) bool {
@ -50,6 +52,15 @@ func verifySignature(secret, body, signatureHeader string) bool {
return hmac.Equal([]byte(receivedSig), []byte(expectedSig)) return hmac.Equal([]byte(receivedSig), []byte(expectedSig))
} }
func getHeader(headers map[string]string, key string) string {
for k, v := range headers {
if strings.EqualFold(k, key) {
return v
}
}
return ""
}
func handleRequest(ctx context.Context, event json.RawMessage) (Response, error) { func handleRequest(ctx context.Context, event json.RawMessage) (Response, error) {
// For demonstration, assume the event JSON includes a "body" and "headers" map. // For demonstration, assume the event JSON includes a "body" and "headers" map.
var req struct { var req struct {
@ -67,7 +78,7 @@ func handleRequest(ctx context.Context, event json.RawMessage) (Response, error)
return Response{StatusCode: 500, Headers: map[string]string{"Content-Type": "application/json"}, Body: "{\"message\":\"Server configuration error\"}"}, fmt.Errorf("WEBHOOK_SECRET is not set") return Response{StatusCode: 500, Headers: map[string]string{"Content-Type": "application/json"}, Body: "{\"message\":\"Server configuration error\"}"}, fmt.Errorf("WEBHOOK_SECRET is not set")
} }
signature := req.Headers["X-Hub-Signature-256"] // adjust this header name as appropriate. signature := getHeader(req.Headers, "X-Hub-Signature-256")
if signature == "" || !verifySignature(secret, req.Body, signature) { if signature == "" || !verifySignature(secret, req.Body, signature) {
log.Println("Signature verification failed") log.Println("Signature verification failed")
return Response{StatusCode: 401, Headers: map[string]string{"Content-Type": "application/json"}, Body: "{\"message\":\"Unauthorized\"}"}, fmt.Errorf("signature verification failed") return Response{StatusCode: 401, Headers: map[string]string{"Content-Type": "application/json"}, Body: "{\"message\":\"Unauthorized\"}"}, fmt.Errorf("signature verification failed")
@ -95,47 +106,42 @@ func main() {
} }
func runDeploymentProcess(ctx context.Context) error { func runDeploymentProcess(ctx context.Context) error {
cfg, err := loadConfig() cfg, err := loadConfig()
if err != nil { if err != nil {
log.Printf("Configuration error: %v", err) return fmt.Errorf("load config: %w", err)
return err
} }
// Create a unique temp directory for this run
repoDir, err := os.MkdirTemp("", "repo-*") repoDir, err := os.MkdirTemp("", "repo-*")
if err != nil { if err != nil {
log.Printf("Error creating temporary directory: %v", err) return fmt.Errorf("create temp directory: %w", err)
return err
} }
defer os.RemoveAll(repoDir) defer os.RemoveAll(repoDir)
zipFilePath := filepath.Join(repoDir, "source.zip") zipFilePath := filepath.Join(repoDir, "source.zip")
// 1. Clone the repository // 1. Clone the repository
if err := cloneRepository(ctx, cfg.RepoURL, cfg.RepoBranch, repoDir); err != nil { if err := cloneRepository(ctx, cfg.RepoURL, cfg.RepoBranch, repoDir); err != nil {
log.Printf("Failure in cloning: %v", err)
return err return err
} }
// 2. Create a ZIP archive of the repository // 2. Create a ZIP archive of the repository (without .git)
if err := createZipArchive(ctx, repoDir, zipFilePath); err != nil { if err := createZipArchive(ctx, repoDir, zipFilePath); err != nil {
log.Printf("Failure in creating ZIP archive: %v", err)
return err return err
} }
// 3. Upload the ZIP file to S3 // 3. Upload the ZIP file to S3
cfg_s3, err := config.LoadDefaultConfig(ctx, config.WithRegion(cfg.AWSRegion)) awsCfg, err := config.LoadDefaultConfig(ctx, config.WithRegion(cfg.AWSRegion))
if err != nil { if err != nil {
log.Printf("Error loading configuration: %v", err) return fmt.Errorf("load AWS config: %w", err)
return err
} }
s3Client := s3.NewFromConfig(cfg_s3)
s3Client := s3.NewFromConfig(awsCfg)
uploader := manager.NewUploader(s3Client) uploader := manager.NewUploader(s3Client)
if err := uploadToS3WithUploader(ctx, zipFilePath, cfg.S3Bucket, cfg.S3Key, uploader); err != nil { if err := uploadToS3WithUploader(ctx, zipFilePath, cfg.S3Bucket, cfg.S3Key, uploader); err != nil {
log.Printf("Failure in uploading to S3: %v", err)
return err return err
} }
log.Println("Deployment process completed successfully")
return nil return nil
} }
@ -169,28 +175,28 @@ func loadConfig() (*Config, error) {
}, nil }, nil
} }
func cloneRepository(_ context.Context, repoURL, repoBranch, repoDir string) error { func cloneRepository(ctx context.Context, repoURL, repoBranch, repoDir string) error {
cloneCmd := commandRunner("git", "clone", "--branch", repoBranch, repoURL, repoDir) log.Printf("Cloning repository (branch=%s)...", repoBranch)
cloneCmd := commandRunnerContext(ctx, "git", "clone", "--depth", "1", "--single-branch", "--branch", repoBranch, repoURL, repoDir)
cloneCmd.Stdout = os.Stdout cloneCmd.Stdout = os.Stdout
cloneCmd.Stderr = os.Stderr cloneCmd.Stderr = os.Stderr
fmt.Printf("Cloning repository %s (branch %s)...\n", repoURL, repoBranch) cloneCmd.Env = append(os.Environ(), "GIT_TERMINAL_PROMPT=0")
if err := cloneCmd.Run(); err != nil { if err := cloneCmd.Run(); err != nil {
return fmt.Errorf("error cloning repository: %v", err) return fmt.Errorf("git clone: %w", err)
} }
fmt.Println("Repository cloned successfully.") log.Println("Repository cloned successfully")
return nil return nil
} }
func createZipArchive(_ context.Context, repoDir, zipFilePath string) error { func createZipArchive(ctx context.Context, repoDir, zipFilePath string) error {
zipCmd := commandRunner("zip", "-r", zipFilePath, ".") log.Println("Creating ZIP archive (using git archive)...")
zipCmd.Dir = repoDir // Change to the cloned repo directory archiveCmd := commandRunnerContext(ctx, "git", "-C", repoDir, "archive", "--format=zip", "--output", zipFilePath, "HEAD")
zipCmd.Stdout = os.Stdout archiveCmd.Stdout = os.Stdout
zipCmd.Stderr = os.Stderr archiveCmd.Stderr = os.Stderr
fmt.Println("Creating ZIP archive of the repository...") if err := archiveCmd.Run(); err != nil {
if err := zipCmd.Run(); err != nil { return fmt.Errorf("git archive: %w", err)
return fmt.Errorf("error creating ZIP archive: %v", err)
} }
fmt.Printf("ZIP archive created at %s.\n", zipFilePath) log.Printf("ZIP archive created at %s", zipFilePath)
return nil return nil
} }
@ -199,26 +205,22 @@ type Uploader interface {
} }
func uploadToS3WithUploader(ctx context.Context, zipPath, bucket, key string, uploader Uploader) error { func uploadToS3WithUploader(ctx context.Context, zipPath, bucket, key string, uploader Uploader) error {
// Open the ZIP file
f, err := os.Open(zipPath) f, err := os.Open(zipPath)
if err != nil { if err != nil {
return fmt.Errorf("Error opening ZIP file: %v", err) return fmt.Errorf("open zip file: %w", err)
} }
defer f.Close() defer f.Close()
// Upload the file to S3. log.Printf("Uploading %s to s3://%s/%s...", zipPath, bucket, key)
fmt.Printf("Uploading %s to s3://%s/%s...\n", zipPath, bucket, key)
result, err := uploader.Upload(ctx, &s3.PutObjectInput{ result, err := uploader.Upload(ctx, &s3.PutObjectInput{
Bucket: aws.String(bucket), Bucket: aws.String(bucket),
Key: aws.String(key), Key: aws.String(key),
Body: f, Body: f,
}) })
if err != nil { if err != nil {
return fmt.Errorf("failed to upload file: %v", err) return fmt.Errorf("upload to S3: %w", err)
} }
fmt.Printf("Successfully uploaded to %s\n", result.Location) log.Printf("Successfully uploaded: %s", result.Location)
return nil return nil
} }

View file

@ -1,11 +1,12 @@
FROM docker.io/golang:1.24.2-bookworm as build FROM docker.io/golang:1.25.5-bookworm as build
WORKDIR /app WORKDIR /app
# Copy dependencies list # NOTE: This Dockerfile assumes the build context is the repository root.
COPY ./app/go.mod ./ # Example: docker build -f docker/Dockerfile .
COPY ./app/go.sum ./ COPY go.mod go.sum ./
RUN go mod download
COPY . .
# Build with optional lambda.norpc tag # Build with optional lambda.norpc tag
COPY ./app/main.go ./ RUN go build -tags lambda.norpc -o main ./cmd/lambda
RUN go build -tags lambda.norpc -o main main.go
# Copy artifacts to a clean image # Copy artifacts to a clean image
FROM public.ecr.aws/lambda/provided:al2023 FROM public.ecr.aws/lambda/provided:al2023
# Install git and zip using dnf (Amazon Linux 2023) # Install git and zip using dnf (Amazon Linux 2023)

View file

@ -0,0 +1,233 @@
# CloudFormation template: S3 -> CodePipeline -> CodeBuild (ARM) -> ECR.
# Rebuilds and pushes the blog Lambda container image whenever a new
# source archive is uploaded to the source bucket.
AWSTemplateFormatVersion: "2010-09-09"
Description: S3 -> CodePipeline -> CodeBuild(ARM) -> ECR pipeline for Blog Lambda

Parameters:
  SourceBucketName:
    Type: String
    Default: blog-lambda-source-bucket
    Description: S3 bucket name for source code
  SourceObjectKey:
    Type: String
    Default: blog-lambda-source.zip
    Description: S3 object key for source code archive

Resources:
  # Versioned bucket holding the source zip. EventBridge notifications are
  # enabled so S3SourceChangeRule (below) can start the pipeline on upload.
  SourceBucket:
    Type: AWS::S3::Bucket
    Properties:
      BucketName: !Sub "${AWS::Region}-${AWS::AccountId}-${SourceBucketName}"
      Tags:
        - Key: Project
          Value: Blog-Deployment
      VersioningConfiguration:
        Status: Enabled
      NotificationConfiguration:
        EventBridgeConfiguration:
          EventBridgeEnabled: true

  # Role assumed by CodeBuild: CloudWatch Logs, ECR push to the imported
  # repository, and S3 access to the artifact and source buckets.
  CodeBuildRole:
    Type: AWS::IAM::Role
    Properties:
      AssumeRolePolicyDocument:
        Version: "2012-10-17"
        Statement:
          - Effect: Allow
            Principal:
              Service: codebuild.amazonaws.com
            Action: sts:AssumeRole
      Policies:
        - PolicyName: CodeBuildPolicy
          PolicyDocument:
            Version: "2012-10-17"
            Statement:
              - Effect: Allow
                Action:
                  - logs:CreateLogGroup
                  - logs:CreateLogStream
                  - logs:PutLogEvents
                Resource: "*"
              # GetAuthorizationToken does not support resource-level scoping.
              - Effect: Allow
                Action:
                  - ecr:GetAuthorizationToken
                Resource: "*"
              - Effect: Allow
                Action:
                  - ecr:BatchCheckLayerAvailability
                  - ecr:InitiateLayerUpload
                  - ecr:UploadLayerPart
                  - ecr:CompleteLayerUpload
                  - ecr:PutImage
                Resource:
                  Fn::ImportValue: BlogDeployment-RepositoryArn
              - Effect: Allow
                Action:
                  - s3:GetObject
                  - s3:PutObject
                  - s3:ListBucket
                Resource:
                  - !Sub "arn:aws:s3:::codebuild-${AWS::Region}-${AWS::AccountId}-input-bucket"
                  - !Sub "arn:aws:s3:::codebuild-${AWS::Region}-${AWS::AccountId}-input-bucket/*"
                  - !GetAtt SourceBucket.Arn
                  - !Sub "${SourceBucket.Arn}/*"

  # ARM build project. PrivilegedMode is required to run Docker builds
  # inside the CodeBuild container.
  BlogLambdaBuildProject:
    Type: AWS::CodeBuild::Project
    Properties:
      Name: blog-lambda-build
      ServiceRole: !GetAtt CodeBuildRole.Arn
      Artifacts:
        Type: CODEPIPELINE
      Environment:
        Type: ARM_CONTAINER
        ComputeType: BUILD_GENERAL1_SMALL
        Image: aws/codebuild/amazonlinux2-aarch64-standard:3.0
        PrivilegedMode: true
        EnvironmentVariables:
          - Name: ECR_REPOSITORY_URI
            Value:
              Fn::ImportValue: BlogDeployment-RepositoryUri
          - Name: AWS_DEFAULT_REGION
            Value: !Ref AWS::Region
          - Name: AWS_ACCOUNT_ID
            Value: !Ref AWS::AccountId
      Source:
        Type: CODEPIPELINE
        # Buildspec path is relative to the root of the source archive.
        BuildSpec: ci/buildspec.yml
      TimeoutInMinutes: 30

  # Role assumed by the pipeline service itself.
  CodePipelineRole:
    Type: AWS::IAM::Role
    Properties:
      AssumeRolePolicyDocument:
        Version: "2012-10-17"
        Statement:
          - Effect: Allow
            Principal:
              Service: codepipeline.amazonaws.com
            Action: sts:AssumeRole
      Policies:
        - PolicyName: CodePipelinePolicy
          PolicyDocument:
            Version: '2012-10-17'
            Statement:
              - Effect: Allow
                Action:
                  - s3:GetBucketAcl
                  - s3:GetObjectTagging
                  - s3:GetObjectVersionTagging
                  - s3:GetObject
                  - s3:GetObjectVersion
                  - s3:PutObject
                  - s3:ListBucket
                  - s3:GetBucketLocation
                  - s3:GetBucketVersioning
                Resource:
                  - !Sub "arn:aws:s3:::codebuild-${AWS::Region}-${AWS::AccountId}-input-bucket"
                  - !Sub "arn:aws:s3:::codebuild-${AWS::Region}-${AWS::AccountId}-input-bucket/*"
                  - !GetAtt SourceBucket.Arn
                  - !Sub "${SourceBucket.Arn}/*"
              - Effect: Allow
                Action:
                  - codebuild:StartBuild
                  - codebuild:BatchGetBuilds
                Resource:
                  - !Sub "arn:aws:codebuild:${AWS::Region}:${AWS::AccountId}:build/*"
                  - !Sub "arn:aws:codebuild:${AWS::Region}:${AWS::AccountId}:project/*"
              - Effect: Allow
                Action:
                  - codepipeline:PutApprovalResult
                  - codepipeline:StartPipelineExecution
                Resource: !Sub "arn:aws:codepipeline:${AWS::Region}:${AWS::AccountId}:*"

  # Two-stage pipeline: pull the zip from S3, then build & push the image.
  BlogLambdaPipeline:
    Type: AWS::CodePipeline::Pipeline
    Properties:
      Name: blog-lambda-pipeline
      PipelineType: V2
      RoleArn: !GetAtt CodePipelineRole.Arn
      ArtifactStore:
        Type: S3
        # NOTE(review): this artifact bucket is not created by this template;
        # it must already exist in the account — confirm before deploying.
        Location: !Sub "codebuild-${AWS::Region}-${AWS::AccountId}-input-bucket"
      Stages:
        - Name: Source
          Actions:
            - Name: S3Source
              ActionTypeId:
                Category: Source
                Owner: AWS
                Provider: S3
                Version: "1"
              Configuration:
                S3Bucket: !Ref SourceBucket
                S3ObjectKey: !Ref SourceObjectKey
                # Polling disabled; S3SourceChangeRule triggers executions.
                PollForSourceChanges: false
              OutputArtifacts:
                - Name: SourceOutput
        - Name: Build
          Actions:
            - Name: BuildAndPushImage
              ActionTypeId:
                Category: Build
                Owner: AWS
                Provider: CodeBuild
                Version: "1"
              InputArtifacts:
                - Name: SourceOutput
              Configuration:
                ProjectName: !Ref BlogLambdaBuildProject

  # Starts the pipeline whenever the source object is (re)uploaded.
  S3SourceChangeRule:
    Type: AWS::Events::Rule
    Properties:
      Description: Trigger CodePipeline on S3 source update
      EventPattern:
        source:
          - aws.s3
        detail-type:
          - Object Created
        detail:
          bucket:
            name:
              - !Ref SourceBucket
          object:
            key:
              - !Ref SourceObjectKey
      Targets:
        # !Ref on a pipeline yields its name, so this Sub builds the ARN.
        - Arn: !Sub "arn:aws:codepipeline:${AWS::Region}:${AWS::AccountId}:${BlogLambdaPipeline}"
          RoleArn: !GetAtt EventBridgeInvokePipelineRole.Arn
          Id: CodePipelineTarget

  # Minimal role allowing EventBridge to start only this pipeline.
  EventBridgeInvokePipelineRole:
    Type: AWS::IAM::Role
    Properties:
      AssumeRolePolicyDocument:
        Version: '2012-10-17'
        Statement:
          - Effect: Allow
            Principal:
              Service: events.amazonaws.com
            Action: sts:AssumeRole
      Policies:
        - PolicyName: AllowStartPipeline
          PolicyDocument:
            Version: '2012-10-17'
            Statement:
              - Effect: Allow
                Action:
                  - codepipeline:StartPipelineExecution
                Resource: !Sub "arn:aws:codepipeline:${AWS::Region}:${AWS::AccountId}:${BlogLambdaPipeline}"

Outputs:
  SourceBucketName:
    Description: S3 bucket for source code
    Value: !Ref SourceBucket
    Export:
      Name: !Sub "${AWS::StackName}-SourceBucket"
  PipelineName:
    Description: CodePipeline name
    Value: !Ref BlogLambdaPipeline
    Export:
      Name: !Sub "${AWS::StackName}-PipelineName"

View file

@ -14,6 +14,24 @@ Resources:
RepositoryName: !Ref RepositoryName RepositoryName: !Ref RepositoryName
ImageScanningConfiguration: ImageScanningConfiguration:
ScanOnPush: true ScanOnPush: true
LifecyclePolicy:
LifecyclePolicyText: |
{
"rules": [
{
"rulePriority": 1,
"description": "Keep last 10 images",
"selection": {
"tagStatus": "any",
"countType": "imageCountMoreThan",
"countNumber": 10
},
"action": {
"type": "expire"
}
}
]
}
RepositoryPolicyText: RepositoryPolicyText:
Version: "2012-10-17" Version: "2012-10-17"
Statement: Statement:
@ -32,3 +50,15 @@ Outputs:
Value: !GetAtt Repository.RepositoryUri Value: !GetAtt Repository.RepositoryUri
Export: Export:
Name: BlogDeployment-RepositoryUri Name: BlogDeployment-RepositoryUri
RepositoryArn:
Description: ARN of the ECR repository
Value: !GetAtt Repository.Arn
Export:
Name: BlogDeployment-RepositoryArn
RepositoryName:
Description: Name of the ECR repository
Value: !Ref RepositoryName
Export:
Name: BlogDeployment-RepositoryName

View file

@ -1,10 +1,26 @@
AWSTemplateFormatVersion: '2010-09-09' AWSTemplateFormatVersion: '2010-09-09'
Transform: AWS::Serverless-2016-10-31 Transform: AWS::Serverless-2016-10-31
Parameters: Parameters:
StageName: StageName:
Type: String Type: String
Default: Prod Default: Prod
Description: Name of the API stage. Description: Name of the API stage
SourceBucketName:
Type: String
Default: naputo-blog-source
Description: S3 bucket for blog source files
RepoURL:
Type: String
Default: "https://git.n-daisuke897.com/nakada0907/n-daisuke897-blog.git"
Description: Git repository URL
RepoBranch:
Type: String
Default: main
Description: Git repository branch
Resources: Resources:
@ -27,7 +43,11 @@ Resources:
- Effect: Allow - Effect: Allow
Action: Action:
- s3:PutObject - s3:PutObject
Resource: arn:aws:s3:::naputo-blog-source/* - s3:GetObject
- s3:ListBucket
Resource:
- !Sub "arn:aws:s3:::${SourceBucketName}"
- !Sub "arn:aws:s3:::${SourceBucketName}/*"
- PolicyName: LambdaEcrImagePullPolicy - PolicyName: LambdaEcrImagePullPolicy
PolicyDocument: PolicyDocument:
Version: '2012-10-17' Version: '2012-10-17'
@ -41,26 +61,30 @@ Resources:
- ecr:BatchGetImage - ecr:BatchGetImage
- ecr:BatchCheckLayerAvailability - ecr:BatchCheckLayerAvailability
- ecr:GetDownloadUrlForLayer - ecr:GetDownloadUrlForLayer
Resource: !Sub "arn:aws:ecr:${AWS::Region}:${AWS::AccountId}:repository/*" Resource:
Fn::ImportValue: BlogDeployment-RepositoryArn
ManagedPolicyArns: ManagedPolicyArns:
- arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole - arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole
MyLambdaFunction: MyLambdaFunction:
Type: AWS::Serverless::Function Type: AWS::Serverless::Function
Properties: Properties:
FunctionName: blog-deployment-webhook-handler
PackageType: Image PackageType: Image
ImageUri: ImageUri:
!Join !Join
- ":" - ":"
- - !ImportValue BlogDeployment-RepositoryUri - - !ImportValue BlogDeployment-RepositoryUri
- "latest" - "latest"
Timeout: 30 Timeout: 300
MemorySize: 256 MemorySize: 512
Architectures:
- arm64
Environment: Environment:
Variables: Variables:
REPO_URL: "https://git.n-daisuke897.com/nakada0907/n-daisuke897-blog.git" REPO_URL: !Ref RepoURL
REPO_BRANCH: "main" REPO_BRANCH: !Ref RepoBranch
S3_BUCKET: "naputo-blog-source" S3_BUCKET: !Ref SourceBucketName
S3_KEY: "source.zip" S3_KEY: "source.zip"
WEBHOOK_SECRET: WEBHOOK_SECRET:
Fn::Sub: Fn::Sub:
@ -79,6 +103,7 @@ Resources:
MyApi: MyApi:
Type: AWS::Serverless::Api Type: AWS::Serverless::Api
Properties: Properties:
Name: blog-deployment-webhook-api
StageName: !Ref StageName StageName: !Ref StageName
EndpointConfiguration: REGIONAL EndpointConfiguration: REGIONAL
DefinitionBody: DefinitionBody:
@ -104,3 +129,22 @@ Resources:
description: "Unauthorized - Signature verification failed" description: "Unauthorized - Signature verification failed"
'500': '500':
description: "Server error - Deployment process failed" description: "Server error - Deployment process failed"
Outputs:
ApiEndpoint:
Description: API Gateway endpoint URL for webhook
Value: !Sub "https://${MyApi}.execute-api.${AWS::Region}.amazonaws.com/${StageName}/forgejo-webhook"
Export:
Name: !Sub "${AWS::StackName}-ApiEndpoint"
LambdaFunctionArn:
Description: Lambda function ARN
Value: !GetAtt MyLambdaFunction.Arn
Export:
Name: !Sub "${AWS::StackName}-LambdaArn"
LambdaFunctionName:
Description: Lambda function name
Value: !Ref MyLambdaFunction
Export:
Name: !Sub "${AWS::StackName}-LambdaName"

39
scripts/build-artifact.sh Executable file
View file

@ -0,0 +1,39 @@
#!/usr/bin/env bash
#
# build-artifact.sh — package the sources CodeBuild needs (cmd/, docker/,
# ci/, go.mod, go.sum) into artifacts/blog-lambda-source.zip, the archive
# consumed by the pipeline's S3 source stage.
set -euo pipefail

ROOT_DIR="$(cd "$(dirname "$0")/.." && pwd)"
ARTIFACT_DIR="${ROOT_DIR}/artifacts"
ZIP_PATH="${ARTIFACT_DIR}/blog-lambda-source.zip"

echo "Building artifact for blog-lambda pipeline..."
echo "Root directory: ${ROOT_DIR}"

mkdir -p "${ARTIFACT_DIR}"

# Remove any previous artifact: `zip -r` onto an existing archive updates
# it in place, which would keep stale entries from earlier runs.
rm -f "${ZIP_PATH}"

# Create temporary directory for staging files; removed on any exit path.
tmpdir="$(mktemp -d)"
trap 'rm -rf "${tmpdir}"' EXIT

# Copy only the files CodeBuild needs.
echo "Copying source files..."
cp -r "${ROOT_DIR}/cmd" "${tmpdir}/cmd"
cp -r "${ROOT_DIR}/docker" "${tmpdir}/docker"
cp -r "${ROOT_DIR}/ci" "${tmpdir}/ci"
cp "${ROOT_DIR}/go.mod" "${tmpdir}/go.mod"
cp "${ROOT_DIR}/go.sum" "${tmpdir}/go.sum"

# Create the ZIP archive from the staging directory so archive paths are
# rooted at the repository layout CodeBuild expects.
echo "Creating ZIP archive..."
(
    cd "${tmpdir}"
    zip -r "${ZIP_PATH}" . -x "*.git*" "*.DS_Store"
)

# Display artifact info
echo ""
echo "✅ Artifact created successfully!"
echo "   Path: ${ZIP_PATH}"
echo "   Size: $(du -h "${ZIP_PATH}" | cut -f1)"
echo ""
echo "To upload to S3:"
echo "  aws s3 cp ${ZIP_PATH} s3://\${BUCKET_NAME}/blog-lambda-source.zip"