Compare commits


No commits in common. "b9d4623d63d221aeeab1f787a1d4124767a0ad87" and "621b9f006ea522d058cec4a0a136d94eae4738f4" have entirely different histories.

9 changed files with 51 additions and 453 deletions


@@ -1,32 +0,0 @@
---
name: commit-message-generator
description: Generate appropriate commit messages based on Git diffs
---
## Prerequisites
- This Skill retrieves Git diffs and suggests meaningful commit messages
- Message format should follow Conventional Commits
- Commit messages should be in English
- **Never perform Git commit or Git push**
## Steps
1. Run `git status` to check modified files
2. Retrieve diffs with `git diff` or `git diff --cached`
3. Analyze the diff content and determine if changes should be split into multiple commits
4. For each logical group of changes:
- List the target files
- Generate a message in English compliant with Conventional Commits
- Suggest the command: `git add <files> && git commit -m "<message>"`
5. If changes are extensive and should be split, provide:
- Rationale for the split
- Multiple commit suggestions with their respective target files and messages
## Commit Splitting Guidelines
- Split commits when changes span multiple logical concerns (e.g., feature + refactoring)
- Group related files that serve the same purpose
- Keep each commit focused on a single, atomic change
## Notes
- **This Skill must never execute `git commit` or `git push`**
- Only suggest commands; execution is entirely at the user's discretion
- Users must explicitly perform commits and pushes themselves
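
As a concrete illustration of step 4, a suggested command might look like the following sketch; the file paths and message are hypothetical, not taken from any real diff:

```bash
# Hypothetical suggestion the Skill could emit for one grouped change;
# paths and message are illustrative only.
git add cmd/lambda/main.go docker/Dockerfile && \
  git commit -m "refactor(lambda): simplify clone and archive steps"
```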

.gitignore

@@ -1 +0,0 @@
*.zip


@@ -1,20 +0,0 @@
version: 0.2
phases:
pre_build:
commands:
- echo Logging in to Amazon ECR...
- aws ecr get-login-password --region $AWS_DEFAULT_REGION | docker login --username AWS --password-stdin $ECR_REPOSITORY_URI
- IMAGE_TAG=$(date +%s)
- echo "Image tag will be ${IMAGE_TAG}"
build:
commands:
- echo Build started on `date`
- echo Building Docker image for ARM64/Lambda...
- docker build --platform linux/arm64 -f docker/Dockerfile -t $ECR_REPOSITORY_URI:$IMAGE_TAG -t $ECR_REPOSITORY_URI:latest .
post_build:
commands:
- echo Build completed on `date`
- echo Pushing Docker images...
- docker push $ECR_REPOSITORY_URI:$IMAGE_TAG
- docker push $ECR_REPOSITORY_URI:latest
- echo "Image pushed with tags ${IMAGE_TAG} and latest"


@@ -11,7 +11,6 @@ import (
"os"
"os/exec"
"path/filepath"
"strings"
"github.com/aws/aws-lambda-go/lambda"
"github.com/aws/aws-sdk-go-v2/aws"
@@ -35,7 +34,6 @@ type Response struct {
}
var commandRunner = exec.Command
var commandRunnerContext = exec.CommandContext
// verifySignature computes an HMAC using the provided secret and compares it to the incoming signature.
func verifySignature(secret, body, signatureHeader string) bool {
@@ -52,15 +50,6 @@ func verifySignature(secret, body, signatureHeader string) bool {
return hmac.Equal([]byte(receivedSig), []byte(expectedSig))
}
func getHeader(headers map[string]string, key string) string {
for k, v := range headers {
if strings.EqualFold(k, key) {
return v
}
}
return ""
}
func handleRequest(ctx context.Context, event json.RawMessage) (Response, error) {
// For demonstration, assume the event JSON includes a "body" and "headers" map.
var req struct {
@@ -78,7 +67,7 @@ func handleRequest(ctx context.Context, event json.RawMessage) (Response, error)
return Response{StatusCode: 500, Headers: map[string]string{"Content-Type": "application/json"}, Body: "{\"message\":\"Server configuration error\"}"}, fmt.Errorf("WEBHOOK_SECRET is not set")
}
signature := getHeader(req.Headers, "X-Hub-Signature-256")
signature := req.Headers["X-Hub-Signature-256"] // adjust this header name as appropriate.
if signature == "" || !verifySignature(secret, req.Body, signature) {
log.Println("Signature verification failed")
return Response{StatusCode: 401, Headers: map[string]string{"Content-Type": "application/json"}, Body: "{\"message\":\"Unauthorized\"}"}, fmt.Errorf("signature verification failed")
@@ -106,42 +95,47 @@ func main() {
}
func runDeploymentProcess(ctx context.Context) error {
cfg, err := loadConfig()
if err != nil {
return fmt.Errorf("load config: %w", err)
log.Printf("Configuration error: %v", err)
return err
}
// Create a unique temp directory for this run
repoDir, err := os.MkdirTemp("", "repo-*")
if err != nil {
return fmt.Errorf("create temp directory: %w", err)
log.Printf("Error creating temporary directory: %v", err)
return err
}
defer os.RemoveAll(repoDir)
zipFilePath := filepath.Join(repoDir, "source.zip")
// 1. Clone the repository
if err := cloneRepository(ctx, cfg.RepoURL, cfg.RepoBranch, repoDir); err != nil {
log.Printf("Failure in cloning: %v", err)
return err
}
// 2. Create a ZIP archive of the repository (without .git)
// 2. Create a ZIP archive of the repository
if err := createZipArchive(ctx, repoDir, zipFilePath); err != nil {
log.Printf("Failure in creating ZIP archive: %v", err)
return err
}
// 3. Upload the ZIP file to S3
awsCfg, err := config.LoadDefaultConfig(ctx, config.WithRegion(cfg.AWSRegion))
cfg_s3, err := config.LoadDefaultConfig(ctx, config.WithRegion(cfg.AWSRegion))
if err != nil {
return fmt.Errorf("load AWS config: %w", err)
log.Printf("Error loading configuration: %v", err)
return err
}
s3Client := s3.NewFromConfig(awsCfg)
s3Client := s3.NewFromConfig(cfg_s3)
uploader := manager.NewUploader(s3Client)
if err := uploadToS3WithUploader(ctx, zipFilePath, cfg.S3Bucket, cfg.S3Key, uploader); err != nil {
log.Printf("Failure in uploading to S3: %v", err)
return err
}
log.Println("Deployment process completed successfully")
return nil
}
@@ -175,28 +169,28 @@ func loadConfig() (*Config, error) {
}, nil
}
func cloneRepository(ctx context.Context, repoURL, repoBranch, repoDir string) error {
log.Printf("Cloning repository (branch=%s)...", repoBranch)
cloneCmd := commandRunnerContext(ctx, "git", "clone", "--depth", "1", "--single-branch", "--branch", repoBranch, repoURL, repoDir)
func cloneRepository(_ context.Context, repoURL, repoBranch, repoDir string) error {
cloneCmd := commandRunner("git", "clone", "--branch", repoBranch, repoURL, repoDir)
cloneCmd.Stdout = os.Stdout
cloneCmd.Stderr = os.Stderr
cloneCmd.Env = append(os.Environ(), "GIT_TERMINAL_PROMPT=0")
fmt.Printf("Cloning repository %s (branch %s)...\n", repoURL, repoBranch)
if err := cloneCmd.Run(); err != nil {
return fmt.Errorf("git clone: %w", err)
return fmt.Errorf("error cloning repository: %v", err)
}
log.Println("Repository cloned successfully")
fmt.Println("Repository cloned successfully.")
return nil
}
func createZipArchive(ctx context.Context, repoDir, zipFilePath string) error {
log.Println("Creating ZIP archive (using git archive)...")
archiveCmd := commandRunnerContext(ctx, "git", "-C", repoDir, "archive", "--format=zip", "--output", zipFilePath, "HEAD")
archiveCmd.Stdout = os.Stdout
archiveCmd.Stderr = os.Stderr
if err := archiveCmd.Run(); err != nil {
return fmt.Errorf("git archive: %w", err)
func createZipArchive(_ context.Context, repoDir, zipFilePath string) error {
zipCmd := commandRunner("zip", "-r", zipFilePath, ".")
zipCmd.Dir = repoDir // Change to the cloned repo directory
zipCmd.Stdout = os.Stdout
zipCmd.Stderr = os.Stderr
fmt.Println("Creating ZIP archive of the repository...")
if err := zipCmd.Run(); err != nil {
return fmt.Errorf("error creating ZIP archive: %v", err)
}
log.Printf("ZIP archive created at %s", zipFilePath)
fmt.Printf("ZIP archive created at %s.\n", zipFilePath)
return nil
}
@@ -205,22 +199,26 @@ type Uploader interface {
}
func uploadToS3WithUploader(ctx context.Context, zipPath, bucket, key string, uploader Uploader) error {
// Open the ZIP file
f, err := os.Open(zipPath)
if err != nil {
return fmt.Errorf("open zip file: %w", err)
return fmt.Errorf("Error opening ZIP file: %v", err)
}
defer f.Close()
log.Printf("Uploading %s to s3://%s/%s...", zipPath, bucket, key)
// Upload the file to S3.
fmt.Printf("Uploading %s to s3://%s/%s...\n", zipPath, bucket, key)
result, err := uploader.Upload(ctx, &s3.PutObjectInput{
Bucket: aws.String(bucket),
Key: aws.String(key),
Body: f,
})
if err != nil {
return fmt.Errorf("upload to S3: %w", err)
return fmt.Errorf("failed to upload file: %v", err)
}
log.Printf("Successfully uploaded: %s", result.Location)
fmt.Printf("Successfully uploaded to %s\n", result.Location)
return nil
}
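
The HMAC check in verifySignature can be exercised end to end with a smoke test like the sketch below; SECRET, BODY, and API_ENDPOINT are assumptions, and the "sha256=" prefix follows the usual X-Hub-Signature-256 convention rather than anything shown in the diff:

```bash
# Minimal smoke-test sketch; SECRET, BODY, and API_ENDPOINT are assumptions.
SECRET='my-webhook-secret'
BODY='{"ref":"refs/heads/main"}'
# Compute the HMAC-SHA256 of the raw body, hex-encoded, prefixed "sha256=".
SIG="sha256=$(printf '%s' "$BODY" | openssl dgst -sha256 -hmac "$SECRET" | awk '{print $NF}')"
curl -sS -X POST "$API_ENDPOINT/forgejo-webhook" \
  -H 'Content-Type: application/json' \
  -H "X-Hub-Signature-256: $SIG" \
  -d "$BODY"
```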


@@ -1,12 +1,11 @@
FROM docker.io/golang:1.25.5-bookworm as build
FROM docker.io/golang:1.24.2-bookworm as build
WORKDIR /app
# NOTE: This Dockerfile assumes the build context is the repository root.
# Example: docker build -f docker/Dockerfile .
COPY go.mod go.sum ./
RUN go mod download
COPY . .
# Copy dependencies list
COPY ./app/go.mod ./
COPY ./app/go.sum ./
# Build with optional lambda.norpc tag
RUN go build -tags lambda.norpc -o main ./cmd/lambda
COPY ./app/main.go ./
RUN go build -tags lambda.norpc -o main main.go
# Copy artifacts to a clean image
FROM public.ecr.aws/lambda/provided:al2023
# Install git and zip using dnf (Amazon Linux 2023)
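
Given the `COPY ./app/go.mod ./` directives, the build context must be the directory containing app/ and docker/; a matching invocation might look like this sketch (the image tag is an assumption):

```bash
# assumption: run from the directory that contains app/ and docker/;
# the tag "blog-lambda:local" is illustrative.
docker build --platform linux/arm64 -f docker/Dockerfile -t blog-lambda:local .
```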


@@ -1,233 +0,0 @@
AWSTemplateFormatVersion: "2010-09-09"
Description: S3 -> CodePipeline -> CodeBuild(ARM) -> ECR pipeline for Blog Lambda
Parameters:
SourceBucketName:
Type: String
Default: blog-lambda-source-bucket
Description: S3 bucket name for source code
SourceObjectKey:
Type: String
Default: blog-lambda-source.zip
Description: S3 object key for source code archive
Resources:
SourceBucket:
Type: AWS::S3::Bucket
Properties:
BucketName: !Sub "${AWS::Region}-${AWS::AccountId}-${SourceBucketName}"
Tags:
- Key: Project
Value: Blog-Deployment
VersioningConfiguration:
Status: Enabled
NotificationConfiguration:
EventBridgeConfiguration:
EventBridgeEnabled: true
CodeBuildRole:
Type: AWS::IAM::Role
Properties:
AssumeRolePolicyDocument:
Version: "2012-10-17"
Statement:
- Effect: Allow
Principal:
Service: codebuild.amazonaws.com
Action: sts:AssumeRole
Policies:
- PolicyName: CodeBuildPolicy
PolicyDocument:
Version: "2012-10-17"
Statement:
- Effect: Allow
Action:
- logs:CreateLogGroup
- logs:CreateLogStream
- logs:PutLogEvents
Resource: "*"
- Effect: Allow
Action:
- ecr:GetAuthorizationToken
Resource: "*"
- Effect: Allow
Action:
- ecr:BatchCheckLayerAvailability
- ecr:InitiateLayerUpload
- ecr:UploadLayerPart
- ecr:CompleteLayerUpload
- ecr:PutImage
Resource:
Fn::ImportValue: BlogDeployment-RepositoryArn
- Effect: Allow
Action:
- s3:GetObject
- s3:PutObject
- s3:ListBucket
Resource:
- !Sub "arn:aws:s3:::codebuild-${AWS::Region}-${AWS::AccountId}-input-bucket"
- !Sub "arn:aws:s3:::codebuild-${AWS::Region}-${AWS::AccountId}-input-bucket/*"
- !GetAtt SourceBucket.Arn
- !Sub "${SourceBucket.Arn}/*"
BlogLambdaBuildProject:
Type: AWS::CodeBuild::Project
Properties:
Name: blog-lambda-build
ServiceRole: !GetAtt CodeBuildRole.Arn
Artifacts:
Type: CODEPIPELINE
Environment:
Type: ARM_CONTAINER
ComputeType: BUILD_GENERAL1_SMALL
Image: aws/codebuild/amazonlinux2-aarch64-standard:3.0
PrivilegedMode: true
EnvironmentVariables:
- Name: ECR_REPOSITORY_URI
Value:
Fn::ImportValue: BlogDeployment-RepositoryUri
- Name: AWS_DEFAULT_REGION
Value: !Ref AWS::Region
- Name: AWS_ACCOUNT_ID
Value: !Ref AWS::AccountId
Source:
Type: CODEPIPELINE
BuildSpec: ci/buildspec.yml
TimeoutInMinutes: 30
CodePipelineRole:
Type: AWS::IAM::Role
Properties:
AssumeRolePolicyDocument:
Version: "2012-10-17"
Statement:
- Effect: Allow
Principal:
Service: codepipeline.amazonaws.com
Action: sts:AssumeRole
Policies:
- PolicyName: CodePipelinePolicy
PolicyDocument:
Version: '2012-10-17'
Statement:
- Effect: Allow
Action:
- s3:GetBucketAcl
- s3:GetObjectTagging
- s3:GetObjectVersionTagging
- s3:GetObject
- s3:GetObjectVersion
- s3:PutObject
- s3:ListBucket
- s3:GetBucketLocation
- s3:GetBucketVersioning
Resource:
- !Sub "arn:aws:s3:::codebuild-${AWS::Region}-${AWS::AccountId}-input-bucket"
- !Sub "arn:aws:s3:::codebuild-${AWS::Region}-${AWS::AccountId}-input-bucket/*"
- !GetAtt SourceBucket.Arn
- !Sub "${SourceBucket.Arn}/*"
- Effect: Allow
Action:
- codebuild:StartBuild
- codebuild:BatchGetBuilds
Resource:
- !Sub "arn:aws:codebuild:${AWS::Region}:${AWS::AccountId}:build/*"
- !Sub "arn:aws:codebuild:${AWS::Region}:${AWS::AccountId}:project/*"
- Effect: Allow
Action:
- codepipeline:PutApprovalResult
- codepipeline:StartPipelineExecution
Resource: !Sub "arn:aws:codepipeline:${AWS::Region}:${AWS::AccountId}:*"
BlogLambdaPipeline:
Type: AWS::CodePipeline::Pipeline
Properties:
Name: blog-lambda-pipeline
PipelineType: V2
RoleArn: !GetAtt CodePipelineRole.Arn
ArtifactStore:
Type: S3
Location: !Sub "codebuild-${AWS::Region}-${AWS::AccountId}-input-bucket"
Stages:
- Name: Source
Actions:
- Name: S3Source
ActionTypeId:
Category: Source
Owner: AWS
Provider: S3
Version: "1"
Configuration:
S3Bucket: !Ref SourceBucket
S3ObjectKey: !Ref SourceObjectKey
PollForSourceChanges: false
OutputArtifacts:
- Name: SourceOutput
- Name: Build
Actions:
- Name: BuildAndPushImage
ActionTypeId:
Category: Build
Owner: AWS
Provider: CodeBuild
Version: "1"
InputArtifacts:
- Name: SourceOutput
Configuration:
ProjectName: !Ref BlogLambdaBuildProject
S3SourceChangeRule:
Type: AWS::Events::Rule
Properties:
Description: Trigger CodePipeline on S3 source update
EventPattern:
source:
- aws.s3
detail-type:
- Object Created
detail:
bucket:
name:
- !Ref SourceBucket
object:
key:
- !Ref SourceObjectKey
Targets:
- Arn: !Sub "arn:aws:codepipeline:${AWS::Region}:${AWS::AccountId}:${BlogLambdaPipeline}"
RoleArn: !GetAtt EventBridgeInvokePipelineRole.Arn
Id: CodePipelineTarget
EventBridgeInvokePipelineRole:
Type: AWS::IAM::Role
Properties:
AssumeRolePolicyDocument:
Version: '2012-10-17'
Statement:
- Effect: Allow
Principal:
Service: events.amazonaws.com
Action: sts:AssumeRole
Policies:
- PolicyName: AllowStartPipeline
PolicyDocument:
Version: '2012-10-17'
Statement:
- Effect: Allow
Action:
- codepipeline:StartPipelineExecution
Resource: !Sub "arn:aws:codepipeline:${AWS::Region}:${AWS::AccountId}:${BlogLambdaPipeline}"
Outputs:
SourceBucketName:
Description: S3 bucket for source code
Value: !Ref SourceBucket
Export:
Name: !Sub "${AWS::StackName}-SourceBucket"
PipelineName:
Description: CodePipeline name
Value: !Ref BlogLambdaPipeline
Export:
Name: !Sub "${AWS::StackName}-PipelineName"


@@ -14,24 +14,6 @@ Resources:
RepositoryName: !Ref RepositoryName
ImageScanningConfiguration:
ScanOnPush: true
LifecyclePolicy:
LifecyclePolicyText: |
{
"rules": [
{
"rulePriority": 1,
"description": "Keep last 10 images",
"selection": {
"tagStatus": "any",
"countType": "imageCountMoreThan",
"countNumber": 10
},
"action": {
"type": "expire"
}
}
]
}
RepositoryPolicyText:
Version: "2012-10-17"
Statement:
@@ -50,15 +32,3 @@ Outputs:
Value: !GetAtt Repository.RepositoryUri
Export:
Name: BlogDeployment-RepositoryUri
RepositoryArn:
Description: ARN of the ECR repository
Value: !GetAtt Repository.Arn
Export:
Name: BlogDeployment-RepositoryArn
RepositoryName:
Description: Name of the ECR repository
Value: !Ref RepositoryName
Export:
Name: BlogDeployment-RepositoryName


@@ -1,26 +1,10 @@
AWSTemplateFormatVersion: '2010-09-09'
Transform: AWS::Serverless-2016-10-31
Parameters:
StageName:
Type: String
Default: Prod
Description: Name of the API stage
SourceBucketName:
Type: String
Default: naputo-blog-source
Description: S3 bucket for blog source files
RepoURL:
Type: String
Default: "https://git.n-daisuke897.com/nakada0907/n-daisuke897-blog.git"
Description: Git repository URL
RepoBranch:
Type: String
Default: main
Description: Git repository branch
Description: Name of the API stage.
Resources:
@@ -43,11 +27,7 @@ Resources:
- Effect: Allow
Action:
- s3:PutObject
- s3:GetObject
- s3:ListBucket
Resource:
- !Sub "arn:aws:s3:::${SourceBucketName}"
- !Sub "arn:aws:s3:::${SourceBucketName}/*"
Resource: arn:aws:s3:::naputo-blog-source/*
- PolicyName: LambdaEcrImagePullPolicy
PolicyDocument:
Version: '2012-10-17'
@@ -61,30 +41,26 @@ Resources:
- ecr:BatchGetImage
- ecr:BatchCheckLayerAvailability
- ecr:GetDownloadUrlForLayer
Resource:
Fn::ImportValue: BlogDeployment-RepositoryArn
Resource: !Sub "arn:aws:ecr:${AWS::Region}:${AWS::AccountId}:repository/*"
ManagedPolicyArns:
- arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole
MyLambdaFunction:
Type: AWS::Serverless::Function
Properties:
FunctionName: blog-deployment-webhook-handler
PackageType: Image
ImageUri:
!Join
- ":"
- - !ImportValue BlogDeployment-RepositoryUri
- "latest"
Timeout: 300
MemorySize: 512
Architectures:
- arm64
Timeout: 30
MemorySize: 256
Environment:
Variables:
REPO_URL: !Ref RepoURL
REPO_BRANCH: !Ref RepoBranch
S3_BUCKET: !Ref SourceBucketName
REPO_URL: "https://git.n-daisuke897.com/nakada0907/n-daisuke897-blog.git"
REPO_BRANCH: "main"
S3_BUCKET: "naputo-blog-source"
S3_KEY: "source.zip"
WEBHOOK_SECRET:
Fn::Sub:
@@ -103,7 +79,6 @@ Resources:
MyApi:
Type: AWS::Serverless::Api
Properties:
Name: blog-deployment-webhook-api
StageName: !Ref StageName
EndpointConfiguration: REGIONAL
DefinitionBody:
@@ -129,22 +104,3 @@ Resources:
description: "Unauthorized - Signature verification failed"
'500':
description: "Server error - Deployment process failed"
Outputs:
ApiEndpoint:
Description: API Gateway endpoint URL for webhook
Value: !Sub "https://${MyApi}.execute-api.${AWS::Region}.amazonaws.com/${StageName}/forgejo-webhook"
Export:
Name: !Sub "${AWS::StackName}-ApiEndpoint"
LambdaFunctionArn:
Description: Lambda function ARN
Value: !GetAtt MyLambdaFunction.Arn
Export:
Name: !Sub "${AWS::StackName}-LambdaArn"
LambdaFunctionName:
Description: Lambda function name
Value: !Ref MyLambdaFunction
Export:
Name: !Sub "${AWS::StackName}-LambdaName"


@@ -1,39 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
ROOT_DIR="$(cd "$(dirname "$0")/.." && pwd)"
ARTIFACT_DIR="${ROOT_DIR}/artifacts"
ZIP_PATH="${ARTIFACT_DIR}/blog-lambda-source.zip"
echo "Building artifact for blog-lambda pipeline..."
echo "Root directory: ${ROOT_DIR}"
mkdir -p "${ARTIFACT_DIR}"
# Create temporary directory for staging files
tmpdir="$(mktemp -d)"
trap 'rm -rf "${tmpdir}"' EXIT
# Copy necessary files for CodeBuild
echo "Copying source files..."
cp -r "${ROOT_DIR}/cmd" "${tmpdir}/cmd"
cp -r "${ROOT_DIR}/docker" "${tmpdir}/docker"
cp -r "${ROOT_DIR}/ci" "${tmpdir}/ci"
cp "${ROOT_DIR}/go.mod" "${tmpdir}/go.mod"
cp "${ROOT_DIR}/go.sum" "${tmpdir}/go.sum"
# Create the ZIP archive
echo "Creating ZIP archive..."
(
cd "${tmpdir}"
zip -r "${ZIP_PATH}" . -x "*.git*" "*.DS_Store"
)
# Display artifact info
echo ""
echo "✅ Artifact created successfully!"
echo " Path: ${ZIP_PATH}"
echo " Size: $(du -h "${ZIP_PATH}" | cut -f1)"
echo ""
echo "To upload to S3:"
echo " aws s3 cp ${ZIP_PATH} s3://\${BUCKET_NAME}/blog-lambda-source.zip"