Commit bf140a70 by Ivan

feat: update dockerfile

parent dec5337b
......@@ -13,6 +13,7 @@
# Ignore all default key files.
/config/master.key
/config/credentials/*.key
/registry_password.key
# Ignore all logfiles and tempfiles.
/log/*
......
......@@ -32,6 +32,7 @@
# Ignore master key for decrypting credentials and more.
/config/master.key
/registry_password.key
# Vite Ruby
/public/vite*
......
......@@ -10,8 +10,9 @@
# Use a GITHUB_TOKEN if private repositories are needed for the image
# GITHUB_TOKEN=$(gh config get -h github.com oauth_token)
# Grab the registry password from ENV
KAMAL_REGISTRY_PASSWORD=$KAMAL_REGISTRY_PASSWORD
# Read the Docker registry password from a local key file
# Put your actual Docker Hub password (or access token) in registry_password.key; it is excluded from git
KAMAL_REGISTRY_PASSWORD=$(cat registry_password.key)
# Improve security by using a password manager. Never check config/master.key into git!
RAILS_MASTER_KEY=$(cat config/master.key)
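Since `.kamal/secrets` now reads the registry password from `registry_password.key` (which this commit adds to both `.gitignore` and `.dockerignore`), that file has to exist locally before running a deploy. A minimal sketch of creating it, assuming a Docker Hub access token is used; the token value below is a placeholder:

```bash
# Write the Docker Hub access token into the key file (placeholder value).
printf '%s' 'your-docker-hub-access-token' > registry_password.key
chmod 600 registry_password.key

# Confirm git will never pick it up.
git check-ignore -v registry_password.key
```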
# syntax=docker/dockerfile:1
# check=error=true
# This Dockerfile is designed for production, not development. Use with Kamal or build'n'run by hand:
# docker build -t img_manager .
# docker run -d -p 80:80 -e RAILS_MASTER_KEY=<value from config/master.key> --name img_manager img_manager
# For a containerized dev environment, see Dev Containers: https://guides.rubyonrails.org/getting_started_with_devcontainer.html
# This Dockerfile is designed for Git-based deployment with Kamal
# It pulls code from Git during deployment and uses external volumes for code, build artifacts, and data
# Make sure RUBY_VERSION matches the Ruby version in .ruby-version
ARG RUBY_VERSION=3.2.2
FROM docker.io/library/ruby:$RUBY_VERSION-slim AS base
FROM docker.io/library/ruby:$RUBY_VERSION-slim
# Rails app lives here
WORKDIR /rails
# Install base packages
# Install base packages including Git for code pulling
RUN apt-get update -qq && \
apt-get install --no-install-recommends -y curl libjemalloc2 libvips sqlite3 && \
rm -rf /var/lib/apt/lists /var/cache/apt/archives
apt-get install --no-install-recommends -y \
curl \
libjemalloc2 \
libvips \
sqlite3 \
nodejs \
npm \
netcat-openbsd \
git \
build-essential \
pkg-config \
&& rm -rf /var/lib/apt/lists /var/cache/apt/archives
# Set production environment
ENV RAILS_ENV="production" \
......@@ -25,43 +32,35 @@ ENV RAILS_ENV="production" \
BUNDLE_PATH="/usr/local/bundle" \
BUNDLE_WITHOUT="development"
# Throw-away build stage to reduce size of final image
FROM base AS build
# Install packages needed to build gems
RUN apt-get update -qq && \
apt-get install --no-install-recommends -y build-essential git pkg-config && \
rm -rf /var/lib/apt/lists /var/cache/apt/archives
# Install application gems
COPY Gemfile Gemfile.lock ./
RUN bundle install && \
rm -rf ~/.bundle/ "${BUNDLE_PATH}"/ruby/*/cache "${BUNDLE_PATH}"/ruby/*/bundler/gems/*/.git && \
bundle exec bootsnap precompile --gemfile
# Copy application code
COPY . .
# Precompile bootsnap code for faster boot times
RUN bundle exec bootsnap precompile app/ lib/
# Final stage for app image
FROM base
# Copy built artifacts: gems, application
COPY --from=build "${BUNDLE_PATH}" "${BUNDLE_PATH}"
COPY --from=build /rails /rails
# Create directories for mounted volumes
RUN mkdir -p \
/rails/code \
/rails/storage \
/rails/public/uploads \
/rails/public/assets \
/rails/public/vite \
/rails/node_modules \
/rails/tmp \
/rails/log \
/rails/tmp/pids \
/rails/tmp/cache \
/rails/tmp/sockets
# Declare volumes for persistent storage
VOLUME ["/rails/code", "/rails/storage", "/rails/public/uploads", "/rails/public/vite", "/rails/node_modules", "/rails/log", "/rails/tmp"]
# Copy entrypoint script
COPY bin/docker-entrypoint /rails/bin/
RUN chmod +x /rails/bin/docker-entrypoint
# Run and own only the runtime files as a non-root user for security
RUN groupadd --system --gid 1000 rails && \
useradd rails --uid 1000 --gid 1000 --create-home --shell /bin/bash && \
chown -R rails:rails db log storage tmp
chown -R rails:rails /rails
USER 1000:1000
# Entrypoint prepares the database.
# Entrypoint pulls code, installs dependencies, and prepares the application
ENTRYPOINT ["/rails/bin/docker-entrypoint"]
# Start server via Thruster by default, this can be overwritten at runtime
......
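The `bin/docker-entrypoint` script itself is not part of this commit, but given the comment above and the `GIT_REPOSITORY`, `GIT_BRANCH`, `FORCE_*`, and `SKIP_DB_PREPARATION` variables defined further down in `config/deploy.yml`, its job is presumably along these lines. A rough sketch of such a script, assumed rather than the committed version:

```bash
#!/bin/bash
set -e

# Clone into the code volume on first boot, otherwise fast-forward it.
if [ ! -d /rails/code/.git ]; then
  git clone --branch "${GIT_BRANCH:-main}" "$GIT_REPOSITORY" /rails/code
else
  git -C /rails/code fetch origin "${GIT_BRANCH:-main}"
  git -C /rails/code reset --hard "origin/${GIT_BRANCH:-main}"
fi
cd /rails/code

# Install dependencies only when forced or missing.
if [ "$FORCE_BUNDLE_INSTALL" = "true" ] || ! bundle check > /dev/null 2>&1; then
  bundle install
fi
if [ "$FORCE_NPM_INSTALL" = "true" ] || [ ! -d node_modules ]; then
  npm install
fi
if [ "$FORCE_VITE_BUILD" = "true" ]; then
  bundle exec vite build
fi

# Prepare the database unless explicitly skipped.
if [ "$SKIP_DB_PREPARATION" != "true" ]; then
  bin/rails db:prepare
fi

# Hand off to the container's CMD (e.g. Thruster/Puma).
exec "$@"
```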
......@@ -35,7 +35,7 @@ module Authentication
def request_authentication
session[:return_to_after_authenticating] = request.url
redirect_to new_sessions_path
redirect_to new_session_path
end
def after_authentication_url
......
......@@ -10,7 +10,7 @@ class PasswordsController < ApplicationController
PasswordsMailer.reset(user).deliver_later
end
redirect_to new_sessions_path, notice: "Password reset instructions sent (if user with that email address exists)."
redirect_to new_session_path, notice: "Password reset instructions sent (if user with that email address exists)."
end
def edit
......@@ -18,7 +18,7 @@ class PasswordsController < ApplicationController
def update
if @user.update(params.permit(:password, :password_confirmation))
redirect_to new_sessions_path, notice: "Password has been reset."
redirect_to new_session_path, notice: "Password has been reset."
else
redirect_to edit_password_path(params[:token]), alert: "Passwords did not match."
end
......
#!/bin/bash
# This script sets up the required directories and permissions on the server
# Run this script on the server before deploying
echo "Setting up directories and permissions for img-manager..."
# Create all required directories
mkdir -p /root/img_manager/code
mkdir -p /root/img_manager/storage
mkdir -p /root/img_manager/uploads
mkdir -p /root/img_manager/public/vite
mkdir -p /root/img_manager/node_modules
mkdir -p /root/img_manager/logs
mkdir -p /root/img_manager/tmp
# Set very permissive permissions
chmod -R 777 /root/img_manager
# Create empty database files with proper permissions
touch /root/img_manager/storage/production.sqlite3
touch /root/img_manager/storage/production_cache.sqlite3
touch /root/img_manager/storage/production_queue.sqlite3
touch /root/img_manager/storage/production_cable.sqlite3
chmod 666 /root/img_manager/storage/*.sqlite3
# Initialize the database files with schema_migrations tables
echo "Initializing database files with schema_migrations tables..."
for db_file in /root/img_manager/storage/production.sqlite3 /root/img_manager/storage/production_cache.sqlite3 /root/img_manager/storage/production_queue.sqlite3 /root/img_manager/storage/production_cable.sqlite3; do
sqlite3 "$db_file" "CREATE TABLE IF NOT EXISTS schema_migrations (version varchar(255) NOT NULL); CREATE UNIQUE INDEX IF NOT EXISTS unique_schema_migrations ON schema_migrations (version);" || echo "Failed to initialize $db_file"
done
echo "Checking SELinux status..."
if command -v getenforce &> /dev/null; then
selinux_status=$(getenforce)
echo "SELinux status: $selinux_status"
if [ "$selinux_status" == "Enforcing" ]; then
echo "SELinux is enforcing. Setting proper contexts..."
# Set SELinux context for the directories
if command -v chcon &> /dev/null; then
chcon -Rt svirt_sandbox_file_t /root/img_manager
else
echo "chcon command not found. Unable to set SELinux context."
fi
fi
else
echo "getenforce command not found. Unable to check SELinux status."
fi
echo "Directory permissions:"
ls -la /root/img_manager
echo "Storage directory permissions:"
ls -la /root/img_manager/storage
echo "Setup complete!"
......@@ -21,21 +21,21 @@ test:
database: storage/test.sqlite3
# Store production database in the storage/ directory, which by default
# is mounted as a persistent Docker volume in config/deploy.yml.
# Store production database in the /rails/storage/ directory with absolute paths
# This ensures the database files are found regardless of current directory
production:
primary:
<<: *default
database: storage/production.sqlite3
database: /rails/storage/production.sqlite3
cache:
<<: *default
database: storage/production_cache.sqlite3
database: /rails/storage/production_cache.sqlite3
migrations_paths: db/cache_migrate
queue:
<<: *default
database: storage/production_queue.sqlite3
database: /rails/storage/production_queue.sqlite3
migrations_paths: db/queue_migrate
cable:
<<: *default
database: storage/production_cable.sqlite3
database: /rails/storage/production_cable.sqlite3
migrations_paths: db/cable_migrate
......@@ -2,12 +2,12 @@
service: img_manager
# Name of the container image.
image: your-user/img_manager
image: mumumumushu/img_manager
# Deploy to these servers.
servers:
web:
- 192.168.0.1
- 45.78.59.154
# job:
# hosts:
# - 192.168.0.1
......@@ -18,14 +18,16 @@ servers:
#
# Note: If using Cloudflare, set encryption mode in SSL/TLS setting to "Full" to enable CF-to-app encryption.
proxy:
ssl: true
host: app.example.com
# ssl: true
host: 45.78.59.154
# If you have a domain name, use it instead of the IP address
# host: your-domain.com
# Credentials for your image host.
registry:
# Specify the registry server, if you're not using Docker Hub
# server: registry.digitalocean.com / ghcr.io / ...
username: your-user
username: mumumumushu
# Always use an access token rather than real password when possible.
password:
......@@ -35,9 +37,26 @@ registry:
env:
secret:
- RAILS_MASTER_KEY
# Add Git credentials if needed for private repositories
# - GIT_CREDENTIALS
clear:
# Git repository configuration
GIT_REPOSITORY: http://git.tallty.com/mumumumushu/img-manager.git
GIT_BRANCH: main
# Force-rebuild flags (set to true to reinstall gems/node modules or rebuild assets on the next deploy)
FORCE_BUNDLE_INSTALL: true
FORCE_NPM_INSTALL: true
FORCE_VITE_BUILD: true
# Database configuration
RAILS_ENV: production
DATABASE_URL: sqlite3:///rails/storage/production.sqlite3
# Skip database preparation (creation and migrations) during startup to avoid SQLite issues
SKIP_DB_PREPARATION: true
# Run the Solid Queue Supervisor inside the web server's Puma process to do jobs.
# When you start using multiple servers, you should split out job processing to a dedicated machine.
SOLID_QUEUE_IN_PUMA: true
# Set number of processes dedicated to Solid Queue (default: 1)
......@@ -47,11 +66,10 @@ env:
# WEB_CONCURRENCY: 2
# Match this to any external database server to configure Active Record correctly
# Use img_manager-db for a db accessory server on same machine via local kamal docker network.
# DB_HOST: 192.168.0.2
# Log everything from Rails
# RAILS_LOG_LEVEL: debug
RAILS_LOG_LEVEL: debug
# Aliases are triggered with "bin/kamal <alias>". You can overwrite arguments on invocation:
# "bin/kamal logs -r job" will tail logs from the first server in the job section.
......@@ -60,18 +78,47 @@ aliases:
shell: app exec --interactive --reuse "bash"
logs: app logs -f
dbc: app exec --interactive --reuse "bin/rails dbconsole"
assets: app exec --interactive --reuse "bundle exec vite build"
migrate: app exec --interactive --reuse "bin/rails db:migrate"
# Use a persistent storage volume for sqlite database files and local Active Storage files.
# Recommended to change this to a mounted volume path that is backed up off server.
# Use persistent volumes to keep code, uploaded files, and databases outside the container
# These volumes are mounted into the container and persist between deployments
volumes:
- "img_manager_storage:/rails/storage"
# Git code repository storage
- "/root/img_manager/code:/rails/code"
# Database storage with simpler configuration (no Z flag to avoid SELinux issues)
- "/root/img_manager/storage:/rails/storage"
# Uploaded files storage
- "/root/img_manager/uploads:/rails/public/uploads"
# Vite assets storage
- "/root/img_manager/public/vite:/rails/public/vite"
# Node modules storage
- "/root/img_manager/node_modules:/rails/node_modules"
# Logs storage
- "/root/img_manager/logs:/rails/log"
# Tmp directory for pids and other temporary files
- "/root/img_manager/tmp:/rails/tmp"
# Bridge fingerprinted assets, like JS and CSS, between versions to avoid
# hitting 404 on in-flight requests. Combines all files from new and old
# version inside the asset_path.
asset_path: /rails/public/assets
# Disable asset bridging for now to fix deployment issues
asset_path: false
# Deployment configuration for better reliability
# Increase the time to wait for a container to become ready
# deploy_timeout: 180
# # Increase the time to wait for a container to drain
# drain_timeout: 60
# # Increase the time to wait for a container to boot after it is running
# readiness_delay: 30
# Note: Run the setup script manually before deployment:
# scp ./bin/setup_volumes.sh root@45.78.59.154:/root/setup_volumes.sh
# ssh root@45.78.59.154 'chmod +x /root/setup_volumes.sh && /root/setup_volumes.sh'
# Configure the image builder.
builder:
......
dckr_pat_3-xxxx
\ No newline at end of file