reorganize source (src): use a rootfs/ subdirectory to copy any files into the image
moved distros.csv to the root of the repo; reorganized packages to use *.system.pkgs or system/*.pkgs
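For orientation, a build source directory under this new layout looks roughly like the sketch below; only rootfs/, system/*.pkgs, *.system.pkgs, packages.lst, init/init.sh, init/build.env and an optional Dockerfile are conventions taken from this commit, the concrete names are illustrative:

    my-source/
        rootfs/                # copied verbatim into the image root (COPY .src/rootfs/ /)
            opt/env/run.env
        system/
            tools.pkgs         # one package name per line
        extra.system.pkgs      # alternative *.system.pkgs naming, also one name per line
        packages.lst           # bulk package list installed in a single pass
        init/
            init.sh            # optional build-time init script
            build.env          # optional build-only environment
        Dockerfile             # optional extra Dockerfile code appended to the template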
parent: abea829cab   commit: d89001bc51
@ -1,4 +1,4 @@
/archive/
archive/
/build.log
_opt/
.src
@ -7,3 +7,6 @@ mnt/
logs/
Dockerfile
.env
.vscode/extensions.json
.vscode/settings.json
@ -1,6 +1,13 @@
# syntax=docker/dockerfile:latest
ARG BASE_IMAGE
ARG LINUX_DISTRO=alpine
% if [[ "$BASE_IMAGE_COPY" ]]; then
FROM <% $LINUX_DISTRO %>
COPY --from=<% $BASE_IMAGE %> / /
% else
FROM $BASE_IMAGE
% fi

ARG BASE_IMAGE
ARG VERBOSE
ARG REBUILD

@ -17,13 +24,25 @@ eot
.INCLUDE packages.run
% fi

% if [[ ( "$BUILD_SRC" && ! $BUILD_SRC = "_core_" ) ]]; then
% if [[ ( -d "$BUILD_SRC/rootfs" && ! $BUILD_SRC = "_core_" ) ]]; then
COPY .src/rootfs/ /
% fi

% if [[ ( -f "$BUILD_SRC/init/init.sh" && ! $BUILD_SRC = "_core_" ) ]]; then
.INCLUDE init.run
% fi

# appends any additional custom Dockerfile code in source
.INCLUDE "$BDIR/.src/Dockerfile"

% if [[ $VOLUME_DIRS ]]; then
VOLUME <% $VOLUME_DIRS %>
% fi

# default command
ENTRYPOINT ["/opt/bin/entrypoint"]
# default
WORKDIR /opt
WORKDIR <% ${WORKDIR:-/opt} %>
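To make the template syntax above concrete: bash-tpl treats lines starting with % as template script and <% ... %> as expansions, so with BASE_IMAGE_COPY set the FROM block renders roughly as follows (alpine is the template default for LINUX_DISTRO; the base image name is an illustrative value):

    FROM alpine
    COPY --from=myregistry/core:latest / /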
@ -1,14 +1,13 @@
|
|||
#!/bin/sh
|
||||
%
|
||||
if [ "$REBUILD" = "core" ]; then
|
||||
echo "## Busting Cache, Forcing Rebuild starting at core RUN "
|
||||
echo $(date)
|
||||
echo "# Busting Cache, Forcing Rebuild starting at core RUN "
|
||||
echo # $(date)
|
||||
fi
|
||||
%
|
||||
if ! { [ "$VERBOSE" = "core" ] || [ "$VERBOSE" = "all" ]; }; then unset VERBOSE; fi
|
||||
|
||||
mkdir -p /opt
|
||||
/bin/cp -R -f -p opt/. /opt
|
||||
/bin/cp -R -f -p rootfs/. /
|
||||
. /opt/lib/verbose.lib
|
||||
|
||||
echo "**************************************"
|
||||
|
|
|
@ -1,6 +1,17 @@
|
|||
#!/bin/bash
|
||||
echo "------------ creating Dockfile from template in Dockerfile.d -------------"
|
||||
|
||||
mkdir -p $BDIR/.src
|
||||
[[ ! -f $BDIR/.src/Dockerfile ]] && echo "#dummy file" > $BDIR/.src/Dockerfile
|
||||
|
||||
[[ -f $APPEND_BUILD_ENV ]] && source "$APPEND_BUILD_ENV" && echo using $APPEND_BUILD_ENV when building Dockerfile && cat $APPEND_BUILD_ENV && echo -e "\n-----"
|
||||
|
||||
# echo build source?: $BUILD_SRC packages? $packages
|
||||
pushd $(dirname "$(realpath "$BASH_SOURCE")") > /dev/null || return 1
|
||||
source <(../lib/bash-tpl Dockerfile.tpl ) | grep -v '^# ' > ../Dockerfile
|
||||
echo "------------ Created Dockfile from template in Dockerfile.d -------------"
|
||||
if [[ $VERBOSE ]]; then
|
||||
echo -e "\n#### Dockerfile to use from template ####\n"
|
||||
cat $BDIR/Dockerfile
|
||||
echo -e "\n#### end Dockerfile to use from template ####"
|
||||
fi
|
||||
echo "done ------- creating Dockfile from template in Dockerfile.d -------------"
|
||||
popd > /dev/null || return 2
|
||||
|
|
|
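In other words, ../lib/bash-tpl compiles Dockerfile.tpl into a bash generator script; sourcing that script emits the rendered Dockerfile, and grep -v '^# ' strips full-line comments before the result is written to $BDIR/Dockerfile. Spelled out as two steps, a rough equivalent (illustrative, not how the repo actually invokes it):

    ../lib/bash-tpl Dockerfile.tpl > /tmp/dockerfile.gen.sh   # compile the template into a generator script
    source /tmp/dockerfile.gen.sh | grep -v '^# ' > ../Dockerfile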
@ -0,0 +1 @@
ENTRYPOINT [ ]
@ -15,10 +15,6 @@ echo sourcing core-run.env
|
|||
quiet cat /opt/core_run.env
|
||||
source /opt/core_run.env
|
||||
|
||||
echo copying source init/opt directory to image /opt directory
|
||||
quiet ls -la opt/
|
||||
/bin/cp -R -f -p opt/. /opt
|
||||
|
||||
[[ -f ./build.env ]] && source ./build.env && echo loaded build.env at /init/build.env in source
|
||||
|
||||
if [[ -f ./init.sh ]]; then
|
||||
|
@ -34,13 +30,13 @@ if [[ -f ./init.sh ]]; then
|
|||
echo "############## Finished running init.sh build script #########################"
|
||||
fi
|
||||
|
||||
# map host id now based on build environment
|
||||
if [[ $VOLUME_DIRS ]]; then
|
||||
echo "*** creating and configuring volume directories ***"
|
||||
echo $VOLUME_DIRS
|
||||
mkdir -p $VOLUME_DIRS
|
||||
$BIN_DIR/map-host-id
|
||||
chmod -R g+rw $VOLUME_DIRS
|
||||
fi
|
||||
# # map host id now based on build environment
|
||||
# if [[ $VOLUME_DIRS ]]; then
|
||||
# echo "*** creating and configuring volume directories ***"
|
||||
# echo $VOLUME_DIRS
|
||||
# mkdir -p $VOLUME_DIRS
|
||||
# $BIN_DIR/map-host-id
|
||||
# chmod -R g+rw $VOLUME_DIRS
|
||||
# fi
|
||||
|
||||
echo -e "\n ************* End Initialzation ************************"
|
|
@ -7,26 +7,38 @@
|
|||
echo "************* PACKAGE INSTALLATION ***********************"
|
||||
if ! { [ "$VERBOSE" = "packages" ] || [ "$VERBOSE" = "all" ]; }; then unset VERBOSE; fi
|
||||
source /opt/lib/verbose.lib
|
||||
export BUILDING=true
|
||||
export PATH=$PATH:/sbin:/usr/sbin:/usr/local/sbin
|
||||
echo sourcing core_run.env
|
||||
quiet cat /opt/core_run.env
|
||||
source /opt/core_run.env
|
||||
echo Distro is $LINUX_DISTRO
|
||||
echo package installer command for this build: "$INSTALL_PKGS"
|
||||
echo package update command for this build: "$UPDATE_PKGS"
|
||||
if [[ -f ./packages.lst || -f ./packages.sh ]]; then
|
||||
if [[ -f ./packages.lst ]]; then
|
||||
echo "----- Installing Packages ---------------"
|
||||
_pkgs=$(< ./packages.lst)
|
||||
echo $_pkgs
|
||||
echo ....
|
||||
silence ${INSTALL_PKGS} ${_pkgs}
|
||||
echo "done ----- Installing Packages ---------------"
|
||||
list=$(ls *system.pkgs 2> /dev/null)
|
||||
list+=" $(ls ./system/*.pkgs 2> /dev/null)"
|
||||
quiet echo list of system package files to install: $list
|
||||
for file in $list; do
|
||||
[ -f "$file" ] || break
|
||||
echo "----- Installing System Packages from $file ---------------"
|
||||
while IFS= read -r pkg || [ -n "$pkg" ]; do
|
||||
echo installing: $pkg
|
||||
silence $INSTALL_PKGS $pkg
|
||||
done < ./$file
|
||||
echo "done ----- Installing System Packages from $file ---------------"
|
||||
done
|
||||
|
||||
[[ -f $ENV_DIR/run.env ]] && echo "sourcing $ENV_DIR/run.env" && source $ENV_DIR/run.env
|
||||
|
||||
if [[ -f ./repositories.sh ]]; then
|
||||
echo "---- Running custom repository install script repositories.sh -----"
|
||||
source ./repositories.sh
|
||||
echo "done ---- Running repository installation script repositories.sh -----"
|
||||
fi
|
||||
if [[ -f ./packages.sh ]]; then
|
||||
echo "---- Running package installation script packages.sh -----"
|
||||
/bin/bash -l ./packages.sh
|
||||
echo "---- Running custom package installation script packages.sh -----"
|
||||
source ./packages.sh
|
||||
echo "done ---- Running package installation script packages.sh -----"
|
||||
fi
|
||||
else
|
||||
echo neither packages.lst nor packages.sh in source
|
||||
echo nothing to install!
|
||||
fi
|
||||
# TODO run a package cache removal based on distro
|
||||
echo "********************************"
|
|
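The *.system.pkgs and system/*.pkgs files consumed by the loop above are plain lists, one package per line, installed one at a time. A minimal illustrative example, say system/network.pkgs (file name and package names are assumptions, not from the repo):

    curl
    openssh-client
    ca-certificates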
@ -0,0 +1 @@
ENTRYPOINT [ ]
build
|
@ -21,9 +21,11 @@ case "$1" in
|
|||
load_env_file)
|
||||
echo -e "@@@@@@ loading build environment file for external use @@@@@@"
|
||||
BUILD_EFILE=$(echo -- "$@" | grep -oP -- '(?<=-e )[^ ]*')
|
||||
source_env_file "$BUILD_EFILE"
|
||||
if source_env_file "$BUILD_EFILE"; then
|
||||
echo -e "@@@@@@@@@@@@@@@@@ returning to calling script @@@@@@@@@@@@@@@"
|
||||
return $?
|
||||
else
|
||||
return 1
|
||||
fi
|
||||
;;
|
||||
build_src) shift 1; get_build_src "$@"; return $? ;;
|
||||
help)
|
||||
|
@ -58,7 +60,7 @@ esac
|
|||
[[ -z "$PS1" ]] || no_prompt=true
|
||||
overwrite=true
|
||||
|
||||
while getopts 'a:b:c:d:e:f:g:hi:lnopr:s:t:u:v:' OPTION; do
|
||||
while getopts 'ya:b:c:d:e:f:g:hi:lnopr:s:t:u:v:j:' OPTION; do
|
||||
# echo processing: option:$OPTION argument:$OPTARG index:$OPTIND remaining:${@:$OPTIND}
|
||||
case "$OPTION" in
|
||||
a)
|
||||
|
@ -68,6 +70,10 @@ while getopts 'a:b:c:d:e:f:g:hi:lnopr:s:t:u:v:' OPTION; do
|
|||
# CUSTOM BASE IMAGE
|
||||
BASE_IMAGE=$OPTARG
|
||||
;;
|
||||
y)
|
||||
# CUSTOM BASE IMAGE
|
||||
BASE_IMAGE_COPY=true
|
||||
;;
|
||||
c)
|
||||
TRY_CMD=$OPTARG
|
||||
;;
|
||||
|
@ -97,6 +103,9 @@ while getopts 'a:b:c:d:e:f:g:hi:lnopr:s:t:u:v:' OPTION; do
|
|||
unset overwrite
|
||||
;;
|
||||
v)
|
||||
VOLUME=$OPTARG
|
||||
;;
|
||||
j)
|
||||
VERBOSE=$OPTARG
|
||||
;;
|
||||
l)
|
||||
|
@ -191,10 +200,8 @@ if [[ ! $no_prompt ]]; then
|
|||
[[ $REPLY != "y" ]] && echo -e "\n" && return 4
|
||||
fi
|
||||
|
||||
if ! source $BDIR/Dockerfile.d/create; then
|
||||
echo unable to create Dockerfile from template, aborting build
|
||||
return 3
|
||||
fi
|
||||
# cat $BDIR/Dockerfile | grep -b5 -a5 ENTRY
|
||||
# return
|
||||
|
||||
builder=default
|
||||
if [[ $TARGET == "publish" ]]; then
|
||||
|
@ -208,24 +215,39 @@ if [[ $TARGET == "publish" ]]; then
|
|||
popd > /dev/null || return 4
|
||||
fi
|
||||
|
||||
|
||||
# make a copy of build source locally in build directory
|
||||
if [[ ! $BUILD_SRC = "_core_" ]]; then
|
||||
# copy or bind build source directory to temporary .src/ subdirectory in build repo
|
||||
_env_dir=rootfs/opt/env
|
||||
[[ -d $BDIR/.src ]] && rm -rf $BDIR/.src
|
||||
[[ -d $BDIR/core/$_env_dir ]] && rm -rf $BDIR/core/$_env_dir
|
||||
if [[ $(which rsync 2> /dev/null ) ]]; then
|
||||
rsync -aAru ${BUILD_SRC:-src}/ $BDIR/.src
|
||||
rsync -aAru $BDIR/.src/$_env_dir/ $BDIR/core/$_env_dir > /dev/null 2>&1
|
||||
else
|
||||
echo no rsync, copying with cp
|
||||
/bin/cp -a ${BUILD_SRC:-src}/. $BDIR/.src > /dev/null 2>&1
|
||||
/bin/cp -a $BDIR/.src/rootfs/opt/env/. $BDIR/core/rootfs/opt/env > /dev/null 2>&1
|
||||
fi
|
||||
fi
|
||||
|
||||
echo run environment directory copied to core at $BDIR/core/$_env_dir
|
||||
ls -la $BDIR/core/$_env_dir
|
||||
|
||||
# create Dockerfile from template
|
||||
if ! source $BDIR/Dockerfile.d/create; then
|
||||
echo unable to create Dockerfile from template, aborting build
|
||||
return 3
|
||||
fi
|
||||
|
||||
if [[ -f $APPEND_BUILD_ENV ]]; then
|
||||
if [[ ! $BUILD_SRC = "_core_" ]]; then
|
||||
echo "------ Including custom build environment at $APPEND_BUILD_ENV -------"
|
||||
cat $APPEND_BUILD_ENV
|
||||
echo -e "\n--------------------"
|
||||
echo | tee -a "$BDIR/.src/init/build.env" > /dev/null
|
||||
tee -a "$BDIR/.src/init/build.env" > /dev/null < "$APPEND_BUILD_ENV"
|
||||
fi
|
||||
cat "$APPEND_BUILD_ENV" > "$BDIR/core/build.env"
|
||||
# run in subshell to not affect $USER
|
||||
/bin/bash <<"EOF"
|
||||
|
@ -243,6 +265,8 @@ fi
|
|||
|
||||
pushd "$BDIR" > /dev/null || return 3
|
||||
|
||||
export BUILDING=true
|
||||
|
||||
echo -e "\n\e[1;31m######### RUNNING THE DOCKER BUILD COMMAND ######################"
|
||||
echo running build command: docker buildx --builder ${builder} bake ${nocache} ${TARGET}
|
||||
echo -e "#################################################################\e[1;37m"
|
||||
|
|
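Given the updated getopts string (y takes no argument, j: takes a value), a run exercising the new options might look like this; the script uses return rather than exit, so it is sourced, and the image name and verbosity value are illustrative:

    source build -a myregistry/base:latest -y -j packages
    # -a  custom base image (BASE_IMAGE)
    # -y  copy that image's filesystem onto a fresh $LINUX_DISTRO layer (BASE_IMAGE_COPY)
    # -j  per-stage verbosity, here only the package stage (VERBOSE=packages)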
core/core.sh
|
@ -1,5 +1,7 @@
|
|||
#!/bin/bash
|
||||
echo "------------- CORE INSTALLATION ------------"
|
||||
export BUILDING=true
|
||||
export PATH=$PATH:/sbin:/usr/sbin:/usr/local/sbin
|
||||
source /opt/core_run.env
|
||||
if [[ -f ./build.env ]]; then
|
||||
echo sourcing a custom core build environment
|
||||
|
@ -11,7 +13,6 @@ source $LIB_DIR/verbose.lib
|
|||
echo appending pkg commands to core_run.env
|
||||
echo appending sourcing of $ENV_DIR/run.env if it exists
|
||||
cat <<ENV >> /opt/core_run.env
|
||||
|
||||
export INSTALL_PKGS="$INSTALL_PKGS"
|
||||
export UPDATE_PKGS="$UPDATE_PKGS"
|
||||
[ -f "\$ENV_DIR/run.env" ] && [ -z "\$BUILDING" ] && source \$ENV_DIR/run.env
|
||||
|
@ -21,19 +22,13 @@ mkdir -p /etc/profile.d
|
|||
echo creating login sourcing file for core_run.env in /etc/profile.d
|
||||
echo "source /opt/core_run.env" > /etc/profile.d/01-core-run-env.sh
|
||||
quiet ls -la /etc/profile.d
|
||||
|
||||
echo "--------- creating user and group 'host' with ids 1000 -----"
|
||||
echo "done --------- creating user and group 'host' with ids 1000 -----"
|
||||
cat <<DOC >> /etc/login.defs
|
||||
SYS_UID_MAX 1001"
|
||||
SYS_GID_MAX 1001"
|
||||
DOC
|
||||
groupadd -g 1000 host
|
||||
echo "SYS_UID_MAX 1001" >> /etc/login.defs
|
||||
useradd -r -g host -u 1000 host
|
||||
[[ $USER_PW ]] && export USER=${USER:-sysadmin}
|
||||
[[ $USER ]] && /bin/bash user.sh
|
||||
[[ $USER_PW ]] && export USER=${USER:-host}
|
||||
if [[ $USER ]]; then
|
||||
export UHID=${UHID:-1000}
|
||||
chown -R -h $UHID:$UHID /opt $VOLUME_DIRS
|
||||
/bin/bash user.sh
|
||||
fi
|
||||
[[ $UCI_SHELL ]] && /bin/bash uci-shell.sh
|
||||
$BIN_DIR/map-host-id /opt
|
||||
|
||||
ls -la /opt
|
||||
echo "done ------------- CORE INSTALLATION ------------"
|
|
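After core.sh runs on an alpine base, /opt/core_run.env ends up containing lines along these lines, reconstructed from the heredoc above and the alpine row of distros.csv rather than copied from an actual build:

    export INSTALL_PKGS="apk add --no-cache"
    export UPDATE_PKGS="apk update"
    [ -f "$ENV_DIR/run.env" ] && [ -z "$BUILDING" ] && source $ENV_DIR/run.env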
@ -1,7 +0,0 @@
# valid distros list
# the distro must be the name used in /etc/os-release
# <distro>,<core image name>,<install command>,<update command>
alpine,alpine, apk add --no-cache, apk update
debian,debian, apt-get install -y, apt-get update
arch, archlinux,pacman -S --noconfirm --needed,pacman -Syu
ubuntu, ubuntu, apt-get install -y, apt-get update
@ -6,10 +6,10 @@ silence $UPDATE_PKGS
|
|||
echo .... DONE!
|
||||
if [ -f ./packages/$LINUX_DISTRO ]; then
|
||||
echo INSTALLING $LINUX_DISTRO DISTRO SPECIFIC PACKAGES
|
||||
_pkgs=$(cat ./packages/$LINUX_DISTRO)
|
||||
echo $_pkgs
|
||||
echo ....
|
||||
silence $INSTALL_PKGS $_pkgs
|
||||
while IFS="" read -r pkg || [ -n "$pkg" ]; do
|
||||
$INSTALL_PKGS $pkg
|
||||
done < ./packages/$LINUX_DISTRO
|
||||
echo "DONE INSTALLING $LINUX_DISTRO SPECIFIC PACKAGES"
|
||||
fi
|
||||
echo INSTALLING COMMON PACKAGES FOR ANY DISTRO
|
||||
|
|
|
@ -1 +1,2 @@
shadow
tzdata
@ -0,0 +1,83 @@
|
|||
#!/bin/bash
|
||||
|
||||
# do not add code here for non-interactive login shells
|
||||
# rather put additional non-interactive profile script code in files in /etc/profile.d
|
||||
|
||||
# this file is sourced for all login shells and also interactive non-login shells via /etc/bash.bashrc
|
||||
# more info see http://www.linuxfromscratch.org/blfs/view/svn/postlfs/profile.html
|
||||
|
||||
# interactive non-login and login shells will call the BASH_SHELL_LOAD script below
|
||||
# non-interactive login shells only source /etc/profile.d
|
||||
# in profile.d is 03-startup.sh which will call
|
||||
# any of the scripts in a repo's startup subdirectory
|
||||
# non-interactive non-login shells are not handled here, only via /etc/bash.bashrc
|
||||
# interactive login
|
||||
|
||||
|
||||
([ -n "$SSH_CONNECTION" ] || [ -n "$SSH_CLIENT" ] || [ -n "$SSH_TTY" ]) && export SSH_SESSION=true
|
||||
[[ $- == *i* ]] && export SHELL_INTERACTIVE=true
|
||||
shopt -q login_shell && export SHELL_LOGIN=true
|
||||
[ $EUID -eq 0 ] && export USER_ROOT=true
|
||||
|
||||
# uncomment for debugging non-interactive login shell, i.e. $ . /etc/profile
|
||||
#unset SHELL_INTERACTIVE
|
||||
|
||||
#uncomment these for debugging.
|
||||
# echo ---- sourcing system /etc/profile ---
|
||||
# [[ $USER_ROOT ]] && echo 'Root User' || echo 'Non Root User'
|
||||
# [[ $SHELL_INTERACTIVE ]] && echo 'Interactive' || echo 'Not interactive'
|
||||
# [[ $SHELL_LOGIN ]] && echo 'Login shell' || echo 'Not login shell'
|
||||
# [[ $SSH_SESSION ]] && echo ssh remote user || echo local user
|
||||
# echo ---------------------
|
||||
|
||||
# Set the initial path
|
||||
export PATH=/bin:/usr/bin:/usr/local/bin
|
||||
# set directory for base shell repo
|
||||
export BASH_SHELL_BASE=/shell
|
||||
# now bootstrap by sourcing the shell repo environment
|
||||
source $BASH_SHELL_BASE/shell.env
|
||||
# set $BASH_SAFE_MODE=true in shell.env to disable UCI interactive shell from loading
|
||||
# TODO see if $NO_BASH_SHELL_SSH=true in user or host directory (at the remote machine)
|
||||
# if so don't source the load command below and make just a simple prompt.
|
||||
if [[ $SHELL_INTERACTIVE ]]; then
|
||||
if [[ ! $BASH_SAFE_MODE ]]; then
|
||||
# echo interactive shell loading $BASH_SHELL_LOAD
|
||||
source "$BASH_SHELL_LOAD"
|
||||
else
|
||||
# safe mode
|
||||
# just set a simple prompt instead
|
||||
NORMAL="\[\e[0m\]"
|
||||
RED="\[\e[1;31m\]"
|
||||
GREEN="\[\e[1;32m\]"
|
||||
YELLOW='\e[1;33m'
|
||||
if [[ $EUID == 0 ]] ; then
|
||||
PS1="${YELLOW}SAFE:$RED\u [ $NORMAL\w$RED ]# $NORMAL"
|
||||
else
|
||||
PS1="${YELLOW}SAFE:$GREEN \u [ $NORMAL\w$GREEN ]\$ $NORMAL"
|
||||
fi
|
||||
unset RED GREEN NORMAL YELLOW
|
||||
fi
|
||||
else
|
||||
# this is non-interactive login (e.g. at user machine login)
|
||||
if [[ $EUID -ne 0 ]] && [[ ! $SSH_SESSION ]]; then
|
||||
export LOGIN_LOG=$HOME/logs/login.log
|
||||
mkdir -p $HOME/logs
|
||||
touch $LOGIN_LOG
|
||||
llog () {
|
||||
echo "$@" >> $LOGIN_LOG 2>&1
|
||||
}
|
||||
export -f llog
|
||||
llog "$(env | grep BASH)"
|
||||
echo "$(date)" > $LOGIN_LOG
|
||||
llog "non-interactive login shell for $USER"
|
||||
if [ -d /etc/profile.d ]; then
|
||||
for i in /etc/profile.d/*.sh; do
|
||||
if [ -r $i ]; then
|
||||
llog "sourcing $i"
|
||||
source $i
|
||||
fi
|
||||
done
|
||||
unset i
|
||||
fi
|
||||
fi
|
||||
fi
|
|
@ -0,0 +1,6 @@
|
|||
# root login setup only, put in if block
|
||||
if [ $EUID -eq 0 ] ; then # if root user
|
||||
echo login profile, root specific setup
|
||||
export PATH=$PATH:/sbin:/usr/sbin:/usr/local/sbin
|
||||
unset HISTFILE
|
||||
fi
|
|
@ -0,0 +1,10 @@
|
|||
# this runs startups for bash shell base system
|
||||
# don't run startup if user logs in via su
|
||||
if [ "$SHELL" = "/bin/bash" ] && [ "${BASH_SHELL_STARTUP}" ] && [ "$(ps -o comm= $PPID)" != "su" ]; then
|
||||
# uncomment for debugging
|
||||
if [[ -f $BASH_SHELL_STARTUP ]] && [[ $EUID -ne 0 ]]; then
|
||||
llog "sourcing startup script $BASH_SHELL_STARTUP"
|
||||
# (${BASH_SHELL_STARTUP}) &
|
||||
source ${BASH_SHELL_STARTUP}
|
||||
fi
|
||||
fi
|
|
@ -1,10 +1,15 @@
|
|||
#!/bin/bash
|
||||
source /opt/core_run.env
|
||||
case "$1" in
|
||||
|
||||
|
||||
maphostid)
|
||||
shift 1
|
||||
/bin/bash -l -c '$BIN_DIR/map-host-id $@' $0 "$@"
|
||||
;;
|
||||
idle)
|
||||
sleep infinity
|
||||
;;
|
||||
image)
|
||||
shift 1
|
||||
/bin/bash -l -c '$BIN_DIR/image-info $@' $0 "$@"
|
||||
|
@ -13,7 +18,7 @@ shell)
|
|||
shift 1
|
||||
_shell_=/bin/bash
|
||||
[[ $1 ]] && _shell_="/bin/su $1"
|
||||
$_shell_ -c "cd ${INITIAL_DIR:-/opt}; exec bash -l"
|
||||
$_shell_ -c "cd ${DEFAULT_DIR:-/}; exec bash -l"
|
||||
;;
|
||||
help)
|
||||
$BIN_DIR/entrypoint-help
|
|
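For reference, the entrypoint subcommands above would be exercised from the host roughly like this (image name is illustrative):

    docker run --rm -it myimage shell           # login shell starting in ${DEFAULT_DIR:-/}
    docker run --rm -it myimage shell someuser  # same, but via su to someuser
    docker run --rm myimage maphostid           # remap host uid/gid inside the container
    docker run --rm -d myimage idle             # keep the container alive (sleep infinity)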
@ -0,0 +1,9 @@
|
|||
#!/bin/bash
|
||||
if [[ $USER ]]; then
|
||||
UHID=${UHID:-1000}
|
||||
if [[ ! $(id -un $UHID) ]]; then
|
||||
groupmod -g $UHID $USER
|
||||
usermod -u $UHID -g $UHID $USER
|
||||
chown -R -h $UHID:$UHID /opt $1 $VOLUME_DIRS
|
||||
fi
|
||||
fi
|
|
@ -2,4 +2,4 @@ export ENV_DIR=/opt/env
|
|||
export BIN_DIR=/opt/bin
|
||||
export LIB_DIR=/opt/lib
|
||||
export SHELL=/bin/bash
|
||||
PATH=$BIN_DIR:$PATH
|
||||
export PATH=$BIN_DIR:$PATH
|
|
@ -0,0 +1,15 @@
|
|||
#!/bin/bash
|
||||
|
||||
get_arch () {
|
||||
local arch="$(uname -m)"
|
||||
case "$arch" in
|
||||
x86_64) arch='amd64' ;;
|
||||
armhf) arch='armv6' ;;
|
||||
armv7) arch='armv7' ;;
|
||||
aarch64) arch='arm64' ;;
|
||||
ppc64el|ppc64le) arch='ppc64le' ;;
|
||||
s390x) arch='s390x';;
|
||||
*) return 1 ;;\
|
||||
esac;
|
||||
echo $arch
|
||||
}
|
|
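A short usage sketch for the helper above, assuming the file defining get_arch has been sourced (illustrative):

    arch=$(get_arch) || { echo "unsupported architecture: $(uname -m)"; exit 1; }
    echo "building for $arch"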
@ -4,16 +4,20 @@ mkdir -p /shell
|
|||
_url=https://git.kebler.net/bash/shell-base.git
|
||||
git clone $_url /shell
|
||||
source /shell/install/install.sh
|
||||
[[ $USER ]] && mkdir -p /home/$USER/shell
|
||||
uci_bash_shell_install $USER
|
||||
|
||||
if [[ $USER ]]; then
|
||||
|
||||
if [[ $USER_PW ]]; then
|
||||
echo adding shell for login user $USER
|
||||
mkdir -p /home/$USER/shell
|
||||
uci_bash_shell_install $USER
|
||||
chown -R $USER:$USER /shell
|
||||
chmod -R g+rw /shell
|
||||
setfacl -d --set u::rwx,g::rwx,o::- /shell
|
||||
# setfacl -d --set u::rwx,g::rwx,o::- /shell
|
||||
chown -R :host /home/$USER/shell
|
||||
chmod -R g+rw /home/$USER/shell
|
||||
setfacl -d --set u::rwx,g::rwx,o::- /home/$USER/shell
|
||||
# setfacl -d --set u::rwx,g::rwx,o::- /home/$USER/shell
|
||||
else
|
||||
uci_bash_shell_install
|
||||
fi
|
||||
|
||||
echo "----------- uci shell install complete ------"
|
||||
|
|
core/user.sh
|
@ -1,16 +1,27 @@
|
|||
#!/bin/bash
|
||||
USER=${1:-$USER}
|
||||
USER_PW=${2-$USER_PW}
|
||||
UHID=${UHID:-1000}
|
||||
|
||||
if [[ $USER ]]; then
|
||||
echo "------- Adding USER: $USER ------"
|
||||
echo "------- Adding USER: $USER with ID: $UHID ------"
|
||||
|
||||
cat <<DOC >> /etc/login.defs
|
||||
SYS_UID_MAX $UHID"
|
||||
SYS_GID_MAX $UHID"
|
||||
DOC
|
||||
|
||||
source $LIB_DIR/verbose.lib
|
||||
echo loading acl package
|
||||
silence $INSTALL_PKGS acl
|
||||
echo "------- Adding User: $USER ------"
|
||||
groupadd -g 1001 $USER
|
||||
useradd -rm -s /bin/bash -G host,$([[ $(getent group sudo) ]] && echo sudo || echo wheel) -g $USER -u 1001 $USER
|
||||
# echo loading acl package
|
||||
# silence $INSTALL_PKGS acl
|
||||
groupadd -g $UHID $USER
|
||||
# a user password implies a system (sudo) login user
|
||||
if [[ $USER_PW ]]; then
|
||||
echo "login system user being created"
|
||||
useradd -rm -s /bin/bash -G $USER,$([[ $(getent group sudo) ]] && echo sudo || echo wheel) -g $USER -u $UHID $USER
|
||||
echo $USER groups: $(groups $USER)
|
||||
chpasswd <<< "sysadmin:${USER_PW:-$USER}"
|
||||
chpasswd <<< "${USER}:${USER_PW}"
|
||||
|
||||
# SUDOERS Setup
|
||||
cat <<SUDO >> /etc/sudoers.d/01-sudo-wheel
|
||||
Defaults lecture = never
|
||||
|
@ -18,6 +29,7 @@ Defaults lecture = never
|
|||
%sudo ALL=(ALL:ALL) ALL
|
||||
SUDO
|
||||
chmod 440 /etc/sudoers.d/01-sudo-wheel
|
||||
|
||||
cat <<USER >> /etc/sudoers.d/02-$USER
|
||||
$USER ALL = NOPASSWD:/bin/chown
|
||||
$USER ALL = NOPASSWD:/bin/chmod
|
||||
|
@ -28,9 +40,16 @@ chmod 440 /etc/sudoers.d/02-$USER
|
|||
cat $USER-permits
|
||||
cat $USER-permits >> /etc/sudoers.d/02-$USER
|
||||
fi
|
||||
|
||||
chmod g+rw /opt
|
||||
setfacl -d --set u::rwx,g::rwx,o::- /opt
|
||||
else
|
||||
home_dir=$([[ $USER_HOME ]] && echo "$USER_HOME" || echo "/opt/user" )
|
||||
mkdir -p $home_dir
|
||||
useradd -s /sbin/nologin -G $USER -g $USER -u $UHID $USER -d $home_dir
|
||||
chown $USER:$USER $home_dir
|
||||
fi
|
||||
# shellcheck enable=add-default-case
|
||||
# chmod -R g+rw /opt
|
||||
# setfacl -d --set u::rwx,g::rwx,o::- /opt
|
||||
cat /etc/passwd | grep $USER
|
||||
echo "done------- Adding USER: $USER ------"
|
||||
fi
|
||||
|
||||
|
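user.sh is driven by its positional arguments and by USER, USER_PW and UHID; illustrative build-time invocations, assuming the core environment ($LIB_DIR, $INSTALL_PKGS) is already loaded:

    UHID=1000 /bin/bash user.sh dev changeme   # sudo-capable login user 'dev' with a password
    UHID=1001 /bin/bash user.sh svc            # password-less 'svc' user with /sbin/nologin shell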
@ -1 +1 @@
./core/opt/lib/distros.csv
/data/Hacking/computing/docker/uci-docker-build/core/rootfs/opt/lib/distros.csv
|
@ -1,2 +1,8 @@
|
|||
export USER_PW=ucommandit
|
||||
export UCI_SHELL=true
|
||||
# anything in here will be sourced ONLY during build
|
||||
# allows easy custom environment variables
|
||||
# /opt/core_run.env is always sourced before this file
|
||||
# /opt/env/run.env is NOT sourced by default during build but you
|
||||
# can explicitly add it there
|
||||
# [ -f "$ENV_DIR/run.env" ] && $ENV_DIR/run.env
|
||||
export SOMEBUILDONLYVALUE="yes!"
|
||||
export VOLUME_DIRS="/opt"
|
|
@ -1,8 +0,0 @@
|
|||
# anything in here will be sourced ONLY during build
|
||||
# allows easy custom environment variables
|
||||
# /opt/core_run.env is always sourced before this file
|
||||
# /opt/env/run.env is NOT sourced by default during build but you
|
||||
# can explicitly add it there
|
||||
# [ -f "$ENV_DIR/run.env" ] && $ENV_DIR/run.env
|
||||
export SOMEBUILDONLYVALUE="yes!"
|
||||
export VOLUME_DIRS="/opt"
|
|
@ -3,5 +3,5 @@ export ENTRYPOINT_CMD=mycmd
|
|||
export ENTRYPOINT_CMD_PATH=$BIN_DIR/mycmd
|
||||
# here you could put specific exports
|
||||
# or source another file, whatever. this is sourced at shell login
|
||||
export INITIAL_DIR=/opt/bin
|
||||
export DEFAULT_DIR=/opt/bin
|
||||
export NONSENSE="this is from the run.env file"
|
|
@ -3,5 +3,5 @@ export ENTRYPOINT_CMD=mycmd
|
|||
export ENTRYPOINT_CMD_PATH=$BIN_DIR/mycmd
|
||||
# here you could put specific exports
|
||||
# or source another file, whatever. this is sourced at shell login
|
||||
export INITIAL_DIR=/opt/bin
|
||||
export DEFAULT_DIR=/opt/bin
|
||||
export NONSENSE="this is from the run.env file"
|
|
@ -0,0 +1,940 @@
|
|||
#! /usr/bin/env python3
|
||||
from __future__ import print_function
|
||||
|
||||
__copyright__ = "(C) 2017-2023 Guido U. Draheim, licensed under the EUPL"
|
||||
__version__ = "1.4.6097"
|
||||
|
||||
import subprocess
|
||||
import collections
|
||||
import sys
|
||||
import os
|
||||
import re
|
||||
import json
|
||||
import copy
|
||||
import shutil
|
||||
import hashlib
|
||||
import datetime
|
||||
import logging
|
||||
from fnmatch import fnmatchcase as fnmatch
|
||||
|
||||
logg = logging.getLogger("edit")
|
||||
|
||||
if sys.version[0] != '2':
|
||||
xrange = range
|
||||
|
||||
MAX_PATH = 1024 # on Win32 = 260 / Linux PATH_MAX = 4096 / Mac = 1024
|
||||
MAX_NAME = 253
|
||||
MAX_PART = 63
|
||||
MAX_VERSION = 127
|
||||
MAX_COLLISIONS = 100
|
||||
|
||||
TMPDIR = "load.tmp"
|
||||
DOCKER = "docker"
|
||||
KEEPDIR = 0
|
||||
KEEPDATADIR = False
|
||||
KEEPSAVEFILE = False
|
||||
KEEPINPUTFILE = False
|
||||
KEEPOUTPUTFILE = False
|
||||
OK = True
|
||||
NULL = "NULL"
|
||||
|
||||
StringConfigs = {"user": "User", "domainname": "Domainname",
|
||||
"workingdir": "WorkingDir", "workdir": "WorkingDir", "hostname": "Hostname"}
|
||||
StringMeta = {"author": "author", "os": "os", "architecture": "architecture", "arch": "architecture", "variant": "variant"}
|
||||
StringCmd = {"cmd": "Cmd", "entrypoint": "Entrypoint"}
|
||||
|
||||
ShellResult = collections.namedtuple("ShellResult", ["returncode", "stdout", "stderr"])
|
||||
|
||||
def sh(cmd=":", shell=True, check=True, ok=None, default=""):
|
||||
if ok is None: ok = OK # a parameter "ok = OK" does not work in python
|
||||
if not ok:
|
||||
logg.info("skip %s", cmd)
|
||||
return ShellResult(0, default, "")
|
||||
run = subprocess.Popen(cmd, shell=shell, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||
run.wait()
|
||||
assert run.stdout is not None and run.stderr is not None
|
||||
result = ShellResult(run.returncode, run.stdout.read(), run.stderr.read())
|
||||
if check and result.returncode:
|
||||
logg.error("CMD %s", cmd)
|
||||
logg.error("EXIT %s", result.returncode)
|
||||
logg.error("STDOUT %s", result.stdout)
|
||||
logg.error("STDERR %s", result.stderr)
|
||||
raise Exception("shell command failed")
|
||||
return result
|
||||
|
||||
def portprot(arg):
|
||||
port, prot = arg, ""
|
||||
if "/" in arg:
|
||||
port, prot = arg.rsplit("/", 1)
|
||||
if port and port[0] in "0123456789":
|
||||
pass
|
||||
else:
|
||||
import socket
|
||||
if prot:
|
||||
portnum = socket.getservbyname(port, prot)
|
||||
else:
|
||||
portnum = socket.getservbyname(port)
|
||||
port = str(portnum)
|
||||
if not prot:
|
||||
prot = "tcp"
|
||||
return port, prot
|
||||
|
||||
def podman():
|
||||
return "podman" in DOCKER
|
||||
def cleans(text):
|
||||
if podman():
|
||||
return text.replace('": ', '":').replace(', "', ',"').replace(', {', ',{')
|
||||
return text
|
||||
def os_jsonfile(filename):
|
||||
if podman():
|
||||
os.chmod(filename, 0o644)
|
||||
os.utime(filename, (0, 0))
|
||||
|
||||
class ImageName:
|
||||
def __init__(self, image):
|
||||
self.registry = None
|
||||
self.image = image
|
||||
self.version = None
|
||||
self.parse(image)
|
||||
def parse(self, image):
|
||||
parsing = image
|
||||
parts = image.split("/")
|
||||
if ":" in parts[-1] or "@" in parts[-1]:
|
||||
colon = parts[-1].find(":")
|
||||
atref = parts[-1].find("@")
|
||||
if colon >= 0 and atref >= 0:
|
||||
first = min(colon, atref)
|
||||
else:
|
||||
first = max(colon, atref)
|
||||
version = parts[-1][first:]
|
||||
parts[-1] = parts[-1][:first]
|
||||
self.version = version
|
||||
self.image = "/".join(parts)
|
||||
if len(parts) > 1 and ":" in parts[0]:
|
||||
registry = parts[0]
|
||||
parts = parts[1:]
|
||||
self.registry = registry
|
||||
self.image = "/".join(parts)
|
||||
logg.debug("image parsing = %s", parsing)
|
||||
logg.debug(".registry = %s", self.registry)
|
||||
logg.debug(".image = %s", self.image)
|
||||
logg.debug(".version = %s", self.version)
|
||||
def __str__(self):
|
||||
image = self.image
|
||||
if self.registry:
|
||||
image = "/".join([self.registry, image])
|
||||
if self.version:
|
||||
image += self.version
|
||||
return image
|
||||
def tag(self):
|
||||
image = self.image
|
||||
if self.registry:
|
||||
image = "/".join([self.registry, image])
|
||||
if self.version:
|
||||
image += self.version
|
||||
else:
|
||||
image += ":latest"
|
||||
return image
|
||||
def local(self):
|
||||
if not self.registry: return True
|
||||
if "." not in self.registry: return True
|
||||
if "localhost" in self.registry: return True
|
||||
return False
|
||||
def valid(self):
|
||||
return not list(self.problems())
|
||||
def problems(self):
|
||||
# https://docs.docker.com/engine/reference/commandline/tag/
|
||||
# https://github.com/docker/distribution/blob/master/reference/regexp.go
|
||||
if self.registry and self.registry.startswith("["):
|
||||
if len(self.registry) > MAX_NAME:
|
||||
yield "registry name: full name may not be longer than %i characters" % MAX_NAME
|
||||
yield "registry name= " + self.registry
|
||||
x = self.registry.find("]")
|
||||
if not x:
|
||||
yield "registry name: invalid ipv6 number (missing bracket)"
|
||||
yield "registry name= " + self.registry
|
||||
port = self.registry[x + 1:]
|
||||
if port:
|
||||
m = re.match("^:[A-Za-z0-9]+$", port)
|
||||
if not m:
|
||||
yield 'registry name: invalid ipv6 port (only alnum)'
|
||||
yield "registry name= " + port
|
||||
base = self.registry[:x]
|
||||
if not base:
|
||||
yield "registry name: invalid ipv6 number (empty)"
|
||||
else:
|
||||
m = re.match("^[0-9abcdefABCDEF:]*$", base)
|
||||
if not m:
|
||||
yield "registry name: invalid ipv6 number (only hexnum+colon)"
|
||||
yield "registry name= " + base
|
||||
elif self.registry:
|
||||
if len(self.registry) > MAX_NAME:
|
||||
yield "registry name: full name may not be longer than %i characters" % MAX_NAME
|
||||
yield "registry name= " + self.registry
|
||||
registry = self.registry
|
||||
if registry.count(":") > 1:
|
||||
yield "a colon may only be used to designate the port number"
|
||||
yield "registry name= " + registry
|
||||
elif registry.count(":") == 1:
|
||||
registry, port = registry.split(":", 1)
|
||||
m = re.match("^[A-Za-z0-9]+$", port)
|
||||
if not m:
|
||||
yield 'registry name: invalid ipv4 port (only alnum)'
|
||||
yield "registry name= " + registry
|
||||
parts = registry.split(".")
|
||||
if "" in parts:
|
||||
yield "no double dots '..' allowed in registry names"
|
||||
yield "registry name= " + registry
|
||||
for part in parts:
|
||||
if len(part) > MAX_PART:
|
||||
yield "registry name: dot-separated parts may only have %i characters" % MAX_PART
|
||||
yield "registry name= " + part
|
||||
m = re.match("^[A-Za-z0-9-]*$", part)
|
||||
if not m:
|
||||
yield "registry name: dns names may only have alnum+dots+dash"
|
||||
yield "registry name= " + part
|
||||
if part.startswith("-"):
|
||||
yield "registry name: dns name parts may not start with a dash"
|
||||
yield "registry name= " + part
|
||||
if part.endswith("-") and len(part) > 1:
|
||||
yield "registry name: dns name parts may not end with a dash"
|
||||
yield "registry name= " + part
|
||||
if self.image:
|
||||
if len(self.image) > MAX_NAME:
|
||||
yield "image name: should not be longer than %i characters (min path_max)" % MAX_NAME
|
||||
yield "image name= " + self.image
|
||||
if len(self.image) > MAX_PATH:
|
||||
yield "image name: can not be longer than %i characters (limit path_max)" % MAX_PATH
|
||||
yield "image name= " + self.image
|
||||
parts = self.image.split("/")
|
||||
for part in parts:
|
||||
if not part:
|
||||
yield "image name: double slashes are not a good idea"
|
||||
yield "image name= " + part
|
||||
continue
|
||||
if len(part) > MAX_NAME:
|
||||
yield "image name: slash-separated parts should only have %i characters" % MAX_NAME
|
||||
yield "image name= " + part
|
||||
separators = "._-"
|
||||
m = re.match("^[a-z0-9._-]*$", part)
|
||||
if not m:
|
||||
yield "image name: only lowercase+digits+dots+dash+underscore"
|
||||
yield "image name= " + part
|
||||
if part[0] in separators:
|
||||
yield "image name: components may not start with a separator (%s)" % part[0]
|
||||
yield "image name= " + part
|
||||
if part[-1] in separators and len(part) > 1:
|
||||
yield "image name: components may not end with a separator (%s)" % part[-1]
|
||||
yield "image name= " + part
|
||||
elems = part.split(".")
|
||||
if "" in elems:
|
||||
yield "image name: only single dots are allowed, not even double"
|
||||
yield "image name= " + part
|
||||
elems = part.split("_")
|
||||
if len(elems) > 2:
|
||||
for x in xrange(len(elems) - 1):
|
||||
if not elems[x] and not elems[x + 1]:
|
||||
yield "image name: only single or double underscores are allowed"
|
||||
yield "image name= " + part
|
||||
if self.version:
|
||||
if len(self.version) > MAX_VERSION:
|
||||
yield "image version: may not be longer than %i characters" % MAX_VERSION
|
||||
yield "image version= " + self.version
|
||||
if self.version[0] not in ":@":
|
||||
yield "image version: must either be :version or @digest"
|
||||
yield "image version= " + self.version
|
||||
if len(self.version) > 1 and self.version[1] in "-.":
|
||||
yield "image version: may not start with dots or dash"
|
||||
yield "image version= " + self.version
|
||||
version = self.version[1:]
|
||||
if not version:
|
||||
yield "image version: no name provided after '%s'" % self.version[0]
|
||||
yield "image version= " + self.version
|
||||
m = re.match("^[A-Za-z0-9_.-]*$", version)
|
||||
if not m:
|
||||
yield 'image version: only alnum+underscore+dots+dash are allowed'
|
||||
yield "image version= " + self.version
|
||||
|
||||
def edit_image(inp, out, edits):
|
||||
if True:
|
||||
if not inp:
|
||||
logg.error("no FROM value provided")
|
||||
return False
|
||||
if not out:
|
||||
logg.error("no INTO value provided")
|
||||
return False
|
||||
inp_name = ImageName(inp)
|
||||
out_name = ImageName(out)
|
||||
for problem in inp_name.problems():
|
||||
logg.warning("FROM value: %s", problem)
|
||||
for problem in out_name.problems():
|
||||
logg.warning("INTO value: %s", problem)
|
||||
if not out_name.local():
|
||||
logg.warning("output image is not local for the 'docker load' step")
|
||||
else:
|
||||
logg.warning("output image is local (%s)", out_name.registry)
|
||||
inp_tag = inp
|
||||
out_tag = out_name.tag()
|
||||
#
|
||||
tmpdir = TMPDIR
|
||||
if not os.path.isdir(tmpdir):
|
||||
logg.debug("mkdir %s", tmpdir)
|
||||
if OK: os.makedirs(tmpdir)
|
||||
datadir = os.path.join(tmpdir, "data")
|
||||
if not os.path.isdir(datadir):
|
||||
logg.debug("mkdir %s", datadir)
|
||||
if OK: os.makedirs(datadir)
|
||||
inputfile = os.path.join(tmpdir, "saved.tar")
|
||||
outputfile = os.path.join(tmpdir, "ready.tar")
|
||||
inputfile_hints = ""
|
||||
outputfile_hints = ""
|
||||
#
|
||||
docker = DOCKER
|
||||
if KEEPSAVEFILE:
|
||||
if os.path.exists(inputfile):
|
||||
os.remove(inputfile)
|
||||
cmd = "{docker} save {inp} -o {inputfile}"
|
||||
sh(cmd.format(**locals()))
|
||||
cmd = "tar xf {inputfile} -C {datadir}"
|
||||
sh(cmd.format(**locals()))
|
||||
logg.info("new {datadir} from {inputfile}".format(**locals()))
|
||||
else:
|
||||
cmd = "{docker} save {inp} | tar x -f - -C {datadir}"
|
||||
sh(cmd.format(**locals()))
|
||||
logg.info("new {datadir} from {docker} save".format(**locals()))
|
||||
inputfile_hints += " (not created)"
|
||||
run = sh("ls -l {tmpdir}".format(**locals()))
|
||||
logg.debug(run.stdout)
|
||||
#
|
||||
if OK:
|
||||
changed = edit_datadir(datadir, out_tag, edits)
|
||||
if changed:
|
||||
outfile = os.path.realpath(outputfile)
|
||||
cmd = "cd {datadir} && tar cf {outfile} ."
|
||||
sh(cmd.format(**locals()))
|
||||
cmd = "{docker} load -i {outputfile}"
|
||||
sh(cmd.format(**locals()))
|
||||
else:
|
||||
logg.warning("unchanged image from %s", inp_tag)
|
||||
outputfile_hints += " (not created)"
|
||||
if inp != out:
|
||||
cmd = "{docker} tag {inp_tag} {out_tag}"
|
||||
sh(cmd.format(**locals()))
|
||||
logg.warning(" tagged old image as %s", out_tag)
|
||||
#
|
||||
if KEEPDATADIR:
|
||||
logg.warning("keeping %s", datadir)
|
||||
else:
|
||||
if os.path.exists(datadir):
|
||||
shutil.rmtree(datadir)
|
||||
if KEEPINPUTFILE:
|
||||
logg.warning("keeping %s%s", inputfile, inputfile_hints)
|
||||
else:
|
||||
if os.path.exists(inputfile):
|
||||
os.remove(inputfile)
|
||||
if KEEPOUTPUTFILE:
|
||||
logg.warning("keeping %s%s", outputfile, outputfile_hints)
|
||||
else:
|
||||
if os.path.exists(outputfile):
|
||||
os.remove(outputfile)
|
||||
return True
|
||||
|
||||
def edit_datadir(datadir, out, edits):
|
||||
if True:
|
||||
manifest_file = "manifest.json"
|
||||
manifest_filename = os.path.join(datadir, manifest_file)
|
||||
with open(manifest_filename) as _manifest_file:
|
||||
manifest = json.load(_manifest_file)
|
||||
replaced = {}
|
||||
for item in xrange(len(manifest)):
|
||||
config_file = manifest[item]["Config"]
|
||||
config_filename = os.path.join(datadir, config_file)
|
||||
replaced[config_filename] = None
|
||||
#
|
||||
for item in xrange(len(manifest)):
|
||||
config_file = manifest[item]["Config"]
|
||||
config_filename = os.path.join(datadir, config_file)
|
||||
with open(config_filename) as _config_file:
|
||||
config = json.load(_config_file)
|
||||
old_config_text = cleans(json.dumps(config)) # to compare later
|
||||
#
|
||||
for CONFIG in ['config', 'Config', 'container_config']:
|
||||
if CONFIG not in config:
|
||||
logg.debug("no section '%s' in config", CONFIG)
|
||||
continue
|
||||
logg.debug("with %s: %s", CONFIG, config[CONFIG])
|
||||
for action, target, arg in edits:
|
||||
if action in ["remove", "rm"] and target in ["volume", "volumes"]:
|
||||
key = 'Volumes'
|
||||
if not arg:
|
||||
logg.error("can not do edit %s %s without arg: <%s>", action, target, arg)
|
||||
continue
|
||||
elif target in ["volumes"] and arg in ["*", "%"]:
|
||||
args = []
|
||||
try:
|
||||
if key in config[CONFIG] and config[CONFIG][key] is not None:
|
||||
del config[CONFIG][key]
|
||||
logg.warning("done actual config %s %s '%s'", action, target, arg)
|
||||
except KeyError as e:
|
||||
logg.warning("there was no '%s' in %s", key, config_filename)
|
||||
elif target in ["volumes"]:
|
||||
pattern = arg.replace("%", "*")
|
||||
args = []
|
||||
if key in config[CONFIG] and config[CONFIG][key] is not None:
|
||||
for entry in config[CONFIG][key]:
|
||||
if fnmatch(entry, pattern):
|
||||
args += [entry]
|
||||
logg.debug("volume pattern %s -> %s", pattern, args)
|
||||
if not args:
|
||||
logg.warning("%s pattern '%s' did not match anything", target, pattern)
|
||||
elif arg.startswith("/"):
|
||||
args = [arg]
|
||||
else:
|
||||
logg.error("can not do edit %s %s %s", action, target, arg)
|
||||
continue
|
||||
#
|
||||
for arg in args:
|
||||
entry = os.path.normpath(arg)
|
||||
try:
|
||||
if config[CONFIG][key] is None:
|
||||
raise KeyError("null section " + key)
|
||||
del config[CONFIG][key][entry]
|
||||
except KeyError as e:
|
||||
logg.warning("there was no '%s' in '%s' of %s", entry, key, config_filename)
|
||||
if action in ["remove", "rm"] and target in ["port", "ports"]:
|
||||
key = 'ExposedPorts'
|
||||
if not arg:
|
||||
logg.error("can not do edit %s %s without arg: <%s>", action, target, arg)
|
||||
continue
|
||||
elif target in ["ports"] and arg in ["*", "%"]:
|
||||
args = []
|
||||
try:
|
||||
if key in config[CONFIG] and config[CONFIG][key] is not None:
|
||||
del config[CONFIG][key]
|
||||
logg.warning("done actual config %s %s %s", action, target, arg)
|
||||
except KeyError as e:
|
||||
logg.warning("there were no '%s' in %s", key, config_filename)
|
||||
elif target in ["ports"]:
|
||||
pattern = arg.replace("%", "*")
|
||||
args = []
|
||||
if key in config[CONFIG] and config[CONFIG][key] is not None:
|
||||
for entry in config[CONFIG][key]:
|
||||
if fnmatch(entry, pattern):
|
||||
args += [entry]
|
||||
logg.debug("ports pattern %s -> %s", pattern, args)
|
||||
if not args:
|
||||
logg.warning("%s pattern '%s' did not match anything", target, pattern)
|
||||
else:
|
||||
args = [arg]
|
||||
#
|
||||
for arg in args:
|
||||
port, prot = portprot(arg)
|
||||
if not port:
|
||||
logg.error("can not do edit %s %s %s", action, target, arg)
|
||||
return False
|
||||
entry = u"%s/%s" % (port, prot)
|
||||
try:
|
||||
if config[CONFIG][key] is None:
|
||||
raise KeyError("null section " + key)
|
||||
del config[CONFIG][key][entry]
|
||||
logg.info("done rm-port '%s' from '%s'", entry, key)
|
||||
except KeyError as e:
|
||||
logg.warning("there was no '%s' in '%s' of %s", entry, key, config_filename)
|
||||
if action in ["append", "add"] and target in ["volume"]:
|
||||
if not arg:
|
||||
logg.error("can not do edit %s %s without arg: <%s>", action, target, arg)
|
||||
continue
|
||||
key = 'Volumes'
|
||||
entry = os.path.normpath(arg)
|
||||
if config[CONFIG].get(key) is None:
|
||||
config[CONFIG][key] = {}
|
||||
if arg not in config[CONFIG][key]:
|
||||
config[CONFIG][key][entry] = {}
|
||||
logg.info("added %s to %s", entry, key)
|
||||
if action in ["append", "add"] and target in ["port"]:
|
||||
if not arg:
|
||||
logg.error("can not do edit %s %s without arg: <%s>", action, target, arg)
|
||||
continue
|
||||
key = 'ExposedPorts'
|
||||
port, prot = portprot(arg)
|
||||
entry = "%s/%s" % (port, prot)
|
||||
if key not in config[CONFIG]:
|
||||
config[CONFIG][key] = {}
|
||||
if arg not in config[CONFIG][key]:
|
||||
config[CONFIG][key][entry] = {}
|
||||
logg.info("added %s to %s", entry, key)
|
||||
if action in ["set", "set-shell"] and target in ["entrypoint"]:
|
||||
key = 'Entrypoint'
|
||||
try:
|
||||
if not arg:
|
||||
running = None
|
||||
elif action in ["set-shell"]:
|
||||
running = ["/bin/sh", "-c", arg]
|
||||
elif arg.startswith("["):
|
||||
running = json.loads(arg)
|
||||
else:
|
||||
running = [arg]
|
||||
config[CONFIG][key] = running
|
||||
logg.warning("done edit %s %s", action, arg)
|
||||
except KeyError as e:
|
||||
logg.warning("there was no '%s' in %s", key, config_filename)
|
||||
if action in ["set", "set-shell"] and target in ["cmd"]:
|
||||
key = 'Cmd'
|
||||
try:
|
||||
if not arg:
|
||||
running = None
|
||||
elif action in ["set-shell"]:
|
||||
running = ["/bin/sh", "-c", arg]
|
||||
logg.info("%s %s", action, running)
|
||||
elif arg.startswith("["):
|
||||
running = json.loads(arg)
|
||||
else:
|
||||
running = [arg]
|
||||
config[CONFIG][key] = running
|
||||
logg.warning("done edit %s %s", action, arg)
|
||||
except KeyError as e:
|
||||
logg.warning("there was no '%s' in %s", key, config_filename)
|
||||
if action in ["set"] and target in StringConfigs:
|
||||
key = StringConfigs[target]
|
||||
try:
|
||||
if not arg:
|
||||
value = u''
|
||||
else:
|
||||
value = arg
|
||||
if key in config[CONFIG]:
|
||||
if config[CONFIG][key] == value:
|
||||
logg.warning("unchanged config '%s' %s", key, value)
|
||||
else:
|
||||
config[CONFIG][key] = value
|
||||
logg.warning("done edit config '%s' %s", key, value)
|
||||
else:
|
||||
config[CONFIG][key] = value
|
||||
logg.warning("done new config '%s' %s", key, value)
|
||||
except KeyError as e:
|
||||
logg.warning("there was no config %s in %s", target, config_filename)
|
||||
if action in ["set"] and target in StringMeta:
|
||||
key = StringMeta[target]
|
||||
try:
|
||||
if not arg:
|
||||
value = u''
|
||||
else:
|
||||
value = arg
|
||||
if key in config:
|
||||
if config[key] == value:
|
||||
logg.warning("unchanged meta '%s' %s", key, value)
|
||||
else:
|
||||
config[key] = value
|
||||
logg.warning("done edit meta '%s' %s", key, value)
|
||||
else:
|
||||
config[key] = value
|
||||
logg.warning("done new meta '%s' %s", key, value)
|
||||
except KeyError as e:
|
||||
logg.warning("there was no meta %s in %s", target, config_filename)
|
||||
if action in ["set-label"]:
|
||||
key = "Labels"
|
||||
try:
|
||||
value = arg or u''
|
||||
if key not in config[CONFIG]:
|
||||
config[CONFIG][key] = {}
|
||||
if target in config[CONFIG][key]:
|
||||
if config[CONFIG][key][target] == value:
|
||||
logg.warning("unchanged label '%s' %s", target, value)
|
||||
else:
|
||||
config[CONFIG][key][target] = value
|
||||
logg.warning("done edit label '%s' %s", target, value)
|
||||
else:
|
||||
config[CONFIG][key][target] = value
|
||||
logg.warning("done new label '%s' %s", target, value)
|
||||
except KeyError as e:
|
||||
logg.warning("there was no config %s in %s", target, config_filename)
|
||||
if action in ["remove-label", "rm-label"]:
|
||||
if not target:
|
||||
logg.error("can not do edit %s without arg: <%s>", action, target)
|
||||
continue
|
||||
key = "Labels"
|
||||
try:
|
||||
if key in config[CONFIG]:
|
||||
if config[CONFIG][key] is None:
|
||||
raise KeyError("null section " + key)
|
||||
del config[CONFIG][key][target]
|
||||
logg.warning("done actual %s %s ", action, target)
|
||||
except KeyError as e:
|
||||
logg.warning("there was no label %s in %s", target, config_filename)
|
||||
if action in ["remove-labels", "rm-labels"]:
|
||||
if not target:
|
||||
logg.error("can not do edit %s without arg: <%s>", action, target)
|
||||
continue
|
||||
key = "Labels"
|
||||
try:
|
||||
pattern = target.replace("%", "*")
|
||||
args = []
|
||||
if key in config[CONFIG] and config[CONFIG][key] is not None:
|
||||
for entry in config[CONFIG][key]:
|
||||
if fnmatch(entry, pattern):
|
||||
args += [entry]
|
||||
for arg in args:
|
||||
del config[CONFIG][key][arg]
|
||||
logg.warning("done actual %s %s (%s)", action, target, arg)
|
||||
except KeyError as e:
|
||||
logg.warning("there was no label %s in %s", target, config_filename)
|
||||
if action in ["remove-envs", "rm-envs"]:
|
||||
if not target:
|
||||
logg.error("can not do edit %s without arg: <%s>", action, target)
|
||||
continue
|
||||
key = "Env"
|
||||
try:
|
||||
pattern = target.strip() + "=*"
|
||||
pattern = pattern.replace("%", "*")
|
||||
found = []
|
||||
if key in config[CONFIG] and config[CONFIG][key] is not None:
|
||||
for n, entry in enumerate(config[CONFIG][key]):
|
||||
if fnmatch(entry, pattern):
|
||||
found += [n]
|
||||
for n in reversed(found):
|
||||
del config[CONFIG][key][n]
|
||||
logg.warning("done actual %s %s (%s)", action, target, n)
|
||||
except KeyError as e:
|
||||
logg.warning("there was no label %s in %s", target, config_filename)
|
||||
if action in ["remove-env", "rm-env"]:
|
||||
if not target:
|
||||
logg.error("can not do edit %s without arg: <%s>", action, target)
|
||||
continue
|
||||
key = "Env"
|
||||
try:
|
||||
if "=" in target:
|
||||
pattern = target.strip()
|
||||
else:
|
||||
pattern = target.strip() + "=*"
|
||||
found = []
|
||||
if key in config[CONFIG] and config[CONFIG][key] is not None:
|
||||
for n, entry in enumerate(config[CONFIG][key]):
|
||||
if fnmatch(entry, pattern):
|
||||
found += [n]
|
||||
for n in reversed(found):
|
||||
del config[CONFIG][key][n]
|
||||
logg.warning("done actual %s %s (%s)", action, target, n)
|
||||
except KeyError as e:
|
||||
logg.warning("there was no label %s in %s", target, config_filename)
|
||||
if action in ["remove-healthcheck", "rm-healthcheck"]:
|
||||
key = "Healthcheck"
|
||||
try:
|
||||
del config[CONFIG][key]
|
||||
logg.warning("done actual %s %s", action, target)
|
||||
except KeyError as e:
|
||||
logg.warning("there was no %s in %s", key, config_filename)
|
||||
if action in ["set-envs"]:
|
||||
if not target:
|
||||
logg.error("can not do edit %s without arg: <%s>", action, target)
|
||||
continue
|
||||
key = "Env"
|
||||
try:
|
||||
if "=" in target:
|
||||
pattern = target.strip().replace("%", "*")
|
||||
else:
|
||||
pattern = target.strip().replace("%", "*") + "=*"
|
||||
if key not in config[CONFIG]:
|
||||
config[key] = {}
|
||||
found = []
|
||||
for n, entry in enumerate(config[CONFIG][key]):
|
||||
if fnmatch(entry, pattern):
|
||||
found += [n]
|
||||
if found:
|
||||
for n in reversed(found):
|
||||
oldvalue = config[CONFIG][key][n]
|
||||
varname = oldvalue.split("=", 1)[0]
|
||||
newvalue = varname + "=" + (arg or u'')
|
||||
if config[CONFIG][key][n] == newvalue:
|
||||
logg.warning("unchanged var '%s' %s", target, newvalue)
|
||||
else:
|
||||
config[CONFIG][key][n] = newvalue
|
||||
logg.warning("done edit var '%s' %s", target, newvalue)
|
||||
elif "=" in target or "*" in target or "%" in target or "?" in target or "[" in target:
|
||||
logg.info("non-existing var pattern '%s'", target)
|
||||
else:
|
||||
value = target.strip() + "=" + (arg or u'')
|
||||
config[CONFIG][key] += [pattern + value]
|
||||
logg.warning("done new var '%s' %s", target, value)
|
||||
except KeyError as e:
|
||||
logg.warning("there was no config %s in %s", target, config_filename)
|
||||
if action in ["set-env"]:
|
||||
if not target:
|
||||
logg.error("can not do edit %s without arg: <%s>", action, target)
|
||||
continue
|
||||
key = "Env"
|
||||
try:
|
||||
pattern = target.strip() + "="
|
||||
if key not in config[CONFIG]:
|
||||
config[key] = {}
|
||||
found = []
|
||||
for n, entry in enumerate(config[CONFIG][key]):
|
||||
if entry.startswith(pattern):
|
||||
found += [n]
|
||||
if found:
|
||||
for n in reversed(found):
|
||||
oldvalue = config[CONFIG][key][n]
|
||||
varname = oldvalue.split("=", 1)[0]
|
||||
newvalue = varname + "=" + (arg or u'')
|
||||
if config[CONFIG][key][n] == newvalue:
|
||||
logg.warning("unchanged var '%s' %s", target, newvalue)
|
||||
else:
|
||||
config[CONFIG][key][n] = newvalue
|
||||
logg.warning("done edit var '%s' %s", target, newvalue)
|
||||
elif "=" in target or "*" in target or "%" in target or "?" in target or "[" in target:
|
||||
logg.info("may not use pattern characters in env variable '%s'", target)
|
||||
else:
|
||||
value = target.strip() + "=" + (arg or u'')
|
||||
config[CONFIG][key] += [pattern + value]
|
||||
logg.warning("done new var '%s' %s", target, value)
|
||||
except KeyError as e:
|
||||
logg.warning("there was no config %s in %s", target, config_filename)
|
||||
logg.debug("done %s: %s", CONFIG, config[CONFIG])
|
||||
new_config_text = cleans(json.dumps(config))
|
||||
if new_config_text != old_config_text:
|
||||
for CONFIG in ['history']:
|
||||
if CONFIG in config:
|
||||
myself = os.path.basename(sys.argv[0])
|
||||
config[CONFIG] += [{"empty_layer": True,
|
||||
"created_by": "%s #(%s)" % (myself, __version__),
|
||||
"created": datetime.datetime.utcnow().isoformat() + "Z"}]
|
||||
new_config_text = cleans(json.dumps(config))
|
||||
new_config_md = hashlib.sha256()
|
||||
new_config_md.update(new_config_text.encode("utf-8"))
|
||||
for collision in xrange(1, MAX_COLLISIONS):
|
||||
new_config_hash = new_config_md.hexdigest()
|
||||
new_config_file = "%s.json" % new_config_hash
|
||||
new_config_filename = os.path.join(datadir, new_config_file)
|
||||
if new_config_filename in replaced.keys() or new_config_filename in replaced.values():
|
||||
logg.info("collision %s %s", collision, new_config_filename)
|
||||
new_config_md.update(" ".encode("utf-8"))
|
||||
continue
|
||||
break
|
||||
with open(new_config_filename, "wb") as fp:
|
||||
fp.write(new_config_text.encode("utf-8"))
|
||||
logg.info("written new %s", new_config_filename)
|
||||
logg.info("removed old %s", config_filename)
|
||||
os_jsonfile(new_config_filename)
|
||||
#
|
||||
manifest[item]["Config"] = new_config_file
|
||||
replaced[config_filename] = new_config_filename
|
||||
else:
|
||||
logg.info(" unchanged %s", config_filename)
|
||||
#
|
||||
if manifest[item]["RepoTags"]:
|
||||
manifest[item]["RepoTags"] = [out]
|
||||
manifest_text = cleans(json.dumps(manifest))
|
||||
manifest_filename = os.path.join(datadir, manifest_file)
|
||||
# report the result
|
||||
with open(manifest_filename + ".tmp", "wb") as fp:
|
||||
fp.write(manifest_text.encode("utf-8"))
|
||||
if podman():
|
||||
if os.path.isfile(manifest_filename + ".old"):
|
||||
os.remove(manifest_filename + ".old")
|
||||
os_jsonfile(manifest_filename)
|
||||
os.rename(manifest_filename, manifest_filename + ".old")
|
||||
os.rename(manifest_filename + ".tmp", manifest_filename)
|
||||
changed = 0
|
||||
for a, b in replaced.items():
|
||||
if b:
|
||||
changed += 1
|
||||
logg.debug("replaced\n\t old %s\n\t new %s", a, b)
|
||||
else:
|
||||
logg.debug("unchanged\n\t old %s", a)
|
||||
logg.debug("updated\n\t --> %s", manifest_filename)
|
||||
logg.debug("changed %s layer metadata", changed)
|
||||
return changed
|
||||
|
||||
def parsing(args):
|
||||
inp = None
|
||||
out = None
|
||||
action = None
|
||||
target = None
|
||||
commands = []
|
||||
known_set_targets = list(StringCmd.keys()) + list(StringConfigs.keys()) + list(StringMeta.keys())
|
||||
for n in xrange(len(args)):
|
||||
arg = args[n]
|
||||
if target is not None:
|
||||
if target.lower() in ["all"]:
|
||||
# remove all ports => remove ports *
|
||||
commands.append((action, arg.lower(), "*"))
|
||||
elif action in ["set", "set-shell"] and target.lower() in ["null", "no"]:
|
||||
# set null cmd => set cmd <none>
|
||||
if arg.lower() not in known_set_targets:
|
||||
logg.error("bad edit command: %s %s %s", action, target, arg)
|
||||
                commands.append((action, arg.lower(), None))
            elif action in ["set", "set-shell"] and target.lower() in known_set_targets:
                # set cmd null => set cmd <none>
                if arg.lower() in [NULL.lower(), NULL.upper()]:
                    logg.info("do not use '%s %s %s' - use 'set null %s'", action, target, arg, target.lower())
                    commands.append((action, target.lower(), None))
                elif arg.lower() in ['']:
                    logg.error("do not use '%s %s %s' - use 'set null %s'", action, target, '""', target.lower())
                    logg.warning("we assume <null> here but that will change in the future")
                    commands.append((action, target.lower(), None))
                else:
                    commands.append((action, target.lower(), arg))
            else:
                commands.append((action, target, arg))
            action, target = None, None
            continue
        if action is None:
            if arg in ["and", "+", ",", "/"]:
                continue
            action = arg.lower()
            continue
        rm_labels = ["rm-label", "remove-label", "rm-labels", "remove-labels"]
        rm_vars = ["rm-var", "remove-var", "rm-vars", "remove-vars"]
        rm_envs = ["rm-env", "remove-env", "rm-envs", "remove-envs"]
        if action in (rm_labels + rm_vars + rm_envs):
            target = arg
            commands.append((action, target, None))
            action, target = None, None
            continue
        #
        if action in ["set"] and arg.lower() in ["shell", "label", "labels", "var", "vars", "env", "envs"]:
            action = "%s-%s" % (action, arg.lower())
            continue
        if action in ["rm", "remove"] and arg.lower() in ["label", "labels", "var", "vars", "env", "envs"]:
            action = "%s-%s" % (action, arg.lower())
            continue
        if action in ["rm", "remove"] and arg.lower() in ["healthcheck"]:
            action = "%s-%s" % (action, arg.lower())
            commands.append((action, None, None))
            action, target = None, None
            continue
        if action in ["from"]:
            inp = arg
            action = None
            continue
        elif action in ["into"]:
            out = arg
            action = None
            continue
        elif action in ["remove", "rm"]:
            if arg.lower() in ["volume", "port", "all", "volumes", "ports"]:
                target = arg.lower()
                continue
            logg.error("unknown edit command starting with %s %s", action, arg)
            return None, None, []
        elif action in ["append", "add"]:
            if arg.lower() in ["volume", "port"]:
                target = arg.lower()
                continue
            logg.error("unknown edit command starting with %s %s", action, arg)
            return None, None, []
        elif action in ["set", "override"]:
            if arg.lower() in known_set_targets:
                target = arg.lower()
                continue
            if arg.lower() in ["null", "no"]:
                target = arg.lower()
                continue  # handled in "all" / "no" case
            logg.error("unknown edit command starting with %s %s", action, arg)
            return None, None, []
        elif action in ["set-shell"]:
            if arg.lower() in StringCmd:
                target = arg.lower()
                continue
            logg.error("unknown edit command starting with %s %s", action, arg)
            return None, None, []
        elif action in ["set-label", "set-var", "set-env", "set-envs"]:
            target = arg
            continue
        else:
            logg.error("unknown edit command starting with %s", action)
            return None, None, []
    if not inp:
        logg.error("no input image given - use 'FROM image-name'")
        return None, None, []
    if not out:
        logg.error("no output image given - use 'INTO image-name'")
        return None, None, []
    return inp, out, commands
def docker_tag(inp, out):
    docker = DOCKER
    if inp and out and inp != out:
        cmd = "{docker} tag {inp} {out}".format(**locals())
        logg.info("%s", cmd)
        sh(cmd, check=False)

if __name__ == "__main__":
    from optparse import OptionParser
    cmdline = OptionParser("%prog input-image output-image [commands...]")
    cmdline.add_option("-T", "--tmpdir", metavar="DIR", default=TMPDIR,
                       help="use this base temp dir %s [%default]")
    cmdline.add_option("-D", "--docker", metavar="DIR", default=DOCKER,
                       help="use another docker container tool %s [%default]")
    cmdline.add_option("-k", "--keepdir", action="count", default=KEEPDIR,
                       help="keep the unpacked dirs [%default]")
    cmdline.add_option("-v", "--verbose", action="count", default=0,
                       help="increase logging level [%default]")
    cmdline.add_option("-z", "--dryrun", action="store_true", default=not OK,
                       help="only run logic, do not change anything [%default]")
    cmdline.add_option("--with-null", metavar="name", default=NULL,
                       help="specify the special value for disable [%default]")
    cmdline.add_option("-c", "--config", metavar="NAME=VAL", action="append", default=[],
                       help="..override internal variables (MAX_PATH) {%default}")
    opt, args = cmdline.parse_args()
    logging.basicConfig(level=max(0, logging.ERROR - 10 * opt.verbose))
    TMPDIR = opt.tmpdir
    DOCKER = opt.docker
    KEEPDIR = opt.keepdir
    OK = not opt.dryrun
    NULL = opt.with_null
    if KEEPDIR >= 1:
        KEEPDATADIR = True
    if KEEPDIR >= 2:
        KEEPSAVEFILE = True
    if KEEPDIR >= 3:
        KEEPINPUTFILE = True
    if KEEPDIR >= 4:
        KEEPOUTPUTFILE = True
    ########################################
    for setting in opt.config:
        nam, val = setting, "1"
        if "=" in setting:
            nam, val = setting.split("=", 1)
        elif nam.startswith("no-") or nam.startswith("NO-"):
            nam, val = nam[3:], "0"
        elif nam.startswith("No") or nam.startswith("NO"):
            nam, val = nam[2:], "0"
        if nam in globals():
            old = globals()[nam]
            if old is False or old is True:
                logg.debug("yes %s=%s", nam, val)
                globals()[nam] = (val in ("true", "True", "TRUE", "yes", "y", "Y", "YES", "1"))
            elif isinstance(old, float):
                logg.debug("num %s=%s", nam, val)
                globals()[nam] = float(val)
            elif isinstance(old, int):
                logg.debug("int %s=%s", nam, val)
                globals()[nam] = int(val)
            elif isinstance(old, str):
                logg.debug("str %s=%s", nam, val)
                globals()[nam] = val.strip()
            else:
                logg.warning("(ignored) unknown target type -c '%s' : %s", nam, type(old))
        else:
            logg.warning("(ignored) unknown target config -c '%s' : no such variable", nam)
    ########################################
    if len(args) < 2:
        logg.error("not enough arguments, use --help")
    else:
        inp, out, commands = parsing(args)
        if not commands:
            logg.warning("nothing to do for %s", out)
            docker_tag(inp, out)
        else:
            if opt.dryrun:
                oldlevel = logg.level
                logg.level = logging.INFO
                logg.info(" | from %s into %s", inp, out)
                for action, target, arg in commands:
                    if arg is None:
                        arg = "<null>"
                    else:
                        arg = "'%s'" % arg
                    logg.info(" | %s %s %s", action, target, arg)
                logg.level = oldlevel
            edit_image(inp, out, commands)
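As a rough usage sketch of the command grammar that parsing() above accepts; the script filename, image tags, and label name are examples, not taken from this diff, and -z is the dry-run flag defined above:

    # preview the edit plan with the dry-run flag, then drop -z to apply it
    python3 image-edit.py -z from myapp:latest into myapp:slim \
        set null cmd and rm label maintainer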
@@ -7,7 +7,7 @@
# See the accompanying LICENSE file, if present, or visit:
#   https://opensource.org/licenses/MIT
#######################################################################
VERSION="v0.7.0"
VERSION="v0.7.1"
#######################################################################
# Bash-TPL: A Smart, Lightweight shell script templating engine
#
@@ -1289,6 +1289,8 @@ function main() {
	fi

	process_stdin

	return 0 # ALL OK
}

# Only process main logic if not being sourced (ie tested)
@@ -146,6 +146,7 @@ local distro; local distros
if docker create --name dummy $1 > /dev/null; then
    if docker cp -L dummy:/etc/os-release $temp > /dev/null; then
        docker rm -f dummy > /dev/null
        # echo $(load_csv $BDIR/distros.csv)
        distros=$(echo $(echo "$(load_csv $BDIR/distros.csv)" | grep -Eo "^[^,]+") | sed "s/\s/|/g")
        distro=$(cat $temp | tr [:upper:] [:lower:] | grep -Eio -m 1 $distros)
        rm $temp
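To make the pattern building above concrete, here is a small self-contained sketch; the CSV rows and the load_csv stand-in are invented for illustration, and only the first column matters to the grep:

    load_csv () { cat "$1"; }                       # stand-in for the repo helper of the same name
    printf 'alpine,x\ndebian,x\nfedora,x\n' > /tmp/distros.csv
    distros=$(echo $(load_csv /tmp/distros.csv | grep -Eo "^[^,]+") | sed "s/\s/|/g")
    echo "$distros"                                 # -> alpine|debian|fedora
    grep -Eio -m 1 "$distros" /etc/os-release       # prints the first matching distro name, if any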
@@ -211,10 +212,21 @@ echo $([[ $RUSER ]] && echo ${RUSER}/)${NAME}$arch
get_build_src () {
    # processing the build source directory
    local src; local spath; local spaths

    # will determine if there are any minimal build source files/directories
    check_dir () {
        [[ -f $1/init/init.sh ]] || return 1
        [[ ( -f $1/packages/packages.lst || -f $1/packages/packages.sh ) ]] && _packages_=true
        if
            [ $(ls $1/packages/*system.pkgs 2> /dev/null) ] || \
            [ $(ls $1/packages/system/*.pkgs 2> /dev/null) ] || \
            [ -f $1/packages/repositories.sh ] || \
            [ -f $1/packages/packages.sh ]
        then
            _packages_=true
            return 0
        fi
        [[ -f $1/init/init.sh ]] && return 0
        [[ -d $1/rootfs ]] && return 0
        return 1
    }

    src=${1:-$BUILD_SRC}
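For orientation, a build source directory satisfies check_dir above when it offers packages, an init script, or a rootfs overlay; a minimal passing layout could be created like this (the file names beyond the paths tested above are made up):

    # sketch of a minimal source tree that check_dir would accept
    mkdir -p src/packages/system src/init src/rootfs/opt/bin
    echo wget > src/packages/system/base.pkgs       # any *.pkgs under packages/system/ marks packages present
    printf 'echo hello from init\n' > src/init/init.sh
    # check_dir src   -> returns 0 (and sets _packages_=true)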
@@ -255,14 +267,15 @@ if [[ $VERBOSE ]]; then
    echo -e "\n---------------------------------"
    echo "build source at $BUILD_SRC to be mounted to /build in container ***** "
    ls -la $BUILD_SRC
    echo -e "\n----- base init script init.sh ------"
    echo -e "\n----- base init script init.sh ------\n"
    cat $BUILD_SRC/init/init.sh
    echo -e "\n----- end base init script init.sh ------"
    echo -e "\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
fi

echo -e "\e[1;37m**************BUILD PARAMETERS *******************************"
echo "Architecture of this machine doing the building: $ARCH"
if [[ $BUILD_SRC="_core_" ]] ;then
if [[ "$BUILD_SRC" == "_core_" ]] ;then
    echo Building ONLY the UCI core
else
    echo "Using scripts source directory at $BUILD_SRC"
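The one-character fix in the hunk above matters because, without spaces around the operator, [[ ... ]] sees a single non-empty word and always succeeds; a quick demonstration (the variable value is arbitrary):

    BUILD_SRC="../src"
    [[ $BUILD_SRC="_core_" ]]      && echo "always true - this is just a non-empty string test"
    [[ "$BUILD_SRC" == "_core_" ]] || echo "proper comparison - false unless BUILD_SRC is literally _core_"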
@@ -0,0 +1,44 @@
#!/bin/bash

base_image_alter () {

    local efile; local dry_run

    declare OPTION; declare OPTARG; declare OPTIND
    OPTIND=0
    while getopts "de:" OPTION; do
        # echo processing: option:$OPTION argument:$OPTARG index:$OPTIND remaining:${@:$OPTIND}
        case "$OPTION" in
            e)
                efile=$OPTARG
                ;;
            d)
                dry_run="echo "
                ;;
            *) echo unknown base image alter option -$OPTARG
               echo "USAGE: base_image_alter <options>"
               echo "available options: "
               ;;
        esac
    done

    shift $((OPTIND - 1))

    BASE_IMAGE=${1:-$BASE_IMAGE}

    if [[ ! $BASE_IMAGE ]]; then
        echo attempting to get base image name from environment file
        source_env_file $efile
        [[ ! $BASE_IMAGE ]] && BASE_IMAGE=$(get_default_distro_image)
    fi

    [[ ! $BASE_IMAGE ]] && echo unable to determine base image && return 1

    echo $BASE_IMAGE will be altered with: $BASE_IMAGE_ALTER

}
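A rough usage sketch of the new helper; the env file name, image tag, and BASE_IMAGE_ALTER value are invented, and source_env_file / get_default_distro_image are assumed to come from the repo's shared lib:

    # assumes the function and its helpers are already sourced
    BASE_IMAGE_ALTER="add sysadmin user"
    base_image_alter -e build.env alpine:3.19
    # prints: alpine:3.19 will be altered with: add sysadmin user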
@@ -81,6 +81,9 @@ if ! docker image push $target > /dev/null 2>&1; then
    echo ERROR: unable to push $source2 to repository at $1 as $target
fi

# TODO if need be
# docker login -u="${DOCKER_USERNAME}" -p="${DOCKER_PASSWORD}" ${private_registry_domain}

if [[ $PULL == downloaded ]]; then
    echo removing $source2 downloaded from hub.docker.com
    docker image rm $source2 > /dev/null 2>&1
@@ -56,7 +56,7 @@ try_container () {
        mp=$OPTARG
        ;;
    o)
        options=$OPTARG
        options="$OPTARG"
        ;;
    h)
        hmp=$OPTARG
@@ -144,7 +144,7 @@ END
fi
dcmd=$( tr "\n" " " <<-END
	docker run -i $([[ ! $script ]] && echo -t)
	--rm $priv $evar $options ${entrypoint} ${evnf}
	--rm $priv $evar $options ${entrypoint} ${envf}
	$([[ $cuser ]] && echo --user $cuser)
	--name try-$name --hostname try-$host-$name
	$([[ $mp ]] && echo -v $vname:/$mp)
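Once tr flattens the here-doc above into one line, dcmd is essentially a single docker run invocation; with invented values, and showing only the flags visible in this hunk, it reads roughly as:

    # illustrative only - $priv, $evar, $options, ${entrypoint} and ${envf} expand to further flags
    docker run -i -t --rm --name try-demo --hostname try-laptop-demo -v demo:/data ...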
@@ -1 +1 @@
../core/opt/lib/distros.csv
/data/Hacking/computing/docker/uci-docker-build/core/rootfs/opt/lib/distros.csv
@@ -30,3 +30,5 @@ One can make images in one of two ways.

It is recommended to do the latter.


Supported distros are found in distros.csv in the root of the repository. Do NOT delete this file. It is possible to add other distros. This file is symlinked into lib/ and core/opt/lib.
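Since the detection code shown earlier only greps the first CSV column for the distro name, adding support for another distro starts with a new line in distros.csv; the second field below is a placeholder because the full column layout is not part of this diff:

    # hypothetical new entry - the first field must match the name used in /etc/os-release
    rocky,<remaining-columns>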
@@ -0,0 +1,10 @@
#!/bin/bash
[[ $1 = "-f" ]] && force=force && shift
if [[ ! $(udbuild image exists -e test.env) || $force ]] ; then
    echo $force building test image
    # udbuild -p -e test.env -n
    udbuild -e test.env
else
    echo using existing image, use -f to force rebuild
fi
@@ -1,2 +1,4 @@
export VOLUME_DIRS="/opt/bin"
# export HOST_MAP="1001:1001"
# export VOLUME_DIRS="/opt/bin"
export UCI_SHELL=true
export USER=host
export DEFAULT_DIR=/opt/bin
@@ -0,0 +1,3 @@
echo Running init script for test
echo current environment
env
@@ -0,0 +1 @@
echo running a custom package script
@@ -0,0 +1 @@
wget
@@ -0,0 +1 @@
jq
@@ -0,0 +1 @@
export DEFAULT_DIR=/opt/bin
@@ -1,4 +1,5 @@
# if SYSADMIN_PW is set a sysadmin user with uid of 1001 will be created
VERBOSE=true
# if SYSADMIN_PW is set a sysadmin user with UHID of 1001 will be created
# SYSADMIN_PW=ucommandit
# default is alpine
# LINUX_DISTRO=alpine
@@ -8,4 +9,6 @@ RUSER=testing
# default is the "default" target; use dev to run a test container after the build
# TARGET=dev
# by default, the build source is looked for in PWD and then its parent
BUILD_SRC=../src
# BUILD_SRC=src
# APPEND_BUILD_ENV=./build.env