code (string, 2–1.05M chars) | repo_name (string, 5–110 chars) | path (string, 3–922 chars) | language (1 class) | license (15 classes) | size (int64, 2–1.05M)
---|---|---|---|---|---|
#!/bin/bash
mkdir -p data pdfmaps
for pdffile in pdfs/*.pdf; do
echo "WORKING ON $pdffile ..."
pdftohtml -xml "$pdffile" > /dev/null
xmlfile=$(echo "$pdffile" | sed 's/\.pdf$/.xml/')
# draw maps
./parse_xml_from_pdfs.py "$xmlfile" 1
csvfile=$(echo "$pdffile" | sed 's/\.pdf$/.csv/' | sed 's#pdfs/#data/#')
./parse_xml_from_pdfs.py "$xmlfile" > "$csvfile"
done
| boogheta/various_scrapers | concours-ESR-PU-MC/parse_pdfs.sh | Shell | lgpl-3.0 | 373 |
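For reference, the two sed pipelines above can be replaced by bash parameter expansion, which avoids spawning sed and the unquoted-variable pitfall entirely (a sketch, not part of the original script):
xmlfile="${pdffile%.pdf}.xml"
csvfile="data/$(basename "${pdffile%.pdf}").csv"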
#!/bin/sh
rm papart/src/Papart-examples
| poqudrof/PapARt | removeExamples.sh | Shell | lgpl-3.0 | 41 |
#!/bin/sh
cd "$(dirname "$0")"
. ./config.sh
. ./classpath.sh
exec java ${PANFMP_TOOLS_JAVA_OPTIONS} \
-Dlog4j.configuration="file:${PANFMP_TOOLS_LOG4J_CONFIG}" \
de.pangaea.metadataportal.harvester.Harvester \
"${PANFMP_CONFIG}" "$@"
| AuScope/NatPortalIGSN | panFMP/panFMP-1.1.0/scripts/harvest.sh | Shell | lgpl-3.0 | 233 |
#!/opt/local/bin/zsh
"$GOPATH"/github.com/gogits/gogs/gogs web & | xj9/smf-manifests | gogs/start-gogs.sh | Shell | unlicense | 63 |
function wikipedia {(
local lang="en"
while getopts ":l:" opt; do
case $opt in
l)
lang="$OPTARG"
;;
\?)
echo "Invalid option: -$OPTARG" >&2
exit 1
;;
:)
echo "Option -$OPTARG requires an argument." >&2
exit 1
;;
esac
done
shift $(( OPTIND-1 ));
local query=$(rawurlencode "${*:1}")
lynx "http://${lang}.wikipedia.org/w/index.php?search=${query}"
)}
| Quarkex/dotfiles | functions/wikipedia.sh | Shell | unlicense | 553 |
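The function above assumes a rawurlencode helper defined elsewhere in the dotfiles; a minimal ASCII-only sketch of such a percent-encoder (hypothetical, not the repo's actual implementation) could be:
rawurlencode() {
  local string="$1" out="" c i
  for (( i = 0; i < ${#string}; i++ )); do
    c=${string:i:1}
    case $c in
      [a-zA-Z0-9.~_-]) out+="$c" ;;               # RFC 3986 unreserved characters pass through
      *) printf -v c '%%%02X' "'$c"; out+="$c" ;; # everything else becomes %XX
    esac
  done
  printf '%s' "$out"
}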
#!/usr/bin/env zsh
rm -f /home/tunnel/Documents/Programs/MDPDF/cool.pdf \
  && rm -f /home/tunnel/Documents/Programs/MDPDF/cool.html \
  && pandoc -f markdown -t html5 "$1" -o /home/tunnel/Documents/Programs/MDPDF/cool.html \
  && echo '<link rel="stylesheet" type="text/css" href="/home/tunnel/Documents/Programs/MDPDF/tufte.css">' | cat - /home/tunnel/Documents/Programs/MDPDF/cool.html | wkhtmltopdf - "/home/tunnel/Documents/Programs/MDPDF/cool.pdf" \
  && pkill mupdf || true \
  && mupdf /home/tunnel/Documents/Programs/MDPDF/cool.pdf &
| Multipixelone/dotfiles | MDPDF/Pandoc.sh | Shell | unlicense | 522 |
#!/bin/bash
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script is called by a watchdog trigger to shutdown the user pod
# by calling the DELETE method on the pod broker.
echo "INFO: Shutting down ${APP_NAME?} pod for user ${POD_USER?} through pod broker" >&2
ID_TOKEN=$(curl -s -f -H "Metadata-Flavor: Google" "http://metadata/computeMetadata/v1/instance/service-accounts/default/identity?audience=${CLIENT_ID?}&format=full")
curl -s -f -H "Cookie: ${BROKER_COOKIE?}" -H "Authorization: Bearer ${ID_TOKEN}" -X DELETE ${BROKER_ENDPOINT?}/${APP_NAME?}/ | GoogleCloudPlatform/selkies-examples | vdi-vm/images/guacd/idle_shutdown.sh | Shell | apache-2.0 | 1,092 |
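The curl calls above fail hard (-f) on any transient metadata-server hiccup; a small retry wrapper is a common hardening step. A sketch (function name and retry policy are hypothetical):
fetch_with_retry() {
  local url="$1" attempts=5 i
  for (( i = 1; i <= attempts; i++ )); do
    curl -s -f -H "Metadata-Flavor: Google" "$url" && return 0
    echo "WARN: attempt $i/$attempts failed for $url, retrying" >&2
    sleep 2
  done
  return 1
}
# e.g. ID_TOKEN=$(fetch_with_retry "http://metadata/computeMetadata/v1/instance/service-accounts/default/identity?audience=${CLIENT_ID?}&format=full")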
#!/bin/sh
#
#
# Copyright 2005 i-Konect LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
DM_CONFIG=../conf/start-transient-DeviceManager.config
CM_CONFIG=../conf/start-transient-ConfigManager.config
LU_CONFIG=../conf/jrmp-reggie.config
if [ "$JAVA_HOME"=="" ] ; then
JAVA_HOME=/usr/java
fi
JAVA_HOME=$JAVA_HOME
JAVA_BIN=$JAVA_HOME/bin
JAVA_LIB_EXT=/usr/java/jre/lib/ext
JINI_HOME=$DM_HOME/jini-home
DM_HTTP_PORT=8081
JAVA_OPTS="-server -cp ../conf -Djava.security.manager \
-Djava.security.policy=../conf/service.policy \
-Dcom.sun.management.jmxremote \
-Dorg.firstopen.singularity.httpd.port=8081 \
-Dorg.firstopen.singularity.home=.. \
-Dorg.firstopen.singularity.classserver=`hostname` \
-Djava.ext.dirs=$JAVA_HOME/lib/ext:../lib"
SERVICE_CMD=$JAVA_BIN/java
export SERVICE_CMD JAVA_HOME JAVA_BIN JAVA_LIB_EXT JAVA_OPTS DM_HTTP_PORT BASE
| tomrose/singularity | install/common/linux/bin/common.sh | Shell | apache-2.0 | 1,411 |
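The JAVA_HOME check above can also be written with the POSIX default-assignment expansion, which sets the variable only when it is unset or empty (equivalent sketch):
: "${JAVA_HOME:=/usr/java}"  # ':' discards the expansion; ':=' assigns the default
export JAVA_HOME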
#!/bin/bash
# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
set -e
$VALGRIND ./searchlib_changevector_test_app
rm -rf *.dat
rm -rf *.idx
rm -rf *.weight
rm -rf clstmp
rm -rf alstmp
| vespa-engine/vespa | searchlib/src/tests/attribute/changevector/changevector_test.sh | Shell | apache-2.0 | 237 |
#!/bin/bash
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -eo pipefail
## Get the directory of the build script
scriptDir=$(realpath "$(dirname "${BASH_SOURCE[0]}")")
## cd to the parent directory, i.e. the root of the git repo
cd "${scriptDir}/.."
# include common functions
source "${scriptDir}/common.sh"
# Print out Java version
java -version
echo ${JOB_TYPE}
RETURN_CODE=0
set +e
git submodule update --init
case ${JOB_TYPE} in
test)
./mvnw test
RETURN_CODE=$?
;;
lint)
./mvnw com.coveo:fmt-maven-plugin:check
RETURN_CODE=$?
if [[ ${RETURN_CODE} != 0 ]]; then
echo "To fix formatting errors, run: mvn com.coveo:fmt-maven-plugin:format"
fi
;;
esac
echo "exiting with ${RETURN_CODE}"
exit ${RETURN_CODE}
| google/exposure-notifications-private-analytics-ingestion | ci/build.sh | Shell | apache-2.0 | 1,280 |
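The set +e / RETURN_CODE dance above is the standard way to capture a command's exit status without tripping errexit; in isolation the pattern is (command name is a placeholder):
set +e          # suspend errexit
some_command
rc=$?           # capture the status for later use
set -e          # restore errexit if desired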
#!/bin/bash
LANGPACKSFOLDER='../../moodle-langpacks'
function check_success {
if [ $? -ne 0 ]; then
print_error "$1"
elif [ "$#" -gt 1 ]; then
print_ok "$2"
fi
}
function print_success {
if [ $? -ne 0 ]; then
print_message "$1"
printf -v "$3" '%s' 0 # assign 0 to the variable named in $3 ("$3=0" is not a valid assignment)
else
print_ok "$2"
fi
}
function print_error {
tput setaf 1; echo " ERROR: $1"
}
function print_ok {
tput setaf 2; echo " OK: $1"
echo
}
function print_message {
tput setaf 3; echo "-------- $1"
echo
}
function print_title {
stepnumber=$(($stepnumber + 1))
echo
tput setaf 5; echo "$stepnumber $1"
tput setaf 5; echo '=================='
} | saman222/moodlemobile2 | scripts/functions.sh | Shell | apache-2.0 | 687 |
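Note that the helpers above set a color with tput setaf but never restore the terminal's default attributes; a variant that resets after each message would be (sketch):
function print_ok {
tput setaf 2; echo " OK: $1"; tput sgr0
echo
}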
#!/bin/bash
set -e
if [[ $HACL_HOME == "" ]]; then
echo "Usage: HACL_HOME=<DIRECTORY> $0"
echo "Please set a suitable value for HACL_HOME"
exit 1
fi
echo -n "Will install vale into $(realpath $HACL_HOME/..)/vale, hit Ctrl-C to abort"
sleep 1
echo -n .
sleep 1
echo -n .
sleep 1
echo .
cd $HACL_HOME/..
if [ ! -d vale ]; then
mkdir vale
fi
vale_version=$(<$HACL_HOME/vale/.vale_version)
vale_version=${vale_version%$'\r'} # remove Windows carriage return, if it exists
old_vale_version=none
if [ -e vale/bin/.vale_version ]; then
old_vale_version=$(<vale/bin/.vale_version)
old_vale_version=${old_vale_version%$'\r'} # remove Windows carriage return, if it exists
fi
if [ "$vale_version" != "$old_vale_version" ]; then
wget "https://github.com/project-everest/vale/releases/download/v${vale_version}/vale-release-${vale_version}.zip" -O vale/vale-release.zip
rm -rf "vale/vale-release-${vale_version}"
unzip -o vale/vale-release.zip -d vale
rm -rf "vale/bin"
mv "vale/vale-release-${vale_version}/bin" vale/
chmod +x vale/bin/*.exe
echo
echo -e "\033[0;31mRemember to do:\033[0;0m"
echo "export VALE_HOME=$(realpath $HACL_HOME/..)/vale"
else
echo "Vale is up-to-date"
fi
| mitls/hacl-star | tools/get_vale.sh | Shell | apache-2.0 | 1,208 |
#!/bin/bash
#Copyright © 2017 Dell Inc. or its subsidiaries. All Rights Reserved.
INTERFACE=eth0
BRIDGE=br0
ifconfig $INTERFACE 0.0.0.0
ifconfig $INTERFACE promisc
brctl addbr $BRIDGE
brctl addif $BRIDGE $INTERFACE
brctl setfd $BRIDGE 0
brctl sethello $BRIDGE 1
brctl stp $BRIDGE no
mv /sbin/dhclient /usr/sbin/dhclient
/usr/sbin/dhclient $BRIDGE
ifconfig
infrasim node start
tail -f /var/log/infrasim/default/*.log
| hohene/RackHD | example/infrasim/start_infrasim.sh | Shell | apache-2.0 | 424 |
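brctl and the net-tools ifconfig used above are deprecated on current distributions; an equivalent bridge setup with iproute2 (a sketch, untested against this image) would be:
ip addr flush dev eth0
ip link set eth0 up
ip link set eth0 promisc on
ip link add name br0 type bridge
ip link set br0 type bridge forward_delay 0 hello_time 100 stp_state 0  # times are in centiseconds
ip link set eth0 master br0
ip link set br0 up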
#!/bin/bash
set -eu
set -o pipefail
if [ "$#" -ne 3 ] ; then
echo "word2vec_wrap <size> <window> <iters>"
exit 0
fi
echo "Training for modified word2vec (with both word and context vectors dump)"
model_dir=models/word2vec_$1_$2
mkdir -p $model_dir
./word2vec/word2vec -train data/wiki.shuffled-norm1-phrase1 -min_count 1 -outputw $model_dir/vectorsW.txt -outputc $model_dir/vectorsC.txt -outputwc $model_dir/vectorsB.txt -cbow 0 -size $1 -window $2 -negative 5 -threads 20 -iter $3
| FTAsr/wordvet | trainWord2vec.sh | Shell | apache-2.0 | 487 |
# Copyright 2016 Sotera Defense Solutions Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
database=$(sed -n 's/.*database_name *: *\([^ ]*.*\)/\1/p' < ./conf/ais.ini)
# Prepare data in HDFS and Hive table
hadoop fs -rm /tmp/ais_smallone/*
hadoop fs -rmdir /tmp/ais_smallone
hadoop fs -mkdir /tmp/ais_smallone
gzip -d aisShipData.csv.gz
hadoop fs -put aisShipData.csv /tmp/ais_smallone/
hive --hiveconf database=${database} -f etl.sql
# Create output directory, remove old output
mkdir -p output
rm -f output/micro_path_ais_results.csv
# Run Job
python AggregateMicroPath.py -c ais.ini
# Get Results
echo -e "latitude\tlongitude\tcount\tdate" > output/micro_path_ais_results.csv
hive -S -e "select * from ${database}.micro_path_intersect_counts_ais_small_final;" >> output/micro_path_ais_results.csv
| Sotera/aggregate-micro-paths | hive-streaming/run_ais.sh | Shell | apache-2.0 | 1,310 |
#!/bin/bash
# Manual Backup as root
# sh /backup/backup.sh >> /backup/logs 2>&1
###################################################
# Manual Restore - USE WITH CAUTION ! #
###################################################
# tar -xf mongo-11-11-11.tar.gz -C mongo-11-11-11 #
# mongorestore --dir mongo-11-11-11 #
# tar -xf cfs-11-11-11.tar.gz -C cfs-11-11-11 #
# cp -r cfs-11-11-11 /opt/mywebsite/cfs #
# rm -r mongo-11-11-11 cfs-11-11-11 #
###################################################
dbName="mydb"
cfsLoc="/opt/mywebsite/cfs"
curDate=`date +"%y-%m-%d"`
backupLoc="/backup"
cd $backupLoc
# Mongo Backup
mongodump -d $dbName -o mongo-$curDate
tar -zcf mongo-$curDate.tar.gz mongo-$curDate
rm -r mongo-$curDate
# CFS backup
cp -r $cfsLoc cfs-$curDate
tar -zcf cfs-$curDate.tar.gz cfs-$curDate
rm -r cfs-$curDate
| wdhif/meteor-script-backup | backup.sh | Shell | apache-2.0 | 878 |
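The header comments show a manual invocation; scheduling the same command via cron could look like this (time and paths are illustrative only):
# 0 2 * * * sh /backup/backup.sh >> /backup/logs 2>&1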
#!/bin/bash
#
# Copyright 2017 Istio Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
#
# Script to configure and start the Istio sidecar.
set -e
# Match pilot/docker/Dockerfile.proxyv2
export ISTIO_META_ISTIO_VERSION="1.3.0"
# Load optional config variables
ISTIO_SIDECAR_CONFIG=${ISTIO_SIDECAR_CONFIG:-/var/lib/istio/envoy/sidecar.env}
if [[ -r ${ISTIO_SIDECAR_CONFIG} ]]; then
# shellcheck disable=SC1090
. "$ISTIO_SIDECAR_CONFIG"
fi
# Load config variables ISTIO_SYSTEM_NAMESPACE, CONTROL_PLANE_AUTH_POLICY
ISTIO_CLUSTER_CONFIG=${ISTIO_CLUSTER_CONFIG:-/var/lib/istio/envoy/cluster.env}
if [[ -r ${ISTIO_CLUSTER_CONFIG} ]]; then
# shellcheck disable=SC1090
. "$ISTIO_CLUSTER_CONFIG"
# Make sure the documented configuration variables are exported
export ISTIO_CP_AUTH ISTIO_SERVICE_CIDR ISTIO_INBOUND_PORTS
fi
# Set defaults
ISTIO_BIN_BASE=${ISTIO_BIN_BASE:-/usr/local/bin}
ISTIO_LOG_DIR=${ISTIO_LOG_DIR:-/var/log/istio}
NS=${ISTIO_NAMESPACE:-default}
SVC=${ISTIO_SERVICE:-rawvm}
ISTIO_SYSTEM_NAMESPACE=${ISTIO_SYSTEM_NAMESPACE:-istio-system}
# The default matches the default istio.yaml - use sidecar.env to override this if you
# enable auth. This requires node-agent to be running.
ISTIO_PILOT_PORT=${ISTIO_PILOT_PORT:-15011}
# Default control-plane auth policy; if ISTIO_CP_AUTH is set below, it overrides this.
CONTROL_PLANE_AUTH_POLICY=("--controlPlaneAuthPolicy" "MUTUAL_TLS")
if [ -n "${ISTIO_CP_AUTH:-}" ]; then
CONTROL_PLANE_AUTH_POLICY=("--controlPlaneAuthPolicy" "${ISTIO_CP_AUTH}")
fi
if [ -z "${ISTIO_SVC_IP:-}" ]; then
ISTIO_SVC_IP=$(hostname --all-ip-addresses | cut -d ' ' -f 1)
fi
if [ -z "${POD_NAME:-}" ]; then
POD_NAME=$(hostname -s)
fi
# Init option will only initialize iptables. set ISTIO_CUSTOM_IP_TABLES to true if you would like to ignore this step
if [ "${ISTIO_CUSTOM_IP_TABLES}" != "true" ] ; then
if [[ ${1-} == "init" || ${1-} == "-p" ]] ; then
# clean the previous Istio iptables chains. This part is different from the init image mode,
# where the init container runs in a fresh environment and there cannot be previous Istio chains
"${ISTIO_BIN_BASE}/istio-clean-iptables"
# Update iptables, based on current config. This is for backward compatibility with the init image mode.
# The sidecar image can replace the k8s init image, to avoid downloading 2 different images.
"${ISTIO_BIN_BASE}/istio-iptables" "${@}"
exit 0
fi
if [[ ${1-} != "run" ]] ; then
# clean the previous Istio iptables chains. This part is different from the init image mode,
# where the init container runs in a fresh environment and there cannot be previous Istio chains
"${ISTIO_BIN_BASE}/istio-clean-iptables"
# Update iptables, based on config file
"${ISTIO_BIN_BASE}/istio-iptables"
fi
fi
EXEC_USER=${EXEC_USER:-istio-proxy}
if [ "${ISTIO_INBOUND_INTERCEPTION_MODE}" = "TPROXY" ] ; then
# In order to allow redirect inbound traffic using TPROXY, run envoy with the CAP_NET_ADMIN capability.
# This allows configuring listeners with the "transparent" socket option set to true.
EXEC_USER=root
fi
if [ -z "${PILOT_ADDRESS:-}" ]; then
PILOT_ADDRESS=istio-pilot.${ISTIO_SYSTEM_NAMESPACE}:${ISTIO_PILOT_PORT}
fi
# If predefined ISTIO_AGENT_FLAGS is null, make it an empty string.
ISTIO_AGENT_FLAGS=${ISTIO_AGENT_FLAGS:-}
# Split ISTIO_AGENT_FLAGS by spaces.
IFS=' ' read -r -a ISTIO_AGENT_FLAGS_ARRAY <<< "$ISTIO_AGENT_FLAGS"
if [ ${EXEC_USER} == "${USER:-}" ] ; then
# if started as istio-proxy (or current user), do a normal start, without
# redirecting stderr.
INSTANCE_IP=${ISTIO_SVC_IP} POD_NAME=${POD_NAME} POD_NAMESPACE=${NS} "${ISTIO_BIN_BASE}/pilot-agent" proxy "${ISTIO_AGENT_FLAGS_ARRAY[@]}" \
--serviceCluster "$SVC" \
--discoveryAddress "${PILOT_ADDRESS}" \
"${CONTROL_PLANE_AUTH_POLICY[@]}"
else
# Will run: ${ISTIO_BIN_BASE}/envoy -c $ENVOY_CFG --restart-epoch 0 --drain-time-s 2 --parent-shutdown-time-s 3 --service-cluster $SVC --service-node 'sidecar~${ISTIO_SVC_IP}~${POD_NAME}.${NS}.svc.cluster.local~${NS}.svc.cluster.local' $ISTIO_DEBUG >${ISTIO_LOG_DIR}/istio.log" istio-proxy
exec su -s /bin/bash -c "INSTANCE_IP=${ISTIO_SVC_IP} POD_NAME=${POD_NAME} POD_NAMESPACE=${NS} exec ${ISTIO_BIN_BASE}/pilot-agent proxy ${ISTIO_AGENT_FLAGS_ARRAY[*]} \
--serviceCluster $SVC \
--discoveryAddress ${PILOT_ADDRESS} \
${CONTROL_PLANE_AUTH_POLICY[*]} \
2> ${ISTIO_LOG_DIR}/istio.err.log > ${ISTIO_LOG_DIR}/istio.log" ${EXEC_USER}
fi
| geeknoid/istio | tools/packaging/common/istio-start.sh | Shell | apache-2.0 | 5,072 |
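All of the tunables above can come from the optional sidecar.env; a hypothetical example restricted to variables this script actually reads (values are illustrative):
# /var/lib/istio/envoy/sidecar.env
ISTIO_SERVICE=my-vm-app
ISTIO_NAMESPACE=prod
ISTIO_CP_AUTH=MUTUAL_TLS
ISTIO_AGENT_FLAGS="--proxyLogLevel warning"
ISTIO_LOG_DIR=/var/log/istio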
#!/bin/bash
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Script to fetch latest swagger spec.
# Puts the updated spec at api/swagger-spec/
set -o errexit
set -o nounset
set -o pipefail
readonly ERROR_MAVEN_NOT_INSTALLED=80
if ! which mvn > /dev/null 2>&1; then
echo "Maven is not installed." >&2
exit ${ERROR_MAVEN_NOT_INSTALLED}
fi
SCRIPT_ROOT=$(dirname "${BASH_SOURCE[0]}")
CLIENT_ROOT="${SCRIPT_ROOT}/../kubernetes"
pushd "${SCRIPT_ROOT}" > /dev/null
SCRIPT_ROOT=$(pwd)
popd > /dev/null
pushd "${CLIENT_ROOT}" > /dev/null
CLIENT_ROOT=$(pwd)
popd > /dev/null
TEMP_FOLDER=$(mktemp -d)
trap "rm -rf ${TEMP_FOLDER}" EXIT SIGINT
if [[ -z ${GEN_ROOT:-} ]]; then
GEN_ROOT="${TEMP_FOLDER}/gen"
echo ">>> Cloning gen repo"
git clone --recursive https://github.com/kubernetes-client/gen.git "${GEN_ROOT}"
else
echo ">>> Reusing gen repo at ${GEN_ROOT}"
fi
echo ">>> Running java generator from the gen repo"
"${GEN_ROOT}/openapi/java.sh" "${CLIENT_ROOT}" "${SCRIPT_ROOT}/../settings"
echo ">>> Running formatter"
./mvnw spotless:apply
echo ">>> Done."
| kubernetes-client/java | scripts/update-client.sh | Shell | apache-2.0 | 1,620 |
#!/usr/bin/env bash
# Copyright 2018 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
# Exit immediately if a command exits with a non-zero status.
set -e
rimraf dist/
yarn
tsc --sourceMap false
browserify -g browserify-shim --standalone deeplearn_legacy_loader src/index.ts -p [tsify] > dist/deeplearn-legacy-loader.js
uglifyjs dist/deeplearn-legacy-loader.js -c -m -o dist/deeplearn-legacy-loader.min.js
echo "Stored standalone library at dist/deeplearn-legacy-loader(.min).js"
npm pack
| PAIR-code/deeplearnjs-legacy-loader | scripts/build-npm.sh | Shell | apache-2.0 | 1,101 |
#!/bin/sh
# Backs up all mysql databases on a given host.
#
# Copyright 2016 John Kinsella
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###
# This script expects $MYSQL_ROOT_PASSWORD to contain the root
# password for the mysql server.
MYSQLHOME=/var/lib/mysql
MYSQLDUMP=/usr/bin/mysqldump
MYSQLUSER=root
DUMP_OPTIONS="-u $MYSQLUSER --password=$MYSQL_ROOT_PASSWORD --opt --compress --single-transaction"
DATE=`date +%Y-%m-%d`
if [ $# -gt 0 ]; then
BACKUP_DIR=$1
else
echo "Usage: $0 <backup_directory>"
exit 1
fi
DATABASES=`echo "show databases" | mysql --user=root --password=$MYSQL_ROOT_PASSWORD |tail --lines=+2`
if [ ! -d "$MYSQLHOME" ] ; then
echo "Could not find $MYSQLHOME. Is mysql installed?"
exit 1
fi
mkdir -p $BACKUP_DIR
for dbname in $DATABASES ; do
echo "Backing up $dbname"
$MYSQLDUMP $DUMP_OPTIONS $dbname | gzip > $BACKUP_DIR/$dbname.backup.gz
done
| jlk/backup_mysql_containers | backup_mysql_databases.sh | Shell | apache-2.0 | 1,390 |
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -u
bin=`dirname "$0"`
bin=`cd "$bin"; pwd`
echo "========== preparing pagerank data =========="
# configure
DIR=`cd $bin/../; pwd`
. "${DIR}/../bin/hibench-config.sh"
. "${DIR}/conf/configure.sh"
# compress
COMPRESS_OPT="" # initialize so 'set -u' doesn't trip when compression is off
if [ $COMPRESS -eq 1 ]; then
COMPRESS_OPT="-c ${COMPRESS_CODEC}"
fi
# generate data
#DELIMITER=\t
OPTION="-t pagerank \
-b ${PAGERANK_BASE_HDFS} \
-n ${PAGERANK_INPUT} \
-m ${NUM_MAPS} \
-r ${NUM_REDS} \
-p ${PAGES} \
-o text"
# -d ${DELIMITER} \
$HADOOP_EXECUTABLE jar ${DATATOOLS} HiBench.DataGen ${OPTION} ${COMPRESS_OPT}
result=$?
if [ $result -ne 0 ]
then
echo "ERROR: Hadoop job failed to run successfully."
exit $result
fi
| BigData-Lab-Frankfurt/HiBench-DSE | pagerank/bin/prepare.sh | Shell | apache-2.0 | 1,459 |
#!/bin/sh
cd $WORKDIR
wait_for_tcp_service $ZED_DB_HOST $ZED_DB_PORT
sectionText "Propel - Creating configuration ..."
$CONSOLE propel:config:convert
sectionText "Propel - Insert PG compatibility ..."
$CONSOLE propel:pg-sql-compat
sectionText "Propel - Create database ..."
# Create database if it does not already exist
$CONSOLE propel:database:create
sectionText "Propel - Create schema diff ..."
# Generate diff for Propel2
$CONSOLE propel:diff
sectionText "Propel - Migrate Schema ..."
# Migrate database
$CONSOLE propel:migrate
sectionText "Propel - Initialize database ..."
# Fill the database with required data
$CONSOLE setup:init-db
| claranet/spryker-base | docker/init.d/200_dbms.sh | Shell | apache-2.0 | 650 |
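wait_for_tcp_service and sectionText are sourced from earlier init scripts in the image; a plausible minimal implementation of the former (hypothetical) is:
wait_for_tcp_service() {
  local host="$1" port="$2"
  until nc -z "$host" "$port" 2>/dev/null; do
    echo "Waiting for ${host}:${port} ..."
    sleep 1
  done
}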
#!/bin/bash
set -exuo pipefail
# The VERSION can be set with an environment variable. If it's not set, use $1
export VERSION=${VERSION:-$1}
if [ "$(docker ps -qa -f name=maproulette-frontend)" ]; then
echo "Removing existing maproulette-frontend container"
docker stop maproulette-frontend
docker rm maproulette-frontend
fi
echo "Starting maproulette frontend container"
docker run \
-itd \
--name maproulette-frontend \
--network mrnet \
--restart unless-stopped \
-p 3000:80 \
maproulette/maproulette-frontend:"${VERSION}"
| maproulette/maproulette2-docker | frontend/docker-start.sh | Shell | apache-2.0 | 547 |
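Because VERSION falls back to the first positional argument, both invocation styles work (the version string is an example):
# VERSION=3.6.1 ./docker-start.sh
# ./docker-start.sh 3.6.1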
#!/bin/bash
python setup.py bdist_egg upload --identity="Alexander Alexandrov" --sign --quiet
python setup.py bdist_wininst --target-version=2.4 register upload --identity="Alexander Alexandrov" --sign --quiet
python setup.py sdist upload --identity="Alexander Alexandrov" --sign
| TU-Berlin-DIMA/scrum-tools | build.sh | Shell | apache-2.0 | 281 |
#!/bin/bash
set -o errexit
# ---------------UPDATE ME-------------------------------#
# Increment me any time the environment should be rebuilt.
# This includes dependency changes, directory renames, etc.
# Simple integer sequence: 1, 2, 3...
environment_version=47
#--------------------------------------------------------#
function usage {
echo "Usage: $0 [OPTION]..."
echo "Run Horizon's test suite(s)"
echo ""
echo " -V, --virtual-env Always use virtualenv. Install automatically"
echo " if not present"
echo " -N, --no-virtual-env Don't use virtualenv. Run tests in local"
echo " environment"
echo " -c, --coverage Generate reports using Coverage"
echo " -f, --force Force a clean re-build of the virtual"
echo " environment. Useful when dependencies have"
echo " been added."
echo " -m, --manage Run a Django management command."
echo " --makemessages Create/Update English translation files."
echo " --compilemessages Compile all translation files."
echo " --check-only Do not update translation files (--makemessages only)."
echo " -p, --pep8 Just run pep8"
echo " -8, --pep8-changed [<basecommit>]"
echo " Just run PEP8 and HACKING compliance check"
echo " on files changed since HEAD~1 (or <basecommit>)"
echo " -P, --no-pep8 Don't run pep8 by default"
echo " -t, --tabs Check for tab characters in files."
echo " -y, --pylint Just run pylint"
echo " -j, --jshint Just run jshint"
echo " -q, --quiet Run non-interactively. (Relatively) quiet."
echo " Implies -V if -N is not set."
echo " --only-selenium Run only the Selenium unit tests"
echo " --with-selenium Run unit tests including Selenium tests"
echo " --selenium-headless Run Selenium tests headless"
echo " --integration Run the integration tests (requires a running "
echo " OpenStack environment)"
echo " --runserver Run the Django development server for"
echo " openstack_dashboard in the virtual"
echo " environment."
echo " --docs Just build the documentation"
echo " --backup-environment Make a backup of the environment on exit"
echo " --restore-environment Restore the environment before running"
echo " --destroy-environment Destroy the environment and exit"
echo " -h, --help Print this usage message"
echo ""
echo "Note: with no options specified, the script will try to run the tests in"
echo " a virtual environment, If no virtualenv is found, the script will ask"
echo " if you would like to create one. If you prefer to run tests NOT in a"
echo " virtual environment, simply pass the -N option."
exit
}
# DEFAULTS FOR RUN_TESTS.SH
#
root=`pwd -P`
venv=$root/.venv
with_venv=tools/with_venv.sh
included_dirs="openstack_horizon"
always_venv=0
backup_env=0
command_wrapper=""
destroy=0
force=0
just_pep8=0
just_pep8_changed=0
no_pep8=0
just_pylint=0
just_docs=0
just_tabs=0
just_jshint=0
never_venv=0
quiet=0
restore_env=0
runserver=0
only_selenium=0
with_selenium=0
selenium_headless=0
integration=0
testopts=""
testargs=""
with_coverage=0
makemessages=0
compilemessages=0
check_only=0
manage=0
# Jenkins sets a "JOB_NAME" variable, if it's not set, we'll make it "default"
[ "$JOB_NAME" ] || JOB_NAME="default"
function process_option {
# If running manage command, treat the rest of options as arguments.
if [ $manage -eq 1 ]; then
testargs="$testargs $1"
return 0
fi
case "$1" in
-h|--help) usage;;
-V|--virtual-env) always_venv=1; never_venv=0;;
-N|--no-virtual-env) always_venv=0; never_venv=1;;
-p|--pep8) just_pep8=1;;
-8|--pep8-changed) just_pep8_changed=1;;
-P|--no-pep8) no_pep8=1;;
-y|--pylint) just_pylint=1;;
-j|--jshint) just_jshint=1;;
-f|--force) force=1;;
-t|--tabs) just_tabs=1;;
-q|--quiet) quiet=1;;
-c|--coverage) with_coverage=1;;
-m|--manage) manage=1;;
--makemessages) makemessages=1;;
--compilemessages) compilemessages=1;;
--check-only) check_only=1;;
--only-selenium) only_selenium=1;;
--with-selenium) with_selenium=1;;
--selenium-headless) selenium_headless=1;;
--integration) integration=1;;
--docs) just_docs=1;;
--runserver) runserver=1;;
--backup-environment) backup_env=1;;
--restore-environment) restore_env=1;;
--destroy-environment) destroy=1;;
-*) testopts="$testopts $1";;
*) testargs="$testargs $1"
esac
}
function run_management_command {
${command_wrapper} python $root/manage.py $testopts $testargs
}
function run_server {
echo "Starting Django development server..."
${command_wrapper} python $root/manage.py runserver $testopts $testargs
echo "Server stopped."
}
function run_pylint {
echo "Running pylint ..."
PYTHONPATH=$root ${command_wrapper} pylint --rcfile=.pylintrc -f parseable $included_dirs > pylint.txt || true
CODE=$?
grep Global -A2 pylint.txt
if [ $CODE -lt 32 ]; then
echo "Completed successfully."
exit 0
else
echo "Completed with problems."
exit $CODE
fi
}
function run_jshint {
echo "Running jshint ..."
jshint horizon/static/horizon/js
jshint horizon/static/horizon/tests
}
function warn_on_flake8_without_venv {
set +o errexit
${command_wrapper} python -c "import hacking" 2>/dev/null
no_hacking=$?
set -o errexit
if [ $never_venv -eq 1 -a $no_hacking -eq 1 ]; then
echo "**WARNING**:" >&2
echo "OpenStack hacking is not installed on your host. Its detection will be missed." >&2
echo "Please install or use virtual env if you need OpenStack hacking detection." >&2
fi
}
function run_pep8 {
echo "Running flake8 ..."
warn_on_flake8_without_venv
DJANGO_SETTINGS_MODULE=openstack_horizon.test.settings ${command_wrapper} flake8
}
function run_pep8_changed {
# NOTE(gilliard) We want use flake8 to check the entirety of every file that has
# a change in it. Unfortunately the --filenames argument to flake8 only accepts
# file *names* and there are no files named (eg) "nova/compute/manager.py". The
# --diff argument behaves surprisingly as well, because although you feed it a
# diff, it actually checks the file on disk anyway.
local base_commit=${testargs:-HEAD~1}
files=$(git diff --name-only $base_commit | tr '\n' ' ')
echo "Running flake8 on ${files}"
warn_on_flake8_without_venv
diff -u --from-file /dev/null ${files} | DJANGO_SETTINGS_MODULE=openstack_horizon.test.settings ${command_wrapper} flake8 --diff
exit
}
function run_sphinx {
echo "Building sphinx..."
DJANGO_SETTINGS_MODULE=openstack_horizon.test.settings ${command_wrapper} python setup.py build_sphinx
echo "Build complete."
}
function tab_check {
TAB_VIOLATIONS=`find $included_dirs -type f -regex ".*\.\(css\|js\|py\|html\)" -print0 | xargs -0 awk '/\t/' | wc -l`
if [ $TAB_VIOLATIONS -gt 0 ]; then
echo "TABS! $TAB_VIOLATIONS of them! Oh no!"
HORIZON_FILES=`find $included_dirs -type f -regex ".*\.\(css\|js\|py\|html\)"`
for TABBED_FILE in $HORIZON_FILES
do
TAB_COUNT=`awk '/\t/' $TABBED_FILE | wc -l`
if [ $TAB_COUNT -gt 0 ]; then
echo "$TABBED_FILE: $TAB_COUNT"
fi
done
fi
return $TAB_VIOLATIONS;
}
function destroy_venv {
echo "Cleaning environment..."
echo "Removing virtualenv..."
rm -rf $venv
echo "Virtualenv removed."
rm -f .environment_version
echo "Environment cleaned."
}
function environment_check {
echo "Checking environment."
if [ -f .environment_version ]; then
ENV_VERS=`cat .environment_version`
if [ $ENV_VERS -eq $environment_version ]; then
if [ -e ${venv} ]; then
# If the environment exists and is up-to-date then set our variables
command_wrapper="${root}/${with_venv}"
echo "Environment is up to date."
return 0
fi
fi
fi
if [ $always_venv -eq 1 ]; then
install_venv
else
if [ ! -e ${venv} ]; then
echo -e "Environment not found. Install? (Y/n) \c"
else
echo -e "Your environment appears to be out of date. Update? (Y/n) \c"
fi
read update_env
if [ "x$update_env" = "xY" -o "x$update_env" = "x" -o "x$update_env" = "xy" ]; then
install_venv
else
# Set our command wrapper anyway.
command_wrapper="${root}/${with_venv}"
fi
fi
}
function sanity_check {
# Anything that should be determined prior to running the tests, server, etc.
# Don't sanity-check anything environment-related in -N flag is set
if [ $never_venv -eq 0 ]; then
if [ ! -e ${venv} ]; then
echo "Virtualenv not found at $venv. Did install_venv.py succeed?"
exit 1
fi
fi
# Remove .pyc files. This is sanity checking because they can linger
# after old files are deleted.
find . -name "*.pyc" -exec rm -rf {} \;
}
function backup_environment {
if [ $backup_env -eq 1 ]; then
echo "Backing up environment \"$JOB_NAME\"..."
if [ ! -e ${venv} ]; then
echo "Environment not installed. Cannot back up."
return 0
fi
if [ -d /tmp/.horizon_environment/$JOB_NAME ]; then
mv /tmp/.horizon_environment/$JOB_NAME /tmp/.horizon_environment/$JOB_NAME.old
rm -rf /tmp/.horizon_environment/$JOB_NAME
fi
mkdir -p /tmp/.horizon_environment/$JOB_NAME
cp -r $venv /tmp/.horizon_environment/$JOB_NAME/
cp .environment_version /tmp/.horizon_environment/$JOB_NAME/
# Remove the backup now that we've completed successfully
rm -rf /tmp/.horizon_environment/$JOB_NAME.old
echo "Backup completed"
fi
}
function restore_environment {
if [ $restore_env -eq 1 ]; then
echo "Restoring environment from backup..."
if [ ! -d /tmp/.horizon_environment/$JOB_NAME ]; then
echo "No backup to restore from."
return 0
fi
cp -r /tmp/.horizon_environment/$JOB_NAME/.venv ./ || true
cp -r /tmp/.horizon_environment/$JOB_NAME/.environment_version ./ || true
echo "Environment restored successfully."
fi
}
function install_venv {
# Install with install_venv.py
export PIP_DOWNLOAD_CACHE=${PIP_DOWNLOAD_CACHE-/tmp/.pip_download_cache}
export PIP_USE_MIRRORS=true
if [ $quiet -eq 1 ]; then
export PIP_NO_INPUT=true
fi
echo "Fetching new src packages..."
rm -rf $venv/src
python tools/install_venv.py
command_wrapper="$root/${with_venv}"
# Make sure it worked and record the environment version
sanity_check
chmod -R 754 $venv
echo $environment_version > .environment_version
}
function run_tests {
sanity_check
if [ $with_selenium -eq 1 ]; then
export WITH_SELENIUM=1
elif [ $only_selenium -eq 1 ]; then
export WITH_SELENIUM=1
export SKIP_UNITTESTS=1
fi
if [ $with_selenium -eq 0 -a $integration -eq 0 ]; then
testopts="$testopts --exclude-dir=openstack_horizon/test/integration_tests"
fi
if [ $selenium_headless -eq 1 ]; then
export SELENIUM_HEADLESS=1
fi
if [ -z "$testargs" ]; then
run_tests_all
else
run_tests_subset
fi
}
function run_tests_subset {
project=`echo $testargs | awk -F. '{print $1}'`
${command_wrapper} python $root/manage.py test --settings=$project.test.settings $testopts $testargs
}
function run_tests_all {
echo "Running openstack_horizon tests"
export NOSE_XUNIT_FILE=openstack_horizon/nosetests.xml
if [ "$NOSE_WITH_HTML_OUTPUT" = '1' ]; then
export NOSE_HTML_OUT_FILE='dashboard_nose_results.html'
fi
${command_wrapper} ${coverage_run} $root/manage.py test openstack_horizon --settings=openstack_horizon.test.settings $testopts
# get results of the openstack_horizon tests
DASHBOARD_RESULT=$?
if [ $with_coverage -eq 1 ]; then
echo "Generating coverage reports"
${command_wrapper} python -m coverage.__main__ combine
${command_wrapper} python -m coverage.__main__ xml -i --include="openstack_horizon/*" --omit='/usr*,setup.py,*egg*,.venv/*'
${command_wrapper} python -m coverage.__main__ html -i --include="openstack_horizon/*" --omit='/usr*,setup.py,*egg*,.venv/*' -d reports
fi
# Remove the leftover coverage files from the -p flag earlier.
rm -f .coverage.*
PEP8_RESULT=0
if [ $no_pep8 -eq 0 ] && [ $only_selenium -eq 0 ]; then
run_pep8
PEP8_RESULT=$?
fi
TEST_RESULT=$(($DASHBOARD_RESULT || $PEP8_RESULT))
if [ $TEST_RESULT -eq 0 ]; then
echo "Tests completed successfully."
else
echo "Tests failed."
fi
exit $TEST_RESULT
}
function run_integration_tests {
export INTEGRATION_TESTS=1
if [ $selenium_headless -eq 1 ]; then
export SELENIUM_HEADLESS=1
fi
echo "Running Horizon integration tests..."
if [ -z "$testargs" ]; then
${command_wrapper} nosetests openstack_horizon/test/integration_tests/tests
else
${command_wrapper} nosetests $testargs
fi
exit 0
}
function run_makemessages {
OPTS="-l en --no-obsolete --settings=openstack_horizon.test.settings"
DASHBOARD_OPTS="--extension=html,txt,csv --ignore=openstack"
echo -n "horizon: "
cd ../openstack_horizon
${command_wrapper} $root/manage.py makemessages $DASHBOARD_OPTS $OPTS
DASHBOARD_RESULT=$?
cd ..
if [ $check_only -eq 1 ]; then
git checkout -- horizon_lib/locale/en/LC_MESSAGES/django*.po
git checkout -- openstack_horizon/locale/en/LC_MESSAGES/django.po
fi
exit $DASHBOARD_RESULT
}
function run_compilemessages {
OPTS="--settings=openstack_horizon.test.settings"
cd openstack_horizon
${command_wrapper} $root/manage.py compilemessages $OPTS
DASHBOARD_RESULT=$?
cd ..
# English is the source language, so compiled catalogs are unnecessary.
rm -vf horizon/locale/en/LC_MESSAGES/django*.mo
rm -vf openstack_horizon/locale/en/LC_MESSAGES/django.mo
exit $DASHBOARD_RESULT
}
# ---------PREPARE THE ENVIRONMENT------------ #
# PROCESS ARGUMENTS, OVERRIDE DEFAULTS
for arg in "$@"; do
process_option "$arg"
done
if [ $quiet -eq 1 ] && [ $never_venv -eq 0 ] && [ $always_venv -eq 0 ]
then
always_venv=1
fi
# If destroy is set, just blow it away and exit.
if [ $destroy -eq 1 ]; then
destroy_venv
exit 0
fi
# Ignore all of this if the -N flag was set
if [ $never_venv -eq 0 ]; then
# Restore previous environment if desired
if [ $restore_env -eq 1 ]; then
restore_environment
fi
# Remove the virtual environment if --force used
if [ $force -eq 1 ]; then
destroy_venv
fi
# Then check if it's up-to-date
environment_check
# Create a backup of the up-to-date environment if desired
if [ $backup_env -eq 1 ]; then
backup_environment
fi
fi
# ---------EXERCISE THE CODE------------ #
# Run management commands
if [ $manage -eq 1 ]; then
run_management_command
exit $?
fi
# Build the docs
if [ $just_docs -eq 1 ]; then
run_sphinx
exit $?
fi
# Update translation files
if [ $makemessages -eq 1 ]; then
run_makemessages
exit $?
fi
# Compile translation files
if [ $compilemessages -eq 1 ]; then
run_compilemessages
exit $?
fi
# PEP8
if [ $just_pep8 -eq 1 ]; then
run_pep8
exit $?
fi
if [ $just_pep8_changed -eq 1 ]; then
run_pep8_changed
exit $?
fi
# Pylint
if [ $just_pylint -eq 1 ]; then
run_pylint
exit $?
fi
# Jshint
if [ $just_jshint -eq 1 ]; then
run_jshint
exit $?
fi
# Tab checker
if [ $just_tabs -eq 1 ]; then
tab_check
exit $?
fi
# Integration tests
if [ $integration -eq 1 ]; then
run_integration_tests
exit $?
fi
# Django development server
if [ $runserver -eq 1 ]; then
run_server
exit $?
fi
# Full test suite
run_tests || exit
| mrunge/openstack_horizon | run_tests.sh | Shell | apache-2.0 | 16,000 |
#!/bin/sh
# This is the simple version of the install script for PageKicker.
#
# get master repository via git clone
#cd ~
#git clone https://github.com/fredzannarbor/pagekicker-community.git
# cd into the repo and run this script
# cd pagekicker-community
# ./simple-install.sh
# it will do the following
# create outside repo directory structure
mkdir -m 755 ~/.pagekicker
mkdir -m 777 -p /tmp/pagekicker
mkdir -m 755 ~/magento # stub directory for optional import/export to catalog
# put default configuration file in place
# inspect it to make sure paths are correct
cp ~/pagekicker-community/conf/config_defaults.txt "$HOME"/.pagekicker/config.txt
sudo apt-get install -y \
apache2 \
build-essential \
calibre \
default-jre \
fdupes \
git \
imagemagick \
mysql-client \
pandoc \
pdfgrep \
pdftk \
perl \
poppler-utils \
python2.7 \
python3-dev \
python-pip \
python3-pip \
sendemail \
texlive-xetex \
ttf-dejavu \
xmlstarlet
# install python dependencies
cd ~/pagekicker-community
sudo pip install -r requirements.txt
sudo pip3 install -r requirements.txt
# create local-data hierarchy
mkdir -p local-data/bibliography local-data/bibliography/imprints local-data/bibliography/imprints/pagekicker
mkdir -p local-data/bibliography/robots local-data/bibliography/robots/default
mkdir -p local-data/bibliography/yaml
mkdir -p local-data/jobprofile_builds/default
mkdir -p local-data/logs/uuids
mkdir -p local-data/seeds/history
mkdir -p local-data/seeds/SKUs
echo "1000001" > local-data/SKUs/sku_list
touch local-data/bibliography/robots/default/default_titles.txt
# fetches & deploys third party apps stored in PageKicker scripts/lib
cd ~/pagekicker-community/scripts/lib
git clone https://github.com/jarun/googler.git
mkdir KindleGen
cd KindleGen
wget http://kindlegen.s3.amazonaws.com/kindlegen_linux_2.6_i386_v2_9.tar.gz
tar -xvf kindlegen_linux_2.6_i386_v2_9.tar.gz
cd ..
wget https://nlp.stanford.edu/software/stanford-ner-2018-02-27.zip
unzip stanford-ner-2018-02-27.zip
# fix hard-coded IBMcloud configuration file
cd ~/pagekicker-community/scripts/lib/IBMcloud/examples
sed -i "s/fred/"$USER"/" configuration.txt
# set up imagemagick configuration
mkdir ~/.magick
cd ~/pagekicker-community
echo "changed directory successfully"
cp conf/colors.xml ~/.magick/colors.xml
cat conf/imagemagick-fonts.pl > ~/.magick/fonts.xml
sleep 2
echo "install script finished running"
| fredzannarbor/pagekicker-community | debian-install.sh | Shell | apache-2.0 | 2,434 |
#!/bin/sh
./configure --prefix=$PREFIX --without-jni
make
make install
| bird-house/conda-recipes | _unused/proj4/build.sh | Shell | apache-2.0 | 73 |
# -----------------------------------------------------------------------------
#
# Package : right-align
# Version : 0.1.3
# Source repo : https://github.com/jonschlinkert/right-align
# Tested on : RHEL 8.3
# Script License: Apache License, Version 2 or later
# Maintainer : BulkPackageSearch Automation <[email protected]>
#
# Disclaimer: This script has been tested in root mode on given
# ========== platform using the mentioned version of the package.
# It may not work as expected with newer versions of the
# package and/or distribution. In such case, please
# contact "Maintainer" of this script.
#
# ----------------------------------------------------------------------------
PACKAGE_NAME=right-align
PACKAGE_VERSION=0.1.3
PACKAGE_URL=https://github.com/jonschlinkert/right-align
yum -y update && yum install -y yum-utils nodejs nodejs-devel nodejs-packaging npm python38 python38-devel ncurses git gcc gcc-c++ libffi libffi-devel ncurses git jq make cmake
yum-config-manager --add-repo http://rhn.pbm.ihost.com/rhn/latest/8.3Server/ppc64le/appstream/
yum-config-manager --add-repo http://rhn.pbm.ihost.com/rhn/latest/8.3Server/ppc64le/baseos/
yum-config-manager --add-repo http://rhn.pbm.ihost.com/rhn/latest/7Server/ppc64le/optional/
yum install -y firefox liberation-fonts xdg-utils && npm install n -g && n latest && npm install -g npm@latest && export PATH="$PATH" && npm install --global yarn grunt-bump xo testem acorn
OS_NAME=`python3 -c "os_file_data=open('/etc/os-release').readlines();os_info = [i.replace('PRETTY_NAME=','').strip() for i in os_file_data if i.startswith('PRETTY_NAME')];print(os_info[0])"`
HOME_DIR=`pwd`
if ! git clone $PACKAGE_URL $PACKAGE_NAME; then
echo "------------------$PACKAGE_NAME:clone_fails---------------------------------------"
echo "$PACKAGE_URL $PACKAGE_NAME" > /home/tester/output/clone_fails
echo "$PACKAGE_NAME | $PACKAGE_URL | $PACKAGE_VERSION | $OS_NAME | GitHub | Fail | Clone_Fails" > /home/tester/output/version_tracker
exit 0
fi
cd $HOME_DIR/$PACKAGE_NAME
git checkout $PACKAGE_VERSION
PACKAGE_VERSION=$(jq -r ".version" package.json)
# run the test command from test.sh
if ! { npm install && npm audit fix && npm audit fix --force; }; then
echo "------------------$PACKAGE_NAME:install_fails-------------------------------------"
echo "$PACKAGE_URL $PACKAGE_NAME"
echo "$PACKAGE_NAME | $PACKAGE_URL | $PACKAGE_VERSION | $OS_NAME | GitHub | Fail | Install_Fails"
exit 0
fi
cd $HOME_DIR/$PACKAGE_NAME
if ! npm test; then
echo "------------------$PACKAGE_NAME:install_success_but_test_fails---------------------"
echo "$PACKAGE_URL $PACKAGE_NAME"
echo "$PACKAGE_NAME | $PACKAGE_URL | $PACKAGE_VERSION | $OS_NAME | GitHub | Fail | Install_success_but_test_Fails"
exit 0
else
echo "------------------$PACKAGE_NAME:install_&_test_both_success-------------------------"
echo "$PACKAGE_URL $PACKAGE_NAME"
echo "$PACKAGE_NAME | $PACKAGE_URL | $PACKAGE_VERSION | $OS_NAME | GitHub | Pass | Both_Install_and_Test_Success"
exit 0
fi | ppc64le/build-scripts | r/right-align/right-align_rhel_8.3.sh | Shell | apache-2.0 | 3,074 |
rm GeoNames/allCountries.txt
rm GeoNames/cities1000.txt
rm GeoNames/alternativeNames.txt
rm -fr GazIndex/*
rm lib/*
| geoparser/geolocator | geo-locator/uninstall.sh | Shell | apache-2.0 | 117 |
#!/bin/bash
# Copyright 2014-2016 Samsung Research America, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
pushd /js/boehm_hf
pushd libatomic_ops
autoreconf -vif
popd
autoreconf -vif
automake --add-missing
./configure --prefix=/js/gc --enable-single-obj-compilation
make
make install
| csgordon/SJS | sjsc/phonegc.sh | Shell | apache-2.0 | 788 |
#!/bin/bash
APP_NAME=resp_q2rest_db
export APP_NAME
destinationUrl=http://localhost:8181/carrierInquiries/
export destinationUrl
jms_qcfName=jms/myConnectionFactory
export jms_qcfName
jms_providerUrl=t3://localhost:8001
export jms_providerUrl
jms_icfName=weblogic.jndi.WLInitialContextFactory
export jms_icfName
jms_sourceQueueName=jms/percyvegaRespQueue
export jms_sourceQueueName
| percyvega/resp_q2rest_db | manage/setVars_dev.sh | Shell | apache-2.0 | 399 |
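Since the script only exports environment variables, it must be sourced (not executed) for the settings to reach the calling shell, e.g.:
. ./setVars_dev.sh   # or: source ./setVars_dev.sh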
#!/usr/bin/env bash
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
source "${END_TO_END_DIR}"/test-scripts/common.sh
#######################################
# Prints the given description, runs the given test and prints how long the execution took.
# Arguments:
# $1: description of the test
# $2: command to execute
#######################################
function run_test {
description="$1"
command="$2"
printf "\n==============================================================================\n"
printf "Running '${description}'\n"
printf "==============================================================================\n"
# used to randomize created directories
export TEST_DATA_DIR=$TEST_INFRA_DIR/temp-test-directory-$(date +%S%N)
echo "TEST_DATA_DIR: $TEST_DATA_DIR"
backup_config
start_timer
${command}
exit_code="$?"
time_elapsed=$(end_timer)
check_logs_for_errors
check_logs_for_exceptions
check_logs_for_non_empty_out_files
# Investigate exit_code for failures of test executable as well as EXIT_CODE for failures of the test.
# Do not clean up if either fails.
if [[ ${exit_code} == 0 ]]; then
if [[ ${EXIT_CODE} != 0 ]]; then
printf "\n[FAIL] '${description}' failed after ${time_elapsed}! Test exited with exit code 0 but the logs contained errors, exceptions or non-empty .out files\n\n"
exit_code=1
else
printf "\n[PASS] '${description}' passed after ${time_elapsed}! Test exited with exit code 0.\n\n"
fi
else
if [[ ${EXIT_CODE} != 0 ]]; then
printf "\n[FAIL] '${description}' failed after ${time_elapsed}! Test exited with exit code ${exit_code} and the logs contained errors, exceptions or non-empty .out files\n\n"
else
printf "\n[FAIL] '${description}' failed after ${time_elapsed}! Test exited with exit code ${exit_code}\n\n"
fi
fi
if [[ ${exit_code} == 0 ]]; then
cleanup
else
exit "${exit_code}"
fi
}
# Shuts down cluster and reverts changes to cluster configs
function cleanup_proc {
shutdown_all
revert_default_config
}
# Cleans up all temporary folders and files
function cleanup_tmp_files {
clean_log_files
rm -rf ${TEST_DATA_DIR} 2> /dev/null
echo "Deleted ${TEST_DATA_DIR}"
}
# Shuts down the cluster and cleans up all temporary folders and files.
function cleanup {
cleanup_proc
cleanup_tmp_files
}
trap cleanup SIGINT
trap cleanup_proc EXIT
| yew1eb/flink | flink-end-to-end-tests/test-scripts/test-runner-common.sh | Shell | apache-2.0 | 3,437 |
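A test suite script would drive run_test roughly like this (test description and script path are hypothetical):
source "${END_TO_END_DIR}/test-scripts/test-runner-common.sh"
run_test "Resuming from a savepoint" "${END_TO_END_DIR}/test-scripts/test_resume_savepoint.sh"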
#!/bin/bash -e
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This test checks that the extractor handles transcripts.
# It should be run from the Kythe root.
TEST_NAME="test_extract_transcript"
. ./kythe/cxx/extractor/testdata/test_common.sh
. ./kythe/cxx/extractor/testdata/skip_functions.sh
KYTHE_OUTPUT_DIRECTORY="${OUT_DIR}" \
"${EXTRACTOR}" --with_executable "/usr/bin/g++" \
-I./kythe/cxx/extractor/testdata \
./kythe/cxx/extractor/testdata/transcript_main.cc
[[ $(ls -1 "${OUT_DIR}"/*.kindex | wc -l) -eq 1 ]]
INDEX_PATH=$(ls -1 "${OUT_DIR}"/*.kindex)
"${KINDEX_TOOL}" -canonicalize_hashes -suppress_details -explode "${INDEX_PATH}"
# Remove lines that will change depending on the machine the test is run on.
skip_inplace "-target" 1 "${INDEX_PATH}_UNIT"
skip_inplace "signature" 0 "${INDEX_PATH}_UNIT"
sed "s|TEST_CWD|${PWD}/|" "${BASE_DIR}/transcript_main.UNIT" | \
skip "-target" 1 |
skip "signature" 0 |
diff - "${INDEX_PATH}_UNIT"
| legrosbuffle/kythe | kythe/cxx/extractor/testdata/test_extract_transcript.sh | Shell | apache-2.0 | 1,522 |
pkg_name=compositeproto
pkg_origin=core
pkg_version=0.4
pkg_maintainer="The Habitat Maintainers <[email protected]>"
pkg_description="X11 Composite extension header files"
pkg_upstream_url="https://www.x.org/"
pkg_license=('MIT')
pkg_source="https://www.x.org/releases/individual/proto/${pkg_name}-${pkg_version}.tar.bz2"
pkg_shasum="6013d1ca63b2b7540f6f99977090812b899852acfbd9df123b5ebaa911e30003"
pkg_build_deps=(
lilian/gcc
lilian/make
)
pkg_include_dirs=(include)
pkg_pconfig_dirs=(lib/pkgconfig)
| be-plans/be | compositeproto/plan.sh | Shell | apache-2.0 | 506 |
#!/bin/bash
#
# RPM build script
#
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# Source all our reusable functionality, argument is the location of this script.
. "$SCRIPT_DIR/../../admin-scripts/rpm-functions.sh" "$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
declare -A ARGS
while [ $# -gt 0 ]; do
case "$1" in
*) NAME="${1:2}"; shift; ARGS[$NAME]="$1" ;;
esac
shift
done
# Artifact settings
ARTIFACT_01_URL=$LOCAL_JENKINS/job/${ARGS[job]}/lastSuccessfulBuild/artifact/deploy/target/geowave-accumulo.jar
ARTIFACT_02_URL=$LOCAL_JENKINS/job/${ARGS[job]}/lastSuccessfulBuild/artifact/deploy/target/geowave-geoserver.jar
ARTIFACT_03_URL=$LOCAL_JENKINS/job/${ARGS[job]}/lastSuccessfulBuild/artifact/deploy/target/jace-linux-amd64-debug.tar.gz
ARTIFACT_04_URL=$LOCAL_JENKINS/job/${ARGS[job]}/lastSuccessfulBuild/artifact/deploy/target/jace-linux-amd64-release.tar.gz
ARTIFACT_05_URL=$LOCAL_JENKINS/job/${ARGS[job]}/lastSuccessfulBuild/artifact/deploy/target/jace-source.tar.gz
ARTIFACT_06_URL=$LOCAL_JENKINS/job/${ARGS[job]}/lastSuccessfulBuild/artifact/deploy/target/geowave-tools.jar
ARTIFACT_07_URL=$LOCAL_JENKINS/job/${ARGS[job]}/lastSuccessfulBuild/artifact/deploy/target/plugins.tar.gz
ARTIFACT_08_URL=$LOCAL_JENKINS/job/${ARGS[job]}/lastSuccessfulBuild/artifact/target/site.tar.gz
ARTIFACT_09_URL=$LOCAL_JENKINS/job/${ARGS[job]}/lastSuccessfulBuild/artifact/deploy/target/puppet-scripts.tar.gz
ARTIFACT_10_URL=$LOCAL_JENKINS/job/${ARGS[job]}/lastSuccessfulBuild/artifact/docs/target/manpages.tar.gz
ARTIFACT_11_URL=$LOCAL_JENKINS/job/${ARGS[job]}/lastSuccessfulBuild/artifact/deploy/target/geowave-analytic-mapreduce.jar
ARTIFACT_12_URL=$LOCAL_JENKINS/userContent/geoserver/${ARGS[geoserver]}
RPM_ARCH=noarch
GEOWAVE_VERSION=$(parseVersion)
case ${ARGS[command]} in
build) rpmbuild \
--define "_topdir $(pwd)" \
--define "_version $GEOWAVE_VERSION" \
--define "_vendor_version ${ARGS[vendor-version]}" \
--define "_priority $(parsePriorityFromVersion $GEOWAVE_VERSION)" \
$(buildArg "${ARGS[buildarg]}") SPECS/*.spec ;;
clean) clean ;;
update)
update_artifact $ARTIFACT_01_URL;
update_artifact $ARTIFACT_02_URL;
update_artifact $ARTIFACT_03_URL;
update_artifact $ARTIFACT_04_URL;
update_artifact $ARTIFACT_05_URL;
update_artifact $ARTIFACT_06_URL;
update_artifact $ARTIFACT_07_URL;
update_artifact $ARTIFACT_08_URL;
update_artifact $ARTIFACT_09_URL;
update_artifact $ARTIFACT_10_URL;
update_artifact $ARTIFACT_11_URL;
update_artifact $ARTIFACT_12_URL geoserver.zip; ;;
*) about ;;
esac
| mses-bly/geowave | deploy/packaging/rpm/centos/6/rpm.sh | Shell | apache-2.0 | 2,734 |
#!/bin/bash
echo '##########################################################################'
echo '##### About to run install-vim-puppet-plugins.sh script ##################'
echo '##########################################################################'
sudo yum install -y epel-release || exit 1
sudo yum install -y vim || exit 1
sudo yum install -y ShellCheck || exit 1
# Some useful plugins: http://vimawesome.com/
## The following gems are needed:
gem install puppet-lint || exit 1 # http://puppet-lint.com/
gem install puppet-syntax || exit 1
mkdir -p ~/.vim/autoload ~/.vim/bundle || exit 1
curl -LSso ~/.vim/autoload/pathogen.vim https://tpo.pe/pathogen.vim || exit 1
cd ~/.vim/bundle || exit 1
echo "about to install syntastic"
git clone https://github.com/scrooloose/syntastic.git || exit 1
echo "about to install vim-puppet"
git clone https://github.com/rodjek/vim-puppet.git || exit 1
echo "about to install tabular"
git clone https://github.com/godlygeek/tabular.git || exit 1
#git clone https://github.com/scrooloose/nerdtree.git || exit 1
# vim snippets and shipmate:
git clone https://github.com/tomtom/tlib_vim.git || exit 1
git clone https://github.com/MarcWeber/vim-addon-mw-utils.git || exit 1
git clone https://github.com/garbas/vim-snipmate.git || exit 1
git clone https://github.com/honza/vim-snippets.git || exit 1
echo "execute pathogen#infect()" > ~/.vimrc || exit 1
echo "syntax on" >> ~/.vimrc || exit 1
echo "filetype plugin indent on" >> ~/.vimrc || exit 1
echo "filetype on" >> ~/.vimrc || exit 1
echo "set statusline+=%#warningmsg#" >> ~/.vimrc || exit 1
echo "set statusline+=%{SyntasticStatuslineFlag()}" >> ~/.vimrc || exit 1
echo "set statusline+=%*" >> ~/.vimrc || exit 1
echo "let g:syntastic_always_populate_loc_list = 1" >> ~/.vimrc || exit 1
echo "let g:syntastic_auto_loc_list = 1" >> ~/.vimrc || exit 1
echo "let g:syntastic_check_on_open = 1" >> ~/.vimrc || exit 1
echo "let g:syntastic_check_on_wq = 1" >> ~/.vimrc || exit 1
# http://vim.wikia.com/wiki/Indenting_source_code
echo "set expandtab" >> ~/.vimrc || exit 1
echo "set shiftwidth=2" >> ~/.vimrc || exit 1
echo "set softtabstop=2" >> ~/.vimrc || exit 1
# In vim, to automatically reindent, do "gg=G" while in vim's navigation mode.
echo 'PATH=$PATH:/home/vagrant/bin' >> /home/vagrant/.bashrc || exit 1 # this is to get puppet lint to work.
# here's some extra configurations to make vim easier to use:
cat /vagrant/files/.vimrc >> ~/.vimrc || exit 1
echo "--no-80chars-check" >> ~/.puppet-lint.rc || exit 1 # http://stackoverflow.com/questions/29206887/puppet-lint-configuration-file
# https://github.com/rodjek/puppet-lint#puppet-lint-1
echo "--no-140chars-check" >> ~/.puppet-lint.rc || exit 1
| Sher-Chowdhury/vagrant-puppetmaster4 | scripts/install-vim-puppet-plugins.sh | Shell | apache-2.0 | 3,030 |
# ----------------------------------------------------------------------------
#
# Package : openapi-schema-validator
# Version : 0.1.4 & 0.1.5
# Source repo : https://github.com/p1c2u/openapi-schema-validator
# Tested on : UBI 8.3
# Script License : Apache License, Version 2 or later
# Passing Arguments : 1. Version of package
# Maintainer : Arumugam N S <[email protected]> / Priya Seth<[email protected]>
#
# Disclaimer : This script has been tested in non-root mode on given
# ========== platform using the mentioned version of the package.
# It may not work as expected with newer versions of the
# package and/or distribution. In such case, please
# contact "Maintainer" of this script.
#
# ----------------------------------------------------------------------------
#!/bin/bash
if [ -z "$1" ]; then
export VERSION=master
else
export VERSION=$1
fi
if [ -d "openapi-schema-validator" ] ; then
rm -rf openapi-schema-validator
fi
# Dependency installation
sudo dnf install python36 -y
sudo dnf install -y git python3-devel
pip3 install codecov
# Download the repos
git clone https://github.com/p1c2u/openapi-schema-validator
# Build and Test openapi-schema-validator
cd openapi-schema-validator
git checkout $VERSION
ret=$?
if [ $ret -eq 0 ] ; then
echo "Version $VERSION found; checked out"
else
echo "Version $VERSION not found"
exit 1
fi
#Build and test
pip3 install -r requirements.txt
pip3 install -r requirements_dev.txt
pip3 install -e .
python3.6 setup.py test
ret=$?
if [ $ret -ne 0 ] ; then
echo "Build & Test failed for python 3.6 environment"
else
echo "Build & Test Success for python 3.6 environment"
fi
| ppc64le/build-scripts | o/openapi-schema-validator/openapi-schema-validator_ubi_8.3.sh | Shell | apache-2.0 | 1,838 |
#!/bin/bash
#
# Copyright 2008 The open-vcdiff Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This script tests the correctness of the vcdiff command-line executable.
# If you add a new test here, please add the same test to the Windows script
# vsprojects/vcdiff_test.bat.
#
# The caller should set the environment variable $srcdir to the root directory
# of the open-vcdiff package. ($srcdir is automatically provided by Automake
# when this script is run by "make check".)
# Find input files
VCDIFF=./vcdiff
# These options are only needed for the encoder;
# the decoder will recognize the interleaved and checksum formats
# without needing to specify any options.
VCD_OPTIONS="-interleaved -checksum"
DICTIONARY_FILE=$srcdir/testdata/configure.ac.v0.1
TARGET_FILE=$srcdir/testdata/configure.ac.v0.2
TEST_TMPDIR=${TMPDIR-/tmp}
DELTA_FILE=$TEST_TMPDIR/configure.ac.vcdiff
OUTPUT_TARGET_FILE=$TEST_TMPDIR/configure.ac.output
MALICIOUS_ENCODING=$srcdir/testdata/allocates_4gb.vcdiff
# vcdiff with no arguments shows usage information & error result
$VCDIFF \
&& { echo "vcdiff with no arguments should fail, but succeeded"; \
exit 1; }
echo "Test 1 ok";
# vcdiff with three arguments but without "encode" or "decode"
# shows usage information & error result
$VCDIFF $VCD_OPTIONS \
-dictionary $DICTIONARY_FILE -target $TARGET_FILE -delta $DELTA_FILE \
&& { echo "vcdiff without operation argument should fail, but succeeded"; \
exit 1; }
echo "Test 2 ok";
# vcdiff with all three arguments. Verify that output file matches target file
$VCDIFF $VCD_OPTIONS \
encode -dictionary $DICTIONARY_FILE \
-target $TARGET_FILE \
-delta $DELTA_FILE \
|| { echo "Encode with three arguments failed"; \
exit 1; }
$VCDIFF decode -dictionary $DICTIONARY_FILE \
-delta $DELTA_FILE \
-target $OUTPUT_TARGET_FILE \
|| { echo "Decode with three arguments failed"; \
exit 1; }
cmp $TARGET_FILE $OUTPUT_TARGET_FILE \
|| { echo "Decoded target does not match original"; \
exit 1; }
echo "Test 3 ok";
rm $DELTA_FILE
rm $OUTPUT_TARGET_FILE
# vcdiff using stdin/stdout. Verify that output file matches target file
{ $VCDIFF $VCD_OPTIONS \
encode -dictionary $DICTIONARY_FILE \
< $TARGET_FILE \
> $DELTA_FILE; } \
|| { echo "Encode using stdin/stdout failed"; \
exit 1; }
{ $VCDIFF decode -dictionary $DICTIONARY_FILE \
< $DELTA_FILE \
> $OUTPUT_TARGET_FILE; } \
|| { echo "Decode using stdin/stdout failed"; \
exit 1; }
cmp $TARGET_FILE $OUTPUT_TARGET_FILE \
|| { echo "Decoded target does not match original"; \
exit 1; }
echo "Test 4 ok";
rm $DELTA_FILE
rm $OUTPUT_TARGET_FILE
# vcdiff with mixed stdin/stdout.
{ $VCDIFF $VCD_OPTIONS \
encode -dictionary $DICTIONARY_FILE \
-target $TARGET_FILE \
> $DELTA_FILE; } \
|| { echo "Encode with mixed arguments failed"; \
exit 1; }
{ $VCDIFF decode -dictionary $DICTIONARY_FILE \
-delta $DELTA_FILE \
> $OUTPUT_TARGET_FILE; } \
|| { echo "Decode with mixed arguments failed"; \
exit 1; }
cmp $TARGET_FILE $OUTPUT_TARGET_FILE \
|| { echo "Decoded target does not match original"; \
exit 1; }
echo "Test 5 ok";
rm $DELTA_FILE
rm $OUTPUT_TARGET_FILE
{ $VCDIFF $VCD_OPTIONS \
encode -dictionary $DICTIONARY_FILE \
< $TARGET_FILE \
-delta $DELTA_FILE; } \
|| { echo "Encode with mixed arguments failed"; \
exit 1; }
{ $VCDIFF decode -dictionary $DICTIONARY_FILE \
< $DELTA_FILE \
-target $OUTPUT_TARGET_FILE; } \
|| { echo "Decode with mixed arguments failed"; \
exit 1; }
cmp $TARGET_FILE $OUTPUT_TARGET_FILE \
|| { echo "Decoded target does not match original"; \
exit 1; }
echo "Test 6 ok";
rm $OUTPUT_TARGET_FILE
# Don't remove $DELTA_FILE; use it for the next test
# If using the wrong dictionary, and dictionary is smaller than the original
# dictionary, vcdiff will spot the mistake and return an error. (It can't
# detect the case where the wrong dictionary is larger than the right one.)
$VCDIFF decode -dictionary $TARGET_FILE \
-delta $DELTA_FILE \
-target $OUTPUT_TARGET_FILE \
&& { echo "Decode using larger dictionary should fail, but succeeded"; \
exit 1; }
echo "Test 7 ok";
rm $DELTA_FILE
rm $OUTPUT_TARGET_FILE
# "vcdiff test" with all three arguments.
$VCDIFF $VCD_OPTIONS \
test -dictionary $DICTIONARY_FILE \
-target $TARGET_FILE \
-delta $DELTA_FILE \
|| { echo "vcdiff test with three arguments failed"; \
exit 1; }
echo "Test 8 ok";
rm $DELTA_FILE
# Dictionary file not found.
$VCDIFF $VCD_OPTIONS \
encode -dictionary $TEST_TMPDIR/nonexistent_file \
-target $TARGET_FILE \
-delta $DELTA_FILE \
&& { echo "vcdiff with missing dictionary file should fail, but succeeded"; \
exit 1; }
echo "Test 9 ok";
# Target file not found.
$VCDIFF $VCD_OPTIONS \
encode -dictionary $DICTIONARY_FILE \
-target $TEST_TMPDIR/nonexistent_file \
-delta $DELTA_FILE \
&& { echo "vcdiff with missing target file should fail, but succeeded"; \
exit 1; }
echo "Test 10 ok";
# Delta file not found.
$VCDIFF decode -dictionary $DICTIONARY_FILE \
-delta $TEST_TMPDIR/nonexistent_file \
-target $OUTPUT_TARGET_FILE \
&& { echo "vcdiff with missing delta file should fail, but succeeded"; \
exit 1; }
echo "Test 11 ok";
# Try traversing an infinite loop of symbolic links.
ln -s $TEST_TMPDIR/infinite_loop1 $TEST_TMPDIR/infinite_loop2
ln -s $TEST_TMPDIR/infinite_loop2 $TEST_TMPDIR/infinite_loop1
$VCDIFF $VCD_OPTIONS \
encode -dictionary $TEST_TMPDIR/infinite_loop1 \
-target $TEST_TMPDIR/infinite_loop2 \
-delta $DELTA_FILE \
&& { echo "vcdiff with symbolic link loop should fail, but succeeded"; \
exit 1; }
echo "Test 12 ok";
rm $TEST_TMPDIR/infinite_loop1 $TEST_TMPDIR/infinite_loop2
# Test using -stats flag
$VCDIFF $VCD_OPTIONS \
encode -dictionary $DICTIONARY_FILE \
-target $TARGET_FILE \
-delta $DELTA_FILE \
-stats \
|| { echo "Encode with -stats failed"; \
exit 1; }
$VCDIFF -stats \
decode -dictionary $DICTIONARY_FILE \
-delta $DELTA_FILE \
-target $OUTPUT_TARGET_FILE \
|| { echo "Decode with -stats failed"; \
exit 1; }
cmp $TARGET_FILE $OUTPUT_TARGET_FILE \
|| { echo "Decoded target does not match original"; \
exit 1; }
echo "Test 13 ok";
rm $DELTA_FILE
rm $OUTPUT_TARGET_FILE
# Using /dev/null as dictionary should work, but (because dictionary is empty)
# it will not produce a small delta file.
$VCDIFF $VCD_OPTIONS \
test -dictionary /dev/null \
-target $TARGET_FILE \
-delta $DELTA_FILE \
-stats \
|| { echo "vcdiff test with /dev/null as dictionary failed"; \
exit 1; }
echo "Test 14 ok";
rm $DELTA_FILE
# Using /dev/kmem as dictionary or target should produce an error
# (permission denied, or too large, or special file type)
$VCDIFF $VCD_OPTIONS \
encode -dictionary /dev/kmem \
-target $TARGET_FILE \
-delta $DELTA_FILE \
&& { echo "vcdiff with /dev/kmem as dictionary should fail, but succeeded"; \
exit 1; }
echo "Test 15 ok";
$VCDIFF $VCD_OPTIONS \
encode -dictionary $DICTIONARY_FILE \
-target /dev/kmem \
-delta $DELTA_FILE \
&& { echo "vcdiff with /dev/kmem as target should fail, but succeeded"; \
exit 1; }
echo "Test 16 ok";
# Decode using something that isn't a delta file
$VCDIFF decode -dictionary $DICTIONARY_FILE \
-delta /etc/fstab \
-target $OUTPUT_TARGET_FILE \
&& { echo "vcdiff with invalid delta file should fail, but succeeded"; \
exit 1; }
echo "Test 17 ok";
$VCDIFF $VCD_OPTIONS \
encode -target $TARGET_FILE \
-delta $DELTA_FILE \
-dictionary \
&& { echo "-dictionary option with no file name should fail, but succeeded"; \
exit 1; }
echo "Test 18 ok";
$VCDIFF $VCD_OPTIONS \
encode -dictionary $DICTIONARY_FILE \
-delta $DELTA_FILE \
-target \
&& { echo "-target option with no file name should fail, but succeeded"; \
exit 1; }
echo "Test 19 ok";
$VCDIFF $VCD_OPTIONS \
encode -dictionary $DICTIONARY_FILE \
-target $TARGET_FILE \
-delta \
&& { echo "-delta option with no file name should fail, but succeeded"; \
exit 1; }
echo "Test 20 ok";
$VCDIFF $VCD_OPTIONS \
encode -dictionary $DICTIONARY_FILE \
-target $TARGET_FILE \
-delta $DELTA_FILE \
-buffersize \
&& { echo "-buffersize option with no argument should fail, but succeeded"; \
exit 1; }
echo "Test 21 ok";
# Using -buffersize=1 should still work.
$VCDIFF $VCD_OPTIONS \
test -dictionary $DICTIONARY_FILE \
-target $TARGET_FILE \
-delta $DELTA_FILE \
-buffersize 1 \
-stats \
|| { echo "vcdiff test with -buffersize=1 failed"; \
exit 1; }
echo "Test 22 ok";
rm $DELTA_FILE
# Using -buffersize=1 with stdin/stdout means that vcdiff
# will create a separate target window for each byte read.
{ $VCDIFF encode -dictionary $DICTIONARY_FILE \
-buffersize 1 \
-stats \
< $TARGET_FILE \
> $DELTA_FILE; } \
|| { echo "Encode using stdin/stdout with -buffersize=1 failed"; \
exit 1; }
{ $VCDIFF decode -dictionary $DICTIONARY_FILE \
-buffersize 1 \
-stats \
< $DELTA_FILE \
> $OUTPUT_TARGET_FILE; } \
|| { echo "Decode using stdin/stdout with -buffersize=1 failed"; \
exit 1; }
cmp $TARGET_FILE $OUTPUT_TARGET_FILE \
|| { echo "Decoded target does not match original with -buffersize=1"; \
exit 1; }
echo "Test 23 ok";
rm $DELTA_FILE
rm $OUTPUT_TARGET_FILE
# Using -buffersize=0 should fail.
$VCDIFF $VCD_OPTIONS \
test -dictionary $DICTIONARY_FILE \
-target $TARGET_FILE \
-delta $DELTA_FILE \
-buffersize 0 \
&& { echo "vcdiff test with -buffersize=0 should fail, but succeeded"; \
exit 1; }
echo "Test 24 ok";
rm $DELTA_FILE
# Using -buffersize=128M (larger than default maximum) should still work.
$VCDIFF $VCD_OPTIONS \
test -dictionary $DICTIONARY_FILE \
-target $TARGET_FILE \
-delta $DELTA_FILE \
-buffersize 134217728 \
-stats \
|| { echo "vcdiff test with -buffersize=128M failed"; \
exit 1; }
echo "Test 25 ok";
rm $DELTA_FILE
$VCDIFF $VCD_OPTIONS \
test -dictionary $DICTIONARY_FILE \
-target $TARGET_FILE \
-delta $DELTA_FILE \
-froobish \
&& { echo "vdiff test with unrecognized option should fail, but succeeded"; \
exit 1; }
echo "Test 26 ok";
$VCDIFF $VCD_OPTIONS \
encode -target $TARGET_FILE \
-delta $DELTA_FILE \
&& { echo "encode with no dictionary option should fail, but succeeded"; \
exit 1; }
echo "Test 27 ok";
$VCDIFF decode -target $TARGET_FILE \
-delta $DELTA_FILE \
&& { echo "decode with no dictionary option should fail, but succeeded"; \
exit 1; }
echo "Test 28 ok";
# Remove -interleaved and -checksum options
{ $VCDIFF encode -dictionary $DICTIONARY_FILE \
< $TARGET_FILE \
> $DELTA_FILE; } \
|| { echo "Encode without -interleaved and -checksum options failed"; \
exit 1; }
{ $VCDIFF decode -dictionary $DICTIONARY_FILE \
< $DELTA_FILE \
> $OUTPUT_TARGET_FILE; } \
|| { echo "Decode non-interleaved output failed"; \
exit 1; }
cmp $TARGET_FILE $OUTPUT_TARGET_FILE \
|| { echo "Decoded target does not match original with -interleaved"; \
exit 1; }
echo "Test 29 ok";
# -target_matches option
{ $VCDIFF encode -dictionary $DICTIONARY_FILE \
-target_matches \
-stats \
< $TARGET_FILE \
> $DELTA_FILE; } \
|| { echo "Encode with -target_matches option failed"; \
exit 1; }
# The decode operation ignores the -target_matches option.
{ $VCDIFF decode -dictionary $DICTIONARY_FILE \
< $DELTA_FILE \
> $OUTPUT_TARGET_FILE; } \
|| { echo "Decode output failed with -target_matches"; \
exit 1; }
cmp $TARGET_FILE $OUTPUT_TARGET_FILE \
|| { echo "Decoded target does not match original with -target_matches"; \
exit 1; }
echo "Test 30 ok";
rm $DELTA_FILE
rm $OUTPUT_TARGET_FILE
$VCDIFF $VCD_OPTIONS \
dencode -dictionary $DICTIONARY_FILE \
-target $TARGET_FILE \
-delta $DELTA_FILE \
&& { echo "vdiff with unrecognized action should fail, but succeeded"; \
exit 1; }
echo "Test 31 ok";
$VCDIFF $VCD_OPTIONS \
test -dictionary $DICTIONARY_FILE \
-target $TARGET_FILE \
&& { echo "vdiff test without delta option should fail, but succeeded"; \
exit 1; }
echo "Test 32 ok";
$VCDIFF $VCD_OPTIONS \
test -dictionary $DICTIONARY_FILE \
-delta $DELTA_FILE \
&& { echo "vdiff test without target option should fail, but succeeded"; \
exit 1; }
echo "Test 33 ok";
# open-vcdiff bug 8 (https://github.com/google/open-vcdiff/issues/8)
# A malicious encoding that tries to produce a 4GB target file made up of 64
# windows, each window having a size of 64MB.
# Limit memory usage to 256MB per process, so the test doesn't take forever
# to run out of memory.
OLD_ULIMIT=$(ulimit -v)
echo "Old ulimit: $OLD_ULIMIT"
ulimit -S -v 262144
echo "New ulimit: $(ulimit -v)"
$VCDIFF $VCD_OPTIONS \
decode -dictionary $DICTIONARY_FILE \
-delta $MALICIOUS_ENCODING \
-target /dev/null \
-max_target_file_size=65536 \
&& { echo "Decoding malicious file should fail, but succeeded"; \
exit 1; }
echo "Test 34 ok";
$VCDIFF $VCD_OPTIONS \
decode -dictionary $DICTIONARY_FILE \
-delta $MALICIOUS_ENCODING \
-target /dev/null \
-max_target_window_size=65536 \
&& { echo "Decoding malicious file should fail, but succeeded"; \
exit 1; }
echo "Test 35 ok";
ulimit -S -v $OLD_ULIMIT
# Decoding a small target with the -max_target_file_size option should succeed.
$VCDIFF $VCD_OPTIONS \
test -dictionary $DICTIONARY_FILE \
-target $TARGET_FILE \
-delta $DELTA_FILE \
-max_target_file_size=65536 \
|| { echo "vcdiff test with -max_target_file_size failed"; \
exit 1; }
echo "Test 36 ok";
# Decoding a small target with -max_target_window_size option should succeed.
$VCDIFF $VCD_OPTIONS \
test -dictionary $DICTIONARY_FILE \
-target $TARGET_FILE \
-delta $DELTA_FILE \
-max_target_window_size=65536 \
|| { echo "vcdiff test with -max_target_window_size failed"; \
exit 1; }
echo "Test 37 ok";
rm $DELTA_FILE
# Test using -allow_vcd_target=false
$VCDIFF $VCD_OPTIONS \
encode -dictionary $DICTIONARY_FILE \
-target $TARGET_FILE \
-delta $DELTA_FILE \
-allow_vcd_target=false \
|| { echo "Encode with -allow_vcd_target=false failed"; \
exit 1; }
$VCDIFF $VCD_OPTIONS \
decode -dictionary $DICTIONARY_FILE \
-delta $DELTA_FILE \
-target $OUTPUT_TARGET_FILE \
-allow_vcd_target=false \
|| { echo "Decode with -allow_vcd_target=false failed"; \
exit 1; }
cmp $TARGET_FILE $OUTPUT_TARGET_FILE \
|| { echo "Decoded target does not match original"; \
exit 1; }
echo "Test 38 ok";
rm $DELTA_FILE
rm $OUTPUT_TARGET_FILE
# Test using -allow_vcd_target=true
$VCDIFF $VCD_OPTIONS \
encode -dictionary $DICTIONARY_FILE \
-target $TARGET_FILE \
-delta $DELTA_FILE \
-allow_vcd_target=true \
|| { echo "Encode with -allow_vcd_target=true failed"; \
exit 1; }
$VCDIFF $VCD_OPTIONS \
decode -dictionary $DICTIONARY_FILE \
-delta $DELTA_FILE \
-target $OUTPUT_TARGET_FILE \
-allow_vcd_target=true \
|| { echo "Decode with -allow_vcd_target=true failed"; \
exit 1; }
cmp $TARGET_FILE $OUTPUT_TARGET_FILE \
|| { echo "Decoded target does not match original"; \
exit 1; }
echo "Test 39 ok";
echo "PASS"
| elly/open-vcdiff | src/vcdiff_test.sh | Shell | apache-2.0 | 17,234 |
#!/bin/bash
#sleep 5 seconds
sleep 5
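#leave HDFS safe mode so the directories below can be created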
hdfs dfsadmin -safemode leave
#create directories
hadoop fs -mkdir /tmp
hadoop fs -mkdir /user/hive
hadoop fs -mkdir /user/hive/warehouse
hadoop fs -chmod g+w /tmp
hadoop fs -chmod g+w /user/hive/warehouse
#start mongod with its configuration file
mongod -f /etc/mongod.conf
| bhlx3lyx7/griffin-base-env | griffin/pre-start.sh | Shell | apache-2.0 | 301 |
#!/bin/bash
#
# Dump svn info to {tgt}
gwt_dir=~/src/google-web-toolkit
trunk_dir=${gwt_dir}/trunk
tgt=upstream.txt
if [ -d ${trunk_dir} ]
then
if svn info ${trunk_dir} | egrep -v '(^Path|UUID|Node|Schedule|Author|URL)' | egrep '[A-Z][a-z]*' > ${tgt}
then
cat -n ${tgt}
wc -l ${tgt}
exit 0
else
echo "Error in 'svn info' in '$(pwd)'"
exit 1
fi
else
echo "Error, directory not found '${trunk_dir}'"
exit 1
fi
| syntelos/gwtcc | upstream.sh | Shell | apache-2.0 | 477 |
#!/bin/sh
set -ex
# For some unknown reason libz is not found in the android docker image, so we
# use this workaround
case $TARGET in
arm-linux-androideabi | armv7-linux-androideabi )
export DEP_Z_ROOT=/android-ndk/arm/sysroot/usr/;;
aarch64-linux-android )
export DEP_Z_ROOT=/android-ndk/arm64/sysroot/usr/;;
i686-linux-android )
export DEP_Z_ROOT=/android-ndk/x86/sysroot/usr/;;
esac
upper_target=$(echo $TARGET | tr '[a-z]' '[A-Z]' | tr '-' '_')
export PATH=/travis-rust/bin:$PATH
export LD_LIBRARY_PATH=/travis-rust/lib:$LD_LIBRARY_PATH
# ==============================================================================
# First up, let's compile OpenSSL
#
# The artifacts that we distribute must all statically be linked to OpenSSL
# because we have no idea what system we're going to be running on eventually.
# The target system may or may not have OpenSSL installed and it also may have
# any one of a number of ABI-incompatible OpenSSL versions installed.
#
# To get around all this we just compile it statically for the rustup *we*
# distribute (this can be changed by others of course).
# ==============================================================================
OPENSSL_VERS=1.0.2k
OPENSSL_SHA256=6b3977c61f2aedf0f96367dcfb5c6e578cf37e7b8d913b4ecb6643c3cb88d8c0
case $TARGET in
x86_64-*-linux-*)
OPENSSL_OS=linux-x86_64
OPENSSL_CC=gcc
OPENSSL_AR=ar
;;
i686-*-linux-*)
OPENSSL_OS=linux-elf
OPENSSL_CC=gcc
OPENSSL_AR=ar
OPENSSL_SETARCH='setarch i386'
OPENSSL_CFLAGS=-m32
;;
arm-linux-androideabi)
OPENSSL_OS=android
OPENSSL_CC=arm-linux-androideabi-gcc
OPENSSL_AR=arm-linux-androideabi-ar
;;
armv7-linux-androideabi)
OPENSSL_OS=android-armv7
OPENSSL_CC=arm-linux-androideabi-gcc
OPENSSL_AR=arm-linux-androideabi-ar
;;
aarch64-linux-android)
OPENSSL_OS=linux-generic64
OPENSSL_CC=aarch64-linux-android-gcc
OPENSSL_AR=aarch64-linux-android-ar
OPENSSL_CFLAGS="-mandroid -fomit-frame-pointer"
;;
i686-linux-android)
OPENSSL_OS=android-x86
OPENSSL_CC=i686-linux-android-gcc
OPENSSL_AR=i686-linux-android-ar
;;
arm-*-linux-gnueabi)
OPENSSL_OS=linux-armv4
OPENSSL_CC=arm-linux-gnueabi-gcc
OPENSSL_AR=arm-linux-gnueabi-ar
;;
arm-*-linux-gnueabihf)
OPENSSL_OS=linux-armv4
OPENSSL_CC=arm-linux-gnueabihf-gcc
OPENSSL_AR=arm-linux-gnueabihf-ar
;;
armv7-*-linux-gnueabihf)
OPENSSL_OS=linux-armv4
OPENSSL_CC=armv7-linux-gnueabihf-gcc
OPENSSL_AR=armv7-linux-gnueabihf-ar
;;
aarch64-*-linux-gnu)
OPENSSL_OS=linux-aarch64
OPENSSL_CC=aarch64-linux-gnu-gcc
OPENSSL_AR=aarch64-linux-gnu-ar
;;
x86_64-*-freebsd)
OPENSSL_OS=BSD-x86_64
OPENSSL_CC=x86_64-unknown-freebsd10-gcc
OPENSSL_AR=x86_64-unknown-freebsd10-ar
;;
x86_64-*-netbsd)
OPENSSL_OS=BSD-x86_64
OPENSSL_CC=x86_64-unknown-netbsd-gcc
OPENSSL_AR=x86_64-unknown-netbsd-ar
;;
powerpc-*-linux-*)
OPENSSL_OS=linux-ppc
OPENSSL_CC=powerpc-linux-gnu-gcc
OPENSSL_AR=powerpc-linux-gnu-ar
;;
powerpc64-*-linux-*)
OPENSSL_OS=linux-ppc64
OPENSSL_CC=powerpc64-linux-gnu-gcc-5
OPENSSL_AR=powerpc64-linux-gnu-ar
OPENSSL_CFLAGS=-m64
;;
powerpc64le-*-linux-*)
OPENSSL_OS=linux-ppc64le
OPENSSL_CC=powerpc64le-linux-gnu-gcc
OPENSSL_AR=powerpc64le-linux-gnu-ar
;;
mips-*-linux-*)
OPENSSL_OS=linux-mips32
OPENSSL_CC=mips-linux-gnu-gcc
OPENSSL_AR=mips-linux-gnu-ar
;;
mipsel-*-linux-*)
OPENSSL_OS=linux-mips32
OPENSSL_CC=mipsel-linux-gnu-gcc
OPENSSL_AR=mipsel-linux-gnu-ar
;;
mips64-*-linux-*)
OPENSSL_OS=linux64-mips64
OPENSSL_CC=mips64-linux-gnuabi64-gcc
OPENSSL_AR=mips64-linux-gnuabi64-ar
;;
mips64el-*-linux-*)
OPENSSL_OS=linux64-mips64
OPENSSL_CC=mips64el-linux-gnuabi64-gcc
OPENSSL_AR=mips64el-linux-gnuabi64-ar
;;
s390x-*-linux-*)
OPENSSL_OS=linux64-s390x
OPENSSL_CC=s390x-linux-gnu-gcc
OPENSSL_AR=s390x-linux-gnu-ar
;;
*)
echo "can't cross compile OpenSSL for $TARGET"
exit 1
;;
esac
mkdir -p target/$TARGET/openssl
install=`pwd`/target/$TARGET/openssl/openssl-install
out=`pwd`/target/$TARGET/openssl/openssl-$OPENSSL_VERS.tar.gz
curl -o $out https://www.openssl.org/source/openssl-$OPENSSL_VERS.tar.gz
sha256sum $out > $out.sha256
test $OPENSSL_SHA256 = `cut -d ' ' -f 1 $out.sha256`
tar xf $out -C target/$TARGET/openssl
(cd target/$TARGET/openssl/openssl-$OPENSSL_VERS && \
CC=$OPENSSL_CC \
AR=$OPENSSL_AR \
    $OPENSSL_SETARCH ./Configure --prefix=$install no-dso $OPENSSL_OS $OPENSSL_CFLAGS -fPIC && \
make -j4 && \
make install)
# Variables to the openssl-sys crate to link statically against the OpenSSL we
# just compiled above
export OPENSSL_STATIC=1
export OPENSSL_DIR=$install
# ==============================================================================
# Actually delegate to the test script itself
# ==============================================================================
# Our only writable directory is `target`, so place all output there and go
# ahead and throw the home directory in there as well.
export CARGO_TARGET_DIR=`pwd`/target
export CARGO_HOME=`pwd`/target/cargo-home
export CARGO_TARGET_${upper_target}_LINKER=$OPENSSL_CC
exec sh ci/run.sh
| polonez/rustup.rs | ci/run-docker.sh | Shell | apache-2.0 | 5,331 |
#!/bin/sh
set -e
/usr/local/bin/dockerd \
--host=unix:///var/run/docker.sock \
--host=tcp://0.0.0.0:2375 \
  --storage-driver=overlay >/var/log/docker.log 2>&1 &
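# wait (up to $d_timeout seconds) for the daemon to start answering `docker info`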
tries=0
d_timeout=60
until docker info >/dev/null 2>&1
do
if [ "$tries" -gt "$d_timeout" ]; then
cat /var/log/docker.log
echo 'Timed out trying to connect to internal docker host.' >&2
exit 1
fi
tries=$(( $tries + 1 ))
sleep 1
done
eval "$@" | NitorCreations/nitor-deploy-tools | docker-base/dockerd-entrypoint.sh | Shell | apache-2.0 | 435 |
#!/usr/bin/env bash
# TODO Fix https://access.redhat.com/solutions/2333821
find /root -type f -delete
yum reinstall rootfiles -y
cat <<< '
127.0.0.1 localhost.localdomain localhost
127.0.0.1 localhost4.localdomain4 localhost4
::1 localhost.localdomain localhost
::1 localhost6.localdomain6 localhost6
' > /etc/hosts
yum -y remove bison
yum -y remove flex
yum -y remove gcc
yum -y remove gcc-c++
yum -y remove kernel-devel
yum -y remove kernel-headers
yum -y remove cloog-ppl
yum -y remove cpp
yum -y clean all
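# Drop persistent net rules; an empty directory with the same name keeps udev from regenerating them.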
rm -rf /etc/udev/rules.d/70-persistent-net.rules
mkdir /etc/udev/rules.d/70-persistent-net.rules
rm /lib/udev/rules.d/75-persistent-net-generator.rules
rm -rf /dev/.udev/
for ndev in /etc/sysconfig/network-scripts/ifcfg-*; do
if [ "$(basename "${ndev}")" != "ifcfg-lo" ]; then
sed -i '/^HWADDR/d' "$ndev";
sed -i '/^UUID/d' "$ndev";
fi
done
rm -rf /etc/ssh/*_host_*
rm -rf /var/run/console/*
rm -rf /var/run/faillock/*
rm -rf /var/run/sepermit/*
if [ -d /var/log/account ]; then
rm -f /var/log/account/pacct*
touch /var/log/account/pacct
fi
rm -rf /var/spool/abrt/*
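# Truncate machine IDs so cloned images generate fresh ones on first boot.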
if [ -f /etc/machine-id ]; then
rm -f /etc/machine-id
touch /etc/machine-id
fi
if [ -f /var/lib/dbus/machine-id ]; then
rm -f /var/lib/dbus/machine-id
touch /var/lib/dbus/machine-id
fi
rm -rf /var/spool/mail/*
rm -rf /var/mail/*
find /var/log -type f -exec truncate -s 0 {} \;
find /tmp -type f -delete
find /var/tmp -type f -delete
find /var/cache/yum -type f -delete
rm -f /var/lib/dhclient/*
> /etc/resolv.conf
rm -f /var/lib/yum/uuid
rm -f /var/lib/rpm/__db*
rpm --rebuilddb
rm -rf /var/lib/cloud/sem/* /var/lib/cloud/instance /var/lib/cloud/instances/*
| GauchoConsulting/machine-image-catalog | scripts/cleanup.sh | Shell | apache-2.0 | 1,675 |
#!/bin/bash -ex
source /img_build/buildconfig
curl -s https://packagecloud.io/install/repositories/datawire/${DATAWIRE_REPOSITORY}/script.deb.sh | bash
apt-get install -y datawire-proton datawire-directory datawire-sherlock datawire-watson python-pip
yes | pip install docopt flask
| datawire/bakerstreet-docker-template | image/install.sh | Shell | apache-2.0 | 284 |
cd /home/vhuang/src/gold/binutils-2.21
armconf
cp /home/vhuang/src/gold/binutils-2.21/gold/ld-new /opt/medial/local/bin/ld
cd ~/src
cd git-1.7.1
armconf
echo "^c to break, anykey to build svn..."
read readline
cd subversion-1.6.12dfsg
make distclean
./go.sh
cd ..
cd ~/src/gold/binutils-2.21/
make install
cd ~/src/bzlib/bzip2-1.0.5
armconf
| bayvictor/distributed-polling-system | bin/insbox_max_init_sbox_src.sh | Shell | apache-2.0 | 352 |
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
set -evx
cd contrib/clojure-package
lein test
| navrasio/mxnet | contrib/clojure-package/vi ci-test.sh | Shell | apache-2.0 | 847 |
#/bin/bash
###################################
###################################
###################################
#PLZ RUN THE AUTO SCRIPT AS ROOT
###################################
#TO DO
#1.log file integreation
###################################
help_message () {
echo "Try '$0 --help' for more information."
exit 1
}
status_apache () {
testrun "$XAMPP_ROOT/logs/httpd.pid" httpd
if [ $? -eq 1 ] ; then
return 1
else
return 0
fi
}
status_mysql () {
testrun "$XAMPP_ROOT/var/mysql/$(hostname).pid" mysqld
if [ $? -eq 1 ] ; then
return 1
else
return 0
fi
}
testrun () {
if test -f $1 ; then
pid=`cat $1`
if ps ax 2>/dev/null | egrep "^ *$pid.*$2" > /dev/null; then
return 1
else
return 0
fi
else
return 0
fi
}
check_perlmodule() {
perl -e "use $1" 2>/dev/null
if [ $? -ne 0 ] ; then
return 0
fi
return 1
}
#checking the number of arguments
if test $# -ne 1; then
help_message
fi
#checking the provided argument
if test "$1" = "--help" ; then
echo "Usage : $0 devbox\n or : $0 prodbox"
exit 1
fi
if [ "$(id -u)" != "0" ]; then
echo "This script must be run as root" 1>&2
exit 1
fi
BOX_ENV=$1
XAMPP_ROOT=""
#checking the provided argument || can remove the above if
case $BOX_ENV in
"devbox")
XAMPP_ROOT="/opt/lampp"
;;
"prodbox")
;;
*) help_message ;;
esac
###############################
#test for service status
###############################
#check the status of apache
status_apache
returnVal=$?
if [ $returnVal -eq 1 ] ; then
printf "Status : apache is up\n"
else
printf "Status : apache server is not up\n"
exit 1
fi
#check the status of mysql
status_mysql
returnVal=$?
if [ $returnVal -eq 1 ] ; then
printf "Status : mysql service is up\n"
else
printf "Status : mysql service is not up\n"
exit 1
fi
echo "Status : Process Id of the script : $$"
###############################
#pre-requisite modules for perl
###############################
PERL_MODULE="DBI DBD::mysql Smart::Comments"
#check the status of perl module
for modName in $PERL_MODULE ; do
check_perlmodule $modName
returnVal=$?
if [ $returnVal -ne 1 ] ; then
echo "Error msg : perl module \`$modName\` is not installed ";
exit 1
else
echo "Status : perl module \`$modName\` is installed ";
fi
done
###############################
#table and stored procedure/function creation
###############################
#running the db setup
echo "Status : creating the table"
perl /opt/lampp/htdocs/PostAScholarship/automation/perl/script/dbAutomation.pl >/dev/null
if [ $? -ne 0 ] ; then
echo "Error msg : Error occured for the \`dbAutomation.pl\` script"
fi
# ###############################
# #parsing the sql file for comments
# ###############################
sqlFileName="/opt/lampp/htdocs/PostAScholarship/mysql/plsql/admin/StoreProc.sql"
tempSqlFileName="/tmp/$$.sql"
# echo "$tempSqlFileName"
if [ ! -e $sqlFileName ] ; then
echo "Error msg : file $sqlFileName not present"
exit 1
fi # <-- this bug wasted an hour of my time
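#strip full-line comments (lines starting with --) into a temp sql file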
while IFS= read -r line ; do
echo "$line"
echo "$line" | grep -v "^--" >> $tempSqlFileName
done <"$sqlFileName"
if [ ! -e $tempSqlFileName ] ; then
echo "Error : file $tempSqlFileName not present"
exit 1
fi
echo "Status : $tempSqlFileName has been created"
/opt/lampp/bin/mysql --user=root --password="" --database="postascholarship_db" < $tempSqlFileName > /dev/null
if [ $? -ne 0 ] ; then
echo "Error msg : Error occured for the \`$tempSqlFileName\` script"
else
echo "Status : creating the procedure and function from $tempSqlFileName"
fi
if false ; then
tagSqlFileName="/opt/lampp/htdocs/PostAScholarship/mysql/Tag.sql"
echo "Status : inserting tag into db"
if [ ! -e $tagSqlFileName ] ; then
echo "Error : file cant be found \`$tagSqlFileName\`"
exit 1
fi
/opt/lampp/bin/mysql --user=root --password="" --database="postascholarship_db" < $tagSqlFileName
if [ $? -ne 0 ] ; then
echo "Error msg : Error occured for the \`$tagSqlFileName\` script"
else
echo "Status : inserted the tag from $tagSqlFileName"
fi
else
tagSqlFileName="/opt/lampp/htdocs/PostAScholarship/mysql/plsql/admin/insertCallTag.sql"
echo "Status : inserting tag into db"
if [ ! -e $tagSqlFileName ] ; then
echo "Error : file cant be found \`$tagSqlFileName\`"
exit 1
fi
/opt/lampp/bin/mysql --user=root --password="" --database="postascholarship_db" < $tagSqlFileName
if [ $? -ne 0 ] ; then
echo "Error msg : Error occured for the \`$tagSqlFileName\` script"
else
echo "Status : inserted the tag from $tagSqlFileName"
fi
fi
badgeSqlFileName="/opt/lampp/htdocs/PostAScholarship/mysql/plsql/admin/insertCallBadge.sql"
echo "Status : inserting tag into db"
if [ ! -e $badgeSqlFileName ] ; then
echo "Error : file cant be found \`$badgeSqlFileName\`"
exit 1
fi
/opt/lampp/bin/mysql --user=root --password="" --database="postascholarship_db" < $badgeSqlFileName
if [ $? -ne 0 ] ; then
echo "Error msg : Error occured for the \`$badgeSqlFileName\` script"
else
echo "Status : inserted the tag from $badgeSqlFileName"
fi | abuzarhamza/PostAScholarship | automation/autorun.sh | Shell | artistic-2.0 | 5,282 |
#!/bin/bash
#
# Debugs a mocha test suite.
#
# change directory to that containing this script
cd "$(dirname "$0")"
# import configuration and library
source config.sh
source lib.sh
# testsuite to run
testsuite="$1"
# delayed calls
delay=0.8 #seconds
# start node-inspector if not already running
if [ ! "$(isActiveProcess "node-inspector")" ]
then
$debugInterface &
fi
# run all test suites or a particular one
sleep $delay
if [ -n "$testsuite" ]
then
# overwrite default mocha options with the given options
if [ "$2" ]
then mochaOptions="$2"
fi
# complement default mocha options with given additional options
if [ "$3" ]
then mochaOptions="$mochaOptions $3"
fi
	$mocha $mochaOptions --debug-brk "../test/${testsuite}.js" &
else
$mocha $mochaOptions --debug-brk ${testSuites[@]} &
fi
# start front end
sleep $delay
$debugFrontEnd $debugUrl
# wait until chromium exits before sending SIGTERM to node-inspector,
# mocha, and mocha child processes
pids=$(ps -ef \
| grep -v grep \
| grep -v /scripts/debug.sh \
| grep "node-inspect\|mocha" \
| awk '{ print $2 }')
echo -n "Sending SIGTERM to the following processes: "
echo $pids
kill $pids
echo "Finished."
| mpecherstorfer/pat | scripts/debug.sh | Shell | bsd-2-clause | 1,167 |
#!/bin/sh
# Copyright (c) 2009, Martijn P. Rijkeboer <[email protected]>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
SERVER='http://localhost:3000'
SERVERNAME="ns1"
PASSWORD="test1234"
BINDADMIN_DIR="/var/spool/bindadmin"
TARGET_ZONES_DIR="${BINDADMIN_DIR}/zones"
TARGET_CONFIG="${BINDADMIN_DIR}/zones.conf"
TEMP_ZONES_DIR="${BINDADMIN_DIR}/zones-temp"
TEMP_CONFIG="${BINDADMIN_DIR}/zones.conf-temp"
CONFIG_SCRIPT_NAME="config.sh"
LOCK_FILE="${BINDADMIN_DIR}/lock"
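# Abort early if a required command is missing from PATH.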
check_cmd() {
if ! which $1 >/dev/null; then
echo "Can't find command: $1"
exit 1
fi
}
check_cmd basename
check_cmd cat
check_cmd chmod
check_cmd chown
check_cmd id
check_cmd mkdir
check_cmd mv
check_cmd named-checkzone
check_cmd rm
check_cmd rndc
check_cmd touch
check_cmd wget
PROG=`basename $0`
# Check if root
if [ "`id -u`" != "0" ]; then
echo "${PROG}: must be run as root"
exit 1
fi
# Check if another instance is still running
if [ -f ${LOCK_FILE} ]; then
echo "${PROG}: an other instance is still running"
exit 1
else
touch ${LOCK_FILE}
fi
# Check if BINDADMIN_DIR exists
if [ ! -d ${BINDADMIN_DIR} ]; then
mkdir -p ${BINDADMIN_DIR}
fi
# Check if TARGET_ZONES_DIR exists
if [ ! -d ${TARGET_ZONES_DIR} ]; then
mkdir -p ${TARGET_ZONES_DIR}
fi
# Check if TEMP_ZONES_DIR exists
if [ ! -d ${TEMP_ZONES_DIR} ]; then
mkdir -p ${TEMP_ZONES_DIR}
fi
# Set rights
chown root ${BINDADMIN_DIR}
chmod 0700 ${BINDADMIN_DIR}
# Change directory
cd ${BINDADMIN_DIR}
# Check if timestamp file exists
if [ ! -f timestamp ]; then
echo "0" > timestamp
fi
# Get last timestamp
LAST_TIMESTAMP=`cat timestamp`
# Get new configuration
wget -q -O ${CONFIG_SCRIPT_NAME} ${SERVER}/bind/configs/${SERVERNAME}/${PASSWORD}/${LAST_TIMESTAMP}
# Execute new configuration
sh ${CONFIG_SCRIPT_NAME}
# Check zone files and move them into place
cd ${TEMP_ZONES_DIR}
for file in `ls`; do
named-checkzone -q ${file} ${file}
if [ "$?" = "0" ]; then
mv ${file} ${TARGET_ZONES_DIR}
fi
done
# Move zones.conf into place
cd ${BINDADMIN_DIR}
mv ${TEMP_CONFIG} ${TARGET_CONFIG}
# Reload bind config
rndc reload
# Clean up
rm -f ${CONFIG_SCRIPT_NAME}
rm -f ${LOCK_FILE}
exit 0
| mrijkeboer/bindadmin | client/client.sh | Shell | bsd-2-clause | 3,411 |
#!/bin/bash
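# Bootstrap a django-leonardo demo site (with the blog module) inside a Python 2.7 virtualenv.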
virtualenv -p /usr/bin/python2.7 leonardo_venv
cd leonardo_venv
. $PWD/bin/activate
pip install -e git+https://github.com/django-leonardo/django-leonardo@master#egg=django-leonardo
pip install -r $PWD/src/django-leonardo/requirements.txt
pip install -e git+https://github.com/leonardo-modules/leonardo-module-blog#egg=leonardo_module_blog
pip install -r $PWD/src/leonardo_store/requirements.txt
django-admin startproject --template=https://github.com/django-leonardo/site-template/archive/master.zip myproject
export PYTHONPATH=$PWD/myproject
cd myproject
python manage.py makemigrations --noinput
python manage.py migrate --noinput
python manage.py bootstrap_site --url=http://github.com/django-leonardo/django-leonardo/raw/master/contrib/bootstrap/blog.yaml
echo "from django.contrib.auth.models import User; User.objects.create_superuser('root', '[email protected]', 'admin')" | python manage.py shell
python manage.py runserver 0.0.0.0:80
| django-leonardo/django-leonardo | contrib/scripts/install_blog.sh | Shell | bsd-3-clause | 962 |
#!/usr/bin/env bash
set -euo pipefail
set -x
nightlyTag="${IMAGE_TAG}.nightly"
nightlyBuildTag="${IMAGE_TAG}-build.nightly"
date=$(date -u '+%Y-%m-%d-%H.%M.%S')
dateTag="${PRIVATE_IMAGE_TAG}.${date}"
dateBuildTag="${PRIVATE_IMAGE_TAG}-build.${date}"
bin/build.sh "${STACK}" "${nightlyTag}" "${nightlyBuildTag}"
# Disable tracing temporarily to prevent logging DOCKER_HUB_PASSWORD.
(set +x; echo "${DOCKER_HUB_PASSWORD}" | docker login -u "${DOCKER_HUB_USERNAME}" --password-stdin)
docker push "${nightlyTag}"
docker tag "${nightlyTag}" "${dateTag}"
docker push "${dateTag}"
docker push "${nightlyBuildTag}"
docker tag "${nightlyBuildTag}" "${dateBuildTag}"
docker push "${dateBuildTag}"
if [[ -v CIRCLE_TAG ]]; then
releaseTag="${IMAGE_TAG}.${CIRCLE_TAG}"
releaseBuildTag="${IMAGE_TAG}-build.${CIRCLE_TAG}"
latestTag="${IMAGE_TAG}"
latestBuildTag="${IMAGE_TAG}-build"
docker tag "${nightlyTag}" "${releaseTag}"
docker tag "${nightlyTag}" "${latestTag}"
docker push "${releaseTag}"
docker push "${latestTag}"
docker tag "${nightlyBuildTag}" "${releaseBuildTag}"
docker tag "${nightlyBuildTag}" "${latestBuildTag}"
docker push "${releaseBuildTag}"
docker push "${latestBuildTag}"
fi
| heroku/stack-images | bin/publish-to-dockerhub.sh | Shell | bsd-3-clause | 1,218 |
#!/usr/bin/env bash
set -e
PROJECT_DIR=$(dirname $0)/../../..
source $PROJECT_DIR/scripts/cf-utils.sh
STACK_NAME=$StackPrefix-my-api
update-stack $1 --template-url $AWSMusingsS3URL/api-gateway-developer-guide/api-gateway-create-api-step-by-step/my-api.yml
| myshkin5/aws-musings | api-gateway-developer-guide/api-gateway-create-api-step-by-step/scripts/my-api.sh | Shell | bsd-3-clause | 261 |
#!/usr/bin/env bash
./unigraph.py "$@"
| skhal/algorithms_old | ch4/python/ch4_ex4.1.7.sh | Shell | mit | 40 |
export PROJECT=cd-example
export META_SYNC_BUCKET=cd-example-meta-sync
export REGION=us-east-1
export REGION_VAR_NAME=useast1
export TRAVIS_TEST_RESULT=0
export TRAVIS_PULL_REQUEST="false"
export TRAVIS_BRANCH=$(git symbolic-ref --short -q HEAD)
echo "Region: $REGION or $REGION_VAR_NAME"
echo "Branch: $TRAVIS_BRANCH"
echo "Is Pull Request: $TRAVIS_PULL_REQUEST"
| serverless-examples/serverless-cd-example | v0.5/setup-deploy-envvars.sh | Shell | mit | 367 |
#!/bin/bash
##########################################################################
# This script starts the installation and configuration process of the
# specified application and finally it shows a log file which contains
# reported installation steps and possible errors.
# @author César Rodríguez González
# @version 1.3, 2016-10-08
# @license MIT
##########################################################################
# Basic Variables
scriptRootFolder="`pwd`/.."; username="`whoami`"; homeFolder="$HOME"
# Import common variables and functions
. $scriptRootFolder/common/commonFunctions.sh
declare -a appsToInstall=( "VNC_server" )
# Launch menu and install selected applications
prepareScript "$0"
installAndSetupApplications appsToInstall[@] | cesar-rgon/desktop-app-installer | app-scripts/vnc-server.sh | Shell | mit | 763 |
#!/bin/sh
### BEGIN INIT INFO
# Provides: ledplay
# Required-Start: $local_fs
# Required-Stop: $local_fs
# Default-Start:
# Default-Stop:
# Short-Description: Enables/Disables each LED once at boot
### END INIT INFO
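# @LIBEXEC@ is replaced with the actual libexec path at package build time.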
. @LIBEXEC@/ledctrl
led_bootdown
exit 0
| rdm-dev/meta-jens | recipes-rdm/ledctrl/ledctrl/ledbootdown.sh | Shell | mit | 282 |
#!/bin/bash
# Copyright (c) 2015-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# ******************************************************************************
# This is an end-to-end test intended to run on CI.
# You can also run it locally but it's slow.
# ******************************************************************************
# Start in tasks/ even if run from root directory
cd "$(dirname "$0")"
# CLI and app temporary locations
# http://unix.stackexchange.com/a/84980
temp_cli_path=`mktemp -d 2>/dev/null || mktemp -d -t 'temp_cli_path'`
temp_app_path=`mktemp -d 2>/dev/null || mktemp -d -t 'temp_app_path'`
function cleanup {
echo 'Cleaning up.'
cd "$root_path"
# Uncomment when snapshot testing is enabled by default:
# rm ./packages/react-scripts/template/src/__snapshots__/App.test.js.snap
rm -rf "$temp_cli_path" $temp_app_path
}
# Error messages are redirected to stderr
function handle_error {
echo "$(basename $0): ERROR! An error was encountered executing line $1." 1>&2;
cleanup
echo 'Exiting with error.' 1>&2;
exit 1
}
function handle_exit {
cleanup
echo 'Exiting without error.' 1>&2;
exit
}
function create_react_app {
node "$temp_cli_path"/node_modules/create-react-app/index.js "$@"
}
function install_package {
local pkg=$(basename $1)
# Clean target (for safety)
rm -rf node_modules/$pkg/
rm -rf node_modules/**/$pkg/
# Copy package into node_modules/ ignoring installed deps
# rsync -a ${1%/} node_modules/ --exclude node_modules
cp -R ${1%/} node_modules/
rm -rf node_modules/$pkg/node_modules/
# Install `dependencies`
cd node_modules/$pkg/
if [ "$USE_YARN" = "yes" ]
then
yarn install --production
else
npm install --only=production
fi
# Remove our packages to ensure side-by-side versions are used (which we link)
rm -rf node_modules/{babel-preset-react-app,eslint-config-react-app,react-dev-utils,react-error-overlay,react-scripts}
cd ../..
}
# Check for the existence of one or more files.
function exists {
for f in $*; do
test -e "$f"
done
}
# Exit the script with a helpful error message when any error is encountered
trap 'set +x; handle_error $LINENO $BASH_COMMAND' ERR
# Cleanup before exit on any termination signal
trap 'set +x; handle_exit' SIGQUIT SIGTERM SIGINT SIGKILL SIGHUP
# Echo every command being executed
set -x
# Go to root
cd ..
root_path=$PWD
# Clear cache to avoid issues with incorrect packages being used
if hash yarnpkg 2>/dev/null
then
# AppVeyor uses an old version of yarn.
# Once updated to 0.24.3 or above, the workaround can be removed
# and replaced with `yarnpkg cache clean`
# Issues:
# https://github.com/yarnpkg/yarn/issues/2591
# https://github.com/appveyor/ci/issues/1576
# https://github.com/facebookincubator/create-react-app/pull/2400
# When removing workaround, you may run into
# https://github.com/facebookincubator/create-react-app/issues/2030
case "$(uname -s)" in
*CYGWIN*|MSYS*|MINGW*) yarn=yarn.cmd;;
*) yarn=yarnpkg;;
esac
$yarn cache clean
fi
if hash npm 2>/dev/null
then
# npm 5 is too buggy right now
if [ $(npm -v | head -c 1) -eq 5 ]; then
npm i -g npm@^4.x
fi;
npm cache clean || npm cache verify
fi
# Prevent bootstrap, we only want top-level dependencies
cp package.json package.json.bak
grep -v "postinstall" package.json > temp && mv temp package.json
npm install
mv package.json.bak package.json
# We need to install create-react-app deps to test it
cd "$root_path"/packages/create-react-app
npm install
cd "$root_path"
# If the node version is < 6, the script should just give an error.
nodeVersion=`node --version | cut -d v -f2`
nodeMajor=`echo $nodeVersion | cut -d. -f1`
nodeMinor=`echo $nodeVersion | cut -d. -f2`
if [[ nodeMajor -lt 6 ]]
then
cd $temp_app_path
err_output=`node "$root_path"/packages/create-react-app/index.js test-node-version 2>&1 > /dev/null || echo ''`
[[ $err_output =~ You\ are\ running\ Node ]] && exit 0 || exit 1
fi
if [ "$USE_YARN" = "yes" ]
then
# Install Yarn so that the test can use it to install packages.
npm install -g yarn
yarn cache clean
fi
# We removed the postinstall, so do it manually here
node bootstrap.js
# Lint own code
./node_modules/.bin/eslint --max-warnings 0 packages/babel-preset-react-app/
./node_modules/.bin/eslint --max-warnings 0 packages/create-react-app/
./node_modules/.bin/eslint --max-warnings 0 packages/eslint-config-react-app/
./node_modules/.bin/eslint --max-warnings 0 packages/react-dev-utils/
./node_modules/.bin/eslint --max-warnings 0 packages/react-scripts/
cd packages/react-error-overlay/
./node_modules/.bin/eslint --max-warnings 0 src/
npm test
npm run build:prod
cd ../..
# ******************************************************************************
# First, test the create-react-app development environment.
# This does not affect our users but makes sure we can develop it.
# ******************************************************************************
# Test local build command
npm run build
# Check for expected output
exists build/*.html
exists build/static/js/*.js
exists build/static/css/*.css
exists build/static/media/*.svg
exists build/favicon.ico
# Run tests with CI flag
CI=true npm test
# Uncomment when snapshot testing is enabled by default:
# exists template/src/__snapshots__/App.test.js.snap
# Test local start command
npm start -- --smoke-test
# ******************************************************************************
# Next, pack react-scripts and create-react-app so we can verify they work.
# ******************************************************************************
# Pack CLI
cd "$root_path"/packages/create-react-app
cli_path=$PWD/`npm pack`
# Go to react-scripts
cd "$root_path"/packages/react-scripts
# Save package.json because we're going to touch it
cp package.json package.json.orig
# Replace own dependencies (those in the `packages` dir) with the local paths
# of those packages.
node "$root_path"/tasks/replace-own-deps.js
# Finally, pack react-scripts
scripts_path="$root_path"/packages/react-scripts/`npm pack`
# Restore package.json
rm package.json
mv package.json.orig package.json
# ******************************************************************************
# Now that we have packed them, create a clean app folder and install them.
# ******************************************************************************
# Install the CLI in a temporary location
cd "$temp_cli_path"
# Initialize package.json before installing the CLI because npm will not install
# the CLI properly in the temporary location if it is missing.
npm init --yes
# Now we can install the CLI from the local package.
npm install "$cli_path"
# Install the app in a temporary location
cd $temp_app_path
create_react_app --scripts-version="$scripts_path" test-app
# ******************************************************************************
# Now that we used create-react-app to create an app depending on react-scripts,
# let's make sure all npm scripts are in the working state.
# ******************************************************************************
function verify_env_url {
# Backup package.json because we're going to make it dirty
cp package.json package.json.orig
# Test default behavior
grep -F -R --exclude=*.map "\"/static/" build/ -q; test $? -eq 0 || exit 1
# Test relative path build
awk -v n=2 -v s=" \"homepage\": \".\"," 'NR == n {print s} {print}' package.json > tmp && mv tmp package.json
npm run build
# Disabled until this can be tested
# grep -F -R --exclude=*.map "../../static/" build/ -q; test $? -eq 0 || exit 1
grep -F -R --exclude=*.map "\"./static/" build/ -q; test $? -eq 0 || exit 1
grep -F -R --exclude=*.map "\"/static/" build/ -q; test $? -eq 1 || exit 1
PUBLIC_URL="/anabsolute" npm run build
grep -F -R --exclude=*.map "/anabsolute/static/" build/ -q; test $? -eq 0 || exit 1
grep -F -R --exclude=*.map "\"/static/" build/ -q; test $? -eq 1 || exit 1
# Test absolute path build
sed "2s/.*/ \"homepage\": \"\/testingpath\",/" package.json > tmp && mv tmp package.json
npm run build
grep -F -R --exclude=*.map "/testingpath/static/" build/ -q; test $? -eq 0 || exit 1
grep -F -R --exclude=*.map "\"/static/" build/ -q; test $? -eq 1 || exit 1
PUBLIC_URL="https://www.example.net/overridetest" npm run build
grep -F -R --exclude=*.map "https://www.example.net/overridetest/static/" build/ -q; test $? -eq 0 || exit 1
grep -F -R --exclude=*.map "\"/static/" build/ -q; test $? -eq 1 || exit 1
grep -F -R --exclude=*.map "testingpath/static" build/ -q; test $? -eq 1 || exit 1
# Test absolute url build
sed "2s/.*/ \"homepage\": \"https:\/\/www.example.net\/testingpath\",/" package.json > tmp && mv tmp package.json
npm run build
grep -F -R --exclude=*.map "/testingpath/static/" build/ -q; test $? -eq 0 || exit 1
grep -F -R --exclude=*.map "\"/static/" build/ -q; test $? -eq 1 || exit 1
PUBLIC_URL="https://www.example.net/overridetest" npm run build
grep -F -R --exclude=*.map "https://www.example.net/overridetest/static/" build/ -q; test $? -eq 0 || exit 1
grep -F -R --exclude=*.map "\"/static/" build/ -q; test $? -eq 1 || exit 1
grep -F -R --exclude=*.map "testingpath/static" build/ -q; test $? -eq 1 || exit 1
# Restore package.json
rm package.json
mv package.json.orig package.json
}
function verify_module_scope {
# Create stub json file
echo "{}" >> sample.json
# Save App.js, we're going to modify it
cp src/App.js src/App.js.bak
# Add an out of scope import
echo "import sampleJson from '../sample'" | cat - src/App.js > src/App.js.temp && mv src/App.js.temp src/App.js
# Make sure the build fails
npm run build; test $? -eq 1 || exit 1
# TODO: check for error message
# Restore App.js
rm src/App.js
mv src/App.js.bak src/App.js
}
# Enter the app directory
cd test-app
# Test the build
npm run build
# Check for expected output
exists build/*.html
exists build/static/js/*.js
exists build/static/css/*.css
exists build/static/media/*.svg
exists build/favicon.ico
# Run tests with CI flag
CI=true npm test
# Uncomment when snapshot testing is enabled by default:
# exists src/__snapshots__/App.test.js.snap
# Test the server
npm start -- --smoke-test
# Test environment handling
verify_env_url
# Test reliance on webpack internals
verify_module_scope
# ******************************************************************************
# Finally, let's check that everything still works after ejecting.
# ******************************************************************************
# Eject...
echo yes | npm run eject
# Ensure Yarn is ran after eject; at the time of this commit, we don't run Yarn
# after ejecting. Soon, we may only skip Yarn on Windows. Let's try to remove
# this in the near future.
if hash yarnpkg 2>/dev/null
then
yarnpkg install --check-files
fi
# ...but still link to the local packages
install_package "$root_path"/packages/babel-preset-react-app
install_package "$root_path"/packages/eslint-config-react-app
install_package "$root_path"/packages/react-dev-utils
# Test the build
npm run build
# Check for expected output
exists build/*.html
exists build/static/js/*.js
exists build/static/css/*.css
exists build/static/media/*.svg
exists build/favicon.ico
# Run tests, overriding the watch option to disable it.
# `CI=true npm test` won't work here because `npm test` becomes just `jest`.
# We should either teach Jest to respect CI env variable, or make
# `scripts/test.js` survive ejection (right now it doesn't).
npm test -- --watch=no
# Uncomment when snapshot testing is enabled by default:
# exists src/__snapshots__/App.test.js.snap
# Test the server
npm start -- --smoke-test
# Test environment handling
verify_env_url
# Test reliance on webpack internals
verify_module_scope
# Cleanup
cleanup
| HelpfulHuman/helpful-react-scripts | tasks/e2e-simple.sh | Shell | mit | 12,021 |
#!/bin/bash
[ -f /boot/config/no-run ] && exit
[ -f /boot/config/room.txt ] && cp /boot/config/room.txt /srv/info-beamer/webcamp-2017/schedule/room
nohup /home/webcamp/info-beamer-pi/info-beamer /srv/info-beamer/webcamp-2017 &
/srv/info-beamer/check-git.sh
| goranche/info-beamer | run-beamer.sh | Shell | mit | 257 |
#!/bin/bash
set -e
timer=/usr/bin/time
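# use the GNU time binary (not the shell builtin) so -f "%e" can print elapsed seconds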
exe_moon=./moonlight
exe_rose=./rosebud
void=/dev/null
moonlight_data=${exe_moon}.dat
rosebud_data=${exe_rose}.dat
if [ ! -e $moonlight_data ]
then
echo "#N time" >> $moonlight_data
for (( i=100; i <= 15000; i += 100))
do
    time_elapsed_moon=$( ($timer -f "%e" $exe_moon $i > $void) 2>&1 )
echo "$i $time_elapsed_moon" >> $moonlight_data
done
fi
if [ ! -e $rosebud_data ]
then
echo "#N time" >> $rosebud_data
for (( i=100; i <= 15000; i += 100))
do
    time_elapsed_rose=$( ($timer -f "%e" $exe_rose $i > $void) 2>&1 )
echo "$i $time_elapsed_rose" >> $rosebud_data
done
fi
# Plotting the two data files collected, provided they all exist
if [ -e $moonlight_data ] && [ -e $rosebud_data ] && [ -e plot.gp ]
then
gnuplot -p -c plot.gp
fi
echo "done"
| tavaresdong/courses | uw_cse374/hw1/profile.sh | Shell | mit | 858 |
#!/bin/sh
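# Regenerate all.h from the numbered impulse headers in this directory.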
echo "/* Generated file, do not edit */" > all.h
echo "" >> all.h
export count=`ls -1 [0-9][0-9]* | wc -l`
echo "#define IMPULSES $count" >> all.h
echo "" >> all.h
ls -1 [0-9][0-9]* | awk '{print "#include \"impulses/" $0 "\""}' >> all.h
echo "" >> all.h
echo "#ifdef __clang__" >> all.h
echo "static void mk_imps(fftw_real **impulse_freq)" >> all.h
echo "#else" >> all.h
echo "static inline void mk_imps(fftw_real **impulse_freq)" >> all.h
echo "#endif" >> all.h
echo "{" >> all.h
echo " int c = 0;" >> all.h
ls -1 [0-9][0-9]* | sed 's/...//;s/\.h//;s/-/_/g' | awk '{print "\tMK_IMP(" $0 ");"}' >> all.h
echo "};" >> all.h
| swh/ladspa | impulses/mkall-h.sh | Shell | gpl-2.0 | 628 |
#!/bin/sh
#
# Create a key of each type possible
#
TEST=./Tspi_Key_CreateKey04
for TYPE in legacy bind signing;do
for SIZE in 512 1024 2048;do
echo "$TEST -t $TYPE -s $SIZE -m -v -a"
$TEST -t $TYPE -s $SIZE -m -v -a
echo "$TEST -t $TYPE -s $SIZE -m -a"
$TEST -t $TYPE -s $SIZE -m -a
echo "$TEST -t $TYPE -s $SIZE -v -a"
$TEST -t $TYPE -s $SIZE -v -a
echo "$TEST -t $TYPE -s $SIZE -a"
$TEST -t $TYPE -s $SIZE -a
echo "$TEST -t $TYPE -s $SIZE -m -v"
$TEST -t $TYPE -s $SIZE -m -v
echo "$TEST -t $TYPE -s $SIZE -m"
$TEST -t $TYPE -s $SIZE -m
echo "$TEST -t $TYPE -s $SIZE -v"
$TEST -t $TYPE -s $SIZE -v
echo "$TEST -t $TYPE -s $SIZE"
$TEST -t $TYPE -s $SIZE
done
done
# Do storage keys manually, since only size 2048 is valid
echo "$TEST -t storage -s 2048 -m -v -a"
$TEST -t storage -s 2048 -m -v -a
echo "$TEST -t storage -s 2048 -m -a"
$TEST -t storage -s 2048 -m -a
echo "$TEST -t storage -s 2048 -v -a"
$TEST -t storage -s 2048 -v -a
echo "$TEST -t storage -s 2048 -a"
$TEST -t storage -s 2048 -a
echo "$TEST -t storage -s 2048 -m -v"
$TEST -t storage -s 2048 -m -v
echo "$TEST -t storage -s 2048 -m"
$TEST -t storage -s 2048 -m
echo "$TEST -t storage -s 2048 -v"
$TEST -t storage -s 2048 -v
echo "$TEST -t storage -s 2048"
$TEST -t storage -s 2048
| srajiv/testsuite | tcg/highlevel/key/create_key_tests.sh | Shell | gpl-2.0 | 1,288 |
if [ "${KEETOREALUSER}" ]; then
PS1="[\u~"${KEETOREALUSER}"@\h \W]\$ "
else
PS1="[\u@\h \W]\$ "
fi
| flix-/keeto | build/docker/keeto-openssh/config/ps1.sh | Shell | gpl-3.0 | 108 |
#!/bin/sh
font="/usr/share/fonts/truetype/droid/DroidSansMono.ttf"
nrchar=95 # 32 .. 126
echo Read $font
echo Export ASCII into test.png
#
# Compile C-File
#
gcc -std=c11 -owrite write_c_file.c || exit 1
#
# Generate font.png with fontforge
#
fontforge -lang=ff -script export.ff "$font" 2>/dev/null || fontforge -lang=ff -script export.ff "$font" || exit 1
size=`file font.png | cut -d , -f 2 -`
X=`echo $size | cut -d " " -f 1`
Y=`echo $size | cut -d " " -f 3`
charwidth=$(( X/nrchar ))
charheight=40
echo IMAGE-SIZE: $X x $Y
echo CHAR WIDTH: $charwidth
echo CHAR HEIGHT: $charheight
xoff=0
yoff=0
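# export the rendered sheet as raw 8-bit grayscale for the C generator, plus a PNG preview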
convert -depth 1 -depth 8 font.png font.gray
convert -depth 1 -depth 8 font.png font-sw.png
c_file="font_${charwidth}x${charheight}.c"
./write font.gray $X $Y $xoff $yoff $charwidth $charheight > $c_file
rm ./font.gray ./font.png ./write
| je-so/testcode | stm32f3/util/font/generate_bitmap_font.sh | Shell | gpl-3.0 | 838 |
#!/bin/bash
path=`dirname "${0}"`
link=`readlink "${0}"`
[ -n "${link}" ] && path=`dirname "${link}"`
cd "${path}"
case "${0##*/}" in
*dedicated*) mode="dedicated" ;;
*sdl*) mode="sdl" ;;
*) mode="glx" ;;
esac
case "$(uname -m)" in
x86_64) arch="linux64" ;;
*) arch="linux32" ;;
esac
xonotic="xonotic-${arch}-${mode}"
set -- ./${xonotic} "${@}"
xserver=
xlayout=
setdisplay()
{
VALUE=$1
VALUE=${VALUE#\"}
VALUE=${VALUE%\"}
case "$VALUE" in
:*)
;;
*)
VALUE=:$VALUE
;;
esac
VALUE="$VALUE/"
xserver="${VALUE%%/*}"
xserver=${xserver#:}
xlayout=${VALUE#*/}
xlayout=${xlayout%/}
}
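# e.g. (illustrative): a value of "1/dual" yields xserver="1" and xlayout="dual";
# a bare "1" (or ":1") yields xserver="1" and an empty layout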
# now how do we execute it?
if [ -r ~/.xonotic/data/config.cfg ]; then
while read -r CMD KEY VALUE; do
case "$CMD:$KEY" in
seta:vid_x11_display)
setdisplay "$VALUE"
;;
esac
done < ~/.xonotic/data/config.cfg
fi
m=0
for X in "$@"; do
case "$m:$X" in
0:+vid_x11_display)
m=1
;;
0:+vid_x11_display\ *)
setdisplay "${X#+vid_x11_display }"
;;
1:*)
setdisplay "$X"
m=0
;;
*)
;;
esac
done
case "$xserver" in
'')
;;
*[!0-9]*)
echo "Not using display ':$xserver': evil characters"
;;
*)
msg=
lf='
'
prefix=
# check for a listening X server on that socket
if netstat -nl | grep -F " /tmp/.X11-unix/X$xserver" >/dev/null; then
# X server already exists
export DISPLAY=:$xserver
prefix="DISPLAY=:$xserver "
msg=$msg$lf"- Running Xonotic on already existing display :$xserver"
else
set -- startx "$@" -fullscreen -- ":$xserver"
msg=$msg$lf"- Running Xonotic on a newly created X server :$xserver."
case "$xlayout" in
'')
;;
*[!A-Za-z0-9]*)
echo >&2 "Not using layout '$xlayout': evil characters"
xlayout=
;;
*)
set -- "$@" -layout "$xlayout"
msg=$msg$lf"- Using the ServerLayout section named $xlayout."
;;
esac
fi
echo "X SERVER OVERRIDES IN EFFECT:$msg"
echo
echo "Resulting command line:"
echo " $prefix$*"
echo
echo "To undo these overrides, edit ~/.xonotic/data/config.cfg and remove the line"
echo "starting with 'seta vid_x11_display'."
echo
echo
;;
esac
# if pulseaudio
if [ -z "$SDL_AUDIODRIVER" ]; then
if ps -C pulseaudio >/dev/null; then
if ldd /usr/lib/libSDL.so 2>/dev/null | grep pulse >/dev/null; then
export SDL_AUDIODRIVER=pulse
fi
fi
fi
exec "$@"
| jmanoel7/my_shell_scripts | bin/xonotic-linux-sdl.sh | Shell | gpl-3.0 | 2,352 |
#!/bin/bash
set -e
pegasus_lite_version_major="4"
pegasus_lite_version_minor="7"
pegasus_lite_version_patch="0"
pegasus_lite_enforce_strict_wp_check="true"
pegasus_lite_version_allow_wp_auto_download="true"
. pegasus-lite-common.sh
pegasus_lite_init
# cleanup in case of failures
trap pegasus_lite_signal_int INT
trap pegasus_lite_signal_term TERM
trap pegasus_lite_exit EXIT
echo -e "\n################################ Setting up workdir ################################" 1>&2
# work dir
export pegasus_lite_work_dir=$PWD
pegasus_lite_setup_work_dir
echo -e "\n###################### figuring out the worker package to use ######################" 1>&2
# figure out the worker package to use
pegasus_lite_worker_package
echo -e "\n##################### setting the xbit for executables staged #####################" 1>&2
# set the xbit for any executables staged
/bin/chmod +x example_workflow-generalinfo_0-1.0
echo -e "\n############################# executing the user tasks #############################" 1>&2
# execute the tasks
set +e
pegasus-kickstart -n example_workflow::generalinfo_0:1.0 -N ID0000002 -R condorpool -L example_workflow -T 2016-11-07T22:50:01+00:00 ./example_workflow-generalinfo_0-1.0
job_ec=$?
set -e
| elainenaomi/sciwonc-dataflow-examples | dissertation2017/Experiment 1A/logs/w-11_1/20161107T225001+0000/00/00/generalinfo_0_ID0000002.sh | Shell | gpl-3.0 | 1,243 |
#!/bin/bash
PACKAGE="php"
xdg-icon-resource uninstall --novendor --size 256 "eclipse"
gtk-update-icon-cache -f -t /usr/share/icons/hicolor
rm -f "/usr/bin/eclipse-$PACKAGE"
rm -f "/usr/share/applications/eclipse-$PACKAGE.desktop"
rm -rf "/opt/eclipse-$PACKAGE"
| folkswithhats/fedy | plugins/eclipsephp.plugin/uninstall.sh | Shell | gpl-3.0 | 264 |
#!/bin/bash
VERSION="0.8"
MEGA_API_URL="https://g.api.mega.co.nz"
MEGA_API_KEY=""
MC_API_URL="http://megacrypter.com/api"
OPENSSL_AES_CTR_DEC="openssl enc -d -aes-128-ctr"
OPENSSL_AES_CBC_DEC="openssl enc -a -A -d -aes-128-cbc"
WATCH_DIR="."
WATCHDOG_SLEEP_SECS=5
DOWNLOAD_STOP_FILE="force_stop.tmp"
DOWNLOAD_FINISHED_FILE="download_finished.tmp"
# 1:download url
function watchdog {
echo -e "Watchdog loaded!\n"
WATCHDOG=true
while [ $WATCHDOG ] && [ "$res" != "$DOWNLOAD_STOP_FILE" ]; do
sleep $WATCHDOG_SLEEP_SECS
res=$(ls "$DOWNLOAD_STOP_FILE" 2>&1)
done
if [ $WATCHDOG ]
then
WATCHDOG=false
pid=$(pgrep -f "$1")
kill -s 9 "$pid"
else
rm "${WATCH_DIR}/${DOWNLOAD_FINISHED_FILE}"
fi
echo -e "Watchdog bye bye!\n"
}
# 1:json_string 2:key
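# e.g. (illustrative): with res='{"name":"file.txt","size":1024}',
# `json_param "$res" size` echoes '1024' and `json_param "$res" name` echoes 'file.txt'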
function json_param {
echo -ne "$1" | tr -d '\n' | perl -pe "s/^.*\"$2\" *?\: *?([0-9]+|true|false|\".*?(?<!\\\\)\").*?$/\1/s" | perl -pe "s/^\"(.+)\"$/\1/" | tr -d '\\'
}
# 1:b64_encoded_String
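# e.g. (illustrative): `b64_pad 'SGVsbG8'` echoes 'SGVsbG8=' (standard base64
# for "Hello"); URL-safe '-'/'_' are mapped back to '+'/'/'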
function b64_pad {
b64=$(echo -ne "$1" | tr '\-_' '+/')
pad=$(((4-${#1}%4)%4))
for i in $(seq 1 $pad)
do
b64="${b64}="
done
echo -n "$b64"
}
# 1:string
function md5 {
echo -n "$1" | md5sum | tr -d -c [:alnum:]
}
# 1:pass 2:double_md5 3:salt
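# the 'pass' field packs "md5(salt+md5(pass)+salt)#salt"; e.g. (illustrative):
# is_valid_pass 'secret' "$(md5 "ab$(md5 'secret')ab")" 'ab' echoes '1'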
function is_valid_pass {
if [ $(md5 "$3$(md5 "$1")$3") != $2 ]
then
echo -n "0"
else
echo -n "1"
fi
}
# 1:hex_raw_key
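# MEGA's 256-bit raw key folds into a 128-bit AES key by XOR-ing its two halves;
# e.g. (illustrative): hrk2hk '00112233445566778899aabbccddeeffffeeddccbbaa99887766554433221100'
# echoes 'ffffffffffffffffffffffffffffffff'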
function hrk2hk {
key[0]=$(( 0x${1:0:16} ^ 0x${1:32:16} ))
key[1]=$(( 0x${1:16:16} ^ 0x${1:48:16} ))
printf "%016x" ${key[*]}
}
echo -e "\nThis is MEGA-DOWN (streamondemand-pureita MOD) $VERSION"
if [ -z $1 ]
then
echo -e "\n$0 <mega_url|mc_url> [output_file] [mc_url_pass]\n\nNote: use '-' for output to STDOUT\n"
else
if [ $(echo -n $1 | grep -E -o 'mega\.co\.nz') ]
then
#MEGA.CO.NZ LINK
file_id=$(echo -n $1 | perl -pe "s/^.*\/#!(.+)!.*$/\1/s")
file_key=$(echo -n $1 | perl -pe "s/^.*\/#!.+!(.*)$/\1/s")
hex_raw_key=$(echo -n $(b64_pad $file_key) | base64 -d -i 2>/dev/null | od -An -t x1 | tr -d '\n ')
mega_req_url="${MEGA_API_URL}/cs?id=$seqno&ak=$MEGA_API_KEY"
mega_req_json="[{\"a\":\"g\", \"p\":\"$file_id\"}]"
mega_res_json=$(curl -s -X POST --data-binary "$mega_req_json" "$mega_req_url")
if [ $(echo -n "$mega_res_json" | grep -E -o '\[\-[0-9]\]') ]
then
error_code=$(echo -n "$mega_res_json" | perl -pe "s/^.*\[(.*?)\].*$/\1/s")
echo -e "\nMEGA ERROR: $error_code\n" 1>&2
exit
else
file_size=$(json_param "$mega_res_json" s)
at=$(json_param "$mega_res_json" at)
hex_key=$(hrk2hk "$hex_raw_key")
at_dec_json=$(echo -n $(b64_pad "$at") | $OPENSSL_AES_CBC_DEC -K $hex_key -iv "00000000000000000000000000000000" -nopad)
if [ $(echo -n "$at_dec_json" | grep -E -o 'MEGA') ]
then
file_name=$(json_param "$(echo -n $(b64_pad "$at") | $OPENSSL_AES_CBC_DEC -K $hex_key -iv "00000000000000000000000000000000" -nopad)" n)
mega_req_json="[{\"a\":\"g\", \"g\":\"1\", \"p\":\"$file_id\"}]"
mega_res_json=$(curl -s -X POST --data-binary "$mega_req_json" "$mega_req_url")
dl_temp_url=$(json_param "$mega_res_json" g)
else
echo -e "\nMEGA ERROR: bad link\n" 1>&2
exit
fi
fi
else
#MEGACRYPTER LINK
info_link=$(curl -s -X POST --data-binary "{\"m\":\"info\", \"link\":\"$1\"}" "$MC_API_URL")
if [ $(echo $info_link | grep '"error"') ]
then
error_code=$(json_param "$info_link" error)
echo -e "\nMEGACRYPTER ERROR: $error_code\n" 1>&2
else
if [ -z $2 ]
then
file_name=$(json_param "$info_link" name)
else
file_name="$2"
fi
pass=$(json_param "$info_link" pass)
if [ $pass != "false" ]
then
arr_pass=(${pass//#/ })
pass_double_md5=${arr_pass[0]}
pass_salt=${arr_pass[1]}
pass=""
if [ $3 ]
then
pass="$3"
if [ $(is_valid_pass $pass $pass_double_md5 $pass_salt) -eq 0 ]
then
pass=""
fi
fi
if [ -z $pass ]
then
read -e -p "Link is password protected. Enter password: " pass
until [ $(is_valid_pass $pass $pass_double_md5 $pass_salt) -eq 1 ]; do
read -e -p "Wrong password! Try again: " pass
done
fi
hex_raw_key=$(echo -n $(b64_pad $(json_param "$info_link" key)) | $OPENSSL_AES_CBC_DEC -K $(md5 "$pass") -iv "00000000000000000000000000000000" | od -An -t x1 | tr -d '\n ')
if [ -z $2 ]
then
file_name=$(echo -n $(b64_pad "$file_name") | $OPENSSL_AES_CBC_DEC -K $(md5 "$pass") -iv "00000000000000000000000000000000")
fi
else
hex_raw_key=$(echo -n $(b64_pad $(json_param "$info_link" key)) | base64 -d -i 2>/dev/null | od -An -t x1 | tr -d '\n ')
fi
file_size=$(json_param "$info_link" size)
hex_key=$(hrk2hk "$hex_raw_key")
dl_link=$(curl -s -X POST --data-binary "{\"m\":\"dl\", \"link\":\"$1\"}" "$MC_API_URL")
dl_temp_url=$(json_param "$dl_link" url)
fi
fi
watchdog $dl_temp_url &
echo -e "Downloading...!\n"
if [ "$2" != "-" ]
then
hex_iv="${hex_raw_key:32:16}0000000000000000"
curl -s $dl_temp_url | $OPENSSL_AES_CTR_DEC -K $hex_key -iv $hex_iv > "${file_name}"
else
hex_iv="${hex_raw_key:32:16}0000000000000000"
curl -s $dl_temp_url | $OPENSSL_AES_CTR_DEC -K $hex_key -iv $hex_iv
fi
# WATCHDOG lives in the background subshell, so detect a forced stop
# through the stop file instead of a flag variable
if [ ! -e "${WATCH_DIR}/${DOWNLOAD_STOP_FILE}" ]
then
echo -e "\nFILE DOWNLOADED :)!\n"
touch "${WATCH_DIR}/${DOWNLOAD_FINISHED_FILE}"
else
echo -e "\nFILE DOWNLOAD STOPPED!\n"
rm "${WATCH_DIR}/${DOWNLOAD_STOP_FILE}"
fi
fi
| orione7/Italorione | tools/megacrypter.sh | Shell | gpl-3.0 | 5,494 |
#!/usr/bin/env bash
#
# Piwi Bash Library - An open source day-to-day bash library
# Copyleft (ↄ) 2013-2015 Pierre Cassat <[email protected]> & contributors
# Some rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# For sources & updates, see <http://github.com/piwi/bash-library>.
#
# For documentation, see <http://github.com/piwi/bash-library/wiki/>.
#
# To transmit bugs, see <http://github.com/piwi/bash-library/issues>.
#
# To read GPL-3.0 license conditions, see <http://www.gnu.org/licenses/gpl-3.0.html>.
#
##@!@##
## for dev usage
#set -u
#set -e
#### REFERENCES #####################################################################
##@ Bash Reference Manual: <http://www.gnu.org/software/bash/manual/bashref.html>
##@ Bash Guide for Beginners: <http://www.tldp.org/LDP/Bash-Beginners-Guide/html/Bash-Beginners-Guide.html>
##@ Advanced Bash-Scripting Guide: <http://www.tldp.org/LDP/abs/html/abs-guide.html>
##@ GNU coding standards: <http://www.gnu.org/prep/standards/standards.html>
#### ENVIRONMENT #############################################################################
##@ SCRIPT_VARS = ( NAME VERSION DATE DESCRIPTION LICENSE HOMEPAGE SYNOPSIS OPTIONS ) (read-only)
## List of variables defined by the caller script used to build all informational strings.
## These are all RECOMMENDED.
declare -rxa SCRIPT_VARS="(NAME VERSION DATE DESCRIPTION LICENSE HOMEPAGE SYNOPSIS OPTIONS)" 2>/dev/null;
##@ USAGE_VARS = ( NAME VERSION DATE DESCRIPTION_USAGE SYNOPSIS_USAGE OPTIONS_USAGE ) (read-only)
## List of variables defined by the caller script used to build the 'usage' string (common option `--usage`).
declare -rxa USAGE_VARS="(NAME VERSION DATE DESCRIPTION_USAGE SYNOPSIS_USAGE OPTIONS_USAGE)" 2>/dev/null;
##@ USAGE_SUFFIX = "_USAGE"
## Suffix used to build some of the `USAGE_VARS` variable names ; it is stripped to fallback over "classic" variable.
declare -rx USAGE_SUFFIX="_USAGE" 2>/dev/null;
##@ VERSION_VARS = ( NAME VERSION DATE DESCRIPTION COPYRIGHT LICENSE HOMEPAGE SOURCES ADDITIONAL_INFO ) (read-only)
## List of variables defined by the caller script used to build the 'version' string (common option `--version`).
##@see <http://www.gnu.org/prep/standards/standards.html#g_t_002d_002dversion>
declare -rxa VERSION_VARS="(NAME VERSION DATE DESCRIPTION COPYRIGHT LICENSE HOMEPAGE SOURCES ADDITIONAL_INFO)" 2>/dev/null;
##@ MANPAGE_VARS = ( NAME VERSION DATE DESCRIPTION_MANPAGE SYNOPSIS_MANPAGE OPTIONS_MANPAGE EXAMPLES_MANPAGE EXIT_STATUS_MANPAGE FILES_MANPAGE ENVIRONMENT_MANPAGE COPYRIGHT_MANPAGE HOMEPAGE_MANPAGE BUGS_MANPAGE AUTHOR_MANPAGE SEE_ALSO_MANPAGE ) (read-only)
## List of variables defined by the caller script used to build the 'manpage' string (common option `--manpage`).
##@see <http://en.wikipedia.org/wiki/Man_page>
declare -rxa MANPAGE_VARS="(NAME VERSION DATE DESCRIPTION_MANPAGE SYNOPSIS_MANPAGE OPTIONS_MANPAGE EXAMPLES_MANPAGE EXIT_STATUS_MANPAGE FILES_MANPAGE ENVIRONMENT_MANPAGE COPYRIGHT_MANPAGE HOMEPAGE_MANPAGE BUGS_MANPAGE AUTHOR_MANPAGE SEE_ALSO_MANPAGE)" 2>/dev/null;
##@ MANPAGE_SUFFIX = "_MANPAGE"
## Suffix used to build some of the `MANPAGE_VARS` variable names ; it is stripped to fall back on the "classic" variable.
declare -rx MANPAGE_SUFFIX="_MANPAGE" 2>/dev/null;
##@ LIB_FLAGS = ( VERBOSE QUIET DEBUG INTERACTIVE FORCED ) (read-only)
## List of variables defined as global flags ; they are enabled/disabled by common options.
declare -rxa LIB_FLAGS="(VERBOSE QUIET DEBUG INTERACTIVE FORCED)" 2>/dev/null;
##@ INTERACTIVE = DEBUG = VERBOSE = QUIET = FORCED = DRYRUN = false
##@ WORKINGDIR = `pwd`
declare -x INTERACTIVE=false
declare -x QUIET=false
declare -x VERBOSE=false
declare -x FORCED=false
declare -x DEBUG=false
declare -x DRYRUN=false
declare -x WORKINGDIR="$(pwd)"
declare -x LOGFILE=''
declare -x LOGFILEPATH=''
declare -x TEMPDIR=''
##@ COLOR_VARS = ( COLOR_LIGHT COLOR_DARK COLOR_INFO COLOR_NOTICE COLOR_WARNING COLOR_ERROR COLOR_COMMENT ) (read-only)
## List of variables defined by the library as "common colors" names.
declare -rxa COLOR_VARS="(COLOR_LIGHT COLOR_DARK COLOR_INFO COLOR_NOTICE COLOR_WARNING COLOR_ERROR COLOR_COMMENT)" 2>/dev/null;
##@ USEROS = "$(uname)" (read-only)
## Current running operating system name.
declare -rx USEROS="$(uname)" 2>/dev/null;
##@ LINUX_OS = ( Linux FreeBSD OpenBSD SunOS ) (read-only)
## List of Linux-like OSs.
declare -rxa LINUX_OS="(Linux FreeBSD OpenBSD SunOS)" 2>/dev/null;
##@ USERSHELL = "$SHELL" (read-only)
## Path of the shell currently in use (value of the global `$SHELL` variable).
declare -rx USERSHELL="$SHELL" 2>/dev/null;
##@ SHELLVERSION = "$BASH_VERSION" (read-only)
## Version number of current shell in use (value of the global `$BASH_VERSION` variable).
declare -rx SHELLVERSION="$BASH_VERSION" 2>/dev/null;
#### SETTINGS #####################################################################
# lib error codes
##@ E_ERROR=90
##@ E_OPTS=81
##@ E_CMD=82
##@ E_PATH=83
## Error codes in Bash must be exit codes between 0 and 255.
## In the library, to conform with C/C++ programs, we will try to use codes from 80 to 120
## (error codes in C/C++ begin at 64 but recent evolutions of Bash have reserved codes 64 to 78).
declare -x E_ERROR=90
declare -x E_OPTS=81
declare -x E_CMD=82
declare -x E_PATH=83
# colors settings depending on OS
case "$USEROS" in
Linux|FreeBSD|OpenBSD|SunOS)
declare -x COLOR_LIGHT='yellow'
declare -x COLOR_DARK='lightgrey'
declare -x COLOR_INFO='green'
declare -x COLOR_NOTICE='blue'
declare -x COLOR_WARNING='bgmagenta'
declare -x COLOR_ERROR='bgred'
declare -x COLOR_COMMENT='small'
;;
*)
declare -x COLOR_LIGHT='yellow'
declare -x COLOR_DARK='lightgrey'
declare -x COLOR_INFO='green'
declare -x COLOR_NOTICE='cyan'
declare -x COLOR_WARNING='bgcyan'
declare -x COLOR_ERROR='bgred'
declare -x COLOR_COMMENT='grey'
;;
esac
##@ LIB_FILENAME_DEFAULT = "piwi-bash-library" (read-only)
declare -rx LIB_FILENAME_DEFAULT='piwi-bash-library' 2>/dev/null;
##@ LIB_NAME_DEFAULT = "piwibashlib" (read-only)
declare -rx LIB_NAME_DEFAULT='piwibashlib' 2>/dev/null;
##@ LIB_LOGFILE = "piwibashlib.log" (read-only)
declare -rx LIB_LOGFILE="${LIB_NAME_DEFAULT}.log" 2>/dev/null;
##@ LIB_TEMPDIR = "tmp" (read-only)
declare -rx LIB_TEMPDIR='tmp' 2>/dev/null;
##@ LIB_SYSHOMEDIR = "${HOME}/.piwi-bash-library/" (read-only)
declare -rx LIB_SYSHOMEDIR="${HOME}/.${LIB_FILENAME_DEFAULT}" 2>/dev/null;
##@ LIB_SYSCACHEDIR = "${LIB_SYSHOMEDIR}/cache/" (read-only)
declare -rx LIB_SYSCACHEDIR="${LIB_SYSHOMEDIR}/cache" 2>/dev/null;
declare -rx VCS_VERSION_MASK='@vcsversion@' 2>/dev/null;
declare -x TEST_VAR='test'
#### COMMON OPTIONS #############################################################################
##@ COMMON_OPTIONS_ALLOWED = "fhiqvVx-:"
## List of default common short options.
##@ COMMON_OPTIONS_ALLOWED_MASK : REGEX mask that matches all common short options
##@ COMMON_LONG_OPTIONS_ALLOWED="working-dir:,force,help,interactive,log:,quiet,verbose,version,debug,dry-run,libvers,man,usage"
## List of default common long options.
##@ COMMON_LONG_OPTIONS_ALLOWED_MASK : REGEX mask that matches all common long options
declare -x COMMON_OPTIONS_ALLOWED='fhiqvVx-:'
declare -x COMMON_LONG_OPTIONS_ALLOWED='working-dir:,force,help,interactive,log:,quiet,verbose,version,debug,dry-run,libvers,man,usage'
declare -x COMMON_OPTIONS_ALLOWED_MASK='h|f|i|q|v|x|V'
declare -x COMMON_LONG_OPTIONS_ALLOWED_MASK='working-dir|force|help|interactive|log|quiet|verbose|version|debug|dry-run|libvers|man|usage'
##@ OPTIONS_ALLOWED | LONG_OPTIONS_ALLOWED : to be defined by the script
declare -x OPTIONS_ALLOWED="$COMMON_OPTIONS_ALLOWED"
declare -x LONG_OPTIONS_ALLOWED="$COMMON_LONG_OPTIONS_ALLOWED"
##@ COMMON_SYNOPSIS COMMON_SYNOPSIS_ACTION COMMON_SYNOPSIS_ERROR COMMON_SYNOPSIS_MANPAGE COMMON_SYNOPSIS_ACTION_MANPAGE COMMON_SYNOPSIS_ERROR_MANPAGE (read-only)
## Default values for synopsis strings (final fallback).
declare -rx COMMON_SYNOPSIS="${0} -[common options] -[script options [=value]] [--] [arguments]" 2>/dev/null;
declare -rx COMMON_SYNOPSIS_ACTION="${0} -[common options] -[script options [=value]] [--] <action>" 2>/dev/null;
declare -rx COMMON_SYNOPSIS_ERROR="${0} [-${COMMON_OPTIONS_ALLOWED_MASK}]\n\t[--${COMMON_LONG_OPTIONS_ALLOWED_MASK}]\n\t[--script-options [=value]] [--] <arguments>" 2>/dev/null;
declare -rx COMMON_SYNOPSIS_MANPAGE="~\$ <bold>${0}</bold> -[<underline>common options</underline>] -[<underline>script options</underline> [=<underline>value</underline>]] [--] [<underline>arguments</underline>]" 2>/dev/null;
declare -rx COMMON_SYNOPSIS_ACTION_MANPAGE="~\$ <bold>${0}</bold> -[<underline>common options</underline>] -[<underline>script options</underline> [=<underline>value</underline>]] [--] [<underline>action</underline>]" 2>/dev/null;
declare -rx COMMON_SYNOPSIS_ERROR_MANPAGE="${0} [-${COMMON_OPTIONS_ALLOWED_MASK}]\n\t[--${COMMON_LONG_OPTIONS_ALLOWED_MASK}]\n\t[--script-options [=value]] [--] <arguments>" 2>/dev/null;
##@ OPTIONS_ADDITIONAL_INFOS_MANPAGE (read-only)
## Information string about command line options how-to
declare -rx OPTIONS_ADDITIONAL_INFOS_MANPAGE="\tYou can group short options like '<bold>-xc</bold>', set an option argument like '<bold>-d(=)value</bold>' \n\
\tor '<bold>--long=value</bold>' and use '<bold>--</bold>' to explicitly specify the end of the script options.\n \
\tOptions are treated in the command line order." 2>/dev/null;
##@ COMMON_OPTIONS_MANPAGE (read-only)
## Information string about common script options
declare -rx COMMON_OPTIONS_MANPAGE="<bold>-h | --help</bold>\t\t\tshow this information message \n\
\t<bold>-v | --verbose</bold>\t\t\tincrease script verbosity \n\
\t<bold>-q | --quiet</bold>\t\t\tdecrease script verbosity, nothing will be written unless errors \n\
\t<bold>-f | --force</bold>\t\t\tforce some commands to not prompt confirmation \n\
\t<bold>-i | --interactive</bold>\t\task for confirmation before any action \n\
\t<bold>-x | --debug</bold>\t\t\tenable debug mode \n\
\t<bold>-V | --version</bold>\t\t\tsee the script version when available ; use option '--quiet' to get the version number only\n\
\t<bold>--working-dir=PATH</bold>\t\tredefine the working directory (default is 'pwd' - 'PATH' must exist)\n\
\t<bold>--log=FILENAME</bold>\t\t\tdefine the log filename to use (default is '${LIB_LOGFILE}')\n\
\t<bold>--usage</bold>\t\t\t\tshow quick usage information \n\
\t<bold>--man</bold>\t\t\t\tsee the current script manpage if available \n\
\t<bold>--dry-run</bold>\t\t\tsee commands to run but not run them actually \n\
\t<bold>--libvers</bold>\t\t\tsee the library version" 2>/dev/null;
##@ COMMON_OPTIONS_USAGE (read-only)
## Raw information string about common script options
declare -rx COMMON_OPTIONS_USAGE="\n\
\t-v, --verbose\t\tincrease script verbosity \n\
\t-q, --quiet\t\tdecrease script verbosity, nothing will be written unless errors \n\
\t-f, --force\t\tforce some commands to not prompt confirmation \n\
\t-i, --interactive\task for confirmation before any action \n\
\t-x, --debug\t\tenable debug mode \n\
\t--working-dir=PATH\tredefine the working directory (default is 'pwd' - 'PATH' must exist)\n\
\t--log=FILENAME\t\tdefine the log filename to use (default is '${LIB_LOGFILE}')\n\
\t--dry-run\t\tsee commands to run but not run them actually \n\n\
\t-V, --version\t\tsee the script version when available\n\
\t\t\t\tuse option '--quiet' to get the version number only\n\
\t-h, --help\t\tshow this information message \n\
\t--usage\t\t\tshow quick usage information \n\
\t--man\t\t\tsee the current script manpage if available \n\
\t\t\t\ta 'manpage-like' output will be guessed otherwise\n\
\t--libvers\t\tsee the library version" 2>/dev/null;
##@ COMMON_OPTIONS_FULLINFO_MANPAGE (read-only)
## Concatenation of COMMON_OPTIONS_MANPAGE & OPTIONS_ADDITIONAL_INFOS_MANPAGE
declare -rx COMMON_OPTIONS_FULLINFO_MANPAGE="${COMMON_OPTIONS_MANPAGE}\n\n${OPTIONS_ADDITIONAL_INFOS_MANPAGE}" 2>/dev/null;
#### LOREM IPSUM #############################################################################
##@ LOREMIPSUM (844 chars.) , LOREMIPSUM_SHORT (446 chars.) , LOREMIPSUM_MULTILINE (861 chars. / 5 lines) (read-only)
declare -rx LOREMIPSUM="At vero eos et accusamus et iusto odio dignissimos ducimus qui blanditiis praesentium voluptatum deleniti atque corrupti quos dolores et quas molestias excepturi sint occaecati cupiditate non provident, similique sunt in culpa qui officia deserunt mollitia animi, id est laborum et dolorum fuga. Et harum quidem rerum facilis est et expedita distinctio. Nam libero tempore, cum soluta nobis est eligendi optio cumque nihil impedit quo minus id quod maxime placeat facere possimus, omnis voluptas assumenda est, omnis dolor repellendus. Temporibus autem quibusdam et aut officiis debitis aut rerum necessitatibus saepe eveniet ut et voluptates repudiandae sint et molestiae non recusandae. Itaque earum rerum hic tenetur a sapiente delectus, ut aut reiciendis voluptatibus maiores alias consequatur aut perferendis doloribus asperiores repellat." 2>/dev/null;
declare -rx LOREMIPSUM_SHORT="Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum." 2>/dev/null;
declare -rx LOREMIPSUM_MULTILINE="At vero eos et accusamus et iusto odio dignissimos ducimus qui blanditiis praesentium voluptatum deleniti atque corrupti quos dolores et quas molestias excepturi. \n\
Sint occaecati cupiditate non provident, similique sunt in culpa qui officia deserunt mollitia animi, id est laborum et dolorum fuga. Et harum quidem rerum facilis est et expedita distinctio. \n\
Nam libero tempore, cum soluta nobis est eligendi optio cumque nihil impedit quo minus id quod maxime placeat facere possimus, omnis voluptas assumenda est, omnis dolor repellendus. Temporibus \n\
autem quibusdam et aut officiis debitis aut rerum necessitatibus saepe eveniet ut et voluptates repudiandae sint et molestiae non recusandae. Itaque earum rerum hic tenetur a sapiente delectus, \n\
ut aut reiciendis voluptatibus maiores alias consequatur aut perferendis doloribus asperiores repellat." 2>/dev/null;
#### LIBRARY SETUP #######################################################################
##@ LIB_NAME LIB_VERSION LIB_DATE LIB_VCSVERSION LIB_VCSVERSION LIB_COPYRIGHT LIB_LICENSE_TYPE LIB_LICENSE_URL LIB_SOURCES_URL (read-only)
## Library internal setup
declare -rx LIB_NAME="Piwi Bash library" 2>/dev/null;
declare -rx LIB_VERSION="0.2.0" 2>/dev/null;
declare -rx LIB_DATE="2014-12-20" 2>/dev/null;
declare -rx LIB_VCSVERSION="master@7df65b11b1ae085a7df840e636778e030be8c5e2" 2>/dev/null;
declare -rx LIB_DESCRIPTION="An open source day-to-day bash library" 2>/dev/null;
declare -rx LIB_LICENSE_TYPE="GPL-3.0" 2>/dev/null;
declare -rx LIB_LICENSE_URL="http://www.gnu.org/licenses/gpl-3.0.html" 2>/dev/null;
declare -rx LIB_COPYRIGHT="Copyleft (ↄ) 2013-2015 Pierre Cassat & contributors" 2>/dev/null;
declare -rx LIB_PACKAGE="piwi/bash-library" 2>/dev/null;
declare -rx LIB_SCRIPT_VCS='git' 2>/dev/null;
declare -rx LIB_SOURCES_URL="https://github.com/piwi/bash-library" 2>/dev/null;
declare -rx LIB_LICENSE="License ${LIB_LICENSE_TYPE}: <${LIB_LICENSE_URL}>" 2>/dev/null;
declare -rx LIB_SOURCES="Sources & updates: <${LIB_SOURCES_URL}>" 2>/dev/null;
declare -rx LIB_ADDITIONAL_INFO="This is free software: you are free to change and redistribute it ; there is NO WARRANTY, to the extent permitted by law." 2>/dev/null;
declare -rx LIB_DEPEDENCY_MANPAGE_INFO="This script is based on the <bold>${LIB_NAME}</bold>, \"${LIB_DESCRIPTION}\". \n\
\t${LIB_COPYRIGHT} - Some rights reserved. \n\
\tPackage [<${COLOR_NOTICE}>${LIB_PACKAGE}</${COLOR_NOTICE}>] version [<${COLOR_NOTICE}>${LIB_VERSION}</${COLOR_NOTICE}>].\n\
\t${LIB_LICENSE}.\n\
\t${LIB_SOURCES}.\n\
\tBug reports: <http://github.com/piwi/bash-library/issues>.\n\
\t${LIB_ADDITIONAL_INFO}" 2>/dev/null;
#### SYSTEM #############################################################################
#### get_system_info ()
get_system_info () {
if in_array "${USEROS}" "${LINUX_OS[@]}"
then uname -osr
else uname -vsr
fi
return "$?"
}
#### get_machine_name ()
get_machine_name () {
uname -n
return "$?"
}
#### get_path ()
## read current PATH values as human readable string
get_path () {
echo -e "$PATH" | tr : \\n; return 0;
}
#### add_path ( path )
## add a path to global environment PATH
add_path () {
if [ -n "$1" ]
then
export PATH="${PATH}:${1}"; return 0;
else
echo "add_path: empty argument!" >&2; return 1;
fi
}
#### get_script_path ( script = $0 )
## get the full real path of a script directory (passed as argument) or from current executed script
get_script_path () {
local arg="${1:-${0}}"
local relpath="$(dirname "$arg")"
local abspath="$(cd "$relpath" && pwd)"
if [ -z "$abspath" ]; then return 1; fi
echo "$abspath"
return 0
}
#### get_date ( timestamp = NOW )
## cf. <http://www.admin-linux.fr/?p=1965>
get_date () {
if [ -n "$1" ]
then date -d "@${1}" +'%d/%m/%Y (%A) %X (UTC %z)'
else date +'%d/%m/%Y (%A) %X (UTC %z)'
fi
return $?
}
#### get_ip ()
## this will load current IP address in USERIP & USERISP
get_ip () {
export USERIP="$(ifconfig | awk '/inet / { print $2 } ' | sed -e "s/addr://" 2>&-)"
export USERISP="$(ifconfig | awk '/P-t-P/ { print $3 } ' | sed -e "s/P-t-P://" 2>&-)"
return 0
}
#### FILES #############################################################################
#### file_exists ( file_path )
## test if a file, link or directory exists in the file-system
file_exists () {
local tmpvar="$1"
if [ -n "$tmpvar" ] && [ -e "$tmpvar" ]; then
return 0
fi
return 1
}
#### is_file ( file_path )
## test if a file exists in the file-system and is a 'true' file
is_file () {
local tmpvar="$1"
if [ -n "$tmpvar" ] && file_exists "$tmpvar" && [ -f "$tmpvar" ]; then
return 0
fi
return 1
}
#### is_dir ( file_path )
## test if a file exists in the file-system and is a directory
is_dir () {
local tmpvar="$1"
if [ -n "$tmpvar" ] && file_exists "$tmpvar" && [ -d "$tmpvar" ]; then
return 0
fi
return 1
}
#### is_link ( file_path )
## test if a file exists in the file-system and is a symbolic link
is_link () {
local tmpvar="$1"
if [ -n "$tmpvar" ] && file_exists "$tmpvar" && [ -L "$tmpvar" ]; then
return 0
fi
return 1
}
#### is_executable ( file_path )
## test if a file or link exists in the file-system and has executable rights
is_executable () {
local tmpvar="$1"
if
[ -n "$tmpvar" ] && file_exists "$tmpvar" &&
( is_file "$tmpvar" || ( is_link "$tmpvar" && is_file "$(readlink "$tmpvar")" ) ) &&
[ -x "$tmpvar" ];
then
return 0
fi
return 1
}
#### get_extension ( path = $0 )
## retrieve a file extension
get_extension () {
local arg="${1:-${0}}"
echo "${arg##*.}" && return 0 || return 1;
}
#### get_filename ( path = $0 )
## isolate a file name without dir & extension
get_filename () {
local arg="${1:-${0}}"
filename="$(get_basename "$arg")"
echo "${filename%.*}" && return 0 || return 1;
}
#### get_basename ( path = $0 )
## isolate a file name
get_basename () {
local arg="${1:-${0}}"
basename "$arg" && return 0 || return 1;
}
#### get_dirname ( path = $0 )
## isolate a file directory name
get_dirname () {
local arg="${1:-${0}}"
dirname "$arg" && return 0 || return 1;
}
#### get_absolute_path ( script = $0 )
## get the real path of a script (passed as argument) or from current executed script
get_absolute_path () {
local arg="${1:-${0}}"
local dirpath="$(get_script_path "$arg")"
if [ -z "$dirpath" ]; then return 1; fi
echo "${dirpath}/$(basename "$arg")" && return 0 || return 1;
}
#### / realpath ( string )
## alias of 'get_absolute_path'
realpath () { get_absolute_path "$@"; }
#### resolve ( path )
## resolve a system path replacing '~' and '.'
resolve () {
if [ $# -eq 0 ]; then return 0; fi
local _path="$1"
if [ "${_path:0:1}" = '~' ]; then _path="${_path/\~/${HOME}}"; fi
if [ "${_path:0:1}" != '/' ]; then _path="$(pwd)/${_path}"; fi
echo "$_path"
}
#### ARRAY #############################################################################
#### in_array ( item , $array[@] )
##@return 0 if item is found in array
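## e.g. (illustrative):
##     in_array 'green' "${LIBCOLORS[@]}" && echo 'known color';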
in_array () {
needle="$1"; shift
for item; do
[ "$needle" = "$item" ] && return 0
done
return 1
}
#### array_search ( item , $array[@] )
##@return the index of an array item, 0 based
array_search () {
local i=0; local search="$1"; shift
while [ "$search" != "$1" ]
do ((i++)); shift
[ -z "$1" ] && { i=0; break; }
done
[ "$i" != '0' ] && echo "$i" && return 0
return 1
}
#### array_filter ( $array[@] )
##@return array with cleaned values
array_filter () {
local -a cleaned=()
for item; do
if test "$item"; then cleaned+=("$item"); fi
done
echo "${cleaned[@]-}"
}
#### STRING #############################################################################
#### string_length ( string )
##@return the number of characters in string
string_length () {
echo "${#1}"; return 0;
}
#### / strlen ( string )
## alias of 'string_length'
strlen () { string_length "$*"; }
#### string_to_upper ( string )
string_to_upper () {
if [ -n "$1" ]; then
echo "$1" | tr '[:lower:]' '[:upper:]'; return 0;
fi
return 1
}
#### / strtoupper ( string )
## alias of 'string_to_upper'
strtoupper () { string_to_upper "$*"; }
#### string_to_lower ( string )
string_to_lower () {
if [ -n "$1" ]; then
echo "$1" | tr '[:upper:]' '[:lower:]'; return 0;
fi
return 1
}
#### / strtolower ( string )
## alias of 'string_to_lower'
strtolower () { string_to_lower "$*"; }
#### upper_case_first ( string )
upper_case_first () {
if [ -n "$1" ]; then
echo "$(string_to_upper "${1:0:1}")${1:1:${#1}}"; return 0;
fi
return 1
}
#### / ucfirst ( string )
## alias of 'upper_case_first'
ucfirst () { upper_case_first "$*"; }
##@ MAX_LINE_LENGTH = 80 : default max line length for word wrap (integer)
declare -xi MAX_LINE_LENGTH=80
##@ LINE_ENDING = \n : default line ending character for word wrap
declare -x LINE_ENDING="\n"
#### word_wrap ( text )
# wrap a text into lines of at most MAX_LINE_LENGTH characters
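# e.g. (illustrative): with MAX_LINE_LENGTH=20, `word_wrap "$LOREMIPSUM_SHORT"`
# appends LINE_ENDING after each match of 20 characters followed by a space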
word_wrap () {
if [ $# -gt 0 ]; then
echo "$*" | sed -e "s/.\{${MAX_LINE_LENGTH}\} /&${LINE_ENDING}/g"
return 0
fi
return 1
}
#### explode ( str , delim = ' ' )
# explode a string in an array using a delimiter
# result is loaded in '$EXPLODED_ARRAY'
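# e.g. (illustrative):
#     explode 'a:b:c' ':' && echo "${EXPLODED_ARRAY[1]}"   # => 'b'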
explode () {
if [ -n "$1" ]; then
local IFS="${2:- }"
read -a EXPLODED_ARRAY <<< "$1"
export EXPLODED_ARRAY
return 0
fi
return 1
}
#### implode ( array[@] , delim = ' ' )
# implode an array in a string using a delimiter
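# e.g. (illustrative):
#     my_array=( a b c ); implode my_array[@] ':'   # => 'a:b:c'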
implode () {
if [ -n "$1" ]; then
declare -a _array=("${!1}")
declare _delim="${2:- }"
local oldIFS="$IFS"
IFS="$_delim"
local _arraystr="${_array[*]}"
IFS="$oldIFS"
export IFS
echo "$_arraystr"
return 0
fi
return 1
}
#### explode_letters ( str )
# explode a string in an array of single letters
# result is loaded in '$EXPLODED_ARRAY'
explode_letters () {
if [ -n "$1" ]; then
local _input="$1"
local i=0
local -a letters=()
while [ $i -lt ${#_input} ]; do letters[$i]="${_input:$i:1}"; i=$((i+1)); done
EXPLODED_ARRAY=( "${letters[@]}" )
export EXPLODED_ARRAY
return 0
fi
return 1
}
#### BOOLEAN #############################################################################
#### onoff_bit ( bool )
## echoes 'on' if bool=true, 'off' if it is false
onoff_bit () {
if [ "$1" = 'true' ]; then echo 'on'; else echo 'off'; fi; return 0;
}
#### UTILS #############################################################################
#### is_numeric ( value )
is_numeric () {
local var="$1"
echo -e "$var" | grep "[^0-9]" > /dev/null
local _status="$?"
[ "$_status" = '1' ] && return 0;
[ "$_status" = '0' ] && return 1;
}
#### is_numeric_by_variable_name ( variable_name )
is_numeric_by_variable_name () {
export tmpvar="${!1}"
is_numeric "$tmpvar"
return "$?"
}
#### is_array ( $array[*] )
## this will (only for now) only test whether more than one argument was passed
##+ and will therefore return '1' (false) for a single-item array
is_array () {
[ $# -gt 1 ] && return 0;
return 1;
# export tmpvar=${!1}
## echo "${tmpvar[*]}"
# is_array_by_variable_name tmpvar
# return "$?"
}
#### is_array_by_variable_name ( variable_name )
is_array_by_variable_name () {
local var="$1"
declare -p "$var" 2> /dev/null | grep -q 'declare \-a'
return "$?"
}
#### is_boolean ( value )
is_boolean () {
local var="$1"
if [ "$var" = 'true' ]||[ "$var" = 'false' ]||[ "$var" = '1' ]||[ "$var" = '0' ]
then return 0
else return 1
fi
}
#### is_boolean_by_variable_name ( variable_name )
is_boolean_by_variable_name () {
export tmpvar="${!1}"
is_boolean "$tmpvar"
return "$?"
}
#### is_string ( value )
is_string () {
local tmpvar=$1
if
[ $# -eq 1 ] &&
! is_numeric "$tmpvar" &&
! is_boolean "$tmpvar" ;
then
return 0
fi
return 1
}
#### is_string_by_variable_name ( variable_name )
is_string_by_variable_name () {
export tmpvar="${!1}"
is_string "$tmpvar"
return "$?"
}
#### VCS #############################################################################
declare -rxa VCS_VARS="(VCSVERSION SCRIPT_VCS)" 2>/dev/null;
##@ VCSVERSION
## Variable used as version marker like `branch@commit_sha`
##@ SCRIPT_VCS
## VCS type of the script (only 'git' for now)
declare -x SCRIPT_VCS=''
#### get_version_string ( file_path = $0 , constant_name = VCSVERSION )
## get the version string from a file_path
get_version_string () {
local fpath="${1:-$0}"
local cstname="${2:-VCSVERSION}"
if [ ! -z "${!cstname}" ]
then
echo "${!cstname}"
else
local _infile
if [ -f "$fpath" ]; then
_infile=$(head -n200 "$fpath" | grep -o -e "${cstname}=\".*\"" | sed "s|^${cstname}=\"\(.*\)\"$|\1|g")
fi
if [ ! -z "$_infile" ]; then
echo "$_infile"
elif [ "$SCRIPT_VCS" = 'git' ]; then
if git_is_clone 2>/dev/null; then
local _gitvers=$(git_get_version 2>/dev/null)
[ ! -z "$_gitvers" ] && echo "$_gitvers";
fi
fi
fi
return 0
}
#### get_version_sha ( get_version_string )
## get last commit sha from a GIT version string
get_version_sha () {
if [ $# -gt 0 ]; then
echo "$1" | cut -d'@' -f 2
return 0
fi
return 1
}
#### get_version_branch ( get_version_string )
## get the branch name from a GIT version string
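## e.g. (illustrative): with _v='master@7df65b1', `get_version_branch "$_v"`
## echoes 'master' and `get_version_sha "$_v"` echoes '7df65b1'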
get_version_branch () {
if [ $# -gt 0 ]; then
echo "$1" | cut -d'@' -f 1
return 0
fi
return 1
}
#### vcs_is_clone ( path = pwd , remote_url = null )
vcs_is_clone () {
if [ "$SCRIPT_VCS" = '' ]; then
error "You must define the 'SCRIPT_VCS' variable to use vcs methods!"
fi
local _path="${1:-$(pwd)}"
local _remote_url="${2:-}"
local _status=1
case "$SCRIPT_VCS" in
git) git_is_clone "$_path" "$_remote_url"; _status="$?";;
esac
return "$_status"
}
#### vcs_get_branch ( path = pwd )
vcs_get_branch () {
if [ "$SCRIPT_VCS" = '' ]; then
error "You must define the 'SCRIPT_VCS' variable to use vcs methods!"
fi
local _path="${1:-$(pwd)}"
local _status=1
case "$SCRIPT_VCS" in
git) git_get_branch "$_path"; _status="$?";;
esac
return "$_status"
}
#### vcs_get_commit ( path = pwd )
vcs_get_commit () {
if [ "$SCRIPT_VCS" = '' ]; then
error "You must define the 'SCRIPT_VCS' variable to use vcs methods!"
fi
local _path="${1:-$(pwd)}"
local _status=1
case "$SCRIPT_VCS" in
git) git_get_commit "$_path"; _status="$?";;
esac
return "$_status"
}
#### vcs_get_version ( path = pwd )
vcs_get_version () {
if [ "$SCRIPT_VCS" = '' ]; then
error "You must define the 'SCRIPT_VCS' variable to use vcs methods!"
fi
local _path="${1:-$(pwd)}"
local _status=1
case "$SCRIPT_VCS" in
git) git_get_version "$_path"; _status="$?";;
esac
return "$_status"
}
#### vcs_get_remote_version ( path = pwd , branch = HEAD )
vcs_get_remote_version () {
if [ "$SCRIPT_VCS" = '' ]; then
error "You must define the 'SCRIPT_VCS' variable to use vcs methods!"
fi
local _path="${1:-$(pwd)}"
local _branch="${2:-HEAD}"
local _status=1
case "$SCRIPT_VCS" in
git) git_get_remote_version "$_path" "$_branch"; _status="$?";;
esac
return "$_status"
}
#### vcs_make_clone ( repository_url , target_dir = LIB_SYSCACHEDIR )
vcs_make_clone () {
if [ "$SCRIPT_VCS" = '' ]; then
error "You must define the 'SCRIPT_VCS' variable to use vcs methods!"
fi
local _repo_url="${1:-}"
local _target_dir="${2:-${LIB_SYSCACHEDIR}}"
local _status=1
case "$SCRIPT_VCS" in
git) git_make_clone "$_repo_url" "$_target_dir"; _status="$?";;
esac
return "$_status"
}
#### vcs_update_clone ( target_dir )
vcs_update_clone () {
if [ "$SCRIPT_VCS" = '' ]; then
error "You must define the 'SCRIPT_VCS' variable to use vcs methods!"
fi
local _target_dir="${1:-}"
local _status=1
case "$SCRIPT_VCS" in
git) git_update_clone "$_target_dir"; _status="$?";;
esac
return "$_status"
}
#### vcs_change_branch ( target_dir , branch = 'master' )
vcs_change_branch () {
if [ "$SCRIPT_VCS" = '' ]; then
error "You must define the 'SCRIPT_VCS' variable to use vcs methods!"
fi
local _target_dir="${1:-}"
local _branch="${2:-master}"
local _status=1
case "$SCRIPT_VCS" in
git) git_change_branch "$_target_dir" "$_branch"; _status="$?";;
esac
return "$_status"
}
##@ CURRENT_GIT_CLONE_DIR
## Environment variable to store current GIT clone directory
declare -x CURRENT_GIT_CLONE_DIR
#### git_is_clone ( path = pwd , remote_url = null )
## check if a path, or `pwd`, is a git clone of a remote if 2nd argument is set
git_is_clone () {
local curpath="$(pwd)"
local targetpath="${1:-${curpath}}"
local gitcmd="$(which git)"
if [ $# -gt 1 ]; then
if [ -n "$gitcmd" ]
then
cd "$targetpath"
local gitremote="$(git config --get remote.origin.url)"
if [ "$gitremote" = "${2}.git" ]||[ "$gitremote" = "$2" ]
then return 0; else return 1;
fi
cd "$curpath"
else
command_error 'git'
fi
fi
local gitpath="${targetpath}/.git"
if [ -d "$gitpath" ]; then return 0; fi;
targetpath="$(get_absolute_path "$targetpath")"
local subdir="${targetpath/${curpath}/}"
if [ "$subdir" != "$targetpath" ]; then
local subdirgitpath="${subdir}/.git"
if [ -d "$subdirgitpath" ]; then cd "$curpath" && return 0; fi;
fi
return 1
}
#### git_get_branch ( path = pwd )
git_get_branch () {
if [ $# -gt 0 ]; then cd "$1"; fi
if git_is_clone; then
local gitcmd="$(which git)"
if [ -n "$gitcmd" ]; then
git rev-parse --abbrev-ref HEAD
return 0
fi
fi
return 1
}
#### git_get_commit ( path = pwd )
git_get_commit () {
if [ $# -gt 0 ]; then cd "$1"; fi
if git_is_clone; then
local gitcmd="$(which git)"
if [ -n "$gitcmd" ]; then
git rev-parse HEAD
return 0
fi
fi
return 1
}
#### git_get_version ( path = pwd )
git_get_version () {
if [ $# -gt 0 ]; then cd "$1"; fi
if git_is_clone; then
local gitcmd="$(which git)"
if [ -n "$gitcmd" ]; then
local _getbr="$(git_get_branch)"
local _gethash="$(git_get_commit)"
if [ -n "$_getbr" ] && [ -n "$_gethash" ]; then
echo "${_getbr}@${_gethash}"
return 0
fi
fi
fi
return 1
}
#### git_get_remote_version ( path = pwd , branch = HEAD )
## get the last GIT commit SHA of the given branch from the remote
git_get_remote_version () {
local clonedir="${1:-$(pwd)}"
local branch="${2:-HEAD}"
git ls-remote "$clonedir" | awk "/${branch}/ {print \$1}" | sort -u ;
return 0
}
#### git_make_clone ( repository_url , target_dir = LIB_SYSCACHEDIR )
## create a git clone of a remote repository in CURRENT_GIT_CLONE_DIR
##@env clone directory is loaded in CURRENT_GIT_CLONE_DIR
git_make_clone () {
if [ $# -eq 0 ]; then return 0; fi
local repourl="$1"
if [ $# -eq 1 ]
then
local _dirname="$(basename "$repourl")"
_dirname="${_dirname/.git/}"
local target="${LIB_SYSCACHEDIR}/${_dirname}"
make_library_cachedir
else
local target="$2"
if [ ! -d "$target" ]; then mkdir -p "$target"; fi
fi
local oldpwd="$(pwd)"
local gitcmd="$(which git)"
if [ -z "$gitcmd" ]; then command_error 'git'; fi
local tocreate=true
if [ -d "$target" ]; then
if git_is_clone "$target" "$repourl"; then tocreate=false; fi
fi
if [ "$tocreate" = 'true' ]; then
rm -rf "$target" && mkdir "$target"
verecho "- creating git clone of repository '${repourl}' into '${target}' ..."
git clone -q "$repourl" "$target"
cd "$oldpwd"
fi
export CURRENT_GIT_CLONE_DIR="$target"
return 0
}
#### git_update_clone ( target_dir )
## update a git clone
##@param target_dir: name of the clone in LIB_SYSCACHEDIR or full path of concerned clone
git_update_clone () {
if [ $# -eq 0 ]; then return 0; fi
local oldpwd="$(pwd)"
local gitcmd="$(which git)"
if [ -z "$gitcmd" ]; then command_error 'git'; fi
local target="$1"
if [ ! -d "$target" ]; then
local fulltarget="${LIB_SYSCACHEDIR}/${target}"
if [ ! -d "$fulltarget" ]; then
path_error "git clone target '${target}' not found"
fi
target="$fulltarget"
fi
cd "$target"
verecho "- updating git clone in '${target}' ..."
git pull -q
cd "$oldpwd"
export CURRENT_GIT_CLONE_DIR="$target"
return 0
}
#### git_change_branch ( target_dir , branch = 'master' )
## change a git clone tracking branch
##@param target_dir: name of the clone in LIB_SYSCACHEDIR or full path of concerned clone
git_change_branch () {
if [ $# -eq 0 ]; then return 0; fi
local oldpwd="$(pwd)"
local gitcmd="$(which git)"
if [ -z "$gitcmd" ]; then command_error 'git'; fi
local target="$1"
if [ ! -d "$target" ]; then
local fulltarget="${LIB_SYSCACHEDIR}/${target}"
if [ ! -d "$fulltarget" ]; then
path_error "git clone target '${target}' not found"
fi
target="$fulltarget"
fi
local targetbranch="${2:-master}"
cd "$target"
if [ "$targetbranch" != "$(git_get_branch)" ]; then
verecho "- switching git clone branch to '${targetbranch}' in '${target}' ..."
git checkout -q "$targetbranch" && git pull -q
fi
cd "$oldpwd"
return 0
}
#### COLORIZED CONTENTS #############################################################################
##@ LIBCOLORS = ( default black red green yellow blue magenta cyan grey white lightred lightgreen lightyellow lightblue lightmagenta lightcyan lightgrey ) (read-only)
## Terminal colors table
declare -rxa LIBCOLORS="(default black red green yellow blue magenta cyan grey white lightred lightgreen lightyellow lightblue lightmagenta lightcyan lightgrey)" 2>/dev/null;
declare -rxa LIBCOLORS_CODES_FOREGROUND="(39 30 31 32 33 34 35 36 90 97 91 92 93 94 95 96 37)" 2>/dev/null;
declare -rxa LIBCOLORS_CODES_BACKGROUND="(49 40 41 42 43 44 45 46 100 107 101 102 103 104 105 106 47)" 2>/dev/null;
##@ LIBTEXTOPTIONS = ( normal bold small underline blink reverse hidden ) (read-only)
## Terminal text options table
declare -rxa LIBTEXTOPTIONS="(normal bold small underline blink reverse hidden)" 2>/dev/null;
declare -rxa LIBTEXTOPTIONS_CODES="(0 1 2 4 5 7 8)" 2>/dev/null;
#### get_text_format_tag ( code )
## echoes the terminal format tag for a given code: "\033[CODEm"
##@param code must be one of the library colors or text-options codes
get_text_format_tag () {
if [ -n "$1" ]; then
case "$USEROS" in
Linux|FreeBSD|OpenBSD|SunOS)
echo "\033[${1}m";;
*)
echo "\x1B[${1}m";;
esac
return 0
fi
return 1
}
#### get_color_code ( name , background = false )
##@param name must be in LIBCOLORS
get_color_code () {
if [ -n "$1" ]; then
if in_array "$1" "${LIBCOLORS[@]}"; then
if [ $# -gt 1 ]
then echo "${LIBCOLORS_CODES_BACKGROUND[$(array_search "$1" "${LIBCOLORS[@]}")]}";
else echo "${LIBCOLORS_CODES_FOREGROUND[$(array_search "$1" "${LIBCOLORS[@]}")]}";
fi
return 0
fi
fi
return 1
}
#### get_color_tag ( name , background = false )
##@param name must be in LIBCOLORS
get_color_tag () {
if [ -n "$1" ]; then
if in_array "$1" "${LIBCOLORS[@]}"; then
if [ $# -gt 1 ]
then get_text_format_tag "${LIBCOLORS_CODES_BACKGROUND[$(array_search "$1" "${LIBCOLORS[@]}")]}";
else get_text_format_tag "${LIBCOLORS_CODES_FOREGROUND[$(array_search "$1" "${LIBCOLORS[@]}")]}";
fi
return 0;
fi
fi
return 1
}
#### get_text_option_code ( name )
##@param name must be in LIBTEXTOPTIONS
get_text_option_code () {
if [ -n "$1" ]; then
if in_array "$1" "${LIBTEXTOPTIONS[@]}"
then
echo "${LIBTEXTOPTIONS_CODES[$(array_search "$1" "${LIBTEXTOPTIONS[@]}")]}";
return 0;
fi
fi
return 1
}
#### get_text_option_tag ( name )
##@param name must be in LIBTEXTOPTIONS
get_text_option_tag () {
if [ -n "$1" ]; then
if in_array "$1" "${LIBTEXTOPTIONS[@]}"
then
get_text_format_tag "${LIBTEXTOPTIONS_CODES[$(array_search "$1" "${LIBTEXTOPTIONS[@]}")]}";
return 0;
fi
fi
return 1
}
#### get_text_option_tag_close ( name )
##@param name must be in LIBTEXTOPTIONS
get_text_option_tag_close () {
if [ -n "$1" ]; then
if in_array "$1" "${LIBTEXTOPTIONS[@]}"
then
get_text_format_tag "2${LIBTEXTOPTIONS_CODES[$(array_search "$1" "${LIBTEXTOPTIONS[@]}")]}";
return 0;
fi
fi
return 1
}
#### colorize ( string , text_option , foreground , background )
## echoes a colorized string ; all arguments are optional except `string`
##@param text_option must be in LIBTEXTOPTIONS
##@param foreground must be in LIBCOLORS
##@param background must be in LIBCOLORS
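## e.g. (illustrative):
##     colorize 'WARNING' bold red yellow   # bold red text on a yellow background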
colorize () {
if [ $# -eq 0 ]; then return 0; fi
local textopt
if [ $# -gt 1 ]; then textopt=$(get_text_option_code "$2"); fi
local fgopt
if [ $# -gt 2 ]; then fgopt=$(get_color_code "$3"); fi
local bgopt
if [ $# -gt 3 ]; then bgopt=$(get_color_code "$4" true); fi
local add=''
if [ ! -z "$textopt" ]; then add+="$textopt"; fi
if [ ! -z "$fgopt" ]; then
if [ -n "$add" ]; then add+=";${fgopt}"; else add+="$fgopt"; fi
fi
if [ ! -z "$bgopt" ]; then
if [ -n "$add" ]; then add+=";${bgopt}"; else add+="$bgopt"; fi
fi
opentag="$(get_text_format_tag "$add")"
closetag="$(get_text_format_tag "$(get_text_option_code normal)")"
if [ ! -n "$add" ]
then echo "$1"
else echo "${opentag}${1}${closetag}"
fi
return 0
}
#### parse_color_tags ( "string with <bold>tags</bold>" )
## parse in-text tags like:
## ... <bold>my text</bold> ... // "tag" in LIBTEXTOPTIONS
## ... <red>my text</red> ... // "tag" in LIBCOLORS
## ... <bgred>my text</bgred> ... // "tag" in LIBCOLORS, constructed as "bgTAG"
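## e.g. (illustrative):
##     parse_color_tags "a <bold>bold</bold> and <bgred>highlighted</bgred> word"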
parse_color_tags () {
if [ $# -eq 0 ]; then return 0; fi
transformed=''
while read -r line; do
doneopts=()
transformedline="$line"
for opt in $(echo "$line" | grep -o '<.[^/>]*>' | sed "s|^.*<\(.[^>]*\)>.*\$|\1|g"); do
opt="${opt/\//}"
if in_array "$opt" "${doneopts[@]-}"; then continue; fi
doneopts+=("$opt")
if in_array "$opt" "${LIBTEXTOPTIONS[@]}"; then
code="$(get_text_option_code "$opt")"
tag="$(get_text_option_tag "$opt")"
if in_array "$USEROS" "${LINUX_OS[@]}"
then normaltag="$(get_text_option_tag_close "$opt")"
else normaltag="$(get_text_option_tag normal)"
fi
elif in_array "$opt" "${LIBCOLORS[@]}"; then
code="$(get_color_code "$opt")"
tag="$(get_color_tag "$opt")"
normaltag="$(get_color_tag default)"
else
code="$(get_color_code "${opt/bg/}" true)"
tag="$(get_color_tag "${opt/bg/}" true)"
normaltag="$(get_color_tag default true)"
fi
if in_array "$USEROS" "${LINUX_OS[@]}"; then
tag="$(printf '\%s' "$tag")"
normaltag="$(printf '\%s' "$normaltag")"
fi
if [ ! -z "$tag" ]; then
strsubstituted=$(echo "$transformedline" | sed "s|<${opt}>|${tag}|g;s|</${opt}>|${normaltag}|g" 2> /dev/null);
if [ ! -z "$strsubstituted" ]; then transformedline="$strsubstituted"; fi
fi
done
if [ -n "$transformed" ]; then transformed+="\n"; fi
transformed+="$transformedline"
done <<< "$1"
_echo "$transformed"
return 0
}
#### strip_colors ( string )
strip_colors () {
if [ $# -eq 0 ]; then return 0; fi
transformed=''
while read -r line; do
case "$USEROS" in
Linux|FreeBSD|OpenBSD|SunOS)
stripped_line=$(echo "${line}" | sed -r "s/\x1B\[([0-9]{1,2}(;[0-9]{1,2})?)?[m|K]//g");;
*)
stripped_line=$(echo "${line}" | sed 's|\x1B\[[0-9;]*[a-zA-Z]||g');;
esac
if [ -n "${transformed}" ]; then transformed+="\n"; fi
transformed+="${stripped_line}"
done <<< "$1"
_echo "${transformed}"
return 0
}
#### TEMPORARY FILES #####################################################################
#### get_tempdir_path ( dirname = "LIB_TEMPDIR" )
## creates a default temporary dir with fallback: first in current dir then in system '/tmp/'
## the real temporary directory path is loaded in the global `TEMPDIR`
##@param dirname The name of the directory to create (default is `tmp/`)
get_tempdir_path () {
if [ -n "$TEMPDIR" ]; then return 0; fi
local tmpdir="${1:-${LIB_TEMPDIR}}"
local tmpsyspath="/tmp/${LIB_NAME_DEFAULT}"
local tmppwdpath="${WORKINGDIR}/${tmpdir}"
if [ ! -d "$tmppwdpath" ]; then
mkdir "$tmppwdpath" && chmod 777 "$tmppwdpath"
if [ ! -d "$tmppwdpath" ]||[ ! -w "$tmppwdpath" ]; then
if [ ! -d "$tmpsyspath" ]; then
mkdir "$tmpsyspath" && chmod 777 "$tmpsyspath"
if [ ! -d "$tmpsyspath" ]||[ ! -w "$tmpsyspath" ]
then error "Can not create temporary directory (tries to create '${tmppwdpath}' then '${tmpsyspath}')"
else export TEMPDIR="$tmpsyspath";
fi
else export TEMPDIR="$tmpsyspath";
fi
else export TEMPDIR="$tmppwdpath";
fi
else export TEMPDIR="$tmppwdpath";
fi
return 0
}
#### get_tempfile_path ( filename , dirname = "LIB_TEMPDIR" )
## this will echo a unique new temporary file path
##@param filename The temporary filename to use
##@param dirname The name of the directory to create (default is `tmp/`)
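## e.g. (illustrative): `get_tempfile_path 'my.tmp'` echoes something like
## "${TEMPDIR}/my.tmp" (suffixed with '-1', '-2'... if the file already exists)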
get_tempfile_path () {
if [ -z "$1" ]; then return 0; fi
local tmpfile="$1"
if [ ! -z "$2" ]
then
export TEMPDIR=''
get_tempdir_path "$2"
else
get_tempdir_path
fi
local filepath="${TEMPDIR}/${tmpfile}"
while [ -f "$filepath" ]; do
n=$(( ${n:=0} + 1 ))
filepath="${TEMPDIR}/${tmpfile}-${n}"
done
echo "$filepath"
return 0
}
#### create_tempdir ( dirname = "LIB_TEMPDIR" )
## this will create a temporary directory in the working directory with full rights
## use this method to over-write an existing temporary directory
##@param dirname The name of the directory to create (default is `tmp/`)
create_tempdir () {
if [ $# -gt 0 ]
then
export TEMPDIR=''
get_tempdir_path "$1"
else
get_tempdir_path
fi
return 0
}
#### clear_tempdir ( dirname = "LIB_TEMPDIR" )
## this will delete the temporary directory
##@param dirname The name of the directory (default is `tmp/`)
clear_tempdir () {
if [ $# -gt 0 ]
then get_tempdir_path "$1"
else get_tempdir_path
fi
if [ -d "$TEMPDIR" ]; then
rm -rf "$TEMPDIR"
fi
return 0
}
#### clear_tempfiles ( dirname = "LIB_TEMPDIR" )
## this will delete the temporary directory contents (not the directory itself)
##@param dirname The name of the directory (default is `tmp/`)
clear_tempfiles () {
if [ $# -gt 0 ]
then get_tempdir_path "$1"
else get_tempdir_path
fi
if [ -d "$TEMPDIR" ]; then
rm -rf "${TEMPDIR}/*"
fi
return 0
}
#### LOG FILES #####################################################################
#### get_log_filepath ()
## creates a default-placed log file with fallback: first in '/var/log', then in LIB_SYSHOMEDIR, finally in the current dir
## the real log file path is loaded in the global `LOGFILEPATH`
get_log_filepath () {
if [ ! -n "$LOGFILE" ]; then export LOGFILE="$LIB_LOGFILE"; fi
local logsys="/var/log/${LOGFILE}"
touch "$logsys" 2> /dev/null;
if [ -w "$logsys" ]
then
export LOGFILEPATH="$logsys"
else
make_library_homedir
local logsys="${LIB_SYSHOMEDIR}/${LOGFILE}"
touch "$logsys" 2> /dev/null;
if [ -w "$logsys" ]
then export LOGFILEPATH="$logsys"
else export LOGFILEPATH="$LOGFILE"
fi
fi
return 0
}
#### log ( message , type='' )
## this will add an entry in LOGFILEPATH
log () {
if [ $# -eq 0 ]; then return 0; fi
local add=''
if [ $# -gt 1 ]; then add=" <${2}>"; fi
if [ ! -n "$LOGFILEPATH" ]; then get_log_filepath; fi
echo "$(date '+%B %d %T') $(get_machine_name) [${USER}] [$$]${add} - ${1}" >> "$LOGFILEPATH"
return 0
}
#### read_log ()
## this will read the LOGFILEPATH content
read_log () {
if [ ! -n "$LOGFILEPATH" ]; then get_log_filepath; fi
if [ -r "$LOGFILEPATH" ] && [ -f "$LOGFILEPATH" ]; then cat "$LOGFILEPATH"; fi
return 0
}
#### CONFIGURATION FILES #####################################################################
#### get_global_configfile ( file_name )
get_global_configfile () {
if [ -z "$1" ]; then
warning "'get_global_configfile()' requires a file name as argument"
return 0
fi
local filename="$1"
echo "/etc/${filename}.conf"
return 0
}
#### get_user_configfile ( file_name )
get_user_configfile () {
if [ -z "$1" ]; then
warning "'get_user_configfile()' requires a file name as argument"
return 0
fi
local filename="$1"
echo "~/.${filename}.conf"
return 0
}
#### read_config ( file_name )
## read a default-placed config file with fallback: first in '/etc/', then in '$HOME/'
read_config () {
if [ -z "$1" ]; then
warning "'read_config()' requires a file name as argument"
return 0
fi
local filename="$1"
local global_filepath="/etc/${filename}"
local user_filepath="~/.${filename}"
if [ -r "$global_filepath" ]; then
source "$global_filepath"
fi
if [ -r "$user_filepath" ]; then
source "$user_filepath"
fi
return 0
}
#### read_configfile ( file_path )
## read a config file
read_configfile () {
if [ -z "$1" ]; then
warning "'read_configfile()' requires a file path as argument"
return 0
fi
local filepath="$1"
if [ ! -f "$filepath" ]; then
warning "Config file '$1' not found!"
return 0
fi
    while read -r line; do
if [[ "$line" =~ ^[^#]*= ]]; then
name="${line%%=*}"
value="${line##*=}"
export "${name}"="${value}"
fi
done < "$filepath"
return 0
}
#### write_configfile ( file_path , array_keys , array_values )
## array params must be passed as "array[@]" (no dollar sign)
write_configfile () {
if [ -z "$1" ]; then
warning "'write_configfile()' requires a file name as 1st argument"
return 0
fi
if [ -z "$2" ]; then
warning "'write_configfile()' requires a configuration keys array as 2nd argument"
return 0
fi
if [ -z "$3" ]; then
warning "'write_configfile()' requires a configuration values array as 3rd argument"
return 0
fi
local filepath="$1"
declare -a array_keys=("${!2}")
declare -a array_values=("${!3}")
touch "$filepath"
cat > "$filepath" <<EOL
$(build_configstring array_keys[@] array_values[@])
EOL
if [ -f "$filepath" ]
then return 0
else return 1
fi
}
#### set_configval ( file_path , key , value )
set_configval () {
if [ -z "$1" ]; then
warning "'set_configval()' requires a file name as 1st argument"
return 0
fi
if [ -z "$2" ]; then
warning "'set_configval()' requires a configuration key as 2nd argument"
return 0
fi
if [ -z "$3" ]; then
warning "'set_configval()' requires a configuration value as 3rd argument"
return 0
fi
local filepath="$1"
local key="$2"
local value="$3"
touch "$filepath"
if grep -q "${key}=*" "$filepath"; then
if in_array "${USEROS}" "${LINUX_OS[@]}"
then sed -i -e "s|\(${key}=\).*|\1${value}|" "$filepath"
else sed -i '' -e "s|\(${key}=\).*|\1${value}|" "$filepath"
fi
else
echo "${key}=${value}" >> "$filepath"
fi
return 0
}
#### get_configval ( file_path , key )
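## e.g. (illustrative round-trip, assuming a writable './my.conf'):
##     set_configval './my.conf' 'user' 'piwi'
##     get_configval './my.conf' 'user'   # => 'piwi'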
get_configval () {
if [ -z "$1" ]; then
warning "'get_configval()' requires a file name as 1st argument"
return 0
fi
if [ -z "$2" ]; then
warning "'get_configval()' requires a configuration key as 2nd argument"
return 0
fi
local filepath="$1"
local key="$2"
if [ -f "$filepath" ]; then
if grep -q "${key}=*" "$filepath"; then
local line="$(grep "^${key}=" "$filepath")"
echo "${line##*=}"
fi
fi
return 0
}
#### build_configstring ( array_keys , array_values )
## params must be passed as "array[@]" (no dollar sign)
build_configstring () {
if [ $# -eq 0 ]; then return 0; fi
declare -a array_keys=("${!1}")
declare -a array_values=("${!2}")
local i=0
local CONFIG_STR=''
for key in "${array_keys[@]}"; do
value="${array_values[${i}]}"
sep=''
if [ -n "$CONFIG_STR" ]; then sep="\n"; fi
CONFIG_STR="${CONFIG_STR}${sep}${key}=${value}"
((i++))
done
_echo "$CONFIG_STR"
return 0
}
#### LIBRARY VARS #############################################################################
#### verbose_mode ( 1/0 )
## This enables or disables the "verbose" mode.
## If it is enabled, the "quiet" mode is disabled.
##@env VERBOSE
verbose_mode () {
if [ -n "$1" ] && ( [ "$1" = '1' ]||[ "$1" = 'true' ] ); then
quiet_mode 0
export VERBOSE=true
return 0
elif [ -n "$1" ] && ( [ "$1" = '0' ]||[ "$1" = 'false' ] ); then
export VERBOSE=false
return 0
fi
return 1
}
#### quiet_mode ( 1/0 )
## This enables or disables the "quiet" mode.
## If it is enabled, the "verbose" mode is disabled.
##@env QUIET
quiet_mode () {
if [ -n "$1" ] && ( [ "$1" = '1' ]||[ "$1" = 'true' ] ); then
debug_mode 0
verbose_mode 0
export QUIET=true
return 0
elif [ -n "$1" ] && ( [ "$1" = '0' ]||[ "$1" = 'false' ] ); then
export QUIET=false
return 0
fi
return 1
}
#### debug_mode ( 1/0 )
## This enables or disables the "debug" mode.
## If it is enabled, the "verbose" mode is enabled too and the "quiet" mode is disabled.
##@env DEBUG
debug_mode () {
if [ -n "$1" ] && ( [ "$1" = '1' ]||[ "$1" = 'true' ] ); then
verbose_mode 1
export DEBUG=true
return 0
elif [ -n "$1" ] && ( [ "$1" = '0' ]||[ "$1" = 'false' ] ); then
export DEBUG=false
return 0
fi
return 1
}
#### interactive_mode ( 1/0 )
## This enables or disables the "interactive" mode.
## If it is enabled, the "forced" mode is disabled.
##@env INTERACTIVE
interactive_mode () {
if [ -n "$1" ] && ( [ "$1" = '1' ]||[ "$1" = 'true' ] ); then
forcing_mode 0
export INTERACTIVE=true
return 0
elif [ -n "$1" ] && ( [ "$1" = '0' ]||[ "$1" = 'false' ] ); then
export INTERACTIVE=false
return 0
fi
return 1
}
#### forcing_mode ( 1/0 )
## This enables or disables the "forced" mode.
## If it is enabled, the "interactive" mode is disabled.
##@env INTERACTIVE
forcing_mode () {
if [ -n "$1" ] && ( [ "$1" = '1' ]||[ "$1" = 'true' ] ); then
interactive_mode 0
export FORCED=true
return 0
elif [ -n "$1" ] && ( [ "$1" = '0' ]||[ "$1" = 'false' ] ); then
export FORCED=false
return 0
fi
return 1
}
#### dryrun_mode ( 1/0 )
## This enables or disables the "dry-run" mode.
## If it is enabled, the "interactive" and "forced" modes are disabled.
##@env DRYRUN
dryrun_mode () {
if [ -n "$1" ] && ( [ "$1" = '1' ]||[ "$1" = 'true' ] ); then
forcing_mode 0
interactive_mode 0
export DRYRUN=true
verecho "- dry-run option enabled: commands shown as 'debug >> \"cmd\"' are not executed"
return 0
elif [ -n "$1" ] && ( [ "$1" = '0' ]||[ "$1" = 'false' ] ); then
export DRYRUN=false
return 0
fi
return 1
}
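# Usage sketch: all mode setters accept '1'/'true' or '0'/'false' and keep the
# related flags coherent with each other.
#   verbose_mode 1     # => VERBOSE=true
#   quiet_mode 1       # => QUIET=true and VERBOSE/DEBUG disabled
#   dryrun_mode true   # => DRYRUN=true and INTERACTIVE/FORCED disabled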
#### set_working_directory ( path )
## handles the '-d' option for instance
## throws an error if 'path' does not exist
set_working_directory () {
if [ -n "$1" ]
then
local _wd="$(resolve "$1")"
if [ -d "$1" ]
then export WORKINGDIR="$_wd"
else path_error "$1"
fi
cd "$WORKINGDIR"
return 0
else
echo "set_working_directory: empty argument!" >&2; return 1;
fi
}
#### set_log_filename ( path )
## handles the '-l' option for instance
set_log_filename () {
if [ -n "$1" ]
then
export LOGFILE="$1"; return 0;
else
echo "set_log_filename: empty argument!" >&2; return 1;
fi
}
##@ ECHOCMD (read-only: 'builtin' or 'gnu')
## Test if 'echo' is shell builtin or program
if [ "$($(which echo) --version)" = '--version' ]
then declare -rx ECHOCMD='builtin' 2>/dev/null;
else declare -rx ECHOCMD='gnu' 2>/dev/null;
fi
#### _echo ( string )
## echoes the string with the true 'echo -e' command
## use this for colorization
_echo () {
# tput sgr0
case "$ECHOCMD" in
gnu) $(which echo) -e "$*";;
builtin) echo "$*";;
esac
return 0
}
#### _necho ( string )
## echoes the string with the true 'echo -en' command
## use this for colorization and no new line
_necho () {
# tput sgr0
case "$ECHOCMD" in
gnu) $(which echo) -en "$*";;
builtin) echo -n "$*" >&2;;
esac
return 0
}
#### prompt ( string , default = y , options = Y/n )
## prompt user a string proposing different response options and selecting a default one
## final user fill is loaded in USERRESPONSE
prompt () {
if [ $# -eq 0 ]; then return 0; fi
local add=''
if [ -n "$3" ]; then add+="[${3}] "; fi
local colored="$(colorize "? >> ${1} ?" bold)"
_necho "${colored} ${add}" >&2
read answer
export USERRESPONSE="${answer:-${2}}"
return 0
}
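# Usage sketch (hypothetical question): the user's answer, or the default when
# they just hit enter, is exported in USERRESPONSE.
#   prompt "Overwrite the existing file" "n" "y/N"
#   if [ "$USERRESPONSE" = 'y' ]; then echo "overwriting ..."; fi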
#### selector_prompt ( list[@] , string , list_string , default = 1 )
## prompt user a string proposing an indexed list of answers for selection
##+ and returns a valid result (user is re-prompted while the answer seems not correct)
## NOTE - the 'list' MUST be passed like `list[@]` (no quotes and dollar sign)
## final user choice is loaded in USERRESPONSE
selector_prompt () {
if [ $# -eq 0 ]; then return 0; fi
local list=( "${!1}" )
local list_count="${#list[@]}"
local string="$2"
local list_string="$3"
local selected=0
local first_time=1
while [ "$selected" -lt 1 ]||[ "$selected" -gt "$list_count" ]; do
if [ "$first_time" -eq 0 ]
then
echo "! - Unknown index '${selected}'"
prompt "Please select a value in the list (use indexes between brackets)" "${3:-1}"
else
first_time=0
if [ ! -z "$string" ]; then echo "> ${list_string}:"; fi
for i in "${!list[@]}"; do echo " - [$((i + 1))] ${list[$i]}"; done
prompt "$string" "${3:-1}"
fi
selected="$USERRESPONSE"
done
export USERRESPONSE="${list[$((selected - 1))]}"
return 0
}
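# Usage sketch (hypothetical list): the list MUST be passed as 'name[@]'
# (no quotes, no dollar sign); the selected VALUE lands in USERRESPONSE.
#   fruits=( 'apple' 'pear' 'plum' )
#   selector_prompt fruits[@] "Which fruit" "Available fruits"
#   echo "selected: ${USERRESPONSE}"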
#### verbose_echo ( string )
## Echoes the string(s) in "verbose" mode.
verbose_echo () {
if [ "$VERBOSE" = 'true' ]; then _echo "$*"; fi; return 0;
}
#### / verecho ( string )
## alias of 'verbose_echo'
verecho () { verbose_echo "$@"; return "$?"; }
#### quiet_echo ( string )
## Echoes the string(s) in not-"quiet" mode.
quiet_echo () {
if [ "$QUIET" != 'true' ]; then _echo "$*"; fi; return 0;
}
#### / quietecho ( string )
## alias of 'quiet_echo'
quietecho () { quiet_echo "$@"; return "$?"; }
#### debug_echo ( string )
## Echoes the string(s) in "debug" mode.
debug_echo () {
if [ "$DEBUG" = 'true' ]; then _echo "$*"; fi; return 0;
}
#### / debecho ( string )
## alias of 'debug_echo'
debecho () { debug_echo "$@"; return "$?"; }
#### evaluate ( command )
## evaluates the command catching events:
## - stdout is loaded in global `$CMD_OUT`
## - stderr is loaded in global `$CMD_ERR`
## - final status is loaded in global `$CMD_STATUS`
##@env CMD_OUT CMD_ERR CMD_STATUS : loaded with evaluated command's STDOUT, STDERR and STATUS
##@error will end with any caught error (exit status !=0)
evaluate () {
unset CMD_OUT CMD_ERR CMD_STATUS
local f_out=$(mktemp);
local f_err=$(mktemp);
eval "($*) 1>$f_out 2>$f_err;";
CMD_STATUS="$?";
CMD_OUT="$(cat "$f_out")" && rm -f "$f_out";
CMD_ERR="$(cat "$f_err")" && rm -f "$f_err";
echo "$CMD_OUT" >&1;
echo "$CMD_ERR" >&2;
export CMD_OUT CMD_ERR CMD_STATUS
return "$CMD_STATUS"
}
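# Usage sketch: outputs and status of the evaluated command are kept in the
# CMD_OUT / CMD_ERR / CMD_STATUS globals for later inspection.
#   evaluate "ls /no/such/path"
#   echo "exited with ${CMD_STATUS}: ${CMD_ERR}"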
#### debug_evaluate ( command )
## evaluates the command if "dryrun" is "off", just write it on screen otherwise
debug_evaluate () {
if [ $# -eq 0 ]; then return 0; fi
unset CMD_OUT CMD_ERR CMD_STATUS
if [ "$DRYRUN" = 'true' ]
then
_echo "$(colorize 'dry-run >>' bold) \"$*\""
local status=0
else
debug_echo "$(colorize '>>' bold) \"$*\""
unset CMD_OUT CMD_ERR CMD_STATUS
evaluate "$@" 1>/dev/null 2>&1
[ ! -z "$CMD_OUT" ] && echo "$CMD_OUT" >&1
[ ! -z "$CMD_ERR" ] && echo "$CMD_ERR" >&2
local status="$CMD_STATUS"
fi
return "$status"
}
#### / debevaluate ( command )
## alias of 'debug_evaluate'
debevaluate () { debug_evaluate "$1"; return "$?"; }
#### / debeval ( command )
## alias of 'debug_evaluate'
debeval () { debug_evaluate "$1"; return "$?"; }
#### interactive_evaluate ( command , debug_exec = true )
## evaluates the command after user confirmation if "interactive" is "on"
interactive_evaluate () {
if [ $# -eq 0 ]; then return 0; fi
local DEBEXECUTION="${2:-true}"
if [ "$INTERACTIVE" = 'true' ]; then
prompt "Run command: \"$1\"" "y" "Y/n"
while true; do
case "$USERRESPONSE" in
[yY]* ) break;;
* ) _echo "_ no"; return 0; break;;
esac
done
fi
cmd_fct="${FUNCNAME[2]}"
cmd_line="${BASH_LINENO[1]}"
if [ "$DEBEXECUTION" = 'true' ]
then
debug_evaluate "$1"
else
evaluate "$1"
fi
if [ "$CMD_STATUS" -ne 0 ]; then
error "error on execution: ${CMD_ERR}" "$CMD_STATUS" "$cmd_fct" "$cmd_line"
fi
return "${CMD_STATUS:-0}"
}
#### / ievaluate ( command )
## alias of 'interactive_evaluate'
ievaluate () { interactive_evaluate "$1" "${2:-true}"; return "$?"; }
#### / ieval ( command )
## alias of 'interactive_evaluate'
ieval () { interactive_evaluate "$1" "${2:-true}"; return "$?"; }
#### execute ( command )
## executes the command with outputs and status handling
execute () {
if [ $# -eq 0 ]; then return 0; fi
cmd_fct="${FUNCNAME[1]}"
cmd_line="${BASH_LINENO[1]}"
cmd_out="$( eval "$1" 2>&1 )"
cmd_status="$?"
if [ -n "$cmd_status" ] && [ "$cmd_status" -eq '0' ]; then
echo "$cmd_out"
return "$cmd_status"
else
error "error on execution: ${cmd_out}" "$cmd_status" "$cmd_fct" "$cmd_line"
return "$?"
fi
}
#### debug_execute ( command )
## execute the command if "dryrun" is "off", just write it on screen otherwise
debug_execute () {
if [ $# -eq 0 ]; then return 0; fi
if [ "$DRYRUN" = 'true' ]
then
_echo "$(colorize 'dry-run >>' bold) \"$1\""
return 0
else
debug_echo "$(colorize '>>' bold) \"$1\""
execute "$1"
return "$?"
fi
}
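# Usage sketch: with the "dry-run" mode enabled the command is only printed,
# never executed, which makes destructive commands safe to rehearse.
#   dryrun_mode 1
#   debug_execute "rm -f /tmp/some-file"   # prints: dry-run >> "rm -f /tmp/some-file"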
#### / debug_exec ( command )
## alias of 'debug_execute'
debug_exec () { debug_execute "$1"; return "$?"; }
#### / debexec ( command )
## alias of 'debug_execute'
debexec () { debug_execute "$1"; return "$?"; }
#### interactive_execute ( command , debug_exec = true )
## executes the command after user confirmation if "interactive" is "on"
##@return
interactive_execute () {
if [ $# -eq 0 ]; then return 0; fi
local DEBEXECUTION="${2:-true}"
if [ "$INTERACTIVE" = 'true' ]; then
prompt "Run command: \"$1\"" "y" "Y/n"
while true; do
case "$USERRESPONSE" in
[yY]* ) break;;
* ) _echo "_ no"; return 1; break;;
esac
done
fi
if [ "$DEBEXECUTION" = 'true' ]
then
debug_execute "$1"
return "$?"
else
execute "$1"
return "$?"
fi
}
#### / interactive_exec ( command , debug_exec = true )
## alias of 'interactive_execute'
interactive_exec () { interactive_execute "$1" "${2:-true}"; return "$?"; }
#### / iexec ( command , debug_exec = true )
## alias of 'interactive_execute'
iexec () { interactive_execute "$1" "${2:-true}"; return "$?"; }
#### MESSAGES / ERRORS #############################################################################
#### info ( string, bold = true )
## writes the string on screen and return
info () {
if [ $# -eq 0 ]; then return 0; fi
local USEBOLD="${2:-true}"
if [ "$USEBOLD" = 'true' ]
then _echo "$(colorize " >> ${1}" bold "${COLOR_INFO}")"
else _echo "$(colorize ' >>' bold) ${1}"
fi
return 0
}
#### warning ( string , funcname = FUNCNAME[1] , line = BASH_LINENO[1] , tab=' ' )
## writes the error string on screen and return
warning () {
if [ $# -eq 0 ]; then return 0; fi
local TAG="${4:- }"
local PADDER=$(printf '%0.1s' " "{1..1000})
local LINELENGTH="$(tput cols)"
local FIRSTLINE="${TAG}[at ${2:-${FUNCNAME[1]}} line ${3:-${BASH_LINENO[1]}}]"
local SECONDLINE="$(colorize "${TAG}!! >> ${1:-unknown warning}" bold)"
printf -v TMPSTR \
"%*.*s\\\n%-*s\\\n%-*s\\\n%*.*s" \
0 "$LINELENGTH" "$PADDER" \
$((LINELENGTH - $(string_length "$FIRSTLINE"))) "$FIRSTLINE" \
$((LINELENGTH - $(string_length "$SECONDLINE"))) "${SECONDLINE}<${COLOR_WARNING}>";
parse_color_tags "\n<${COLOR_WARNING}>${TMPSTR}</${COLOR_WARNING}>\n"
return 0
}
#### error ( string , status = 90 , funcname = FUNCNAME[1] , line = BASH_LINENO[1] , tab=' ' )
## writes the error string on screen and then exit with an error status
##@error default status is E_ERROR (90)
error () {
local TAG="${5:- }"
local PADDER=$(printf '%0.1s' " "{1..1000})
local LINELENGTH=$(tput cols)
local ERRSTRING="${1:-unknown error}"
local ERRSTATUS="${2:-${E_ERROR}}"
if [ -n "$LOGFILEPATH" ]; then log "${ERRSTRING}" "error:${ERRSTATUS}"; fi
local FIRSTLINE="${TAG}[at ${3:-${FUNCNAME[1]}} line ${4:-${BASH_LINENO[1]}}] (to get help, try option '--help')"
local SECONDLINE="$(colorize "${TAG}!! >> ${ERRSTRING}" bold)"
printf -v TMPSTR \
"%*.*s\\\n%-*s\\\n%-*s\\\n%*.*s" \
0 "$LINELENGTH" "$PADDER" \
"$((LINELENGTH - $(string_length "$FIRSTLINE")))" "$FIRSTLINE" \
"$((LINELENGTH - $(string_length "$SECONDLINE")))" "${SECONDLINE}<${COLOR_ERROR}>";
parse_color_tags "\n<${COLOR_ERROR}>${TMPSTR}</${COLOR_ERROR}>\n" >&2
exit "$ERRSTATUS"
}
#### get_synopsis_string ( short_opts=OPTIONS_ALLOWED , long_opts=LONG_OPTIONS_ALLOWED )
## builds a synopsis string using script's declared available options
get_synopsis_string () {
local shortopts="${1:-${OPTIONS_ALLOWED}}"
local longopts="${2:-${LONG_OPTIONS_ALLOWED}}"
local -a short_options=( $(get_short_options_array "$shortopts") )
local -a long_options=( $(get_long_options_array "$longopts") )
local short_options_string=''
local long_options_string=''
local i=1
for o in "${short_options[@]}"; do
short_options_string+="${o//:/}"
if [ "$i" -lt "${#short_options[@]}" ]; then
short_options_string+='|'
fi
i=$((i + 1))
done
i=1
for o in "${long_options[@]}"; do
long_options_string+="${o//:/}"
if [ "$i" -lt "${#long_options[@]}" ]; then
long_options_string+='|'
fi
i=$((i + 1))
done
synopsis="${0} [-${short_options_string}]\n\t[--${long_options_string}]\n\t[--] <arguments>";
echo "$synopsis"
return 0
}
#### simple_synopsis ()
## writes a synopsis string using script's declared available options
simple_synopsis () {
printf "$(parse_color_tags "<bold>usage:</bold> %s \nRun option '--help' for help.")" "$(get_synopsis_string)";
echo
return 0
}
#### simple_usage ( synopsis = SYNOPSIS_ERROR )
## writes a synopsis usage info
simple_usage () {
local USAGESTR=''
if [ ! -z "${1:-}" ]; then
if [ "$1" = 'lib' ]; then
USAGESTR="$(_echo "$COMMON_SYNOPSIS")"
elif [ "$1" = 'action' ]; then
USAGESTR="$(_echo "$COMMON_SYNOPSIS_ACTION")"
else
USAGESTR="$(_echo "$1")"
fi
elif [ -n "$SYNOPSIS_ERROR" ]; then
USAGESTR="$(_echo "$SYNOPSIS_ERROR")"
else
USAGESTR="$(_echo "$COMMON_SYNOPSIS_ERROR")"
fi
printf "$(parse_color_tags "<bold>usage:</bold> %s \nRun option '--help' for help.")" "$USAGESTR";
echo
return 0
}
#### simple_error ( string , status = 90 , synopsis = SYNOPSIS_ERROR , funcname = FUNCNAME[1] , line = BASH_LINENO[1] )
## writes an error string as a simple message with a synopsis usage info
##@error default status is E_ERROR (90)
simple_error () {
local ERRSTRING="${1:-unknown error}"
local ERRSTATUS="${2:-${E_ERROR}}"
if [ "$DEBUG" = 'true' ]; then
ERRSTRING=$(gnu_error_string "${ERRSTRING}" '' "${3}" "${4}")
fi
if [ -n "$LOGFILEPATH" ]; then log "$ERRSTRING" "error:${ERRSTATUS}"; fi
printf "$(parse_color_tags "<bold>error:</bold> %s")" "$ERRSTRING" >&2;
echo >&2
simple_usage "${3:-}" >&2
exit "$ERRSTATUS"
}
#### simple_error_multi ( array[@] , status = 90 , synopsis = SYNOPSIS_ERROR , funcname = FUNCNAME[1] , line = BASH_LINENO[1] )
## writes multiple errors strings as a simple message with a synopsis usage info
##@error default status is E_ERROR (90)
simple_error_multi () {
local -a ERRSTACK=( "${!1:-unknown error}" )
local ERRSTATUS="${2:-${E_ERROR}}"
for ERRSTRING in "${ERRSTACK[@]}"; do
if [ "$DEBUG" = 'true' ]; then
ERRSTRING=$(gnu_error_string "${ERRSTRING}" '' "${3}" "${4}")
fi
if [ -n "$LOGFILEPATH" ]; then log "$ERRSTRING" "error:${ERRSTATUS}"; fi
printf "$(parse_color_tags "<bold>error:</bold> %s")" "$ERRSTRING" >&2;
echo >&2
done
simple_usage "${3:-}" >&2
exit "$ERRSTATUS"
}
#### dev_error ( string , status = 90 , filename = BASH_SOURCE[2] , funcname = FUNCNAME[2] , line = BASH_LINENO[2] )
## print a formatted error string with dev info using the 'caller' stack trace and exit
## print a full backtrace if `VERBOSE=true`
dev_error () {
local ERRSTRING="${1:-unknown error}"
local ERRSTATUS="${2:-${E_ERROR}}"
echo
gnu_error_string "$ERRSTRING" "${3:-${BASH_SOURCE[2]}}" "${4:-${FUNCNAME[2]}}" "${5:-${BASH_LINENO[2]}}" >&2
echo
if [ "$VERBOSE" = 'true' ]
then
get_stack_trace
echo "this is [${SHELL} ${BASH_VERSION}]"
else
echo "use option '--verbose' to get a stack trace."
fi
echo "use option '--help' to get help."
exit "$ERRSTATUS"
}
#### get_stack_trace ( first_item = 0 )
## get a formatted stack trace
get_stack_trace () {
echo "Stack trace:"
local i="${1:-0}"
for t in "${BASH_SOURCE[@]}"; do
if [ "$i" -lt "$(("${#BASH_SOURCE[@]}" - 1))" ]
then echo "#${i} '${FUNCNAME[${i}]} ()' : called in '${BASH_SOURCE[$((i+1))]}' at line ${BASH_LINENO[${i}]}"
else echo "#${i} ${BASH_SOURCE[${i}]} : ${FUNCNAME[${i}]}"
fi
i=$((i + 1))
done
return 0
}
#### gnu_error_string ( string , filename = BASH_SOURCE[2] , funcname = FUNCNAME[2] , line = BASH_LINENO[2] )
## echoes something like 'sourcefile:funcname:lineno: message' (GNU error format)
gnu_error_string () {
local errorstr=''
local _source="${2:-${BASH_SOURCE[2]}}"
if [ -n "$_source" ]; then errorstr+="${_source}:"; fi
local _func="${3:-${FUNCNAME[2]}}"
if [ -n "$_func" ]; then errorstr+="${_func}:"; fi
local _line="${4:-${BASH_LINENO[2]}}"
if [ -n "$_line" ]; then errorstr+="${_line}:"; fi
echo "${errorstr} ${1}"
return 0
}
#### no_option_error ()
## no script option error
##@error exits with status E_OPTS (81)
no_option_error () {
error "No option or argument not understood ! Nothing to do ..." "${E_OPTS}" \
"${FUNCNAME[1]}" "${BASH_LINENO[0]}";
}
#### no_option_simple_error ()
## no script option simple error
##@error exits with status E_OPTS (81)
no_option_simple_error () {
simple_error "No option or argument not understood ! Nothing to do ..." "${E_OPTS}" \
"${FUNCNAME[1]}" "${BASH_LINENO[0]}";
}
#### unknown_option_error ( option )
## invalid script option error
##@error exits with status E_OPTS (81)
unknown_option_error () {
error "Unknown option '${1:-?}'" "${E_OPTS}" \
"${FUNCNAME[1]}" "${BASH_LINENO[0]}";
}
#### unknown_option_simple_error ( option )
## invalid script option simple error
##@error exits with status E_OPTS (81)
unknown_option_simple_error () {
simple_error "Unknown option '${1:-?}'" "${E_OPTS}" \
"${FUNCNAME[1]}" "${BASH_LINENO[0]}";
}
#### command_error ( cmd )
## command not found error
##@error exits with status E_CMD (82)
command_error () {
error "'$1' command seems not installed on your machine ... The process can't be done !" \
"${E_CMD}" "${FUNCNAME[1]}" "${BASH_LINENO[0]}";
}
#### command_simple_error ( cmd )
## command not found simple error
##@error exits with status E_CMD (82)
command_simple_error () {
simple_error "'$1' command seems not installed on your machine ... The process can't be done !" \
"${E_CMD}" "${FUNCNAME[1]}" "${BASH_LINENO[0]}";
}
#### path_error ( path )
## path not found error
##@error exits with status E_PATH (83)
path_error () {
error "Path '$1' (file or dir) can't be found ..." "${E_PATH}" \
"${FUNCNAME[1]}" "${BASH_LINENO[0]}";
}
#### path_simple_error ( path )
## path not found simple error
##@error exits with status E_PATH (83)
path_simple_error () {
simple_error "Path '$1' (file or dir) can't be found ..." "${E_PATH}" \
"${FUNCNAME[1]}" "${BASH_LINENO[0]}";
}
#### SCRIPT OPTIONS / ARGUMENTS #############################################################################
##@ ORIGINAL_SCRIPT_OPTS="$@" (read-only)
## Original list of raw command line arguments
declare -rx ORIGINAL_SCRIPT_OPTS="$@" 2>/dev/null;
##@ SCRIPT_PARAMS=''
## String of re-arranged parameters (options & arguments)
declare -x SCRIPT_PARAMS=''
##@ SCRIPT_PIPED_INPUT=''
## String of any piped content from previous command
declare -x SCRIPT_PIPED_INPUT=''
##@ SCRIPT_OPTS=()
## Array of options with arguments
declare -xa SCRIPT_OPTS=()
##@ SCRIPT_ARGS=()
## Array of script's arguments
declare -xa SCRIPT_ARGS=()
##@ SCRIPT_PROGRAMS=()
## Array of program's options
declare -xa SCRIPT_PROGRAMS=()
##@ SCRIPT_OPTS_ERRS=()
## Array of options errors
declare -ax SCRIPT_OPTS_ERRS=()
##@ ARGIND
## Integer of current argument index
declare -xi ARGIND=0
##@ ARGUMENT
## Current argument string (see `ARGIND`)
declare -x ARGUMENT=''
## Options error messages
declare -rx UNKNOWN_OPTION_MASK="unknown option '%s'!" 2>/dev/null;
declare -rx MISSING_OPTION_ARGUMENT_MASK="option '%s' requires an argument!" 2>/dev/null;
#### read_from_pipe ( file=/dev/stdin )
read_from_pipe () {
local fpipe="${1:-/dev/stdin}"
local fpipedir="$(dirname "$fpipe")"
if [ -e "$fpipe" ] && [ -p "$fpipe" ]; then
# while [[ -L "$fpipe" ]]; do
# fpipe="$(readlink "$fpipe")"
# if [ ! -e "$fpipe" ]; then
# fpipe="${fpipedir}/${fpipe}"
# fi
# done
SCRIPT_PIPED_INPUT="$(cat "$fpipe")"
fi
export SCRIPT_PIPED_INPUT
}
#### get_short_options_array ( short_opts=OPTIONS_ALLOWED )
get_short_options_array () {
local shortopts="${1:-${OPTIONS_ALLOWED}}"
local -a short_options=()
local shortoptions="${shortopts//-:/}"
explode_letters "$shortoptions"
local b=''
for i in "${EXPLODED_ARRAY[@]}"; do
if [ "$i" = ':' ] && [ -n "$b" ]; then
b="${b}${i}"
elif [ "$i" != '-' ]; then
[ -n "$b" ] && short_options+=( "$b" );
b="$i"
else
[ -n "$b" ] && short_options+=( "$b" );
b=''
fi
done
[ -n "$b" ] && short_options+=( "$b" );
echo "${short_options[@]}"
return 0
}
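# Usage sketch (hypothetical definition): splits a getopts-style string into
# single declarations, keeping the colon suffixes that mark option arguments.
#   get_short_options_array "hvqt:r::"    # => h v q t: r::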
#### get_short_options_string ( delimiter = '|' , short_opts=OPTIONS_ALLOWED )
get_short_options_string () {
local delimiter="${1:-|}"
local shortopts="${2:-${OPTIONS_ALLOWED}}"
local -a short_options=( $(get_short_options_array "$shortopts") )
implode short_options[@] "${delimiter}"
return 0
}
#### get_option_declaration ( option_name , short_opts=OPTIONS_ALLOWED )
get_option_declaration () {
local _optname="$1"
local shortopts="${2:-${OPTIONS_ALLOWED}}"
local -a opts_table=( $(get_options_array "$shortopts") )
local optiondef_ind=$(array_search "$_optname" "${opts_table[@]}")
[ -z "$optiondef_ind" ] && optiondef_ind=$(array_search "${_optname}:" "${opts_table[@]}");
[ -z "$optiondef_ind" ] && optiondef_ind=$(array_search "${_optname}::" "${opts_table[@]}");
echo "${opts_table[${optiondef_ind}]}"
return 0
}
#### get_option_argument ( "$x" )
## echoes the argument of an option
get_option_argument () {
if [ -n "$1" ]; then echo "${1#=}"; fi; return 0;
}
#### / get_option_arg ( "$x" )
## alias of 'get_option_argument'
get_option_arg () { get_option_argument "$@"; return "$?"; }
#### get_long_options_array ( long_opts=LONG_OPTIONS_ALLOWED )
get_long_options_array () {
local longopts="${1:-${LONG_OPTIONS_ALLOWED}}"
local -a long_options=()
explode "$longopts" ","
for i in "${EXPLODED_ARRAY[@]}"; do
long_options+=( "$i" )
done
echo "${long_options[@]}"
return 0
}
#### get_long_options_string ( delimiter = '|' , long_opts=LONG_OPTIONS_ALLOWED )
get_long_options_string () {
local delimiter="${1:-|}"
local longopts="${2:-${LONG_OPTIONS_ALLOWED}}"
local -a long_options=( $(get_long_options_array "$longopts") )
implode long_options[@] "$delimiter"
return 0
}
#### get_long_option_declaration ( option_name , long_opts=LONG_OPTIONS_ALLOWED )
get_long_option_declaration () {
local _optname="$1"
local longopts="${2:-${LONG_OPTIONS_ALLOWED}}"
local -a opts_table=( $(get_long_options_array "$longopts") )
local optiondef_ind=$(array_search "$_optname" "${opts_table[@]}")
[ -z "$optiondef_ind" ] && optiondef_ind=$(array_search "${_optname}:" "${opts_table[@]}");
[ -z "$optiondef_ind" ] && optiondef_ind=$(array_search "${_optname}::" "${opts_table[@]}");
echo "${opts_table[${optiondef_ind}]}"
return 0
}
#### get_long_option_name ( "$x" )
## echoes the name of a long option
get_long_option_name () {
local arg="$1"
if [ -n "$arg" ]; then
if [[ "$arg" =~ .*=.* ]]; then arg="${arg%=*}"; fi
echo "$arg" | cut -d " " -f1
return 0
fi
return 1
}
#### / get_long_option ( "$x" )
## alias of 'get_long_option_name()'
get_long_option () { get_long_option_name "$@"; return "$?"; }
#### get_long_option_argument ( "$x" )
## echoes the argument of a long option
get_long_option_argument () {
local arg=''
local argstr="$1"
if [ -n "$argstr" ]; then
[[ "$argstr" =~ .*\ .* ]] && arg="$(echo "$argstr" | cut -d " " -f2-)";
[[ "$argstr" =~ .*=.* ]] && arg="${argstr#*=}";
[[ "$argstr" != "$arg" ]] && echo "$arg";
return 0
fi
return 1
}
#### / get_long_option_arg ( "$x" )
## alias of 'get_long_option_argument'
get_long_option_arg () { get_long_option_argument "$@"; return "$?"; }
##@ LONGOPTNAME=''
## The name of current long option treated
declare -x LONGOPTNAME=''
##@ LONGOPTARG=''
## The argument set for current long option
declare -x LONGOPTARG=''
#### parse_long_option ( $OPTARG , ${!OPTIND} )
## This will parse and retrieve the name and argument of current long option.
parse_long_option () {
[ $# -eq 0 ] && return 1;
local _optarg="$1"
local _nextarg="${2:-}"
LONGOPTNAME="$(get_long_option "$_optarg")"
LONGOPTARG="$(get_long_option_arg "$_optarg")"
local optiondef="$(get_long_option_declaration "$LONGOPTNAME")"
# if [ -z "$LONGOPTARG" ] && [ "${optiondef: -1}" = ':' ] && [ "${optiondef: -2}" != '::' ]; then
if [ -z "$LONGOPTARG" ] && [ "${optiondef: -1}" = ':' ]; then
LONGOPTARG="$_nextarg"
((OPTIND++))
fi
LONGOPTARG="$(echo "$LONGOPTARG" | sed -e "s/^'//" -e "s/'$//")"
export LONGOPTNAME LONGOPTARG OPTIND
return 0
}
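# Usage sketch inside a getopts loop (assuming the short options definition
# contains '-:' so that long options are caught by the '-' case):
#   while getopts "hv-:" OPTION; do
#       case "$OPTION" in
#           -)  parse_long_option "$OPTARG" "${!OPTIND}"
#               case "$LONGOPTNAME" in
#                   target) echo "target is '${LONGOPTARG}'";;
#               esac;;
#       esac
#   done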
#### init_arguments ()
## init the script arguments treatment putting `ARGIND` on `1` if arguments exist
init_arguments () {
if [ "${#SCRIPT_ARGS[@]}" -gt 0 ]; then
export ARGIND=1
fi
return 0
}
#### getargs ( VAR_NAME )
## method to loop over command line's arguments just like `getopts` does for options
## this will load current argument's value in `VAR_NAME` and increment `ARGIND` at each turn
getargs () {
local argvar="${1:-ARGUMENT}"
get_next_argument
local _status=$?
if [ "$ARGIND" -eq 0 ]||[ "$_status" -ne 0 ]; then
return 1
fi
if [ "$argvar" != 'ARGUMENT' ]; then
eval "export $argvar=\"$ARGUMENT\"";
fi
return 0
}
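# Usage sketch: loop over script arguments once SCRIPT_ARGS has been populated
# (e.g. by 'rearrange_script_options'), just like 'getopts' does for options.
#   while getargs ARG; do
#       echo "- argument: ${ARG}"
#   done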
#### get_next_argument ()
## get next script argument according to current `ARGIND`
## load it in `ARGUMENT` and let `ARGIND` incremented
get_next_argument () {
local argsnum="${#SCRIPT_ARGS[@]}"
if [ "${#SCRIPT_ARGS[@]}" -gt 0 ] && [ "$ARGIND" -lt "$((argsnum + 1))" ]
then
ARGUMENT="${SCRIPT_ARGS[$((ARGIND - 1))]}"
((ARGIND++))
export ARGIND ARGUMENT
return 0
else
return 1
fi
}
#### get_last_argument ()
## echoes the last script argument
get_last_argument () {
if [ "${#SCRIPT_ARGS[@]}" -gt 0 ]
then
echo "${SCRIPT_ARGS[${#SCRIPT_ARGS[@]}-1]}"
return 0
else
return 1
fi
}
#### rearrange_script_options_new ( "$0" , "$@" )
rearrange_script_options_new () {
getopt --test > /dev/null
local _vers="$?"
if [ -n "$_vers" ] && [ "$_vers" -ne 4 ]; then
verecho "> your version of 'getopt' seems to be old! Processing alternative 'rearrange_script_options()' method."
rearrange_script_options "$@"
return 0
fi
local progname="$1"
shift
# use the '--alternative' option to allow single dash for long options
SCRIPT_PARAMS="$(getopt --quiet --shell 'bash' --options "$OPTIONS_ALLOWED" --longoptions "$LONG_OPTIONS_ALLOWED" --name "$progname" -- "$@")"
local _ret="$?"
case "$_ret" in
2) error "an internal 'getopt' error occurred!";;
3) error "an internal error occurred while calling 'getopt'!";;
esac
if [ "$SCRIPT_PARAMS" = ' --' ]; then SCRIPT_PARAMS=''; fi
export SCRIPT_PARAMS
eval set -- "$SCRIPT_PARAMS"
rearrange_script_options "$@"
return 0
}
#### rearrange_script_options ( "$@" )
## this will separate script options from script arguments (emulation of GNU "getopt")
## options are loaded in $SCRIPT_OPTS with their arguments
## arguments are loaded in $SCRIPT_ARGS
rearrange_script_options () {
SCRIPT_OPTS=()
SCRIPT_ARGS=()
local oldoptind="$OPTIND"
local -a params=( "$@" )
local numargs="${#params[@]}"
local -a longopts_table=( $(get_long_options_array) )
local firstoptdone=false
local firstchar
local arg
if [ -z "$SCRIPT_PARAMS" ]; then
for i in "${!params[@]}"; do
arg="${params[${i}]}"
firstchar="${arg:0:1}"
if [ "$firstchar" != "-" ]
then SCRIPT_ARGS+=( "$arg" )
elif [ "$firstoptdone" != 'true' ]; then firstoptdone=true;
fi
if [ "$firstoptdone" != 'true' ]; then unset params["$i"]; fi
done
fi
OPTIND=1
local eoo=false
while getopts ":${OPTIONS_ALLOWED}" OPTION "${params[@]-}"; do
OPTNAME="$OPTION"
OPTARG="$(get_option_arg "${OPTARG:-}")"
local argindex=false
case "$OPTNAME" in
-) LONGOPTNAME="$(get_long_option "$OPTARG")"
LONGOPTARG="$(get_long_option_arg "$OPTARG")"
optiondef=$(get_long_option_declaration "$LONGOPTNAME")
if [ -z "$LONGOPTARG" ] && [ "${optiondef: -1}" = ':' ]; then
if [ -z "$SCRIPT_PARAMS" ]; then
((OPTIND++))
fi
if [ "${!OPTIND:0:1}" != '-' ]
then
LONGOPTARG="${!OPTIND}"
if [ -n "$SCRIPT_PARAMS" ]; then
((OPTIND++))
fi
fi
fi
case "$LONGOPTNAME" in
-) eoo=true;
((OPTIND++))
break;;
*) if [ ! -z "$LONGOPTARG" ] && [ "$LONGOPTNAME" != "$LONGOPTARG" ]; then
SCRIPT_OPTS+=( "--${LONGOPTNAME}='${LONGOPTARG}'" )
SCRIPT_ARGS=( "${SCRIPT_ARGS[@]//${LONGOPTARG}}" )
else
SCRIPT_OPTS+=( "--${LONGOPTNAME}" )
fi;;
esac ;;
\?)
SCRIPT_ARGS+=( "-${OPTION}" )
;;
*) if [ "$eoo" != 'true' ]; then
if [ ! -z "$OPTARG" ]; then
SCRIPT_OPTS+=( "-${OPTION}='${OPTARG}'" )
SCRIPT_ARGS=( "${SCRIPT_ARGS[@]//${OPTARG}}" )
else
SCRIPT_OPTS+=( "-${OPTION}" )
fi
fi;;
esac
done
if [ -z "$SCRIPT_PARAMS" ]; then
((OPTIND++))
fi
while [ "$OPTIND" -lt $((numargs + 1)) ]; do
if [ "${!OPTIND}" != '--' ]; then
if [ -z "$SCRIPT_PARAMS" ]; then
SCRIPT_ARGS=( "${SCRIPT_ARGS[@]//${!OPTIND}}" )
fi
SCRIPT_ARGS+=( "${!OPTIND}" )
fi
((OPTIND++))
done
OPTIND="$oldoptind"
SCRIPT_ARGS=( $(array_filter "${SCRIPT_ARGS[@]-}") )
if [ -z "$SCRIPT_PARAMS" ]; then
if [ "${#SCRIPT_OPTS[@]}" -gt 0 ] && [ "${#SCRIPT_ARGS[@]}" -eq 0 ];
then SCRIPT_PARAMS="${SCRIPT_OPTS[*]}";
elif [ "${#SCRIPT_OPTS[@]}" -eq 0 ] && [ "${#SCRIPT_ARGS[@]}" -gt 0 ];
then SCRIPT_PARAMS="${SCRIPT_ARGS[*]}";
elif [ "${#SCRIPT_OPTS[@]}" -gt 0 ] && [ "${#SCRIPT_ARGS[@]}" -gt 0 ];
then SCRIPT_PARAMS="${SCRIPT_OPTS[*]} -- ${SCRIPT_ARGS[*]}";
fi
fi
init_arguments
export OPTIND SCRIPT_OPTS SCRIPT_ARGS SCRIPT_PARAMS
return 0
}
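# Usage sketch (hypothetical command line, assuming '-v' and '--target' are
# declared in OPTIONS_ALLOWED / LONG_OPTIONS_ALLOWED): options and arguments
# are separated whatever their original order was.
#   rearrange_script_options "-v" "arg1" "--target=/tmp" "arg2"
#   # => SCRIPT_OPTS holds the options, SCRIPT_ARGS holds 'arg1' and 'arg2'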
#### parse_common_options_strict ( "$@" = SCRIPT_OPTS )
## parse common script options as described in $COMMON_OPTIONS_INFO throwing an error for unknown options
## this will stop options treatment at '--'
parse_common_options_strict () {
if [ $# -gt 0 ]
then parse_common_options "$@"
else parse_common_options
fi
if [ "${#SCRIPT_OPTS[@]}" -gt 0 ] && [ "${#SCRIPT_ARGS[@]}" -eq 0 ]; then set -- "${SCRIPT_OPTS[@]}";
elif [ "${#SCRIPT_OPTS[@]}" -eq 0 ] && [ "${#SCRIPT_ARGS[@]}" -gt 0 ]; then set -- "${SCRIPT_ARGS[@]}";
elif [ "${#SCRIPT_OPTS[@]}" -gt 0 ] && [ "${#SCRIPT_ARGS[@]}" -gt 0 ]; then set -- "${SCRIPT_OPTS[@]}" -- "${SCRIPT_ARGS[@]}";
fi
local -a params=( "$@" )
local numargs="${#params[@]}"
local -a short_options=( $(get_short_options_array) )
local -a long_options=( $(get_long_options_array) )
local oldoptind="$OPTIND"
local argreq=false
local prevopt
OPTIND=1
while [ "$OPTIND" -lt $((numargs + 1)) ]; do
opt="${!OPTIND}"
if [ "$opt" = '--' ]
then
break;
elif [ "${opt:0:2}" = '--' ]
then
if [ "$argreq" = 'true' ]; then
SCRIPT_OPTS_ERRS+=("$(printf "$MISSING_OPTION_ARGUMENT_MASK" "$prevopt")")
fi
OPTARG="${opt:2}"
LONGOPT="$(get_long_option "$OPTARG")"
LONGOPTARG="$(get_long_option_arg "$OPTARG")"
prevopt="$LONGOPT"
if
! in_array "$LONGOPT" "${long_options[@]}" &&
! in_array "${LONGOPT}:" "${long_options[@]}" &&
! in_array "${LONGOPT}::" "${long_options[@]}";
then
SCRIPT_OPTS_ERRS+=("$(printf "$UNKNOWN_OPTION_MASK" "$LONGOPT")")
argreq=false
else
if in_array "${LONGOPT}:" "${long_options[@]}" && [ -z "$LONGOPTARG" ]; then
argreq=true
else
argreq=false
fi
fi
elif [ "${opt:0:1}" = '-' ]
then
if [ "$argreq" = 'true' ]; then
SCRIPT_OPTS_ERRS+=("$(printf "$MISSING_OPTION_ARGUMENT_MASK" "$prevopt")")
fi
SHORTOPT="${opt:1:1}"
SHORTOPTARG="$(get_option_arg "${opt:2}")"
prevopt="$SHORTOPT"
if
! in_array "$SHORTOPT" "${short_options[@]}" &&
! in_array "${SHORTOPT}:" "${short_options[@]}" &&
! in_array "${SHORTOPT}::" "${short_options[@]}";
then
SCRIPT_OPTS_ERRS+=("$(printf "$UNKNOWN_OPTION_MASK" "$SHORTOPT")")
else
if in_array "${SHORTOPT}:" "${short_options[@]}" && [ -z "$SHORTOPTARG" ]; then
argreq=true
else
argreq=false
fi
fi
else
argreq=false
fi
((OPTIND++))
done
if [ "${#SCRIPT_OPTS_ERRS[@]}" -gt 0 ]; then
simple_error_multi SCRIPT_OPTS_ERRS[@] "$E_OPTS"
return 1
fi
# while getopts ":${OPTIONS_ALLOWED}" OPTION; do
# if [ "$OPTION" = '-' ]
# then LONGOPT="$(get_long_option "$OPTARG")"
# if ! in_array "$LONGOPT" "${long_options[@]}"; then
# unknown_option_simple_error "$LONGOPT"
# fi
# else
# if ! in_array "$OPTION" "${short_options[@]}"; then
# unknown_option_simple_error "$OPTION"
# fi
# fi
# done
OPTIND="$oldoptind"
export OPTIND SCRIPT_OPTS_ERRS
return 0
}
#### parse_common_options ( "$@" = SCRIPT_OPTS )
## parse common script options as described in $COMMON_OPTIONS_INFO
## this will stop options treatment at '--'
parse_common_options () {
local oldoptind="$OPTIND"
local actiontodo
if [ $# -gt 0 ]
then local options=("$@")
else local options=("${SCRIPT_OPTS[@]-}")
fi
while getopts ":${OPTIONS_ALLOWED}" OPTION "${options[@]}"; do
OPTARG="$(get_option_arg "${OPTARG:-}")"
case "$OPTION" in
# common options
f) forcing_mode 1;;
h) if [ -z "$actiontodo" ]; then actiontodo='help'; fi;;
i) interactive_mode 1;;
q) quiet_mode 1;;
v) verbose_mode 1;;
V) if [ -z "$actiontodo" ]; then actiontodo='version'; fi;;
x) debug_mode 1;;
-) LONGOPTARG="$(get_long_option_arg "$OPTARG")"
case "$OPTARG" in
# common options
debug) debug_mode 1;;
dry-run) dryrun_mode 1;;
force) forcing_mode 1;;
help) if [ -z "$actiontodo" ]; then actiontodo='help'; fi;;
interactive) interactive_mode 1;;
man*) if [ -z "$actiontodo" ]; then actiontodo='man'; fi;;
quiet) quiet_mode 1;;
usage) if [ -z "$actiontodo" ]; then actiontodo='usage'; fi;;
verbose) verbose_mode 1;;
version) if [ -z "$actiontodo" ]; then actiontodo='version'; fi;;
# library options
libvers*) if [ -z "$actiontodo" ]; then actiontodo='libversion'; fi;;
log*) set_log_filename "$LONGOPTARG";;
working-dir*) set_working_directory "$LONGOPTARG";;
# no error for others
*) if [ -n "$LONGOPTARG" ] && [ "$(which "$OPTARG")" ]; then
SCRIPT_PROGRAMS+=( "$OPTARG" )
fi;;
esac ;;
*) continue;;
esac
done
OPTIND="$oldoptind"
export OPTIND SCRIPT_PROGRAMS
if [ ! -z "$actiontodo" ]; then
case "$actiontodo" in
help) script_long_usage; exit 0;;
usage) script_usage; exit 0;;
man) script_manpage; exit 0;;
version) script_version "$QUIET"; exit 0;;
libversion) library_version "$QUIET"; exit 0;;
esac
fi
return 0
}
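# Usage sketch of a typical bootstrap (hypothetical script relying on the
# library defaults):
#   source path/to/piwi-bash-library.bash
#   rearrange_script_options "$@"
#   parse_common_options_strict
#   # from here VERBOSE / QUIET / DEBUG / etc. reflect the command line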
#### SCRIPT INFO #####################################################################
#### get_script_version_string ( quiet = false )
get_script_version_string () {
local gitvers="$(vcs_get_version)"
if [ -n "$gitvers" ]
then
echo "$gitvers"
else
if [ -n "$VERSION" ]; then echo "$VERSION"; fi
fi
return 0
}
#### script_title ( lib = false )
## this function must echo information about the script NAME and VERSION
## setting `$lib` on true will add the library infos
script_title () {
local TITLE="$NAME"
if [ -n "$VERSION" ]; then TITLE="${TITLE} - v. [${VERSION}]"; fi
_echo "$(colorize "## ${TITLE} ##" bold)"
local _vers="$(get_version_string)"
if [ -n "$_vers" ]; then
_echo "[${_vers}]"
elif [ -n "$DATE" ]; then
_echo "[${DATE}]"
fi
if [ $# -gt 0 ]; then
_echo "[using $(library_version) - ${LIB_SOURCES_URL}]"
fi
return 0
}
#### script_short_title ()
## this function must echo information about the script NAME and VERSION
script_short_title () {
local TITLE="$NAME"
if [ -n "$VERSION" ]; then TITLE="${TITLE} ${VERSION}"; fi
local _vers="$(get_version_string)"
if [ -n "$_vers" ]; then
TITLE+=" [${_vers}]"
elif [ -n "$DATE" ]; then
TITLE+=" [${DATE}]"
fi
echo "$TITLE"
return 0
}
#### script_usage ()
## this function must echo the simple usage
script_usage () {
simple_usage
return 0
}
#### script_long_usage ( synopsis = SYNOPSIS_ERROR , options_string = COMMON_OPTIONS_USAGE )
## writes a long synopsis usage info
script_long_usage () {
local TMP_USAGE="$(parse_color_tags "<bold>$(script_short_title)</bold>")"
if [ -n "$DESCRIPTION_USAGE" ]; then
TMP_USAGE+="\n${DESCRIPTION_USAGE}";
elif [ -n "$DESCRIPTION" ]; then
TMP_USAGE+="\n${DESCRIPTION}";
fi
local SYNOPSIS_STR=''
if [ $# -gt 0 ]; then
if [ "$1" = 'lib' ]; then
SYNOPSIS_STR="$COMMON_SYNOPSIS"
elif [ "$1" = 'action' ]; then
SYNOPSIS_STR="$COMMON_SYNOPSIS_ACTION"
else
SYNOPSIS_STR="$1"
fi
elif [ -n "$SYNOPSIS_USAGE" ]; then
SYNOPSIS_STR="$SYNOPSIS_USAGE"
elif [ -n "$SYNOPSIS" ]; then
SYNOPSIS_STR="$SYNOPSIS"
elif [ -n "$SYNOPSIS_ERROR" ]; then
SYNOPSIS_STR="$SYNOPSIS_ERROR"
else
SYNOPSIS_STR="$COMMON_SYNOPSIS_ERROR"
fi
local OPTIONS_STR=''
if [ $# -gt 0 ]; then
if [ "$1" = 'lib' ]; then
OPTIONS_STR="$COMMON_OPTIONS_USAGE"
else
OPTIONS_STR="$1"
fi
elif [ -n "$OPTIONS_USAGE" ]; then
OPTIONS_STR="$OPTIONS_USAGE"
elif [ -n "$OPTIONS" ]; then
OPTIONS_STR="$OPTIONS"
fi
printf "$(parse_color_tags "\n%s\n\n<bold>usage:</bold> %s\n%s\n\n<${COLOR_COMMENT}>%s</${COLOR_COMMENT}>")" \
"$(_echo "$TMP_USAGE")" "$(_echo "$SYNOPSIS_STR")" "$(_echo "$OPTIONS_STR")" \
"$(library_info)";
echo
return 0
}
#### script_help ( lib_info = true )
## this function must echo the help information (shown with option '-h')
script_help () {
local lib_info="${1:-true}"
local TMP_VERS="$(library_info)"
local USAGESTR=''
if [ -n "$USAGE" ] && [ "$lib_info" = 'true' ]; then
USAGESTR+="$(script_title)"
USAGESTR+="$(parse_color_tags "\n$USAGE\n")"
USAGESTR+="$(parse_color_tags "\n<${COLOR_COMMENT}>${TMP_VERS}</${COLOR_COMMENT}>")"
else
local TMP_TITLE="${NAME:-?}"
if [ -n "$VERSION" ]; then TMP_TITLE="${TMP_TITLE} - v. [${VERSION}]"; fi
local TMP_USAGE="\n<bold>NAME</bold>\n\t<bold>${TMP_TITLE}</bold>";
TMP_USAGE+="\n";
for section in "${MANPAGE_VARS[@]}"; do
eval "section_name=\"${section/${MANPAGE_SUFFIX}/}\""
eval "section_ctt=\"\$${section}\""
eval "glob_section_ctt=\"\$${section_name}\""
local toshow=''
if [ "$section_name" != 'NAME' ] && [ "$section_name" != 'DATE' ] && [ "$section_name" != 'VERSION' ]; then
if [ -n "$section_ctt" ]; then
toshow="$section_ctt"
elif [ -n "$glob_section_ctt" ]; then
toshow="$glob_section_ctt"
elif [ "$section_name" = 'SYNOPSIS' ]; then
eval "section_default_ctt=\"\$COMMON_${section}\""
eval "section_global_default_ctt=\"\$COMMON_${section/${MANPAGE_SUFFIX}/}\""
if [ -n "$section_default_ctt" ]; then
toshow="$section_default_ctt"
elif [ -n "$section_global_default_ctt" ]; then
toshow="$section_global_default_ctt"
fi
fi
if [ -n "$toshow" ] && [ "$toshow" != '' ]; then
TMP_USAGE+="\n<bold>${section_name}</bold>\n\t${toshow}\n";
fi
fi
done
if [ "${MANPAGE_NODEPEDENCY:-false}" = 'false' ]; then
if [ "$lib_info" = 'true' ]; then
TMP_USAGE+="\n<bold>DEPENDENCIES</bold>\n\t${LIB_DEPEDENCY_MANPAGE_INFO}\n";
fi
fi
TMP_USAGE+="\n<${COLOR_COMMENT}>${TMP_VERS}</${COLOR_COMMENT}>";
USAGESTR+="$(parse_color_tags "$TMP_USAGE")"
fi
local _done=false
if [ "${#SCRIPT_PROGRAMS[@]}" -gt 0 ]; then
local _tmpfile="$(get_tempfile_path "$(get_filename "$0").usage")"
if in_array "less" "${SCRIPT_PROGRAMS[@]}"; then
echo "$USAGESTR" > "$_tmpfile"
cat "$_tmpfile" | less -cfre~
_done=true
elif in_array "more" "${SCRIPT_PROGRAMS[@]}"; then
echo "$USAGESTR" > "$_tmpfile"
cat "$_tmpfile" | more -cf
_done=true
fi
fi
if [ "$_done" != 'true' ]; then echo "$USAGESTR"; fi
return 0;
}
#### script_manpage ( cmd = $0 , section = 3 )
## will open the manpage of $0 if found in system manpages or if `$0.man` exists
## else will trigger 'script_help' method
script_manpage () {
local cmd="${1:-${0}}"
local cmd_filename=$(get_filename "$cmd")
local cmd_localman="${cmd%.*}.man"
local section="${2:-3}"
if man -w -s "$section" "$cmd_filename" 2> /dev/null; then
man -s "$section" "$cmd_filename"
elif [ -f "$cmd_localman" ]; then
man "./${cmd_localman}"
else
quietecho "- no manpage for '${cmd}' ; running 'help'"
clear; script_help
fi
return 0
}
#### script_short_version ( quiet = false )
script_short_version () {
local bequiet="${1:-false}"
if [ "$bequiet" = 'true' ]; then
echo "${VERSION:-?}"
return 0
fi
local TMP_STR="$VERSION"
if [ -n "$VERSION" ]; then
if [ -n "$NAME" ]
then TMP_STR="${NAME} ${TMP_STR}"
else TMP_STR="${0} ${TMP_STR}"
fi
local gitvers="$(get_version_string)"
if [ -n "$gitvers" ]; then TMP_STR+=" ${gitvers}"
elif [ -n "$DATE" ]; then TMP_STR+=" ${DATE}"
fi
echo "$TMP_STR"
fi
return 0;
}
#### script_version ( quiet = false )
script_version () {
local bequiet="${1:-false}"
if [ "$bequiet" = 'true' ]; then
echo "${VERSION:-?}"
return 0
fi
script_short_version
for section in "${VERSION_VARS[@]}"; do
case "$section" in
NAME|VERSION|DATE);;
*) if [ -n "${!section}" ]; then echo "${!section}"; fi;;
esac
done
return 0;
}
#### DOCBUILDER ##########################################################################
## Documentation builder rules, tags and masks
##@ DOCBUILDER_MASKS = ()
declare -xa DOCBUILDER_MASKS=()
##@ DOCBUILDER_MARKER = '##@!@##'
declare -x DOCBUILDER_MARKER='##@!@##'
##@ DOCBUILDER_RULES = ( ... )
declare -xa DOCBUILDER_RULES=(
'^#### [^#]* #*$' # title line : #### title # (this will be followed by the line number and a new line)
'^#### [^#]*$' # fct name line : #### name ( what ever )
'^##@ .*$' # var line : ##@ varname ( what ever )
'^## .*$' # comment line : ## comment (will NOT match "##! comment")
'^##+ .*$' # 2nd comment line : ##+ comment (will NOT match "##! comment")
'^##@[^ ]* .*$' # tag line : ##@tagname string
);
declare -xa DOCBUILDER_TERMINAL_MASKS=(
"s|^#### \(.*\) #*$|\\\n# \1 #|g" # title line
"s|^#### \(.*\)$|\\\n\\\t\1|g" # fct name line
"s|^##@ \(.*\)$|\\\t\1|g" # var line
"s|^## \(.*\)$|\\\t\\\t\1|g" # comment line
"s|^##+ \(.*\)$|\\\t\\\t\1|g" # 2nd comment line
"s|^##\(@[^ ]*\) \(.*\)$|\\\t\\\t\1 \2|g" # tag line
);
declare -xa DOCBUILDER_MARKDOWN_MASKS=(
"s|^#### \(.*\) #*$|\\\n## \1|g" # title line
"s|^#### \(.*\)$|\\\n- \*\*\1\*\*\\\n|g" # fct name line
"s|^##@ \(.*\)$|\\\n- \*\*\1\*\*|g" # var line
"s|^## \(.*\)$|\\\n\\\t\1|g" # comment line
"s|^##+ \(.*\)$|\\\t\1|g" # 2nd comment line
"s|^##\(@[^ ]*\) \(.*\)$|\\\n\\\t\*\*\1:\*\* \2|g" # tag line
);
#### build_documentation ( type = TERMINAL , output = null , source = BASH_SOURCE[0] )
build_documentation () {
local type="${1:-TERMINAL}"
local output="${2}"
local source="${3:-${BASH_SOURCE[0]}}"
local type_var="DOCBUILDER_$(string_to_upper "$type")_MASKS"
if [ -z "${!type_var}" ]; then
error "unknown doc-builder type '${type}'"
fi
eval "export DOCBUILDER_MASKS=( \"\${${type_var}[@]}\" )"
verecho "- generating documentation in format '${type}' from file '${source}'" >&2;
generate_documentation "$source" "$output"
return 0
}
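# Usage sketch (hypothetical output file): render this file's tagged comments
# as markdown.
#   build_documentation 'markdown' 'DOCUMENTATION.md' "${BASH_SOURCE[0]}"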
#### generate_documentation ( filepath = BASH_SOURCE[0] , output = null )
generate_documentation () {
local sourcefile="${1:-${BASH_SOURCE[0]}}"
if [ ! -f "$sourcefile" ]; then path_error "$sourcefile"; fi
local output="$2"
local docstr=''
if [ -n "$DOCUMENTATION_TITLE" ]
then docstr="# ${DOCUMENTATION_TITLE}"
else docstr="# Documentation of '$sourcefile'"
fi
if [ -n "$DOCUMENTATION_INTRO" ]; then
docstr+="\n\n${DOCUMENTATION_INTRO}\n\n----\n";
fi
i=0
old_IFS="$IFS"
IFS=$'\n'
local indoc=false
local intag=false
while read line; do
if [ "${line:0:2}" != '##' ]; then continue; fi
if [ "$line" = "$DOCBUILDER_MARKER" ]; then
if [ "$indoc" = 'true' ]; then indoc=false; break; else indoc=true; fi
continue;
fi
if [ "$indoc" = 'true' ]; then
line_str=''
title_line="$(echo "$line" | grep -o "${DOCBUILDER_RULES[0]}" | sed "${DOCBUILDER_MASKS[0]}")"
fct_line="$(echo "$line" | grep -o "${DOCBUILDER_RULES[1]}" | sed "${DOCBUILDER_MASKS[1]}")"
var_line="$(echo "$line" | grep -o "${DOCBUILDER_RULES[2]}" | sed "${DOCBUILDER_MASKS[2]}")"
if [ -n "${title_line}" ]; then
line_str="${title_line} (line ${i})\n"
elif [ -n "$fct_line" ]; then
line_str="$fct_line"
intag=true
elif [ -n "$var_line" ]; then
line_str="$var_line"
intag=true
elif [ "$intag" = 'true' ]; then
comm_line="$(echo "$line" | grep -o "${DOCBUILDER_RULES[3]}" | sed "${DOCBUILDER_MASKS[3]}")"
comm_line_alt="$(echo "$line" | grep -o "${DOCBUILDER_RULES[4]}" | sed "${DOCBUILDER_MASKS[4]}")"
arg_line="$(echo "$line" | grep -o "${DOCBUILDER_RULES[5]}" | sed "${DOCBUILDER_MASKS[5]}")"
if [ "$VERBOSE" = 'true' ]; then
if [ -n "$arg_line" ]; then
line_str="$arg_line"
elif [ -n "$comm_line" ]; then
line_str="$comm_line"
elif [ -n "$comm_line_alt" ]; then
line_str="$comm_line_alt"
fi
else
if [ -n "$comm_line" ] && [ -n "$comm_line_alt" ]; then intag=false; fi
fi
fi
if [ -n "$line_str" ]; then docstr+="\n${line_str}"; fi
fi
i=$((i+1))
done < "$sourcefile" ;
IFS="$old_IFS"
export IFS
now="$(date '+%d-%-m-%Y %X')"
docstr+="\n\n----\n\n[*Doc generated at ${now} from path '${sourcefile}'*]"
if [ -n "$output" ]
then _echo "$docstr" > "$output"
else _echo "$docstr"
fi
return 0
}
#### LIBRARY INFO #####################################################################
#### get_library_version_string ( path = $0 )
## extract the GIT version string from a file matching line 'LIB_VCSVERSION=...'
get_library_version_string () {
local fpath="${1:-$0}"
if [ ! -f "$fpath" ]; then error "file '${fpath}' not found!"; fi
local _vers="$(get_version_string "$fpath")"
if [ -z "$_vers" ]; then
_vers="$(get_version_string "$fpath" LIB_VCSVERSION)"
fi
echo "$_vers"
return 0
}
#### library_info ()
library_info () {
library_short_version
return 0;
}
#### library_path ()
library_path () {
realpath "${BASH_SOURCE[0]}"
return 0;
}
#### library_help ()
library_help () {
local _lib="$(library_path)"
if in_array "${USEROS}" "${LINUX_OS[@]}"
then "$_lib" help
else "$(which sh)" "$_lib" help
fi
return 0
}
#### library_usage ()
library_usage () {
local _lib="$(library_path)"
if in_array "${USEROS}" "${LINUX_OS[@]}"
then "$_lib" usage
else "$(which sh)" "$_lib" usage
fi
return 0
}
#### library_short_version ( quiet = false )
## this function must echo information about the library name & version
library_short_version () {
local bequiet="${1:-false}"
if [ "$bequiet" = 'true' ]; then
echo "$LIB_VERSION"
return 0
fi
local TMP_VERS="${LIB_NAME} ${LIB_VERSION}"
local LIB_MODULE="$(dirname "$LIBRARY_REALPATH")/.."
local _done=false
if git_is_clone "$LIB_MODULE" "$LIB_SOURCES_URL" 2>/dev/null; then
add="$(git_get_version)"
if [ -n "$add" ]; then
_done=true
TMP_VERS+=" ${add}"
fi
fi
if [ "$_done" = 'false' ] && [ -f "$BASH_SOURCE" ]; then
TMP_VERS+=" $(get_library_version_string "${BASH_SOURCE}")"
fi
echo "$TMP_VERS"
return 0
}
#### library_version ( quiet = false )
## this function must echo full information about the library name & version (GNU-like)
library_version () {
local OLD_VCSVERSION="$VCSVERSION"
export VCSVERSION="$LIB_VCSVERSION"
for section in "${VERSION_VARS[@]}"; do
eval "local OLD_$section=\$$section"
eval "export $section=\$LIB_$section"
done
script_version "${1:-false}"
for section in "${VERSION_VARS[@]}"; do
eval "export $section=\$OLD_$section"
done
export VCSVERSION="${OLD_VCSVERSION}"
return 0
}
#### library_debug ( "$*" )
## see all common options flags values & some debug infos
library_debug () {
OPTIND=1
local TOP_STR=" \$ $0 ${ORIGINAL_SCRIPT_OPTS}"
if [ "$*" != "${ORIGINAL_SCRIPT_OPTS}" ]; then
TOP_STR="${TOP_STR}\n re-arranged in:\n \$ $0 $*"
fi
local TMP_DEBUG_MASK=" \n\
---- DEBUG -------------------------------------------------------------\n\
${TOP_STR}\n\
------------------------------------------------------------------------\n\
- %s is set on %s\n\
- %s is set on %s\n\
- %s mode is %s (option '%s')\n\
- %s mode is %s (option '%s')\n\
- %s mode is %s (option '%s')\n\
- %s mode is %s (option '%s')\n\
- %s mode is %s (option '%s')\n\
------------------------------------------------------------------------\n\
status: %s - pid: %s - user: %s - shell: %s\n\
%s\n\
------------------------------------------------------------------------";
printf -v TMP_DEBUG "$TMP_DEBUG_MASK" \
"$(colorize 'USEROS' bold)" "$(colorize "$USEROS" bold "$COLOR_INFO")" \
"$(colorize 'WORKINGDIR' bold)" "$(colorize "$WORKINGDIR" bold "$COLOR_INFO")" \
"$(colorize 'VERBOSE' bold)" "$(colorize "$(onoff_bit "$VERBOSE")" bold "$COLOR_INFO")" "-v" \
"$(colorize 'INTERACTIVE' bold)" "$(colorize "$(onoff_bit "$INTERACTIVE")" bold "$COLOR_INFO")" "-i" \
"$(colorize 'FORCED' bold)" "$(colorize "$(onoff_bit "$FORCED")" bold "$COLOR_INFO")" "-f" \
"$(colorize 'DEBUG' bold)" "$(colorize "$(onoff_bit "$DEBUG")" bold "$COLOR_INFO")" "-x" \
"$(colorize 'QUIET' bold)" "$(colorize "$(onoff_bit "$QUIET")" bold "$COLOR_INFO")" "-q" \
"$?" "$$" "$(whoami)" "${USERSHELL} ${SHELLVERSION}" "$(get_system_info)";
TMP_DEBUG+="\n$(parse_color_tags "<${COLOR_COMMENT}>$(library_info)</${COLOR_COMMENT}>")";
_echo "$TMP_DEBUG"
return 0
}
#### / libdebug ( "$*" )
## alias of library_debug
libdebug () {
library_debug "$@"
}
#### LIBRARY INTERNALS ###################################################################
##@ LIBRARY_REALPATH LIBRARY_DIR LIBRARY_BASEDIR LIBRARY_SOURCEFILE
declare -rx LIBRARY_REALPATH="$(realpath "${BASH_SOURCE[0]}")" 2>/dev/null;
declare -rx LIBRARY_DIR="$(dirname "$LIBRARY_REALPATH")" 2>/dev/null;
declare -rx LIBRARY_BASEDIR="$(dirname "$LIBRARY_DIR")" 2>/dev/null;
declare -rx LIBRARY_SOURCEFILE="$(basename "$LIBRARY_REALPATH")" 2>/dev/null;
#### make_library_homedir ()
## make dir '$HOME/.piwi-bash-library' if it doesn't exist
make_library_homedir () {
if [ ! -d "$LIB_SYSHOMEDIR" ]; then mkdir "$LIB_SYSHOMEDIR"; fi
return 0
}
#### make_library_cachedir ()
## make dir '$HOME/.piwi-bash-library/cache' if it doesn't exist
make_library_cachedir () {
make_library_homedir
if [ ! -d "$LIB_SYSCACHEDIR" ]; then mkdir "$LIB_SYSCACHEDIR"; fi
return 0
}
#### clean_library_cachedir ()
## clean dir '$HOME/.piwi-bash-library/cache' if it exists
clean_library_cachedir () {
if [ -d "$LIB_SYSCACHEDIR" ]; then rm -rf "$LIB_SYSCACHEDIR"; fi
return 0
}
#### INSTALLATION WIZARD #################################################################
##! All internal installation methods are prefixed with 'instwiz_'
##! All internal installation constants are prefixed with 'LIBINST_'
##@ INSTALLATION_VARS = ( SCRIPT_VCS VCSVERSION SCRIPT_REPOSITORY_URL SCRIPT_FILES SCRIPT_FILES_BIN SCRIPT_FILES_MAN SCRIPT_FILES_CONF ) (read-only)
declare -rxa INSTALLATION_VARS="(SCRIPT_VCS VCSVERSION SCRIPT_REPOSITORY_URL SCRIPT_FILES SCRIPT_FILES_BIN SCRIPT_FILES_MAN SCRIPT_FILES_CONF)" 2>/dev/null;
##@ SCRIPT_REPOSITORY_URL = url of a distant repository
declare -x SCRIPT_REPOSITORY_URL=''
##@ SCRIPT_FILES = array of installable files
declare -xa SCRIPT_FILES=()
##@ SCRIPT_FILES_BIN = array of installable binary files
declare -xa SCRIPT_FILES_BIN=()
##@ SCRIPT_FILES_MAN = array of manpages files
declare -xa SCRIPT_FILES_MAN=()
##@ SCRIPT_FILES_CONF = array of configuration files
declare -xa SCRIPT_FILES_CONF=()
##! internal vars for installation
declare -x LIBINST_TARGET=''
declare -x LIBINST_CLONE=''
declare -x LIBINST_BRANCH='master'
## instwiz_get_real_version ( path = LIBINST_CLONE )
## get the real VCS version string from a repository clone
instwiz_get_real_version () {
local clonedir="${1:-${LIBINST_CLONE}}"
vcs_get_version "$clonedir"
return 0
}
## instwiz_remoteversion ( path = LIBINST_CLONE , branch = HEAD )
## get the last commit SHA from the remote in branch
instwiz_remoteversion () {
vcs_get_remote_version "${1:-${LIBINST_CLONE}}" "${2:-HEAD}"
return 0
}
## instwiz_prepare_install_cmd ()
instwiz_prepare_install_cmd () {
local installcmd=''
# all files
for file in "${SCRIPT_FILES[@]}"; do
local _originalfilepath="${LIBINST_CLONE}/${file}"
local _targetfilepath="${LIBINST_TARGET}/${file}"
if [ -e "$_originalfilepath" ]; then
if [ "$(string_length "$installcmd")" -gt 0 ]; then installcmd+=" && "; fi
if [ -d "$_originalfilepath" ]
then installcmd+="cp -rf '${_originalfilepath}' '${_targetfilepath}'"
else installcmd+="cp -f '${_originalfilepath}' '${_targetfilepath}'"
fi
else
echo "! > file '${LIBINST_CLONE}/${file}' not found and won't be installed!"
fi
done
# binary files
for file in "${SCRIPT_FILES_BIN[@]}"; do
local _originalfilepath="${LIBINST_CLONE}/${file}"
local _targetfilepath="${LIBINST_TARGET}/${file}"
if [ -f "$_originalfilepath" ]; then
if [ "$(string_length "$installcmd")" -gt 0 ]; then installcmd+=" && "; fi
installcmd+="chmod a+x '${_targetfilepath}'"
fi
done
# @TODO : install man and conf
# command to execute
echo "$installcmd"
return 0
}
## instwiz_prepare_uninstall_cmd ()
instwiz_prepare_uninstall_cmd () {
local uninstallcmd=''
# all files
for file in "${SCRIPT_FILES[@]}"; do
local _targetfilepath="${LIBINST_TARGET}/${file}"
if [ -e "$_targetfilepath" ]; then
if [ "$(string_length "$uninstallcmd")" -gt 0 ]; then uninstallcmd+=" && "; fi
if [ -d "$_targetfilepath" ]
then uninstallcmd+="rm -rf '${_targetfilepath}'"
else uninstallcmd+="rm -f '${_targetfilepath}'"
fi
fi
done
# @TODO : un-install man and conf
# command to execute
echo "$uninstallcmd"
return 0
}
#### script_installation_target ( target_dir = $HOME/bin )
script_installation_target () {
export LIBINST_TARGET="${1:-${HOME}/bin}"
if [ ! -d "$LIBINST_TARGET" ]; then
mkdir -p "$LIBINST_TARGET" || simple_error "target path '${LIBINST_TARGET}' not found and can't be created!"
fi
return 0
}
#### script_installation_source ( clone_repo = SCRIPT_REPOSITORY_URL , clone_dir = LIB_SYSCACHEDIR )
script_installation_source () {
if [ $# -gt 0 ]; then export SCRIPT_REPOSITORY_URL="$1"; fi
local _dirname="$(basename "$SCRIPT_REPOSITORY_URL")"
_dirname="${_dirname/.git/}"
if [ "$_dirname" != "$(basename "$SCRIPT_REPOSITORY_URL")" ]; then export SCRIPT_VCS='git'; fi
local target="${LIB_SYSCACHEDIR}/${_dirname}"
if [ $# -gt 1 ]; then target="$2"; fi
make_library_cachedir
export LIBINST_CLONE="$target"
return 0
}
#### script_install ( path = $HOME/bin/ )
script_install () {
local installcmd="$(instwiz_prepare_install_cmd)"
log "script_install: '${installcmd}'"
iexec "$installcmd"
quietecho ">> ok, script installed in '${LIBINST_TARGET}'"
return 0
}
#### script_check ( file_name , original = LIBINST_CLONE , target = LIBINST_TARGET )
##@param file_name: the file to check and compare on both sides
script_check () {
if [ $# -eq 0 ]; then return 0; fi
local filename="$1"
local clonedir="${2:-${LIBINST_CLONE}}"
local targetdir="${3:-${LIBINST_TARGET}}"
# target
local targetvers="$(get_version_string "${targetdir}/${filename}")"
local targetvers_sha="$(get_version_sha "$targetvers")"
local targetvers_branch="$(get_version_branch "$targetvers")"
# distant GIT
local remotevers_sha="$(instwiz_remoteversion "$clonedir" "$targetvers_branch")"
if [ "$targetvers_sha" != "$remotevers_sha" ]
then echo "New version ${remotevers_sha} available ..."; return 1;
else echo "Up-to-date"; touch "${targetdir}/${filename}";
fi
return 0
}
#### script_update ( path = $HOME/bin/ )
script_update () {
local installcmd="$(instwiz_prepare_install_cmd)"
log "script_update: '${installcmd}'"
iexec "$installcmd"
quietecho ">> ok, script updated in '${LIBINST_TARGET}'"
return 0
}
#### script_uninstall ( path = $HOME/bin/ )
script_uninstall () {
local uninstallcmd="$(instwiz_prepare_uninstall_cmd)"
if [ "$uninstallcmd" = '' ]; then
quietecho ">> nothing to un-install!"
return 1
fi
log "script_uninstall: '${uninstallcmd}'"
iexec "$uninstallcmd"
quietecho ">> ok, script removed from '${LIBINST_TARGET}'"
return 0
}
#### COMPATIBILITY #####################################################################
# to be deleted in next major version !!
##@!@##
##########################################################################################
# Internal API
##########################################################################################
# this MUST only be parsed when calling the lib directly
# any method of the internal api is prefixed by `intlib_`
#if [ "$_" != "$0" ] || [ "$(basename "$0")" != "$(basename "${BASH_SOURCE[0]}")" ]; then return 0; fi
if [ "$(basename "$0")" != "$(basename "${BASH_SOURCE[0]}")" ]; then return 0; fi
#echo "BASH LIBRARY !!!"
declare -x INTLIB_BIN_FILENAME="${LIB_FILENAME_DEFAULT}.sh"
declare -x INTLIB_DEVDOC_FILENAME="${LIB_FILENAME_DEFAULT}-DOC.md"
declare -x INTLIB_README_FILENAME="${LIB_FILENAME_DEFAULT}-README.md"
declare -x INTLIB_MAN_FILENAME="${LIB_FILENAME_DEFAULT}.man"
declare -x INTLIB_PRESET='default'
declare -x INTLIB_BRANCH='master'
declare -x INTLIB_TARGET
declare -x INTLIB_RELEASE
declare -x INTLIB_EXEC
declare -rx INTLIB_RELEASE_MASK="%s.tar.gz" 2>/dev/null;
declare -rx INTLIB_RELEASE_MASK_URL="${LIB_SOURCES_URL}/archive/%s" 2>/dev/null;
# days to make automatic version check
declare -x INTLIB_OUTDATED_CHECK=30
# days to force user update (message is always shown)
declare -x INTLIB_OUTDATED_FORCE=90
declare -rxa INTLIB_PRESET_ALLOWED="( default dev user full )" 2>/dev/null;
declare -rxa INTLIB_ACTION_ALLOWED="( install uninstall check update version help usage documentation mddocumentation clean exec )" 2>/dev/null;
# script man infos
MANPAGE_NODEPEDENCY=true
for section in "${MANPAGE_VARS[@]}"; do eval "${section}=\$LIB_${section}"; done
for section in "${SCRIPT_VARS[@]}"; do eval "${section}=\$LIB_${section}"; done
for section in "${VERSION_VARS[@]}"; do eval "${section}=\$LIB_${section}"; done
for section in "${USAGE_VARS[@]}"; do eval "${section}=\$LIB_${section}"; done
for section in "${INSTALLATION_VARS[@]}"; do eval "${section}=\$LIB_${section}"; done
SCRIPT_REPOSITORY_URL="${LIB_SOURCES_URL}"
OPTIONS_ALLOWED="b:t:p:r:e:${COMMON_OPTIONS_ALLOWED}"
LONG_OPTIONS_ALLOWED="exec:,branch:,target:,preset:,release:,local,${COMMON_LONG_OPTIONS_ALLOWED}"
INTLIB_PRESET_INFO=''
for pres in "${INTLIB_PRESET_ALLOWED[@]}"; do
INTLIB_PRESET_INFO+=" '${pres}'"
done
DESCRIPTION_USAGE="${LIB_DESCRIPTION}\n\n\
To use the library, just include its source file using: \`source path/to/piwi-bash-library.bash\` and call its methods.\n\
Try option '--man' for the library full manpage.";
OPTIONS_USAGE="\n\
$(parse_color_tags "<bold><action> in:</bold>")\n\
\tinstall\t\t\tinstall a copy locally or in your system\n\
\tcheck\t\t\tcheck if a copy is up-to-date\n\
\tupdate\t\t\tupdate a copy with newer version if so\n\
\tuninstall\t\tuninstall a copy from a system path\n\
\tversion\t\t\tget a copy version infos ; use option '--quiet' to get only the version number\n\
\tdocumentation\t\tsee the library documentation ; use option '--verbose' to increase verbosity\n\
\tclean\t\t\tclean library cache\n\n\
$(parse_color_tags "<bold>available options:</bold>")\n\
\t-e, --exec='string'\ta bash string to evaluate in the library's environment\n\
\t-t, --target=PATH\tdefine the target directory ('PATH' must exist - default is '\$HOME/bin/')\n\
\t-p, --preset=TYPE\tdefine a preset for an installation ; can be ${INTLIB_PRESET_INFO}\n\
\t-b, --branch=NAME\tdefine the GIT branch to use from the library remote repository (default is '${INTLIB_BRANCH}')\n\
\t-r, --release=VERSION\tdefine the GIT release tag to use from the library remote repository (default is empty)\n\
\t--local\t\t\tlocal installation in current directory (alias of '--target=\$(pwd)')\
${COMMON_OPTIONS_USAGE}";
SYNOPSIS_ERROR=" ${0} [-${COMMON_OPTIONS_ALLOWED_MASK}] \n\
\t[-t | --target=<path>] [--local] \n\
\t[-b | --branch=<branch>] [-r | --release=<version>] \n\
\t[-p | --preset= (${INTLIB_PRESET_ALLOWED[@]}) ] \n\
\t[-e | --exec=<'string to eval'>] \n\
\thelp | usage\n\
\tversion\n\
\tcheck\n\
\tinstall\n\
\tupdate\n\
\tuninstall\n\
\tdocumentation\n\
\tclean\n";
SYNOPSIS_USAGE=" ${0} [-${COMMON_OPTIONS_ALLOWED_MASK}] \n\
\t[-t | --target=<path>] [--local] \n\
\t[-b | --branch=<branch>] [-r | --release=<version>] \n\
\t[-p | --preset= (${INTLIB_PRESET_ALLOWED[@]}) ] \n\
\t[-e | --exec=<'string to eval'>] \n\
\t[--] <action>";
declare -x DOCUMENTATION_TITLE="Piwi Bash Library documentation\n\n[*$(library_info)*]"
declare -x DOCUMENTATION_INTRO="\
\tPackage [${LIB_PACKAGE}] version [${LIB_VERSION}].\n\
\t${LIB_COPYRIGHT} - Some rights reserved. \n\
\t${LIB_LICENSE}.\n\
\t${LIB_SOURCES}.\n\
\tBug reports: <http://github.com/piwi/bash-library/issues>.\n\
\t${LIB_ADDITIONAL_INFO}";
# internal API methods
# -> check preset validity
intlib_preset_valid () {
in_array "${INTLIB_PRESET}" "${INTLIB_PRESET_ALLOWED[@]}" || simple_error "unknown preset '${INTLIB_PRESET}'!";
SCRIPT_FILES=( ${INTLIB_BIN_FILENAME} ${INTLIB_MAN_FILENAME} )
SCRIPT_FILES_BIN=( ${INTLIB_BIN_FILENAME} )
SCRIPT_FILES_MAN=( ${INTLIB_MAN_FILENAME} )
if [ "$INTLIB_PRESET" = 'dev' ]||[ "$INTLIB_PRESET" = 'full' ]; then
SCRIPT_FILES=( "${SCRIPT_FILES[@]}" ${INTLIB_DEVDOC_FILENAME} )
fi
if [ "$INTLIB_PRESET" = 'user' ]||[ "$INTLIB_PRESET" = 'full' ]; then
SCRIPT_FILES=( "${SCRIPT_FILES[@]}" ${INTLIB_README_FILENAME} )
fi
export SCRIPT_FILES SCRIPT_FILES_BIN SCRIPT_FILES_MAN
return 0
}
# -> prepare a clone of the library repo
intlib_prepare_libclone () {
if [ "$INTLIB_RELEASE" != '' ]
then
make_library_cachedir
local oldpwd="$(pwd)"
if [ "${INTLIB_RELEASE:0:1}" != 'v' ]; then export INTLIB_RELEASE="v${INTLIB_RELEASE}"; fi
local _tag="$(printf "$INTLIB_RELEASE_MASK" "$INTLIB_RELEASE")"
local _tag_url="$(printf "$INTLIB_RELEASE_MASK_URL" "$_tag")"
local _target="${LIB_SYSCACHEDIR}/${INTLIB_RELEASE}"
if [ ! -d "$_target" ]; then
local wgetcmd="$(which wget)"
if [ -z "$wgetcmd" ]; then command_error 'wget'; fi
cd "$LIB_SYSCACHEDIR"
"$wgetcmd" "$_tag_url"
local _tar_dirname="$(tar -tzf "$_tag" | sed -n 1p)"
_tar_dirname="${_tar_dirname%/}"
if [ -d "$_tar_dirname" ]; then rm -rf "$_tar_dirname"; fi
if [ "$VERBOSE" = 'true' ]
then tar xvf "$_tag"
else tar xf "$_tag"
fi
mv "$_tar_dirname" "$INTLIB_RELEASE"
fi
export LIBINST_CLONE="$_target"
cd "$oldpwd"
else
local target="${LIB_SYSCACHEDIR}/$(basename "$LIB_SOURCES_URL")-${LIBINST_BRANCH}"
git_make_clone "$LIB_SOURCES_URL" "$target"
export LIBINST_CLONE="$CURRENT_GIT_CLONE_DIR"
git_change_branch "$LIBINST_CLONE" "$LIBINST_BRANCH"
git_update_clone "$LIBINST_CLONE"
fi
return 0
}
# -> prepare required install files in ${clone}/tmp/
intlib_prepare_install () {
intlib_prepare_libclone
local tmpdir="${LIBINST_CLONE}/tmp"
rm -rf "$tmpdir" && mkdir "$tmpdir"
cp -f "${LIBINST_CLONE}/bin/piwi-bash-library.bash" "${tmpdir}/${INTLIB_BIN_FILENAME}"
cp -f "${LIBINST_CLONE}/man/piwi-bash-library.man" "${tmpdir}/${INTLIB_MAN_FILENAME}"
cp -f "${LIBINST_CLONE}/DOCUMENTATION.md" "${tmpdir}/${INTLIB_DEVDOC_FILENAME}"
cp -f "${LIBINST_CLONE}/README.md" "${tmpdir}/${INTLIB_README_FILENAME}"
export LIBINST_CLONE="$tmpdir"
return 0
}
# internal library actions
intlibaction_documentation () {
build_documentation
return 0
}
intlibaction_documentation_tomd () {
build_documentation 'markdown'
return 0
}
intlibaction_clean () {
clean_library_cachedir
quietecho ">> cache cleaned"
return 0
}
intlibaction_check () {
script_installation_target "$INTLIB_TARGET"
intlib_prepare_libclone
script_check "$INTLIB_BIN_FILENAME"
return 0
}
intlibaction_install () {
script_installation_target "$INTLIB_TARGET"
intlib_preset_valid
intlib_prepare_install
local oldquiet="$QUIET"
export QUIET=true
script_install
export QUIET="$oldquiet"
quietecho ">> ok, library installed in '${LIBINST_TARGET}'"
return 0
}
intlibaction_update () {
# @TODO: compare versions so a warning can be shown when this is an upgrade
local installed="${INTLIB_TARGET}/${INTLIB_BIN_FILENAME}"
if [ ! -f "$installed" ]; then
simple_error "no installed library found to update in '${INTLIB_TARGET}'!"
fi
script_installation_target "$INTLIB_TARGET"
intlib_preset_valid
intlib_prepare_install
local oldquiet="$QUIET"
export QUIET=true
script_update
export QUIET="$oldquiet"
quietecho ">> ok, library updated in '${LIBINST_TARGET}'"
return 0
}
intlibaction_uninstall () {
script_installation_target "$INTLIB_TARGET"
intlib_preset_valid
local oldquiet="$QUIET"
export QUIET=true
local result="$(script_uninstall)"
export QUIET="$oldquiet"
if [ "$result" != 'false' ]
then quietecho ">> ok, library deleted from '${LIBINST_TARGET}'"
else quietecho "nothing to un-install"
fi
return 0
}
intlibaction_version () {
script_installation_target "$INTLIB_TARGET"
library_version "$QUIET"
return 0
}
intlibaction_help () {
script_long_usage; return 0
}
intlibaction_usage () {
script_usage; return 0
}
intlibaction_evaluate () {
verbose_echo "> evaluating: '$INTLIB_EXEC'"
evaluate "$INTLIB_EXEC" 1>/dev/null 2>&1;
( [ -n "$CMD_OUT" ] && [ "$QUIET" = 'false' ] ) && echo "$CMD_OUT" >&1;
[ -n "$CMD_ERR" ] && echo "$CMD_ERR" >&2;
return "$CMD_STATUS";
}
intlib_check_uptodate () {
if [ "$QUIET" = 'true' ]; then return 0; fi
local now="$(date '+%s')"
local fmdate="$(stat -c "%Y" "$0")"
local checkdiff=$((now - fmdate))
# simple check
local ts_limit=$((INTLIB_OUTDATED_CHECK * 24 * 60 * 60))
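# e.g. with the default INTLIB_OUTDATED_CHECK=30, ts_limit is 2592000 seconds (30 days)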
if [ "$checkdiff" -gt "$ts_limit" ]; then
export INTLIB_TARGET="$(dirname "$0")"
if ! intlibaction_check 1> /dev/null; then
info "This library version is more than ${INTLIB_OUTDATED_CHECK} days old and a newer version is available ... You should run '$0 update' to update it.";
fi
fi
# forced check
local ts_limit_forced=$((INTLIB_OUTDATED_FORCE * 24 * 60 * 60))
if [ "$checkdiff" -gt "$ts_limit_forced" ]; then
info "This library version is more than ${INTLIB_OUTDATED_FORCE} days old ... You should run '$0 update' to get last version.";
fi
return 0
}
# get any piped content
read_from_pipe
# parsing options
#rearrange_script_options "$@"
rearrange_script_options_new "$0" "$@"
[ -n "$SCRIPT_PARAMS" ] && eval set -- "$SCRIPT_PARAMS"
parse_common_options_strict "$@"
OPTIND=1
while getopts ":${OPTIONS_ALLOWED}" OPTION; do
OPTARG="${OPTARG#=}"
case "$OPTION" in
h|f|i|q|v|x|V|d|l) ;;
t) INTLIB_TARGET="$OPTARG";;
p) INTLIB_PRESET="$OPTARG";;
b) LIBINST_BRANCH="$OPTARG";;
r) INTLIB_RELEASE="$OPTARG";;
e) INTLIB_EXEC="${OPTARG:-${SCRIPT_PIPED_INPUT}}"; ACTION='exec';;
-) parse_long_option "$OPTARG" "${!OPTIND}"
case "$LONGOPTNAME" in
target) INTLIB_TARGET="$LONGOPTARG";;
local) INTLIB_TARGET="$(pwd)";;
preset) INTLIB_PRESET="$LONGOPTARG";;
branch) LIBINST_BRANCH="$LONGOPTARG";;
release)INTLIB_RELEASE="$LONGOPTARG";;
exec) INTLIB_EXEC="${LONGOPTARG:-${SCRIPT_PIPED_INPUT}}"; ACTION='exec';;
?) ;;
esac ;;
?) ;;
esac
done
export INTLIB_TARGET INTLIB_PRESET LIBINST_BRANCH INTLIB_RELEASE
if [ -z "$ACTION" ]; then
get_next_argument
ACTION="$ARGUMENT"
fi
# check last updates
intlib_check_uptodate
# checking env
# -> action is required
if [ -z "$ACTION" ]; then simple_error 'nothing to do'; fi
# -> check action validity
in_array "$ACTION" "${INTLIB_ACTION_ALLOWED[@]}" || simple_error "unknown action '${ACTION}'!";
# prepare
INTLIB_TARGET="$(resolve "$INTLIB_TARGET")"
# executing action
if [ "$DEBUG" = 'true' ]; then library_debug "$*"; fi
case "$ACTION" in
clean) intlibaction_clean; exit 0;;
check) intlibaction_check; exit 0;;
install) intlibaction_install; exit 0;;
update) intlibaction_update; exit 0;;
uninstall) intlibaction_uninstall; exit 0;;
help) intlibaction_help; exit 0;;
usage) intlibaction_usage; exit 0;;
version) intlibaction_version; exit 0;;
documentation) intlibaction_documentation; exit 0;;
mddocumentation) intlibaction_documentation_tomd; exit 0;;
exec) intlibaction_evaluate; exit $?;;
esac
# Endfile
# vim: autoindent tabstop=4 shiftwidth=4 expandtab softtabstop=4 filetype=sh
| piwi/bash-library | bin/piwi-bash-library.bash | Shell | gpl-3.0 | 129,819 |
#!/bin/bash
echo Restarting servers...
echo "---- Stopping ----"
./stop-server.sh
echo " "
echo "---- Starting ----"
./start-server.sh
| egeldenhuys/dalla-stats | restart-server.sh | Shell | gpl-3.0 | 137 |
# This file is part of Relax-and-Recover, licensed under the GNU General
# Public License. Refer to the included COPYING for full text of license.
# mount tmpfs on /tmp if not present
if ! mount | grep -q ' on /tmp '; then
LogPrint "File system /tmp not present - try to mount it via tmpfs"
mount -t tmpfs tmpfs /tmp >&8
LogIfError "Could not mount tmpfs on /tmp"
fi
| terreActive/rear | usr/share/rear/verify/DUPLICITY/default/200_check_tmpfs.sh | Shell | gpl-3.0 | 388 |
#!/bin/bash -e
INSTANCE_NAME="tplmap-ruby"
IMAGE_NAME="tplmap-ruby-img"
PORT=15005
echo "Exposed testing APIs:
http://localhost:15005/reflect/eval?inj=*
http://localhost:15005/blind/eval?inj=*
http://localhost:15005/reflect/slim?inj=*
http://localhost:15005/blind/slim?inj=*
http://localhost:15005/reflect/erb?inj=*
http://localhost:15005/blind/erb?inj=*
"
cd "$( dirname "${BASH_SOURCE[0]}" )"/../
docker rm -f $INSTANCE_NAME || echo ''
docker build -f docker-envs/Dockerfile.ruby . -t $IMAGE_NAME
docker run --rm --name $INSTANCE_NAME -p $PORT:$PORT -d $IMAGE_NAME
# Wait until the http server is serving
until $(curl --output /dev/null --silent --head http://localhost:$PORT/); do
sleep 1
done
# Launch the Ruby engine tests
docker exec -it $INSTANCE_NAME python -m unittest discover -v . 'test_ruby_*.py'
docker stop $INSTANCE_NAME | epinna/tplmap | tests/run_ruby_tests.sh | Shell | gpl-3.0 | 844 |
#!/usr/bin/env bash
set -x
set -e
JDIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
source "$JDIR"/util.sh
[[ -n $NODE_LABELS ]] || exit 0
if has OSX $NODE_LABELS; then
brew update
brew upgrade
brew install boost pkg-config cryptopp
brew cleanup
fi
if has Ubuntu $NODE_LABELS; then
sudo apt-get update -qq -y
sudo apt-get -qq -y install build-essential pkg-config
sudo apt-get -qq -y install libcrypto++-dev
if has Ubuntu-12.04 $NODE_LABELS; then
sudo apt-get install -qq -y libboost1.48-all-dev
else
sudo apt-get install -qq -y libboost-all-dev
fi
fi
| bruinfish/ndn-group-encrypt | .jenkins.d/00-deps.sh | Shell | gpl-3.0 | 618 |
#!/bin/sh
for db in osu osu_store osu_mp osu_chat osu_charts osu_updates; do
echo "CREATE DATABASE IF NOT EXISTS ${db} DEFAULT CHARSET utf8mb4" | mysql -u root "$@"
echo "CREATE DATABASE IF NOT EXISTS ${db}_test DEFAULT CHARSET utf8mb4" | mysql -u root "$@"
done
| omkelderman/osu-web | bin/db_setup.sh | Shell | agpl-3.0 | 268 |
#!/usr/bin/env bash
#
# This script is used by continuous integration tools to build a nightly version of the
# netshell example
#
# Stop at the first error
set -e
# Retrive the project filename from the command line
DFT_PROJECT=$1
# Stores the current working dir
WORKING_DIR=$(pwd)
# Setup virtual_env environnement
export WORKON_HOME=${WORKING_DIR}/python_virtualenvs
export VIRTUALENVWRAPPER_LOG_DIR=${WORKON_HOME}
export VIRTUALENVWRAPPER_HOOK_DIR=${WORKON_HOME}
export VIRTUALENVWRAPPER_PYTHON=/usr/bin/python3
source /usr/share/virtualenvwrapper/virtualenvwrapper.sh
# Move to dft tool source dir
cd toolkit/dft
# Run the dft tool to build the root_fs
sudo python3.5 dft build_rootfs --project-file ${WORKING_DIR}/${DFT_PROJECT} --log-level debug --keep-bootstrap-files --limit-arch mips
# Go back to startup directory and clean working directory
cd ${WORKING_DIR}
sudo rm -fr ${WORKING_DIR}/working_dir/*/rootfs
| wbonnet/lffs | toolkit/ci/build-firmware-mips.sh | Shell | apache-2.0 | 921 |
#!/usr/bin/env bash
# Inspired by:
# https://github.com/kubernetes/code-generator/blob/master/generate-groups.sh
#
# https://github.com/jw-s/redis-operator/tree/master/hack
PROJECT=github.com/tangfeixiong/go-to-kubernetes
GOPATH="$(cd $(dirname ${BASH_SOURCE})/../../../../../.. && pwd)"
GOPATH=$GOPATH deepcopy-gen \
--input-dirs ${PROJECT}/redis-operator/pkg/apis/example.com/v1 \
--output-file-base zz_generated.deepcopy \
--bounding-dirs ${PROJECT}/redis-operator/pkg/apis \
--go-header-file /dev/null \
--logtostderr --v 2 | tangfeixiong/go-for-kubernetes | redis-operator/hack/standalone-update-deepcopy.sh | Shell | apache-2.0 | 544 |
#!/usr/bin/env bash
sudo docker build \
--tag="lburgazzoli/ubuntu-ui:16.10" \
.
| lburgazzoli/lb-docker-files | ubuntu/16.10-ui/build.sh | Shell | apache-2.0 | 89 |
#!/bin/bash
# Run script from scenes or scenes-paper to create all symlinks
rapterRoot="/home/bontius/workspace/globOpt"
ln -sf $rapterRoot/RAPter/build/Release/bin/rapter
ln -sf $rapterRoot/RAPter/build/Debug/bin/rapter rapterd
ln -sf $rapterRoot/visualization/build/Release/bin/rapterVis
ln -sf $rapterRoot/visualization/build/Debug/bin/rapterVis rapterVisd
ln -sf $rapterRoot/RAPter/build/Release/bin/corresp
ln -sf $rapterRoot/RAPter/scripts/divide.py
ln -sf $rapterRoot/RAPter/scripts/run.sh
ln -sf $rapterRoot/RAPter/scripts/runRepr.sh
ln -sf $rapterRoot/RAPter/build/Release/bin/pearl
ln -sf $rapterRoot/RAPter/build/Debug/bin/pearl pearld
ln -sf $rapterRoot/RAPter/build/Release/bin/ransac
ln -sf $rapterRoot/RAPter/build/Debug/bin/ransac ransacd
ln -sf $rapterRoot/RAPter/build/Release/bin/toGlobFit
ln -sf $rapterRoot/RAPter/build/Release/bin/refit
ln -sf $rapterRoot/RAPter/scripts/refit.py
ln -sf $rapterRoot/RAPter/scripts/show.py
ln -sf $rapterRoot/RAPter/scripts/runGlobfit.py
ln -sf $rapterRoot/RAPter/scripts/compareToGlobfit.py
ln -sf $rapterRoot/RAPter/scripts/noisePw2Html.py
ln -sf $rapterRoot/RAPter/scripts/noisePwMatrix.py
ln -sf $rapterRoot/RAPter/scripts/runSegmentation.py
ln -sf $rapterRoot/evaluation/normal_distr.py
ln -sf $rapterRoot/evaluation/readGraphProperties.py
ln -sf $rapterRoot/evaluation/collectStatistics.py
ln -sf $rapterRoot/evaluation/computeNormalEstimationError.py
ln -sf $rapterRoot/evaluation/generatePolarPrimDirections.py
ln -sf $rapterRoot/RAPter/scripts/showcands.sh
ln -sf $rapterRoot/RAPter/scripts/showPearl.sh
ln -sf $rapterRoot/RAPter/scripts/showquick.sh
ln -sf $rapterRoot/RAPter/scripts/cleanupFolder.sh | amonszpart/globOpt | RAPter/scripts/createSymlinks.sh | Shell | apache-2.0 | 1,673 |
#!/bin/bash
echo "you can use -n argument to skip the s3 download if you did it once"
echo "files are unzipped to ../../h2o-downloaded"
# This is critical:
# Ensure that all your children are truly dead when you yourself are killed.
# trap "kill -- -$BASHPID" INT TERM EXIT
# leave out EXIT for now
trap "kill -- -$BASHPID" INT TERM
echo "BASHPID: $BASHPID"
echo "current PID: $$"
source ./runner_setup.sh "$@"
echo "Do we have to clean out old ice_root dirs somewhere?"
echo "Setting up sandbox, since no cloud build here will clear it out! (unlike other runners)"
rm -fr sandbox
mkdir -p sandbox
# Should we do this cloud build with sh2junit.py, to get logging, xml etc.?
# I suppose we could just have a test verify the requested cloud size after building.
HDP_JOBTRACKER=192.168.1.154:8021
HDP_NODES=4
HDP_HEAP=20g
HDP_JAR=h2odriver_hdp1.3.2.jar
H2O_DOWNLOADED=../../h2o-downloaded
H2O_HADOOP=$H2O_DOWNLOADED/hadoop
H2O_JAR=h2o.jar
HDFS_OUTPUT=hdfsOutputDirName
# file created by the h2o on hadoop h2odriver*jar
REMOTE_HOME=/home/0xcustomer
REMOTE_IP=192.168.1.154
REMOTE_USER=0xcustomer@$REMOTE_IP
REMOTE_SCP="scp -i $HOME/.0xcustomer/0xcustomer_id_rsa"
REMOTE_SSH_USER="ssh -i $HOME/.0xcustomer/0xcustomer_id_rsa $REMOTE_USER"
source ./kill_hadoop_jobs.sh
#*****HERE' WHERE WE START H2O ON HADOOP*******************************************
rm -f /tmp/h2o_on_hadoop_$REMOTE_IP.sh
echo "cd /home/0xcustomer" > /tmp/h2o_on_hadoop_$REMOTE_IP.sh
echo "rm -fr h2o_one_node" >> /tmp/h2o_on_hadoop_$REMOTE_IP.sh
set +e
# remember to update this, to match whatever user kicks off the h2o on hadoop
echo "hadoop dfs -rmr /user/0xcustomer/$HDFS_OUTPUT" >> /tmp/h2o_on_hadoop_$REMOTE_IP.sh
set -e
echo "hadoop jar $HDP_JAR water.hadoop.h2odriver -jt $HDP_JOBTRACKER -libjars $H2O_JAR -mapperXmx $HDP_HEAP -nodes $HDP_NODES -output $HDFS_OUTPUT -notify h2o_one_node " >> /tmp/h2o_on_hadoop_$REMOTE_IP.sh
# copy the script, just so we have it there too
$REMOTE_SCP /tmp/h2o_on_hadoop_$REMOTE_IP.sh $REMOTE_USER:$REMOTE_HOME
# have to copy the downloaded h2o stuff over to xxx to execute with the ssh
# it needs the right hadoop client setup. This is easier than installing hadoop client stuff here.
# do the jars last, so we can see the script without waiting for the copy
echo "scp some jars"
$REMOTE_SCP $H2O_HADOOP/$HDP_JAR $REMOTE_USER:$REMOTE_HOME
$REMOTE_SCP $H2O_DOWNLOADED/$H2O_JAR $REMOTE_USER:$REMOTE_HOME
# exchange keys so jenkins can do this?
# background!
cat /tmp/h2o_on_hadoop_$REMOTE_IP.sh
cat /tmp/h2o_on_hadoop_$REMOTE_IP.sh | $REMOTE_SSH_USER &
#*********************************************************************************
CLOUD_PID=$!
jobs -l
source ./wait_for_h2o_on_hadoop.sh
# use these args when we do Runit
while IFS=';' read CLOUD_IP CLOUD_PORT
do
echo $CLOUD_IP, $CLOUD_PORT
done < h2o_one_node
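# Each h2o_one_node line is "<ip>;<port>", e.g. (illustrative) "192.168.1.154;54321"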
rm -fr h2o-nodes.json
# NOTE: keep this hdfs info in sync with the json used to build the cloud above
../find_cloud.py -f h2o_one_node -hdfs_version cdh3 -hdfs_name_node 192.168.1.176 -expected_size $HDP_NODES
echo "h2o-nodes.json should now exist"
ls -ltr h2o-nodes.json
# cp it to sandbox? not sure if anything is, for this setup
cp -f h2o-nodes.json sandbox
cp -f h2o_one_node sandbox
#***********************************************************************************
echo "Touch all the 0xcustomer-datasets mnt points, to get autofs to mount them."
echo "Permission rights extend to the top level now, so only 0xcustomer can automount them"
echo "okay to ls the top level here...no secret info..do all the machines hadoop (cdh3) might be using"
for mr in 171 172 173 174 175 176 177 178 179 180
do
ssh -i $HOME/.0xcustomer/0xcustomer_id_rsa [email protected].$mr 'cd /mnt/0xcustomer-datasets'
done
# We now have the h2o-nodes.json, that means we started the jvms
# Shouldn't need to wait for h2o cloud here..
# the test should do the normal cloud-stabilize before it does anything.
# n0.doit uses nosetests so the xml gets created on completion. (n0.doit is a single test thing)
# A little '|| true' hack to make sure we don't fail out if this subtest fails
# test_c1_rel has 1 subtest
# This could be a runner that loops through a list of tests.
# belt and suspenders ... for resolving bucket path names
export H2O_REMOTE_BUCKETS_ROOT=/home/0xcustomer
echo "If it exists, pytest_config-<username>.json in this dir will be used"
echo "i.e. pytest_config-jenkins.json"
echo "Used to run as 0xcust.., with multi-node targets (possibly)"
myPy() {
DOIT=../testdir_single_jvm/n0.doit
$DOIT $1/$2 || true
# try moving all the logs created by this test in sandbox to a subdir to isolate test failures
# think of h2o.check_sandbox_for_errors()
rm -f -r sandbox/$1
mkdir -p sandbox/$1
cp -f sandbox/*log sandbox/$1
# rm -f sandbox/*log
}
# myPy c5 test_c5_KMeans_sphere15_180GB.py
# don't run this until we know whether 0xcustomer permissions also exist for the hadoop job
# myPy c1 test_c1_rel.py
myPy c2 test_c2_rel.py
# myPy c3 test_c3_rel.py
# myPy c4 test_c4_four_billion_rows.py
myPy c6 test_c6_hdfs.py
# If this one fails, fail this script so the bash dies
# We don't want to hang waiting for the cloud to terminate.
myPy shutdown test_shutdown.py
echo "Maybe it takes some time for hadoop to shut it down? sleep 10"
sleep 10
if ps -p $CLOUD_PID > /dev/null
then
echo "$CLOUD_PID is still running after shutdown. Will kill"
kill $CLOUD_PID
# may take a second?
sleep 1
fi
ps aux | grep h2odriver
jobs -l
echo ""
echo "The h2odriver job should be gone. It was pid $CLOUD_PID"
echo "The hadoop job(s) should be gone?"
$REMOTE_SSH_USER "hadoop job -list"
| woobe/h2o | py/testdir_release/runner_hdp1.3.sh | Shell | apache-2.0 | 5,665 |
#!/bin/bash -ex
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
export ZSTD_VERSION="1.4.5"
curl -sL "https://github.com/facebook/zstd/archive/v${ZSTD_VERSION}.tar.gz" -o zstd-${ZSTD_VERSION}.tar.gz
tar xf zstd-${ZSTD_VERSION}.tar.gz
pushd zstd-${ZSTD_VERSION}
mkdir build_cmake
pushd build_cmake
cmake -GNinja -DCMAKE_BUILD_TYPE=Release \
-DCMAKE_INSTALL_PREFIX=/usr/local \
-DZSTD_BUILD_PROGRAMS=off \
-DZSTD_BUILD_SHARED=off \
-DZSTD_BUILD_STATIC=on \
-DZSTD_MULTITHREAD_SUPPORT=off \
-DCMAKE_POSITION_INDEPENDENT_CODE=1 \
../build/cmake
ninja install
popd
popd
rm -rf zstd-${ZSTD_VERSION}.tar.gz zstd-${ZSTD_VERSION}
| xhochy/arrow | python/manylinux201x/scripts/build_zstd.sh | Shell | apache-2.0 | 1,390 |
#!/bin/sh
. /etc/sysconfig/heat-params
if [ "$NETWORK_DRIVER" == "flannel" ]; then
FLANNEL_DOCKER_BRIDGE_BIN=/usr/local/bin/flannel-docker-bridge
FLANNEL_DOCKER_BRIDGE_SERVICE=/etc/systemd/system/flannel-docker-bridge.service
DOCKER_FLANNEL_CONF=/etc/systemd/system/docker.service.d/flannel.conf
FLANNEL_DOCKER_BRIDGE_CONF=/etc/systemd/system/flanneld.service.d/flannel-docker-bridge.conf
mkdir -p /etc/systemd/system/docker.service.d
mkdir -p /etc/systemd/system/flanneld.service.d
cat >> $FLANNEL_DOCKER_BRIDGE_BIN <<EOF
#!/bin/sh
if ! [ "\$FLANNEL_SUBNET" ] && [ "\$FLANNEL_MTU" ] ; then
echo "ERROR: missing required environment variables." >&2
exit 1
fi
mkdir -p /run/flannel/
cat > /run/flannel/docker <<DOCKER_ENV
DOCKER_NETWORK_OPTIONS="--bip=\$FLANNELL_SUBNET --mtu=\$FLANNEL_MTU"
DOCKER_ENV
EOF
chown root:root $FLANNEL_DOCKER_BRIDGE_BIN
chmod 0755 $FLANNEL_DOCKER_BRIDGE_BIN
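# For reference, flanneld writes /run/flannel/subnet.env (sourced via
# EnvironmentFile below); illustrative content only, actual values vary per node:
#   FLANNEL_NETWORK=10.100.0.0/16
#   FLANNEL_SUBNET=10.100.5.1/24
#   FLANNEL_MTU=1450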
cat >> $FLANNEL_DOCKER_BRIDGE_SERVICE <<EOF
[Unit]
After=flanneld.service
Before=docker.service
Requires=flanneld.service
[Service]
Type=oneshot
EnvironmentFile=/run/flannel/subnet.env
ExecStart=/usr/local/bin/flannel-docker-bridge
[Install]
WantedBy=docker.service
EOF
chown root:root $FLANNEL_DOCKER_BRIDGE_SERVICE
chmod 0644 $FLANNEL_DOCKER_BRIDGE_SERVICE
cat >> $DOCKER_FLANNEL_CONF <<EOF
[Unit]
Requires=flannel-docker-bridge.service
After=flannel-docker-bridge.service
[Service]
EnvironmentFile=/run/flannel/docker
EOF
chown root:root $DOCKER_FLANNEL_CONF
chmod 0644 $DOCKER_FLANNEL_CONF
cat >> $FLANNEL_DOCKER_BRIDGE_CONF <<EOF
[Unit]
Requires=flannel-docker-bridge.service
Before=flannel-docker-bridge.service
[Install]
Also=flannel-docker-bridge.service
EOF
chown root:root $FLANNEL_DOCKER_BRIDGE_CONF
chmod 0644 $FLANNEL_DOCKER_BRIDGE_CONF
echo "activating service flanneld"
systemctl enable flanneld
systemctl --no-block start flanneld
fi
| eshijia/magnum | magnum/templates/heat-kubernetes/fragments/network-service.sh | Shell | apache-2.0 | 1,841 |
#!/bin/bash
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A library of helper functions and constants for the Debian OS distro
# $1: if 'true', we're building a master yaml, else a node
function build-kube-env {
local master=$1
local file=$2
rm -f ${file}
cat >$file <<EOF
ENV_TIMESTAMP: $(yaml-quote $(date -u +%Y-%m-%dT%T%z))
INSTANCE_PREFIX: $(yaml-quote ${INSTANCE_PREFIX})
NODE_INSTANCE_PREFIX: $(yaml-quote ${NODE_INSTANCE_PREFIX})
CLUSTER_IP_RANGE: $(yaml-quote ${CLUSTER_IP_RANGE:-10.244.0.0/16})
SERVER_BINARY_TAR_URL: $(yaml-quote ${SERVER_BINARY_TAR_URL})
SALT_TAR_URL: $(yaml-quote ${SALT_TAR_URL})
PORTAL_NET: $(yaml-quote ${PORTAL_NET})
ALLOCATE_NODE_CIDRS: $(yaml-quote ${ALLOCATE_NODE_CIDRS:-false})
ENABLE_CLUSTER_MONITORING: $(yaml-quote ${ENABLE_CLUSTER_MONITORING:-none})
ENABLE_NODE_MONITORING: $(yaml-quote ${ENABLE_NODE_MONITORING:-false})
ENABLE_CLUSTER_LOGGING: $(yaml-quote ${ENABLE_CLUSTER_LOGGING:-false})
ENABLE_NODE_LOGGING: $(yaml-quote ${ENABLE_NODE_LOGGING:-false})
LOGGING_DESTINATION: $(yaml-quote ${LOGGING_DESTINATION:-})
ELASTICSEARCH_LOGGING_REPLICAS: $(yaml-quote ${ELASTICSEARCH_LOGGING_REPLICAS:-})
ENABLE_CLUSTER_DNS: $(yaml-quote ${ENABLE_CLUSTER_DNS:-false})
DNS_REPLICAS: $(yaml-quote ${DNS_REPLICAS:-})
DNS_SERVER_IP: $(yaml-quote ${DNS_SERVER_IP:-})
DNS_DOMAIN: $(yaml-quote ${DNS_DOMAIN:-})
KUBE_USER: $(yaml-quote ${KUBE_USER})
KUBE_PASSWORD: $(yaml-quote ${KUBE_PASSWORD})
KUBE_BEARER_TOKEN: $(yaml-quote ${KUBE_BEARER_TOKEN})
KUBELET_TOKEN: $(yaml-quote ${KUBELET_TOKEN:-})
KUBE_PROXY_TOKEN: $(yaml-quote ${KUBE_PROXY_TOKEN:-})
ADMISSION_CONTROL: $(yaml-quote ${ADMISSION_CONTROL:-})
MASTER_IP_RANGE: $(yaml-quote ${MASTER_IP_RANGE})
EOF
if [[ "${master}" != "true" ]]; then
cat >>$file <<EOF
KUBERNETES_MASTER_NAME: $(yaml-quote ${MASTER_NAME})
ZONE: $(yaml-quote ${ZONE})
EXTRA_DOCKER_OPTS: $(yaml-quote ${EXTRA_DOCKER_OPTS})
ENABLE_DOCKER_REGISTRY_CACHE: $(yaml-quote ${ENABLE_DOCKER_REGISTRY_CACHE:-false})
EOF
fi
}
# create-master-instance creates the master instance. If called with
# an argument, the argument is used as the name to a reserved IP
# address for the master. (In the case of upgrade/repair, we re-use
# the same IP.)
#
# It requires a whole slew of assumed variables, partially due to
# the call to write-master-env. Listing them would be rather
# futile. Instead, we list the required calls to ensure any additional
# variables are set:
# ensure-temp-dir
# detect-project
# get-bearer-token
#
function create-master-instance {
local address_opt=""
[[ -n ${1:-} ]] && address_opt="--address ${1}"
write-master-env
gcloud compute instances create "${MASTER_NAME}" \
${address_opt} \
--project "${PROJECT}" \
--zone "${ZONE}" \
--machine-type "${MASTER_SIZE}" \
--image-project="${MASTER_IMAGE_PROJECT}" \
--image "${MASTER_IMAGE}" \
--tags "${MASTER_TAG}" \
--network "${NETWORK}" \
--scopes "storage-ro" "compute-rw" \
--can-ip-forward \
--metadata-from-file \
"startup-script=${KUBE_ROOT}/cluster/gce/configure-vm.sh" \
"kube-env=${KUBE_TEMP}/master-kube-env.yaml" \
--disk name="${MASTER_NAME}-pd" device-name=master-pd mode=rw boot=no auto-delete=no
}
function create-node-instance-template {
create-node-template "${NODE_INSTANCE_PREFIX}-template" "${scope_flags[*]}" \
"startup-script=${KUBE_ROOT}/cluster/gce/configure-vm.sh" \
"kube-env=${KUBE_TEMP}/node-kube-env.yaml"
}
| guoshimin/kubernetes | cluster/gce/debian/helper.sh | Shell | apache-2.0 | 4,021 |
# ~/.bash_completion.d/yaml_command
_yaml()
{
local cur=${COMP_WORDS[COMP_CWORD]}
local pos=$COMP_CWORD
local pre=${COMP_WORDS[@]:0:$pos}
local cpl=$($pre _)
if [[ "$cur" == -* ]]; then
cpl=$(printf '%s\n' $cpl | grep -- '^-')
else
cpl=$(printf '%s\n' $cpl | grep -v -- '^-')
fi
COMPREPLY=( $(compgen -W "$cpl" -- $cur) )
}
complete -F _yaml yaml
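# Assumed completion protocol (not verified here): the 'yaml' command prints
# its own candidate completions, one per word, when the current word is
# replaced by '_', e.g. (hypothetical output):
#   $ yaml load _
#   --file --inline stdin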
| rubyworks/executable | work/reference/bash_completion.sh | Shell | bsd-2-clause | 360 |
# Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Shell library for functions and initialization private to
# build_image, and not specific to any particular kind of image.
#
# TODO(jrbarnette): There's nothing holding this code together in
# one file aside from its lack of anywhere else to go. Probably,
# this file should get broken up or otherwise reorganized.
# Use canonical path since some tools (e.g. mount) do not like symlinks.
# Append build attempt to output directory.
if [ -z "${FLAGS_version}" ]; then
IMAGE_SUBDIR="${FLAGS_group}-${COREOS_VERSION_STRING}-a${FLAGS_build_attempt}"
else
IMAGE_SUBDIR="${FLAGS_group}-${FLAGS_version}"
fi
BUILD_DIR="${FLAGS_output_root}/${BOARD}/${IMAGE_SUBDIR}"
OUTSIDE_OUTPUT_DIR="../build/images/${BOARD}/${IMAGE_SUBDIR}"
set_build_symlinks() {
local build=$(basename ${BUILD_DIR})
local link
for link in "$@"; do
local path="${FLAGS_output_root}/${BOARD}/${link}"
ln -sfT "${build}" "${path}"
done
}
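# Usage sketch (illustrative link names):
#   set_build_symlinks latest "${FLAGS_group}-latest"
# creates ${FLAGS_output_root}/${BOARD}/<link> -> ${IMAGE_SUBDIR} for each argument.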
cleanup_mounts() {
echo "Cleaning up mounts"
"${BUILD_LIBRARY_DIR}/disk_util" umount "$1" || true
}
delete_prompt() {
echo "An error occurred in your build so your latest output directory" \
"is invalid."
# Only prompt if both stdin and stdout are a tty. If either is not a tty,
# then the user may not be present, so we shouldn't bother prompting.
if [ -t 0 -a -t 1 ]; then
read -p "Would you like to delete the output directory (y/N)? " SURE
SURE="${SURE:0:1}" # Get just the first character.
else
SURE="y"
echo "Running in non-interactive mode so deleting output directory."
fi
if [ "${SURE}" == "y" ] ; then
sudo rm -rf "${BUILD_DIR}"
echo "Deleted ${BUILD_DIR}"
else
echo "Not deleting ${BUILD_DIR}."
fi
}
extract_update() {
local image_name="$1"
local disk_layout="$2"
local update_path="${BUILD_DIR}/${image_name%_image.bin}_update.bin"
"${BUILD_LIBRARY_DIR}/disk_util" --disk_layout="${disk_layout}" \
extract "${BUILD_DIR}/${image_name}" "USR-A" "${update_path}"
upload_image "${update_path}"
}
zip_update_tools() {
# There isn't a 'dev' variant of this zip, so always call it production.
local update_zip="coreos_production_update.zip"
info "Generating update tools zip"
# Make sure some vars this script needs are exported
export REPO_MANIFESTS_DIR SCRIPTS_DIR
"${BUILD_LIBRARY_DIR}/generate_au_zip.py" \
--output-dir "${BUILD_DIR}" --zip-name "${update_zip}"
upload_image "${BUILD_DIR}/${update_zip}"
}
generate_update() {
local image_name="$1"
local disk_layout="$2"
local update_prefix="${image_name%_image.bin}_update"
local update="${BUILD_DIR}/${update_prefix}"
local devkey="/usr/share/update_engine/update-payload-key.key.pem"
echo "Generating update payload, signed with a dev key"
"${BUILD_LIBRARY_DIR}/disk_util" --disk_layout="${disk_layout}" \
extract "${BUILD_DIR}/${image_name}" "USR-A" "${update}.bin"
delta_generator -private_key "${devkey}" \
-new_image "${update}.bin" -out_file "${update}.gz"
upload_image -d "${update}.DIGESTS" "${update}".{bin,gz,zip}
}
# Basic command to emerge binary packages into the target image.
# Arguments to this command are passed as addition options/arguments
# to the basic emerge command.
emerge_to_image() {
local root_fs_dir="$1"; shift
sudo -E ROOT="${root_fs_dir}" \
PORTAGE_CONFIGROOT="${BUILD_DIR}"/configroot \
emerge --root-deps=rdeps --usepkgonly --jobs=$FLAGS_jobs -v "$@"
# Make sure profile.env and ld.so.cache have been generated
sudo -E ROOT="${root_fs_dir}" env-update
}
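# Usage sketch (hypothetical package atoms):
#   emerge_to_image "${root_fs_dir}" coreos-base/coreos sys-kernel/coreos-kernel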
# Switch to the dev or prod sub-profile
set_image_profile() {
local suffix="$1"
local profile="${BUILD_DIR}/configroot/etc/portage/make.profile"
if [[ ! -d "${profile}/${suffix}" ]]; then
die "Not a valid profile: ${profile}/${suffix}"
fi
local realpath=$(readlink -f "${profile}/${suffix}")
ln -snf "${realpath}" "${profile}"
}
# Usage: systemd_enable /root default.target something.service
# Or: systemd_enable /root default.target [email protected] [email protected]
systemd_enable() {
local root_fs_dir="$1"
local target="$2"
local unit_file="$3"
local unit_alias="${4:-$3}"
local wants_dir="${root_fs_dir}/usr/lib/systemd/system/${target}.wants"
sudo mkdir -p "${wants_dir}"
sudo ln -sf "../${unit_file}" "${wants_dir}/${unit_alias}"
}
# Generate a ls-like listing of a directory tree.
# The ugly printf is used to predictable time format and size in bytes.
write_contents() {
info "Writing ${2##*/}"
pushd "$1" >/dev/null
sudo TZ=UTC find -printf \
'%M %2n %-7u %-7g %7s %TY-%Tm-%Td %TH:%TM ./%P -> %l\n' \
| sed -e 's/ -> $//' > "$2"
popd >/dev/null
}
# Generate a list of packages installed in an image.
# Usage: image_packages /image/root
image_packages() {
local profile="${BUILD_DIR}/configroot/etc/portage/profile"
ROOT="$1" PORTAGE_CONFIGROOT="${BUILD_DIR}"/configroot \
equery --no-color list --format '$cpv::$repo' '*'
# In production images GCC libraries are extracted manually.
if [[ -f "${profile}/package.provided" ]]; then
xargs --arg-file="${profile}/package.provided" \
equery-${BOARD} --no-color list --format '$cpv::$repo'
fi
}
# Generate a list of installed packages in the format:
# sys-apps/systemd-212-r8::coreos
write_packages() {
info "Writing ${2##*/}"
image_packages "$1" | sort > "$2"
}
# Generate a list of packages w/ their licenses in the format:
# sys-apps/systemd-212-r8::coreos GPL-2 LGPL-2.1 MIT public-domain
write_licenses() {
info "Writing ${2##*/}"
local vdb=$(portageq-${BOARD} vdb_path)
local pkg lic
for pkg in $(image_packages "$1" | sort); do
lic="$vdb/${pkg%%:*}/LICENSE"
if [[ -f "$lic" ]]; then
echo "$pkg $(< "$lic")"
fi
done > "$2"
}
extract_docs() {
local root_fs_dir="$1"
info "Extracting docs"
tar --create --auto-compress --file="${BUILD_DIR}/doc.tar.bz2" \
--directory="${root_fs_dir}/usr/share/coreos" doc
sudo rm --recursive --force "${root_fs_dir}/usr/share/coreos/doc"
}
# Add an entry to the image's package.provided
package_provided() {
local p profile="${BUILD_DIR}/configroot/etc/portage/profile"
for p in "$@"; do
info "Writing $p to package.provided"
echo "$p" >> "${profile}/package.provided"
done
}
assert_image_size() {
local disk_img="$1"
local disk_type="$2"
local size
size=$(qemu-img info -f "${disk_type}" --output json "${disk_img}" | \
jq --raw-output '.["virtual-size"]' ; exit ${PIPESTATUS[0]})
if [[ $? -ne 0 ]]; then
die_notrace "assert failed: could not read image size"
fi
MiB=$((1024*1024))
if [[ $(($size % $MiB)) -ne 0 ]]; then
die_notrace "assert failed: image must be a multiple of 1 MiB ($size B)"
fi
}
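# Usage example: assert_image_size "${disk_img}" raw (as called from start_image below)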
start_image() {
local image_name="$1"
local disk_layout="$2"
local root_fs_dir="$3"
local update_group="$4"
local disk_img="${BUILD_DIR}/${image_name}"
mkdir -p "${BUILD_DIR}"/configroot/etc/portage/profile
ln -s "${BOARD_ROOT}"/etc/portage/make.* \
"${BOARD_ROOT}"/etc/portage/package.* \
"${BOARD_ROOT}"/etc/portage/repos.conf \
"${BUILD_DIR}"/configroot/etc/portage/
info "Using image type ${disk_layout}"
"${BUILD_LIBRARY_DIR}/disk_util" --disk_layout="${disk_layout}" \
format "${disk_img}"
assert_image_size "${disk_img}" raw
"${BUILD_LIBRARY_DIR}/disk_util" --disk_layout="${disk_layout}" \
mount "${disk_img}" "${root_fs_dir}"
trap "cleanup_mounts '${root_fs_dir}' && delete_prompt" EXIT
# First thing first, install baselayout to create a working filesystem.
emerge_to_image "${root_fs_dir}" --nodeps --oneshot sys-apps/baselayout
# FIXME(marineam): Work around glibc setting EROOT=$ROOT
# https://bugs.gentoo.org/show_bug.cgi?id=473728#c12
sudo mkdir -p "${root_fs_dir}/etc/ld.so.conf.d"
# Set /etc/lsb-release on the image.
"${BUILD_LIBRARY_DIR}/set_lsb_release" \
--root="${root_fs_dir}" \
--group="${update_group}" \
--board="${BOARD}"
}
finish_image() {
local image_name="$1"
local disk_layout="$2"
local root_fs_dir="$3"
local image_contents="$4"
local install_grub=0
local disk_img="${BUILD_DIR}/${image_name}"
sudo mkdir -p "${root_fs_dir}/boot/coreos"
sudo cp "${root_fs_dir}/usr/boot/vmlinuz" \
"${root_fs_dir}/boot/coreos/vmlinuz-a"
sudo cp "${root_fs_dir}/usr/boot/vmlinuz" \
"${root_fs_dir}/boot/coreos/vmlinuz-b"
# Record directories installed to the state partition.
# Explicitly ignore entries covered by existing configs.
local tmp_ignore=$(awk '/^[dDfFL]/ {print "--ignore=" $2}' \
"${root_fs_dir}"/usr/lib/tmpfiles.d/*.conf)
sudo "${BUILD_LIBRARY_DIR}/gen_tmpfiles.py" --root="${root_fs_dir}" \
--output="${root_fs_dir}/usr/lib/tmpfiles.d/base_image_var.conf" \
${tmp_ignore} "${root_fs_dir}/var"
sudo "${BUILD_LIBRARY_DIR}/gen_tmpfiles.py" --root="${root_fs_dir}" \
--output="${root_fs_dir}/usr/lib/tmpfiles.d/base_image_etc.conf" \
${tmp_ignore} "${root_fs_dir}/etc"
# Only configure bootloaders if there is a boot partition
if mountpoint -q "${root_fs_dir}"/boot; then
install_grub=1
${BUILD_LIBRARY_DIR}/configure_bootloaders.sh \
--boot_dir="${root_fs_dir}"/usr/boot
fi
if [[ -n "${FLAGS_developer_data}" ]]; then
local data_path="/usr/share/coreos/developer_data"
local unit_path="usr-share-coreos-developer_data"
sudo cp "${FLAGS_developer_data}" "${root_fs_dir}/${data_path}"
systemd_enable "${root_fs_dir}" system-config.target \
"[email protected]" "system-cloudinit@${unit_path}.service"
fi
write_contents "${root_fs_dir}" "${BUILD_DIR}/${image_contents}"
# Trim all fs free space to make the image more compressible so auto-update
# payloads become smaller; not fatal since fstrim won't work on Linux < 3.2
sudo fstrim "${root_fs_dir}" || true
if mountpoint -q "${root_fs_dir}/usr"; then
sudo fstrim "${root_fs_dir}/usr" || true
fi
# Sign the kernels after /usr is in a consistent state
if [[ ${COREOS_OFFICIAL:-0} -ne 1 ]]; then
sudo sbsign --key /usr/share/sb_keys/DB.key \
--cert /usr/share/sb_keys/DB.crt \
"${root_fs_dir}/boot/coreos/vmlinuz-a"
sudo mv "${root_fs_dir}/boot/coreos/vmlinuz-a.signed" \
"${root_fs_dir}/boot/coreos/vmlinuz-a"
sudo sbsign --key /usr/share/sb_keys/DB.key \
--cert /usr/share/sb_keys/DB.crt \
"${root_fs_dir}/boot/coreos/vmlinuz-b"
sudo mv "${root_fs_dir}/boot/coreos/vmlinuz-b.signed" \
"${root_fs_dir}/boot/coreos/vmlinuz-b"
fi
rm -rf "${BUILD_DIR}"/configroot
cleanup_mounts "${root_fs_dir}"
trap - EXIT
# This script must mount the ESP partition differently, so run it after unmount
if [[ "${install_grub}" -eq 1 ]]; then
local target
for target in i386-pc x86_64-efi x86_64-xen; do
${BUILD_LIBRARY_DIR}/grub_install.sh \
--target="${target}" --disk_image="${disk_img}"
done
fi
}
| BugRoger/scripts | build_library/build_image_util.sh | Shell | bsd-3-clause | 11,155 |
#!/bin/sh
bindir=/usr/bin
cat <<EOF
{
"component" : "testperl",
"daemons" : [{
"name" : "worker",
"logname": "%{component}",
"command" : "/usr/bin/perl $bindir/workerTestPerl.pl $ROOT/conf/gearbox/test-perl.conf",
"count" : 1,
"user" : "%{gearbox.user}"
}]
}
EOF
| yahoo/gearbox | workers/test-perl/conf/test-perl.conf.sh | Shell | bsd-3-clause | 318 |
#!/bin/bash
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This script does two checks on the state of the copied chromium directories:
# 1. Ensure all files present are in their appropriate BUILD.gn.
# 2. Ensure all .h files with a corresponding .cc file in chromium have their
# .cc copied here.
#
# This should be run with your working directory at the root of the chromium
# clone.
# Usage: clone_helper.sh <ninja build dir> <chromium src directory>
#        [path to gn (passing it runs the slow existence check)]
# Both of the loops below will insert any files they want to add to a BUILD.gn
# at the end of the first 'sources =' block in the file. It will insert it with
# only two spaces instead of four, so you can find files inserted by the script
# and adjust them as necessary.
function add_source_file() {
sed ":a;/sources = \[/bb;n;ba; :b;/^ \]/bc;n;bb; :c; s#^ \]# \"$1\",\n ]#; :d;n;bd" -i "$2"
}
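# Illustration (hypothetical file name):
#   add_source_file "foo/bar.cc" net/BUILD.gn
# rewrites net/BUILD.gn so the first 'sources = [' block ends with
#     "foo/bar.cc",
#   ]
# (the two-space indent marks script-added entries, per the note above).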
ninja_dir=$1
chromium_dir=$2
# The following check can be skipped by not passing a path to gn to the script,
# since this operation is slow and is more of a "clean up" pass than an
# iterative one.
if [ $# -eq 3 ]; then
gn_path=$3
# Ensure all .h,.cc, and .c files under the directory xyz are in xyz/BUILD.gn.
# This helps ensure we don't have extra files laying around in directories
# that aren't tracked in the build files. Currently, base/ is an exception to
# this test because it is a submodule.
for d in build crypto net testing url; do
diff -u \
<(find $d -type f \
-name '*.h' -o -name '*.cc' -o -name '*.c' | sort | \
sed '/net\/third_party\/quic/d') \
<($gn_path desc $1 $d:\* sources | \
sed '/^\s*\/\//!d;s/^\s*\/\/third_party\/chromium_quic\/src\///' | \
sort)
done
exit
fi
# This loop tries to catch the simplest class of build errors: missing includes
# relative to the src directory and .cc files named identically to .h files.
# This will not catch things like:
# - Third-party dependencies which include things relative to their own source
# directory.
# - Platform-specific implementation files (e.g. xyz.h and xyz_posix.cc).
while :; do
# Ensure all .h files with a matching .cc in chromium are copied here and
# placed in xyz/BUILD.gn. This helps eliminate some obvious linking errors.
for f in $(find crypto net testing url -name '*.h'); do
cc_file=$(echo $f | cut -f1 -d.).cc
[ ! -e $cc_file ] || continue
[ -e $chromium_dir/$cc_file ] || continue
mkdir -p $(dirname $cc_file)
d=$(echo $f | cut -f1 -d/)
cp $chromium_dir/$cc_file $(dirname $cc_file)
if [ -e $cc_file ]; then
echo "$cc_file >> $d/BUILD.gn"
rel_file=$(echo $cc_file | cut -f2- -d/)
add_source_file "$rel_file" $d/BUILD.gn
fi
done
# Try to build what we have so far and fix any obvious include errors. If we
# were able to add an include to fix the error, we loop again to potentially
# add a new .cc file and try again.
output=$(ninja -C $ninja_dir)
if echo "$output" | grep -q "#include \""; then
f=$(echo "$output" | grep -m1 "#include" |\
sed 's/^.*#include "\([^"]\+\)".*$/\1/')
mkdir -p $(dirname $f)
echo $f
if cp $chromium_dir/$f $(dirname $f) || \
cp $chromium_dir/out/Default/gen/$f $(dirname $f); then
b=$(echo $f | cut -f1 -d/)/BUILD.gn
echo "$f >> $b"
add_source_file "$(echo $f | cut -f2- -d/)" $b
else
echo -e "$output"
break
fi
else
echo -e "$output"
break
fi
done
| endlessm/chromium-browser | third_party/openscreen/src/third_party/chromium_quic/clone_helper.sh | Shell | bsd-3-clause | 3,635 |
#!/bin/bash
# Get azure data
# clientId : -c
# AppKey: -a
# Default values
JENKINS_USERNAME=""
JENKINS_PASSWORD=""
while getopts ":t:s:p:c:h:r:l:k:u:q:f:a:i:" opt; do
case $opt in
t) TENANTID="$OPTARG"
;;
p) PASSWORD="$OPTARG"
;;
c) CLIENTID="$OPTARG"
;;
s) SUBSCRIPTIONID="$OPTARG"
;;
h) PACKERSTORAGEACCOUNT="$OPTARG"
;;
r) RESOURCEGROUP="$OPTARG"
;;
l) RESOURCEGROUPLOCATION="$OPTARG"
;;
k) KEYVAULT="$OPTARG"
;;
i) JENKINS_FQDN="$OPTARG"
;;
u) JENKINS_USERNAME="$OPTARG"
;;
q) JENKINS_PASSWORD="$OPTARG"
;;
f) FRONT50_STORAGE="$OPTARG"
;;
a) FRONT50_KEY="$OPTARG"
;;
esac
done
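# Example invocation (placeholder values; flags map to the getopts cases above):
#   ./set_azure_spinnaker.sh -t <tenantId> -c <clientId> -p <appKey> -s <subscriptionId> \
#     -h <packerStorageAccount> -r <resourceGroup> -l <location> -k <keyVault> \
#     -i <jenkinsFqdn> -u <jenkinsUser> -q <jenkinsPassword> -f <front50Storage> -a <front50Key>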
WORKDIR=$(pwd)
# Usually the workdir is /var/lib/waagent/custom-script/download/0
JENKINS_URL='http:\/\/'$JENKINS_FQDN
DEBIAN_REPO='http:\/\/ppa.launchpad.net\/openjdk-r\/ppa\/ubuntu trusty main;'$JENKINS_URL
SED_FILE=$WORKDIR"/sedCommand.sed"
sudo printf "Upgrading the environment\n"
# Update and upgrade packages
sudo apt-mark hold walinuxagent grub-legacy-ec2
sudo printf "Holding walinuxagent\n"
sudo apt-get update -y
sudo printf "apt-get update completed\n"
sudo rm /var/lib/dpkg/updates/*
sudo printf "directory /var/lib/dpkg/updates removed\n"
sudo apt-get upgrade -y
sudo printf "apt-get upgrade completed\n"
# Install Spinnaker on the VM with no cassandra
sudo printf "Starting to install Spinnaker\n"
curl --silent https://raw.githubusercontent.com/spinnaker/spinnaker/master/InstallSpinnaker.sh | sudo bash -s -- --cloud_provider azure --azure_region $RESOURCEGROUPLOCATION --noinstall_cassandra
sudo printf "Spinnaker has been installed\n"
# configure to not use cassandra
sudo /opt/spinnaker/install/change_cassandra.sh --echo=inMemory --front50=azs
sudo printf "Configured to not use cassandra"
# Configuring the /opt/spinnaker/config/default-spinnaker-local.yml
# Let's create the sed command file and run the sed command
sudo printf "Setting up sedCommand \n"
sudo printf "s/enabled: \${SPINNAKER_AZURE_ENABLED:false}/enabled: \${SPINNAKER_AZURE_ENABLED:true}/g\n" > $SED_FILE
sudo printf "s/defaultRegion: \${SPINNAKER_AZURE_DEFAULT_REGION:westus}/defaultRegion: \${SPINNAKER_AZURE_DEFAULT_REGION:$RESOURCEGROUPLOCATION}/g\n" >> $SED_FILE
sudo printf "s/clientId:$/& %s/\n" $CLIENTID >> $SED_FILE
sudo printf "s/appKey:$/& %s/\n" $PASSWORD >> $SED_FILE
sudo printf "s/tenantId:$/& %s/\n" $TENANTID >> $SED_FILE
sudo printf "s/subscriptionId:$/& %s/\n" $SUBSCRIPTIONID >> $SED_FILE
# Adding the PackerResourceGroup, the PackerStorageAccount, the defaultResourceGroup and the defaultKeyVault
sudo printf "s/packerResourceGroup:$/& %s/\n" $RESOURCEGROUP >> $SED_FILE
sudo printf "s/packerStorageAccount:$/& %s/\n" $PACKERSTORAGEACCOUNT >> $SED_FILE
sudo printf "s/defaultResourceGroup:$/& %s/\n" $RESOURCEGROUP >> $SED_FILE
sudo printf "s/defaultKeyVault:$/& %s/\n" $KEYVAULT >> $SED_FILE
# Enable Igor for the integration with Jenkins
sudo printf "/igor:/ {\n N\n N\n N\n /enabled:/ {\n s/enabled:.*/enabled: true/\n P\n D\n }\n}\n" >> $SED_FILE
# Configure the Jenkins instance
sudo printf "/name: Jenkins.*/ {\n N\n /baseUrl:/ { s/baseUrl:.*/baseUrl: %s:8080/ }\n" $JENKINS_URL >> $SED_FILE
sudo printf " N\n /username:/ { s/username:/username: %s/ }\n" $JENKINS_USERNAME >> $SED_FILE
sudo printf " N\n /password:/ { s/password:/password: %s/ }\n" $JENKINS_PASSWORD >> $SED_FILE
sudo printf "}\n" >> $SED_FILE
# Configure Azure storage
sudo printf "/azs:/ {\n N\n s/enabled: false/enabled: true/\n N\n s/storageAccountName:/storageAccountName: $FRONT50_STORAGE/\n N\n s|storageAccountKey:|storageAccountKey: $FRONT50_KEY|\n }\n" >> $SED_FILE
sudo printf "sedCommand.sed file created\n"
# Set the variables in the spinnaker-local.yml file
sudo sed -i -f $SED_FILE /opt/spinnaker/config/spinnaker-local.yml
sudo printf "spinnaker-local.yml file has been updated\n"
# Configure rosco.yml file
sudo sed -i "/# debianRepository:/s/.*/debianRepository: $DEBIAN_REPO:9999 trusty main/" /opt/rosco/config/rosco.yml
sudo sed -i '/defaultCloudProviderType/s/.*/defaultCloudProviderType: azure/' /opt/rosco/config/rosco.yml
sudo printf "rosco.yml file has been updated\n"
# Adding apt-key key
sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys EB9B1D8886F44E2A
sudo printf "apt-key done\n"
# Removing debug file
sudo rm -f $SED_FILE
# rebooting the VM to avoid issues with front50
sudo restart spinnaker
| richstep/azure-quickstart-templates | spinnaker-jenkins-to-vmss/scripts/set_azure_spinnaker.sh | Shell | mit | 4,549 |
#!/bin/bash -e
# Loads jpgs from SDSS
# usage: cat stripe_82 | ./load_data
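# Each stdin line supplies "<rerun> <run>", e.g. (illustrative) "301 1000"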
echo "Reading from stdin..."
while read rerun run
do
echo "Loading rerun $rerun, run $run..."
rsync -aLvz --prune-empty-dirs --progress \
--include "$run/" --include "?/" --include "frame*.jpg" \
--exclude "*" \
rsync://data.sdss3.org/dr10/env/BOSS_PHOTOOBJ/frames/$rerun/ ./data/$rerun/
done
echo "All processing finished"
echo "Done."
| xkxx/last-voyage | sdss/load_data.sh | Shell | mit | 431 |
#!/bin/bash
gofiles=$(find . -name "*.go" | grep -v "/vendor/")
for gofile in $gofiles; do
echo $gofile
sed '/^import/,/^[[:space:]]*)/ { /^[[:space:]]*$/ d; }' $gofile > tmp
mv tmp $gofile
done
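# The sed above strips blank lines inside import blocks, e.g. it rewrites
#   import (
#       "fmt"
#
#       "os"
#   )
# into
#   import (
#       "fmt"
#       "os"
#   )
# so the goimports run below can regroup them with the -local flag.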
go fmt `go list ./... | grep -v "/vendor/"`
goimports -local github.com/hiromaily/ -w `goimports -local github.com/hiromaily/ -l ./ | grep -v "/vendor/"`
| hiromaily/go-tools | scripts/imports.sh | Shell | mit | 364 |
#!/bin/sh
################################################################################
## ##
## Copyright (c) International Business Machines Corp., 2001 ##
## ##
## This program is free software; you can redistribute it and/or modify ##
## it under the terms of the GNU General Public License as published by ##
## the Free Software Foundation; either version 2 of the License, or ##
## (at your option) any later version. ##
## ##
## This program is distributed in the hope that it will be useful, but ##
## WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY ##
## or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ##
## for more details. ##
## ##
## You should have received a copy of the GNU General Public License ##
## along with this program; if not, write to the Free Software ##
## Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA ##
## ##
################################################################################
#
# File : xinetd_tests.sh
#
# Description: Test Basic functionality of xinetd command.
# Test #1: xinetd starts programs that provide Internet services.
#
# Author: Manoj Iyer, [email protected]
#
# History: Mar 04 2003 - Created - Manoj Iyer.
#
# Function: chk_ifexists
#
# Description: - Check if command required for this test exists.
#
# Input: - $1 - calling test case.
# - $2 - command that needs to be checked.
#
# Return: - zero on success.
# - non-zero on failure.
chk_ifexists()
{
which $2 > $LTPTMP/tst_xinetd.err 2>&1
RC=$?
if [ $RC -ne 0 ]
then
tst_brkm TBROK NULL "$1: command $2 not found."
fi
return $RC
}
# Function: init
#
# Description: - Check if command required for this test exists.
# - Create temporary directories required for this test.
# - Initialize global variables.
#
# Return: - zero on success.
# - non-zero on failure.
init()
{
# Initialize global variables.
export TST_TOTAL=2
export TCID="xinetd"
export TST_COUNT=0
. daemonlib.sh
if [ -f "/usr/lib/systemd/system/telnet.socket" ]; then
tst_brkm TCONF NULL "xinetd doesn't manage telnet"
exit $?
fi
# Initialize cleanup function.
trap "cleanup" 0
# create the temporary directory used by this testcase
if [ -z $TMP ]
then
LTPTMP=/tmp/tst_xinetd.$$
else
LTPTMP=$TMP/tst_xinetd.$$
fi
mkdir -p $LTPTMP > /dev/null 2>&1
RC=$?
if [ $RC -ne 0 ]
then
tst_brkm TBROK NULL "INIT: Unable to create temporary directory"
return $RC
fi
# sometimes the default telnet may be /usr/kerberos/bin/telnet
TELNET_COMM='/usr/bin/telnet'
# check if commands tst_*, xinetd, awk exists.
chk_ifexists INIT tst_resm || return $RC
chk_ifexists INIT xinetd || return $RC
chk_ifexists INIT diff || return $RC
chk_ifexists INIT ip || return $RC
chk_ifexists INIT $TELNET_COMM || return $RC
IPV6_ENABLED=0
ip a | grep inet6 > /dev/null 2>&1
if [ $? -eq 0 ]
then
IPV6_ENABLED=1
fi
# Create custom xinetd.conf file.
# tst_xinetd.conf.1 config file has telnet service disabled.
cat > $LTPTMP/tst_xinetd.conf.1 <<-EOF
defaults
{
instances = 25
log_type = FILE /var/log/servicelog
log_on_success = HOST PID
log_on_failure = HOST
disabled = telnet
}
EOF
RC=$?
# tst_xinetd.conf.2 config file has telnet enabled.
cat > $LTPTMP/tst_xinetd.conf.2 <<-EOF
defaults
{
instances = 25
log_type = FILE /var/log/servicelog
log_on_success = HOST PID
log_on_failure = HOST
# disabled = telnet
}
service telnet
{
socket_type = stream
protocol = tcp
wait = no
user = root
server = /usr/sbin/in.telnetd
server_args = -n
no_access =
flags = IPv6
}
EOF
RC=$?
# Create expected file with telnet disabled.
cat > $LTPTMP/tst_xinetd.exp.1 <<-EOF
telnet: connect to address 127.0.0.1: Connection refused
EOF
RC=$?
if [ $RC -ne 0 ]
then
tst_brkm TBROK NULL \
"INIT: unable to create expected file $LTPTMP/tst_xinetd.exp.1"
return $RC
fi
if [ $IPV6_ENABLED -eq 1 ]
then
cat > $LTPTMP/tst_xinetd.exp.1.ipv6 <<-EOF
telnet: connect to address ::1: Connection refused
EOF
RC=$?
if [ $RC -ne 0 ]
then
tst_brkm TBROK NULL \
"INIT: unable to create expected file $LTPTMP/tst_xinetd.exp.1"
fi
fi
# Create expected file with telnet enabled.
cat > $LTPTMP/tst_xinetd.exp.2 <<-EOF
Trying 127.0.0.1...
Connected to 127.0.0.1.
Escape character is '^]'.
Connection closed by foreign host.
EOF
RC=$?
if [ $RC -ne 0 ]
then
tst_brkm TBROK NULL \
"INIT: unable to create expected file $LTPTMP/tst_xinetd.exp.2"
return $RC
fi
if [ $IPV6_ENABLED -eq 1 ]
then
cat > $LTPTMP/tst_xinetd.exp.2.ipv6 <<-EOF
Trying ::1...
Connected to ::1.
Escape character is '^]'.
Connection closed by foreign host.
EOF
RC=$?
if [ $RC -ne 0 ]
then
tst_brkm TBROK NULL \
"INIT: unable to create expected file $LTPTMP/tst_xinetd.exp.2.ipv6"
fi
fi
return $RC
}
# Function: cleanup
#
# Description: - remove temporary files and directories.
#
# Return: - zero on success.
# - non-zero on failure.
cleanup()
{
# restore the original xinetd.conf if a backup exists.
if [ -f /etc/xinetd.conf.orig ]
then
mv /etc/xinetd.conf.orig /etc/xinetd.conf \
> $LTPTMP/tst_xinetd.err 2>&1
RC=$?
if [ $RC -ne 0 ]
then
tst_res TINFO $LTPTMP/tst_xinetd.err \
"CLEANUP: failed restoring original xinetd.conf RC=$RC. Details:"
fi
sleep 1s
# restoring original services
restart_daemon xinetd > $LTPTMP/tst_xinetd.err 2>&1
RC=$?
if [ $RC -ne 0 ]
then
tst_res TINFO $LTPTMP/tst_xinetd.err \
"CLEANUP: failed restoring original services RC=$RC. Details:"
fi
fi
# remove all the temporary files created by this test.
tst_resm TINFO "CLEAN: removing $LTPTMP"
rm -fr $LTPTMP
}
# Function: test01
#
# Description: - Test that xinetd reads the configuration file and starts or
# stops services.
# - restart xinetd with configuration file with telnet disabled.
# - telnet to localhost should fail.
# - restart xinetd with configuration file with telnet enabled.
# - telnet to localhost should work.
#
# Return: - zero on success.
# - non-zero on failure.
test01()
{
TCID=xinetd01
TST_COUNT=1
nhops=0 # Number of hops required to get to host.
tst_resm TINFO "Test #1: restart xinetd with telnet disabled."
# create a backup of the original xinetd.conf file.
mv /etc/xinetd.conf /etc/xinetd.conf.orig > $LTPTMP/tst_xinetd.err 2>&1
RC=$?
if [ $RC -ne 0 ]
then
tst_brk TBROK $LTPTMP/tst_xinetd.err NULL \
"Test #1: Failed while backing up original xinetd.conf. Details"
return $RC
fi
# install the new config file with telnet disabled.
mv $LTPTMP/tst_xinetd.conf.1 /etc/xinetd.conf > $LTPTMP/tst_xinetd.err 2>&1
RC=$?
if [ $RC -ne 0 ]
then
tst_brk TBROK $LTPTMP/tst_xinetd.err NULL \
"Test #1: Failed installing new xinetd.conf in /etc. Details:"
return $RC
fi
tst_resm TINFO "Test #1: new xinetd.conf installed with telnet disabled."
sleep 1s
# restart xinetd to re-start the services
restart_daemon xinetd > $LTPTMP/tst_xinetd.out 2>&1
RC=$?
if [ $RC -ne 0 ]
then
tst_res TFAIL $LTPTMP/tst_xinetd.out \
"Test #1: unable to restart service with telnet disabled. Details:"
return $RC
else
# even if xinetd restart has zero exit value,
# make certain there was no failure.
grep -i "fail" $LTPTMP/tst_xinetd.out > $LTPTMP/tst_xinetd.err 2>&1
RC=$?
if [ $RC -eq 0 ]
then
tst_res TFAIL $LTPTMP/tst_xinetd.err \
"Test #1: xinetd failed to restart. Details"
return $RC
else
RC=0
tst_resm TINFO \
"Test #1: xinetd re-started successfully with telnet disabled."
fi
fi
# Not checking for exit code from telnet command because telnet is
# not terminated by the test gracefully.
if [ $IPV6_ENABLED -eq 1 ]
then
echo "" | $TELNET_COMM ::1 2>$LTPTMP/tst_xinetd.out.ipv6 1>/dev/null
diff -iwB $LTPTMP/tst_xinetd.out.ipv6 $LTPTMP/tst_xinetd.exp.1.ipv6 \
> $LTPTMP/tst_xinetd.err.ipv6 2>&1
RC=$?
if [ $RC -ne 0 ]
then
tst_res TFAIL $LTPTMP/tst_xinetd.err.ipv6 \
"Test #1: with telnet diabled expected out differs RC=$RC. Details:"
return $RC
fi
fi
echo "" | $TELNET_COMM 127.0.0.1 2>$LTPTMP/tst_xinetd.out 1>/dev/null
diff -iwB $LTPTMP/tst_xinetd.out $LTPTMP/tst_xinetd.exp.1 \
> $LTPTMP/tst_xinetd.err 2>&1
RC=$?
if [ $RC -ne 0 ]
then
tst_res TFAIL $LTPTMP/tst_xinetd.err \
"Test #1: with telnet diabled expected out differs RC=$RC. Details:"
return $RC
fi
tst_resm TINFO "Test #1: restart xinetd with telnet enabled."
# install the xinetd config file with telnet enabled.
mv $LTPTMP/tst_xinetd.conf.2 /etc/xinetd.conf > $LTPTMP/tst_xinetd.err 2>&1
RC=$?
if [ $RC -ne 0 ]
then
tst_brk TBROK $LTPTMP/tst_xinetd.err NULL \
"Test #1: Failed installing new xinetd.conf in /etc. Details:"
return $RC
fi
tst_resm TINFO "Test #1: new xinetd.conf installed with telnet enabled."
sleep 1s
# restart services.
restart_daemon xinetd > $LTPTMP/tst_xinetd.out 2>&1
RC=$?
if [ $RC -ne 0 ]
then
tst_res TFAIL $LTPTMP/tst_xinetd.out \
"Test #1: unable to restart services with telnet enabled. Details:"
return $RC
else
# even if restart has a zero exit value double check for failure.
grep -i "fail" $LTPTMP/tst_xinetd.out > $LTPTMP/tst_xinetd.err 2>&1
RC=$?
if [ $RC -eq 0 ]
then
tst_res TFAIL $LTPTMP/tst_xinetd.err \
"Test #1: xinetd failed to restart. Details"
return $RC
else
RC=0
tst_resm TINFO \
"Test #1: xinetd re-started successfully with telnet enabled."
fi
fi
# Not checking for exit code from telnet command because telnet is
# not terminated by the test gracefully.
if [ $IPV6_ENABLED -eq 1 ]
then
echo "" | $TELNET_COMM ::1 >$LTPTMP/tst_xinetd.out.ipv6 2>&1
diff -iwB $LTPTMP/tst_xinetd.out.ipv6 $LTPTMP/tst_xinetd.exp.2.ipv6 \
> $LTPTMP/tst_xinetd.err.ipv6 2>&1
RC=$?
if [ $RC -ne 0 ]
then
tst_res TFAIL $LTPTMP/tst_xinetd.err.ipv6 \
"Test #1: with telnet diabled expected out differs RC=$RC. Details:"
return $RC
else
tst_resm TPASS \
"Test #1: xinetd reads the config file and starts or stops IPv6 services."
fi
fi
echo "" | $TELNET_COMM 127.0.0.1 > $LTPTMP/tst_xinetd.out 2>&1
diff -iwB $LTPTMP/tst_xinetd.out $LTPTMP/tst_xinetd.exp.2 \
> $LTPTMP/tst_xinetd.err 2>&1
RC=$?
if [ $RC -ne 0 ]
then
tst_res TFAIL $LTPTMP/tst_xinetd.err \
"Test #1: expected output differes from actual. Details:"
return $RC
else
tst_resm TPASS \
"Test #1: xinetd reads the config file and starts or stops services."
fi
return $RC
}
# Function: main
#
# Description: - Execute all tests and report results.
#
# Exit: - zero on success
# - non-zero on failure.
init || exit $?
test01 || RC=$?
exit $RC
| sunyuan3/ltp | testcases/network/xinetd/xinetd_tests.sh | Shell | gpl-2.0 | 12,875 |
# This file contains all configuration information to build
# `lustre-release/lustre/contrib/wireshark'
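# Two calling modes: with no pkg-config style "--option" the block below
# just exports the build configuration and exits; otherwise execution
# falls through to the pkg-config emulation at the bottom of the file.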
[[ $1 =~ --.* ]] || {
###########################################################################
# #
# DOWNLOAD CONFIGURATION
# #
###########################################################################
## BEGIN: -can-edit ##
# URL of directory containing all source tar balls
export WS_DOWNLOAD_BASE_URL='http://wiresharkdownloads.riverbed.com'
WS_DOWNLOAD_BASE_URL+='/wireshark/src/all-versions'
# wireshark verion to be used
export WS_VERSION='1.6.8'
## END : -can-edit ##
# URL of the wireshark source code tarball
# Implicit assumption: Wireshark release names follow the naming
# convention encoded in the following variable
export WS_SOURCE_URL="${WS_DOWNLOAD_BASE_URL}/wireshark-${WS_VERSION}.tar.bz2"
###########################################################################
# #
# BUILD ENVIRONMENT #
# #
###########################################################################
## BEGIN: -can-edit ##
# Space separate list of RPMs needed to be installed for
# compilation of wireshark
# Package name(s) (can) vary between different distributions
# If distributions are 'marked' by the same release file, its content has to be
# parsed and the variable PREREQUISITE_RPMS set according to the
# package name(s) used by each distro.
if [ -r /etc/redhat-release ] ; then
export PREREQUISITE_RPMS='gtk2 gtk2-devel glib2 libpcap libpcap-devel perl'
elif [ -r /etc/SuSE-release ] ; then
export PREREQUISITE_RPMS='gtk2 gtk2-devel glib2 libpcap0 libpcap-devel perl'
fi
# Include and linker flags needed to Lustre/LNet
# Only version-independent information should be added here
# (Back ticked expression will be evaluated by make command)
export PLUGIN_COMPILE_FLAGS='`pkg-config --libs --cflags glib-2.0`'
## END : -can-edit ##
# Top-level directory to be used to unpack/compile/install
# wireshark/lustre-git-repo
export BUILD_DIR=`pwd`
# Directory location of wireshark source code
export WS_HOME="${BUILD_DIR}/wireshark-${WS_VERSION}"
# (Relative) path of the wireshark contribution directory
export LUSTRE_WS_DIR='lustre-release/lustre/contrib/wireshark'
# RPM internal name for the Lustre/LNet plugins
export PLUGIN_RPM_NAME='lustre-wireshark-plugins'
# TAR command + options to be used to create a bzip2 tarball
export TAR='/bin/tar jcpf '
# TAR command + options to be used to unpack a bzip2 tarball
export UNTAR='/bin/tar jxpf '
exit 0
}
die() {
echo "wsconfig error: $*"
exit 1
} 1>&2
# arg1: complete package name, with version
# arg2: the minimum version
#
chk_ver() {
act_ver=${1#*-devel-} ; shift
act_ver=${act_ver%%-*}
declare low_ver=$(
printf "${act_ver}\n$1\n" | sort -V | head -n1 )
test "X$low_ver" = "X$1" || \
die "wireshark too old: $act_ver is before $1"
}
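# Hypothetical example: for package "wireshark-devel-1.6.8-1.el6" and
# minimum "1.6.0", act_ver becomes "1.6.8"; sort -V places 1.6.0 first,
# so the check passes.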
set_var() {
case "X$2" in
Xlibdir )
txt=$(echo $(rpm -q --list $1 | \
sed -n '\@/libwire@s@/libwire[^/]*$@@p' | \
sort -u) )
;;
* )
die "unknown variable: $2"
;;
esac
}
set_cflags() {
dlst=$(rpm -q --list $pkg | \
grep '/usr.*/include.*/wireshark$' | \
while read f ; do test -d $f && echo "$f" ; done)
rm -f config.h
for f in $dlst XX
do test -f $f/config.h && ln -s ${f}/config.h .
txt+=" -I$f"
done
test -f config.h || die "cannot find config header"
}
parse_wireshark() {
declare pkg=$(rpm -qa | sed -n '/wireshark-devel/{;p;q;}')
declare dlst=
while test $# -gt 1
do
txt=
case "$1" in
--libs )
txt=$(rpm -q --list $pkg | \
sed -n 's@\.so$@@p' | \
sed 's@.*/lib@-l@')
;;
--cflags )
set_cflags
;;
--modversion )
txt=${pkg#wireshark-devel-}
txt=${txt%%-*}
;;
--atleast-version=* )
chk_ver ${pkg} ${1#*=}
;;
--atleast-version )
shift
chk_ver ${pkg} ${1}
;;
--variable=* )
set_var ${pkg} ${1#*=}
;;
--variable )
shift
set_var ${pkg} ${1}
;;
* )
die "unknown option: $1"
;;
esac
test ${#txt} -gt 0 && \
printf "%s" "$(echo ' '$txt)"
shift
done
echo
}
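# Prefer the real pkg-config when it is available and can answer; fall
# back to the rpm-based emulation above only for the glib/wireshark
# queries it cannot resolve.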
pkg-config "$@" 2>/dev/null && exit 0
pkg=$#
case ${!pkg} in
glib* )
fullpkg=$(rpm -qa | grep -E '^glib[2-9].*-devel' | head -n1)
dirs=$(rpm -q --list $fullpkg | \
while read f ; do test -d $f && echo $f ; done | \
grep -F /include)
for f in $dirs ; do printf "-I$f " ; done
rpm -q --list $fullpkg | \
sed -n 's@^.*/libglib@-lglib@p' | \
sed -n 's/\.so$//p' | \
head -n 1
;;
wireshark )
parse_wireshark "$@"
;;
* )
echo huh?
exit 1
;;
esac
| sushantmane/lustre-stable | lustre/contrib/wireshark/wsconfig.sh | Shell | gpl-2.0 | 5,537 |
#! /bin/sh
gnome-doc-prepare -c -f \
&& aclocal \
&& autoheader -f \
&& automake -c -f --add-missing \
&& autoconf -f
| pv/gwaei-stuff | rpm/fedora/patches/gwaei-1.3.0-autogen.sh | Shell | gpl-3.0 | 119 |
#!/bin/bash
dir=test-demo
mkdir ${dir}
cd ${dir}
rm *pdf
rep=1000
seqlen=100000
## compare population structure for single-population data
COMPAREFILE=compareDemo
rm ${COMPAREFILE}
theta=10
r=10
source ../chisq_r.src
source ../ks_r.src
source ../tmrca_r.src
source ../bl_r.src
source ../process_sample_stats.src
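# Note: the scrmtime, scrmInittime and foo helpers used below are assumed
# to be defined by the .src files sourced above.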
#case 1
echo "case_1" > current_case
rm scrm* scrmInit*
scrm 10 ${rep} -t ${theta} -r ${r} ${seqlen} -eN 0.4 10.01 -eN 1 0.01 -T -L > scrmout
scrmtime
cat scrmout | sample_stats > scrm_stats
scrm 10 ${rep} -t ${theta} -r ${r} ${seqlen} -eN 0.4 10.01 -eN 1 0.01 -T -L -init scrmReadTrees > scrmInitout
scrmInittime
cat scrmInitout | sample_stats > scrmInit_stats
foo
#case 2
echo "case_2" > current_case
rm scrm* scrmInit*
scrm 16 ${rep} -t ${theta} -r ${r} ${seqlen} -G 5.4 -eG 0.4 1 -eN 0.8 15 -T -L > scrmout
scrmtime
cat scrmout | sample_stats > scrm_stats
scrm 16 ${rep} -t ${theta} -r ${r} ${seqlen} -G 5.4 -eG 0.4 1 -eN 0.8 15 -T -L -init scrmReadTrees > scrmInitout
scrmInittime
cat scrmInitout | sample_stats > scrmInit_stats
foo
| luntergroup/scrm_jz_stable_branch | tests/test_read_init/scrm_vs_scrmInitialtree.sh | Shell | gpl-3.0 | 1,073 |
#!/bin/sh
#
# requires:
#   svn (the source is pulled via "svn export"; see get_cvs below)
if [ $DEBUG ] ; then
set -xv
fi
rootdir=/tmp/pvfs2-build-test
tarballurl=http://www.mcs.anl.gov/hpio/pvfs2-0.0.6.tar.gz
cvsroot=:pserver:[email protected]:/anoncvs
# specify extra configure options here; for now we disable karma because
# of all the gtk warnings
with_db_arg=""
if [ $WITH_DB ] ; then
with_db_arg=--with-db=$WITH_DB
fi
configureopts="$PVFS2_CONFIGOPTS --enable-shared --enable-ucache --enable-strict --disable-karma $with_db_arg"
#
# use this method if you want to test a release
# takes no arguments. returns nonzero on error
get_dist() {
# get the source (-nv keeps it kinda quiet)
wget -nv $tarballurl
if [ $? != 0 ] ; then
echo "wget of $tarballurl failed. Aborting."
exit 1
fi
# untar the source
tar xzf $tarball
if [ -d $tarballdir ] ; then
mv $tarballdir $srcdir
fi
if [ ! -d $srcdir ] ; then
echo "Tarball $tarball did not create a $srcdir directory or a $tarballdir directory. Aborting."
exit 1
fi
}
# pulls from the source repository (via svn export) the tag or branch
# specified by the first argument. returns nonzero on error.
get_cvs() {
#cvs -Q -d $cvsroot co -r $1 pvfs2
echo "Current directory is `pwd`"
svn export --force -q http://www.orangefs.org/svn/orangefs/$1/
if [ $? -ne 0 ] ; then
echo "Pulling PVFS2 from http://www.orangefs.org/svn/orangefs/$1/ failed."
exit 1
fi
#ls -l
#mv pvfs2 pvfs2-$1
# split off the last element of the path
BRANCH=`echo $1 | awk -F"/" '{print $NF}'`
echo "Branch is ${BRANCH}"
mv $BRANCH pvfs2-$BRANCH
}
# end of user defines
tarball=`basename $tarballurl`
tarballdir=`echo $tarball | sed -e "s/.tar.gz//" | sed -e "s/.tgz//"`
old_wd=$( cd `dirname $0`; pwd)
build_kernel="false"
build_tests="false"
make_targets="all"
cvs_tag="HEAD"
kerneldir=""
usage()
{
echo "USAGE: pvfs2-build.sh <-k kernel source> <-r dir>"
echo " -k: path to kernel source (enables module build)"
echo " -r: path to directory to build and install in"
echo " -s: build with key-based security"
echo " -c: build with certificate-based security"
echo " -t: build test programs"
echo " -v: name of tag or branch in CVS"
echo ""
echo "set PVFS2_CONFIGOPTS to add platform specific configure options"
return
}
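# Hypothetical invocation:
#   ./pvfs2-build.sh -r /tmp/pvfs2-build -t -v branches/stable
# builds and installs the "stable" branch plus its test programs under
# /tmp/pvfs2-build.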
# get command line arguments
while getopts k:r:cstv: opt
do
case "$opt" in
k) build_kernel="true"; kerneldir="$OPTARG";;
r) rootdir="$OPTARG";;
c) enable_certs="true";;
s) enable_keys="true";;
t) build_tests="true";;
v) full_cvs_tag="$OPTARG";;
\?) usage; exit 1;;
esac
done
echo "PVFS2 will be built in ${rootdir}."
#cvs tag is final element of full cvs tag
cvs_tag=`echo $full_cvs_tag | awk -F"/" '{print $NF}'`
if [ ! -d $rootdir ] ; then
mkdir $rootdir
fi
if [ "$enable_keys" = "true" ]
then
configureopts="${configureopts} --enable-security-key"
fi
if [ "$enable_certs" = "true" ]
then
# TODO
echo "certificate support not yet implemented"
exit 1
fi
date=`date "+%Y-%m-%d-%H-%M"`
host=`uname -n`
srcdir=$rootdir/pvfs2-$cvs_tag
#builddir=$rootdir/BUILD-pvfs2-$cvs_tag
builddir=$srcdir
installdir=$rootdir/INSTALL-pvfs2-$cvs_tag
# clean up src, build, install directories
# clean up misc. files in the root directory too
rm -rf $srcdir $builddir $installdir
rm -rf $rootdir/*.tgz
rm -rf $rootdir/pvfs2
# move to our root dir
cd $rootdir
# could make this some sort of command line option...
get_cvs $full_cvs_tag || exit 1
# create build and install directories, configure
#mkdir $builddir
mkdir $installdir
cd $srcdir
$srcdir/prepare > prepare.out
#cd $builddir
#ls $srcdir
#$srcdir/prepare > prepare.out
if [ $build_kernel = "true" ] ; then
$srcdir/configure $configureopts --with-kernel=$kerneldir --prefix=$installdir > $rootdir/configure-${cvs_tag}.log 2>&1
make_targets="all kmod"
else
$srcdir/configure $configureopts --prefix=$installdir > $rootdir/configure-${cvs_tag}.log 2>&1
make_targets="all"
fi
if [ $? != 0 ] ; then
echo "Configure failed; see $rootdir/configure-${cvs_tag}.log. Aborting."
exit 1
fi
# make
make $make_targets > $rootdir/make-${cvs_tag}.log 2>&1
if [ $? != 0 ] ; then
echo "Make failed; see $rootdir/make-${cvs_tag}.log. Aborting."
exit 1
fi
# look through make output
PEMM=`which pvfs2-extract-make-msgs.pl 2>/dev/null`
if [ x$PEMM = "x" ] ; then
if [ ! -x $old_wd/pvfs2-extract-make-msgs.pl ] ; then
echo "Failed to find pvfs2-extract-make-msgs.pl. Aborting."
exit 1
else
PEMM=$old_wd/pvfs2-extract-make-msgs.pl
fi
fi
$PEMM $rootdir/make-${cvs_tag}.log > $rootdir/make-extracted-${cvs_tag}.log 2>&1
if [ $? != 0 ] ; then
# warnings used to be fatal. We still want no warnings, but we'll flag
# that we found some instead of bailing out altogether.
echo "Unexpected output during make; see $rootdir/make-extracted-${cvs_tag}.log."
touch $rootdir/pvfs-built-with-warnings
fi
# make install
make install > $rootdir/make-install-${cvs_tag}.log 2>&1
if [ $? != 0 ] ; then
echo "Make install failed; see $rootdir/make-install-${cvs_tag}.log. Aborting."
exit 1
fi
if [ $build_kernel = "true" ] ; then
make kmod_prefix=${installdir} kmod_install
fi
# build tests if needed
if [ $build_tests = "true" ] ; then
cd $builddir/test
$srcdir/test/configure $configureopts > $rootdir/configure-test-${cvs_tag}.log 2>&1
if [ $? != 0 ] ; then
echo "Configure of test programs failed; see $rootdir/configure-test-${cvs_tag}.log. Aborting."
exit 1
fi
# make
make all > $rootdir/make-test-${cvs_tag}.log 2>&1
if [ $? != 0 ] ; then
echo "Make failed; see $rootdir/make-test-${cvs_tag}.log. Aborting."
exit 1
fi
# look through make output
PEMM=`which pvfs2-extract-make-msgs.pl 2>/dev/null`
if [ x$PEMM = "x" ] ; then
if [ ! -x $old_wd/pvfs2-extract-make-msgs.pl ] ; then
echo "Failed to find pvfs2-extract-make-msgs.pl. Aborting."
exit 1
else
PEMM=$old_wd/pvfs2-extract-make-msgs.pl
fi
fi
$PEMM $rootdir/make-test-${cvs_tag}.log > $rootdir/make-test-extracted-${cvs_tag}.log 2>&1
if [ $? != 0 ] ; then
# same as above. Indicate that we found something,
# but don't abort
echo "Unexpected output during test make; see $rootdir/make-test-extracted-${cvs_tag}.log."
touch $rootdir/pvfs2-test-built-with-warnings
fi
make install > $rootdir/make-test-install-${cvs_tag}.log 2>&1
if [ $? != 0 ] ; then
echo "Make install (tests) failed; see $rootdir/make-test-install-${cvs_tag}.log. Aborting."
exit 1
fi
fi
exit 0
| angelos-se/orangefs-release | maint/build/pvfs2-build.sh | Shell | lgpl-3.0 | 6,451 |
#!/bin/bash -eu
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
# build project
cd c++
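# Building static libraries (--disable-shared) keeps the fuzz target
# self-contained, which the sanitizer-instrumented oss-fuzz build expects.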
autoreconf -i
./configure --disable-shared
make -j$(nproc)
make -j$(nproc) capnp-llvm-fuzzer-testcase
cp *fuzzer* $OUT/
| skia-dev/oss-fuzz | projects/capnproto/build.sh | Shell | apache-2.0 | 818 |
#!/bin/bash -eu
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
cd sql-parser
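# The sed below injects the fuzzing CXXFLAGS into the library's own compile
# flags (the stock Makefile would otherwise ignore them); the resulting
# static archive is then linked directly into the fuzz target.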
sed 's/static ?= no/LIB_CFLAGS += ${CXXFLAGS}\nstatic ?= no/g' -i Makefile
make static=yes
$CXX $CXXFLAGS $LIB_FUZZING_ENGINE \
fuzz_sql_parse.cpp libsqlparser.a -I./src -o $OUT/fuzz_sql_parse
| googlefonts/oss-fuzz | projects/sql-parser/build.sh | Shell | apache-2.0 | 886 |
#!/bin/bash
# Copyright 2019 The Vitess Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script is used to install dependencies for compiling
# the code of our upcoming Angular 2 based vtctld UI.
#
# Regular users should not have to run it. Run bootstrap.sh (located in the
# repository root) instead.
# TODO(mberlin): Merge this back into bootstrap.sh once we support caching the
# dependencies on Travis and local disk.
# Download node
node_ver=v6.3.1
node_dist=$VTROOT/dist/node
if [[ -x $node_dist/bin/node && `$node_dist/bin/node -v` == "$node_ver" ]]; then
echo "skipping nodejs download. remove $node_dist to force redownload."
else
echo "Downloading nodejs"
rm -rf $node_dist
node_tar="node_linux64.tar.xz"
curl -sL https://nodejs.org/dist/$node_ver/node-$node_ver-linux-x64.tar.xz -o $node_tar
tar xf $node_tar -C $VTROOT/dist
mv $VTROOT/dist/node-$node_ver-linux-x64 $node_dist
rm $node_tar
# Add the node directory to PATH to make sure that the Angular
# installation below can find the "node" binary.
# (dev.env does actually append it to PATH.)
source $VTROOT/dev.env
fi
echo "Installing dependencies for building web UI"
angular_cli_dir=$VTROOT/dist/angular-cli
web_dir2=$VTROOT/web/vtctld2
angular_cli_commit=cacaa4eff10e135016ef81076fab1086a3bce92f
if [[ -d $angular_cli_dir && `cd $angular_cli_dir && git rev-parse HEAD` == "$angular_cli_commit" ]]; then
echo "skipping angular cli download. remove $angular_cli_dir to force download."
else
cd $VTROOT/dist && git clone https://github.com/angular/angular-cli.git --quiet
cd $angular_cli_dir && git checkout $angular_cli_commit --quiet
fi
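# "npm link" registers the pinned angular-cli checkout globally so the
# project install below can resolve "angular-cli" to that exact commit.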
cd $angular_cli_dir && $node_dist/bin/npm link --silent
cd $web_dir2 && $node_dist/bin/npm install --silent
cd $web_dir2 && $node_dist/bin/npm link angular-cli --silent
| vitessio/vitess | tools/bootstrap_web.sh | Shell | apache-2.0 | 2,340 |
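# Run Composer under HHVM with raised socket/slow-query limits so that
# long package downloads are not killed by the default timeouts.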
alias composer="hhvm -v ResourceLimit.SocketDefaultTimeout=30 -v Http.SlowQueryThreshold=30000 /usr/local/bin/composer"
| lsvt-casey/chef-applications | templates/default/hhvm/composerviahhvm.bash | Shell | apache-2.0 | 120 |
#!/bin/sh
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
if [ "$UIMA_HOME" = "" ]
then
echo UIMA_HOME environment variable is not set
exit 1
fi
if [ "$JAVA_HOME" = "" ]
then
UIMA_JAVA_CALL=java
else
UIMA_JAVA_CALL="$JAVA_HOME/bin/java"
fi
"$UIMA_JAVA_CALL" -cp "$UIMA_HOME/lib/uima-core.jar:$UIMA_HOME/lib/uima-tools.jar" org.apache.uima.tools.migration.IbmUimaToApacheUima $1 -ext java,xml,xmi,wsdd,properties,launch,bat,cmd,sh,ksh,csh,
| hyokun31/wisekb-management-platform | wisekb-uima-ducc/apache-uima/bin/ibmUimaToApacheUima.sh | Shell | apache-2.0 | 1,217 |
#!/bin/bash
# Create Kubernetes cluster
# Update required settings in "settings" file before running this script
function pause(){
read -p "$*"
}
## Fetch GC settings
# project and zone
project=$(cat settings | grep project= | head -1 | cut -f2 -d"=")
zone=$(cat settings | grep zone= | head -1 | cut -f2 -d"=")
# CoreOS release channel
channel=$(cat settings | grep channel= | head -1 | cut -f2 -d"=")
# master instance type
master_machine_type=$(cat settings | grep master_machine_type= | head -1 | cut -f2 -d"=")
# node instance type
node_machine_type=$(cat settings | grep node_machine_type= | head -1 | cut -f2 -d"=")
# get the latest full image name
image=$(gcloud compute images list --project=$project | grep -v grep | grep coreos-$channel | awk {'print $1'})
#
# master name
master_name=$(cat settings | grep master_name= | head -1 | cut -f2 -d"=")
# node name and count
node_name=$(cat settings | grep node_name= | head -1 | cut -f2 -d"=")
node_count=$(cat settings | grep node_count= | head -1 | cut -f2 -d"=")
##
# create master node
gcloud compute instances create $master_name \
--project=$project --image=$image --image-project=coreos-cloud \
--boot-disk-type=pd-standard --boot-disk-size=20 --zone=$zone \
--machine-type=$master_machine_type --metadata-from-file user-data=./cloud-config/master.yaml \
--can-ip-forward --scopes compute-rw --tags=k8s-cluster,k8s-master
# create internal static IP for the master
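# (A /32 route pointing at the instance pins 10.222.1.1 to the master,
# emulating a static internal IP on GCE.)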
gcloud compute routes create ip-10-222-1-1-$master_name --project=$project \
--next-hop-instance $master_name \
--next-hop-instance-zone $zone \
--destination-range 10.222.1.1/32
#
# create nodes
# by default 3 nodes get created, update node_count in settings file if you want a different number of nodes
for (( i=1; i<=$node_count; i++ ))
do
gcloud compute instances create $node_name-$i \
--project=$project --image=$image --image-project=coreos-cloud \
--boot-disk-type=pd-standard --boot-disk-size=50 --zone=$zone \
--machine-type=$node_machine_type --metadata-from-file user-data=./cloud-config/node.yaml \
--can-ip-forward --tags="k8s-cluster,${node_name}"
done
#
echo " "
echo "Cluster machines setup has finished !!!"
echo "Run next script get_k8s_fleet_etcd.sh ... "
pause 'Press [Enter] key to continue ...'
| rimusz/kube-gce | 1-bootstrap_cluster.sh | Shell | apache-2.0 | 2,285 |
#!/bin/bash
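# Kolla extend_start hook: make sure the shared log directory exists with
# the permissions logstash expects before the service starts.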
if [[ ! -d "/var/log/kolla/logstash" ]]; then
mkdir -p /var/log/kolla/logstash
fi
if [[ $(stat -c %a /var/log/kolla/logstash) != "755" ]]; then
chmod 755 /var/log/kolla/logstash
fi
| stackforge/kolla | docker/logstash/extend_start.sh | Shell | apache-2.0 | 202 |