code
stringlengths 2
1.05M
| repo_name
stringlengths 5
110
| path
stringlengths 3
922
| language
stringclasses 1
value | license
stringclasses 15
values | size
int64 2
1.05M
|
---|---|---|---|---|---|
#!/bin/sh
#*******************************************************************************
#
# Copyright (C) 2015-2018 the BBoxDB project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#*******************************************************************************
#
# Bootstrap file for BBoxDB shell scripts
#
#
#########################################
# Include functions
# Quote $BBOXDB_HOME everywhere: an installation path containing spaces
# would otherwise be word-split.
. "$BBOXDB_HOME/bin/bboxdb-env.sh"
. "$BBOXDB_HOME/bin/functions.sh"

# Change working dir; abort if it does not exist instead of continuing
# from the wrong directory.
cd "$BBOXDB_HOME" || exit 1

# Find all required jars
if [ -d "$BBOXDB_HOME/target" ]; then
   # Last bboxdb*.jar found is assumed to be the current build artifact.
   jar=$(find "$BBOXDB_HOME/target" -name 'bboxdb*.jar' | tail -1)
fi

if [ -d "$BBOXDB_HOME/target/lib" ]; then
   # Join all dependency jars into one ':'-separated classpath fragment.
   libs=$(find "$BBOXDB_HOME/target/lib" -name '*.jar' | xargs echo | tr ' ' ':')
fi

# Build classpath
classpath="$BBOXDB_HOME/conf:$libs:$jar"
|
jnidzwetzki/scalephant
|
bin/bootstrap.sh
|
Shell
|
apache-2.0
| 1,337 |
#!/usr/bin/env bash
# Snap points to roads and write the result as Avro on HDFS.
hadoop fs -rm -r -skipTrash /user/root/avro

# Job properties consumed by the spark-snap-points jar.
cat << EOF > /tmp/avro.properties
spark.ui.enabled=true
system.in.read=true
gdb.path=hdfs:///user/root/TXDOT_Roadway_Inventory.gdb
point.path=hdfs:///user/root/points.csv
output.path=hdfs:///user/root/avro
output.format=avro
EOF

# BUGFIX: a space is required before each trailing backslash -- without it
# the continuation joins adjacent tokens into one word
# (e.g. "spark-submit--master"), breaking the command line.
time spark-submit \
    --master yarn-client \
    --num-executors 6 \
    --executor-cores 1 \
    --executor-memory 512M \
    --packages com.databricks:spark-avro_2.10:2.0.1,com.databricks:spark-csv_2.10:1.4.0 \
    target/spark-snap-points-0.3.jar \
    /tmp/avro.properties
|
mraad/spark-snap-points
|
yarn-avro.sh
|
Shell
|
apache-2.0
| 544 |
#!/usr/bin/env bash
# This file contains logic for integration tests which are executed by CI upon pull requests to
# dcos-commons. The script builds each framework, packages and uploads it, then runs its
# integration tests against a newly-launched cluster.

# Exit immediately on errors -- the helper scripts all emit github statuses internally
set -e

# Run proxylite's pre-test CI step; used as a (retryable) preflight check
# by run_framework_tests below.
function proxylite_preflight {
  bash frameworks/proxylite/scripts/ci.sh pre-test
}
# Build (or reuse) a framework's stub universe and run its shakedown tests.
# Arguments: $1 - framework name (directory under frameworks/)
# Reads:  REPO_ROOT_DIR, SECURITY, optional <FRAMEWORK>_STUB_UNIVERSE_URL
# Writes: exports STUB_UNIVERSE_URL for the test runner
function run_framework_tests {
    framework=$1
    FRAMEWORK_DIR=${REPO_ROOT_DIR}/frameworks/${framework}

    if [ "$framework" = "proxylite" ]; then
        # proxylite's preflight is flaky; retry once after a short pause.
        if ! proxylite_preflight; then
            sleep 5
            proxylite_preflight
        fi
    fi

    # Per-framework override variable name, e.g. "HDFS_STUB_UNIVERSE_URL".
    # BUGFIX: the variable NAME must be computed BEFORE the indirect ${!...}
    # lookup; the original performed the lookup first, so a provided
    # per-framework URL was never honored.
    STUB_UNIVERSE_URL_VAR=$(echo "${framework}_STUB_UNIVERSE_URL" | awk '{print toupper($0)}')

    # Build/upload framework scheduler artifact if one is not directly provided:
    if [ -z "${!STUB_UNIVERSE_URL_VAR}" ]; then
        # Build/upload framework scheduler:
        UNIVERSE_URL_PATH=$FRAMEWORK_DIR/${framework}-universe-url
        UNIVERSE_URL_PATH=$UNIVERSE_URL_PATH ${FRAMEWORK_DIR}/build.sh aws
        if [ ! -f "$UNIVERSE_URL_PATH" ]; then
            echo "Missing universe URL file: $UNIVERSE_URL_PATH"
            exit 1
        fi
        export STUB_UNIVERSE_URL=$(cat $UNIVERSE_URL_PATH)
        rm -f $UNIVERSE_URL_PATH
        echo "Built/uploaded stub universe: $STUB_UNIVERSE_URL"
    else
        export STUB_UNIVERSE_URL="${!STUB_UNIVERSE_URL_VAR}"
        echo "Using provided STUB_UNIVERSE_URL: $STUB_UNIVERSE_URL"
    fi

    echo Security: $SECURITY
    if [ "$SECURITY" = "strict" ]; then
        ${REPO_ROOT_DIR}/tools/setup_permissions.sh root ${framework}-role
        # Some tests install a second instance of a framework, such as "hdfs2"
        ${REPO_ROOT_DIR}/tools/setup_permissions.sh root ${framework}2-role
    fi

    # Run shakedown tests in framework directory:
    TEST_GITHUB_LABEL="${framework}" ${REPO_ROOT_DIR}/tools/run_tests.py shakedown ${FRAMEWORK_DIR}/tests/
}
echo "Beginning integration tests at "`date`

# Absolute path of the directory containing this script.
REPO_ROOT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
cd $REPO_ROOT_DIR

# Get a CCM cluster if not already configured (see available settings in dcos-commons/tools/README.md):
if [ -z "$CLUSTER_URL" ]; then
  echo "CLUSTER_URL is empty/unset, launching new cluster."
  export CCM_AGENTS=5
  CLUSTER_INFO=$(${REPO_ROOT_DIR}/tools/launch_ccm_cluster.py)
  echo "Launched cluster: ${CLUSTER_INFO}"
  # jq emits json strings by default: "value". Use --raw-output to get value without quotes
  export CLUSTER_URL=$(echo "${CLUSTER_INFO}" | jq --raw-output .url)
  export CLUSTER_ID=$(echo "${CLUSTER_INFO}" | jq .id)
  export CLUSTER_AUTH_TOKEN=$(echo "${CLUSTER_INFO}" | jq --raw-output .auth_token)
  # Remember that we own this cluster so we can tear it down at the end.
  CLUSTER_CREATED="true"
else
  echo "Using provided CLUSTER_URL as cluster: $CLUSTER_URL"
  CLUSTER_CREATED=""
fi

# A specific framework can be specified to run its tests
# Otherwise all tests are run in random framework order
if [ -n "$1" ]; then
  run_framework_tests $1
else
  # Shuffle: prefix each framework name with a random 5-digit key, sort
  # numerically, then strip the key (cut keeps columns 7+).
  for framework in $(ls $REPO_ROOT_DIR/frameworks | while IFS= read -r fw; do printf "%05d %s\n" "$RANDOM" "$fw"; done | sort -n | cut -c7-); do
    echo "Starting shakedown tests for $framework at "`date`
    run_framework_tests $framework
  done
fi

# Tests succeeded. Out of courtesy, trigger a teardown of the cluster if we created it ourselves.
# Don't wait for the cluster to complete teardown.
if [ -n "${CLUSTER_CREATED}" ]; then
  ${REPO_ROOT_DIR}/tools/launch_ccm_cluster.py trigger-stop ${CLUSTER_ID}
fi
|
adragomir/dcos-commons
|
test.sh
|
Shell
|
apache-2.0
| 3,551 |
#!/bin/bash
# author      : Jam < [email protected] >
# version     : 1.0
# description : installs PyCharm live-template XML files into the local
#               preferences directory (original note was in Chinese).
PY_VERSION=${PY_VERSION:=2016.3}
TMP_URL=${TMP_URL:=https://raw.githubusercontent.com/favorinfo/templates/master}
TMP_PATH=${TMP_PATH:=Library/Preferences/PyCharm${PY_VERSION}/templates}
ITEMS="Jam Model DjangoHTML"

# BUGFIX: ensure the templates directory exists; otherwise curl -o fails
# on a fresh machine where PyCharm never created it.
mkdir -p "$TMP_PATH"

for item in $ITEMS
do
    tmp=$TMP_PATH/$item.xml
    # Install when forced, or when the template is not present yet.
    if [ "$FORCE" ] || [ ! -e "$tmp" ]; then
        echo "准备安装模板 $item.xml..."
        curl -sfLo "$tmp" "$TMP_URL/$item.xml"
    fi
done
|
favorinfo/templates
|
install.sh
|
Shell
|
apache-2.0
| 548 |
#!/usr/bin/env bash
# Dataproc initialization action: install and start the Beam job service.
set -euxo pipefail

# Install layout and service identity.
readonly LOCAL_JAR_NAME='beam-runners-flink_2.11-job-server.jar'
readonly SERVICE_INSTALL_DIR='/usr/lib/beam-job-service'
readonly SERVICE_WORKING_DIR='/var/lib/beam-job-service'
readonly SERVICE_WORKING_USER='yarn'
# Instance metadata keys (read via get_metadata_value) and their defaults.
readonly ARTIFACTS_GCS_PATH_METADATA_KEY='beam-artifacts-gcs-path'
readonly RELEASE_SNAPSHOT_URL_METADATA_KEY="beam-job-service-snapshot"
readonly RELEASE_SNAPSHOT_URL_DEFAULT="http://repo1.maven.org/maven2/org/apache/beam/beam-runners-flink_2.11-job-server/2.6.0/beam-runners-flink_2.11-job-server-2.6.0.jar"
readonly BEAM_IMAGE_ENABLE_PULL_METADATA_KEY="beam-image-enable-pull"
readonly BEAM_IMAGE_ENABLE_PULL_DEFAULT=false
readonly BEAM_IMAGE_VERSION_METADATA_KEY="beam-image-version"
readonly BEAM_IMAGE_VERSION_DEFAULT="master"
readonly BEAM_IMAGE_REPOSITORY_KEY="beam-image-repository"
readonly BEAM_IMAGE_REPOSITORY_DEFAULT="apache.bintray.io/beam"
readonly START_FLINK_YARN_SESSION_METADATA_KEY='flink-start-yarn-session'
# Set this to true to start a flink yarn session at initialization time.
readonly START_FLINK_YARN_SESSION_DEFAULT=true
# Succeeds (returns 0) iff this node's Dataproc role metadata is 'Master'.
function is_master() {
  local node_role
  node_role="$(/usr/share/google/get_metadata_value attributes/dataproc-role)"
  # The [[ ]] test's own status is the function's return value.
  [[ "${node_role}" == 'Master' ]]
}
# Print the GCS directory for Beam artifacts: the metadata override if set,
# otherwise a default path under the cluster's Dataproc staging bucket.
function get_artifacts_dir() {
  /usr/share/google/get_metadata_value "attributes/${ARTIFACTS_GCS_PATH_METADATA_KEY}" \
    || echo "gs://$(/usr/share/google/get_metadata_value "attributes/dataproc-bucket")/beam-artifacts"
}
# Fetch the job-server jar named by $1 into ${LOCAL_JAR_NAME} in the cwd,
# using gsutil for gs:// URLs and curl for everything else.
function download_snapshot() {
  # BUGFIX: use 'local' instead of 'readonly'. 'readonly' inside a function
  # creates read-only GLOBAL variables, so any second invocation of this
  # function would abort with "readonly variable".
  local snapshot_url="${1}"
  # First five characters decide the scheme ("gs://" vs anything else).
  local protocol="$(echo "${snapshot_url}" | head -c5)"

  if [ "${protocol}" = "gs://" ]; then
    gsutil cp "${snapshot_url}" "${LOCAL_JAR_NAME}"
  else
    curl -o "${LOCAL_JAR_NAME}" "${snapshot_url}"
  fi
}
# Print the host:port the job service should use to reach Flink.
function flink_master_url() {
  local start_flink_yarn_session="$(/usr/share/google/get_metadata_value \
    "attributes/${START_FLINK_YARN_SESSION_METADATA_KEY}" \
    || echo "${START_FLINK_YARN_SESSION_DEFAULT}")"
  # TODO: delete this workaround when the beam job service is able to understand
  # flink in yarn mode.
  if ${start_flink_yarn_session} ; then
    # grab final field from the first yarn application that contains 'flink'
    # (fields are tab-separated; field 9 is presumably the tracking URL --
    # confirm against 'yarn application -list' output). cut -c8- strips the
    # 7-character "http://" prefix.
    yarn application -list \
      | grep -i 'flink' \
      | head -n1 \
      | awk -F $'\t' '{print $9}' \
      | cut -c8-
  else
    echo "localhost:8081"
  fi
}
# Download the job-server jar into SERVICE_INSTALL_DIR and register a
# systemd unit that runs it as SERVICE_WORKING_USER.
function install_job_service() {
  local master_url="$(/usr/share/google/get_metadata_value attributes/dataproc-master)"
  local artifacts_dir="$(get_artifacts_dir)"
  local release_snapshot_url="$(/usr/share/google/get_metadata_value \
    "attributes/${RELEASE_SNAPSHOT_URL_METADATA_KEY}" \
    || echo "${RELEASE_SNAPSHOT_URL_DEFAULT}")"
  echo "Retrieving Beam Job Service snapshot from ${release_snapshot_url}"

  local flink_master="$(flink_master_url)"
  # BUGFIX: log the resolved flink master, not the dataproc master URL
  # (the original echoed ${master_url} here).
  echo "Resolved flink master to: '${flink_master}'"

  mkdir -p "${SERVICE_INSTALL_DIR}"
  pushd "${SERVICE_INSTALL_DIR}"
  download_snapshot "${release_snapshot_url}"
  popd

  mkdir -p "${SERVICE_WORKING_DIR}"
  chown -R "${SERVICE_WORKING_USER}" "${SERVICE_WORKING_DIR}"

  # systemd joins backslash-continued lines with a space, so the ExecStart
  # continuations below are safe as written.
  cat > "/etc/systemd/system/beam-job-service.service" <<EOF
[Unit]
Description=Beam Job Service
After=default.target
[Service]
Type=simple
User=${SERVICE_WORKING_USER}
WorkingDirectory=${SERVICE_WORKING_DIR}
ExecStart=/usr/bin/java \
-jar ${SERVICE_INSTALL_DIR}/${LOCAL_JAR_NAME} \
--job-host=${master_url}\
--artifacts-dir=${artifacts_dir} \
--flink-master-url=${flink_master}
Restart=on-failure
[Install]
WantedBy=multi-user.target
EOF
  systemctl enable beam-job-service
}
# (Re)start the systemd unit created by install_job_service.
function run_job_service() {
  systemctl restart beam-job-service
}
# Pre-pull the Beam SDK harness images (go/python/java) for the yarn user.
function pull_beam_images() {
  local beam_image_version="$(/usr/share/google/get_metadata_value \
    "attributes/${BEAM_IMAGE_VERSION_METADATA_KEY}" \
    || echo "${BEAM_IMAGE_VERSION_DEFAULT}")"
  local image_repo="$(/usr/share/google/get_metadata_value \
    "attributes/${BEAM_IMAGE_REPOSITORY_KEY}" \
    || echo "${BEAM_IMAGE_REPOSITORY_DEFAULT}")"
  # Pull beam images with `sudo -i` since if pulling from GCR, yarn will be
  # configured with GCR authorization
  local sdk
  for sdk in go python java; do
    sudo -u yarn -i docker pull "${image_repo}/${sdk}:${beam_image_version}"
  done
}
# Entry point: install/run the job service on the master node only, and
# optionally pre-pull the Beam SDK images.
function main() {
  # BUGFIX: the original used '[[ is_master ]]', which only tests that the
  # literal string "is_master" is non-empty and is therefore ALWAYS true --
  # every worker tried to install the job service. Call the function instead.
  if is_master; then
    install_job_service
    run_job_service
  fi

  local pull_images="$(/usr/share/google/get_metadata_value \
    "attributes/${BEAM_IMAGE_ENABLE_PULL_METADATA_KEY}" \
    || echo "${BEAM_IMAGE_ENABLE_PULL_DEFAULT}")"
  # ${pull_images} is "true"/"false", executed as a command.
  if ${pull_images} ; then
    pull_beam_images
  fi
}

main "$@"
|
dennishuo/dataproc-initialization-actions
|
beam/beam.sh
|
Shell
|
apache-2.0
| 4,711 |
#!/bin/bash
set -x

source ./environment.sh

# Local folders exposed to the driver container.
# BUGFIX: docker interprets a relative '-v zoo:/path' as a NAMED VOLUME, not
# a bind mount of ./zoo, so export absolute paths instead. mkdir -p avoids
# an error on re-runs.
# NOTE(review): the docker run executes on $MASTER via ssh, so these paths
# must also exist on that host -- confirm the deployment layout.
mkdir -p ./zoo ./pyzoo
export ZOO_PATH=$(pwd)/zoo
export PYZOO_PATH=$(pwd)/pyzoo

echo ">>> $MASTER, start spark-driver"
# All $VARS below expand locally before the command is sent to $MASTER.
# (The original wrote --cpuset-cpus="2-5"; the inner quotes were stripped by
# the outer double-quoted string anyway, so they are omitted here.)
ssh root@$MASTER "docker run -itd \
    --privileged \
    --net=host \
    --cpuset-cpus=2-5 \
    --oom-kill-disable \
    --device=/dev/gsgx \
    --device=/dev/sgx/enclave \
    --device=/dev/sgx/provision \
    -v /var/run/aesmd/aesm.socket:/var/run/aesmd/aesm.socket \
    -v $ENCLAVE_KEY_PATH:/graphene/Pal/src/host/Linux-SGX/signer/enclave-key.pem \
    -v $DATA_PATH:/ppml/trusted-big-data-ml/work/data \
    -v $KEYS_PATH:/ppml/trusted-big-data-ml/work/keys \
    -v $SECURE_PASSWORD_PATH:/ppml/trusted-big-data-ml/work/password \
    -v $PYZOO_PATH:/ppml/trusted-big-data-ml/work/pyzoo \
    -v $ZOO_PATH:/ppml/trusted-big-data-ml/work/zoo \
    --name=spark-driver \
    -e LOCAL_IP=$MASTER \
    -e SGX_MEM_SIZE=32G \
    -e SPARK_MASTER=spark://$MASTER:7077 \
    -e SPARK_DRIVER_PORT=10027 \
    -e SPARK_DRIVER_BLOCK_MANAGER_PORT=10026 \
    $TRUSTED_BIGDATA_ML_DOCKER bash"
|
intel-analytics/BigDL
|
ppml/trusted-big-data-ml/python/docker-graphene/start-distributed-spark-driver.sh
|
Shell
|
apache-2.0
| 1,089 |
# Gets node 1
# Fetches node "1" from the Exchange API as JSON, authenticating via URL
# query parameters (?id=...&token=...).
# NOTE(review): $copts, $parse, and the EXCHANGE_* variables are presumably
# set by the sourced functions.sh / environment -- confirm.
source `dirname $0`/../../functions.sh GET $*
curl $copts -X GET -H 'Accept: application/json' $EXCHANGE_URL_ROOT/v1/nodes/1'?id='$EXCHANGE_USER'&token='$EXCHANGE_PW | $parse
|
cgiroua/exchange-api
|
src/test/bash/get/nodes/1-urlparms-user.sh
|
Shell
|
apache-2.0
| 189 |
#!/bin/bash
#
# This script installs development and QA tools for Fuel.
#
set -e

# Directory this script resides in.
TOP_DIR=$(cd $(dirname "$0") && pwd)
# Shared helpers. NOTE(review): presumably defines message()/error() used
# throughout this script -- confirm.
source ${TOP_DIR}/helpers/functions.sh
# Print a short usage summary to stdout.
print_usage() {
  printf '%s\n' \
    "Usage: ${0##*/} [-h]" \
    "Options:" \
    " -h: print this usage message and exit"
}
# Abort unless running as root (uid 0); the install steps below write to
# system paths. error() comes from the sourced helpers/functions.sh.
check_root() {
  local user=$(/usr/bin/id -u)
  if [ ${user} -ne 0 ]; then
    error "Only the superuser (uid 0) can use this script."
    exit 1
  fi
}
# Parse command-line options; only -h (help) is recognized. Any other
# option is reported via error() (from helpers/functions.sh) and aborts.
parse_arguments() {
  while getopts ":h" opt; do
    case ${opt} in
      h)
        print_usage
        exit 0
        ;;
      *)
        error "An invalid option has been detected."
        print_usage
        exit 1
    esac
  done
}
# Define install locations and identities; DEST may be pre-set by the caller
# (defaults to /opt/stack). Creates DEST.
init_variables() {
  USER_NAME=developer
  USER_HOME=/home/${USER_NAME}
  DEST=${DEST:-/opt/stack}
  VIRTUALENV_DIR=${DEST}/.venv
  PIP_SECURE_LOCATION="https://raw.github.com/pypa/pip/master/contrib/get-pip.py"
  # Absolute directory of this script ($() instead of legacy backticks).
  TMP=$(dirname "$0")
  TMP=$(cd "${TMP}" && pwd)
  mkdir -p "${DEST}"
}
# Install build dependencies needed to compile Python 2.7 and the QA tools.
install_system_requirements() {
  message "Enable default CentOS repos"
  yum -y reinstall centos-release # enable default CentOS repos

  message "Installing system requirements"
  # IMPROVEMENT: one yum transaction instead of eleven -- faster, and the
  # dependency set is resolved atomically.
  yum -y install \
    gcc \
    zlib-devel \
    sqlite-devel \
    readline-devel \
    bzip2-devel \
    libgcrypt-devel \
    openssl-devel \
    libffi-devel \
    libxml2-devel \
    libxslt-devel
}
# Build Python 2.7.8 from source and install pip, tox and virtualenv for it.
install_python_27() {
  message "Installing Python 2.7"
  TMP="`mktemp -d`"
  cd ${TMP}
  wget https://www.python.org/ftp/python/2.7.8/Python-2.7.8.tgz
  tar xzf Python-2.7.8.tgz
  cd Python-2.7.8
  # altinstall installs "python2.7" without clobbering the system "python".
  ./configure --prefix=/usr/local --enable-unicode=ucs4 --enable-shared LDFLAGS="-Wl,-rpath /usr/local/lib"
  make altinstall
  message "Installing pip and virtualenv for Python 2.7"
  GETPIPPY_FILE="`mktemp`"
  wget -O ${GETPIPPY_FILE} ${PIP_SECURE_LOCATION}
  python2.7 ${GETPIPPY_FILE}
  pip2.7 install -U tox
  pip2.7 install -U virtualenv
}
# Create an isolated Python 2.7 environment for Rally/Tempest.
setup_virtualenv() {
  message "Setup virtualenv in ${VIRTUALENV_DIR}"
  virtualenv -p python2.7 ${VIRTUALENV_DIR}
}
# Activate the virtualenv in the current shell.
activate_virtualenv() {
  source ${VIRTUALENV_DIR}/bin/activate
}
# Discover the controller node via the fuel CLI, export admin OpenStack
# credentials, and add a route to the management network.
init_cluster_variables() {
  message "Initializing cluster variables"
  # First node id the fuel CLI reports with a "controller" role.
  CONTROLLER_HOST_ID="`fuel node | grep controller | awk '{print $1}'`"
  CONTROLLER_HOST="node-${CONTROLLER_HOST_ID}"
  message "Controller host: ${CONTROLLER_HOST}"
  export OS_AUTH_URL=http://${CONTROLLER_HOST}:5000/v2.0/
  export OS_AUTH_URL_v3=http://${CONTROLLER_HOST}:5000/v3.0/
  export OS_USERNAME=admin
  export OS_PASSWORD=admin
  export OS_TENANT_NAME=admin
  # Extract the identity adminURL, pull its host out, and derive the /24
  # management CIDR from the first three octets.
  ADMIN_URL="`keystone catalog --service identity | grep adminURL | awk '{print $4}'`"
  MGMT_IP="`echo ${ADMIN_URL} | sed 's/[\/:]/ /g' | awk '{print $2}'`"
  MGMT_CIDR="`echo ${MGMT_IP} | awk -F '.' '{print $1 "." $2 "." $3 ".0/24"}'`"
  message "Keystone admin URL: ${ADMIN_URL}"
  message "Calculated mgmt network CIDR: ${MGMT_CIDR}"
  message "Adding route to mgmt network"
  # '|| true': the route may already exist.
  ip ro add ${MGMT_CIDR} dev eth0 || true
  # fix permissions on fuel client
  chmod o+r /etc/fuel/client/config.yaml
}
# Clone Rally from stackforge and install it into the virtualenv.
install_rally() {
  message "Installing Rally into ${DEST}"
  cd ${DEST}
  RALLY_DIR=${DEST}/rally
  rm -rf ${RALLY_DIR}
  git clone git://git.openstack.org/stackforge/rally.git
  cd ${RALLY_DIR}
  ${VIRTUALENV_DIR}/bin/pip install -U pbr
  ${VIRTUALENV_DIR}/bin/python setup.py install
  # Globals consumed later by configure_rally() and print_information().
  RALLY_CONFIGURATION_DIR="/etc/rally"
  RALLY_DATABASE_DIR="${VIRTUALENV_DIR}/database"
  mkdir -p /etc/rally
  chmod -R o+w /etc/rally
  message "Rally installed into ${RALLY_DIR}"
}
# Clone and install Tempest, copy helper wrappers into the virtualenv, and
# pre-download the Cirros test image.
install_tempest() {
  message "Installing Tempest into ${DEST}"
  cd ${DEST}
  TEMPEST_DIR="${DEST}/tempest"
  rm -rf ${TEMPEST_DIR}
  git clone git://git.openstack.org/openstack/tempest.git
  cd ${TEMPEST_DIR}
  ${VIRTUALENV_DIR}/bin/python setup.py install
  mkdir -p /etc/tempest
  chmod -R o+w /etc/tempest
  # Helper wrappers shipped with this repo.
  cp ${TOP_DIR}/helpers/tempest.sh ${VIRTUALENV_DIR}/bin/tempest
  cp ${TOP_DIR}/helpers/sync.sh ${VIRTUALENV_DIR}/bin/sync
  cp ${TOP_DIR}/helpers/functions.sh ${VIRTUALENV_DIR}/bin/
  cp ${TOP_DIR}/helpers/shouldfail ${DEST}/
  message "Tempest installed into ${TEMPEST_DIR}"
  message "Downloading necessary resources"
  TEMPEST_FILES="${VIRTUALENV_DIR}/files"
  mkdir ${TEMPEST_FILES}
  # Cirros guest image used by Tempest scenario tests.
  CIRROS_VERSION=${CIRROS_VERSION:-"0.3.2"}
  CIRROS_IMAGE_URL="http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-x86_64-uec.tar.gz"
  wget -O ${TEMPEST_FILES}/cirros-${CIRROS_VERSION}-x86_64-uec.tar.gz ${CIRROS_IMAGE_URL}
  cd ${TEMPEST_FILES}
  tar xzf cirros-${CIRROS_VERSION}-x86_64-uec.tar.gz
}
# Point Rally at a local sqlite database and register the existing cloud
# (credentials from init_cluster_variables) as deployment "SkyNet".
configure_rally() {
  message "Configuring Rally"
  mkdir -p ${RALLY_DATABASE_DIR} ${RALLY_CONFIGURATION_DIR}
  # Enable the sqlite connection line in the sample config.
  sed 's|#connection=<None>|connection=sqlite:///'${RALLY_DATABASE_DIR}'/rally.sqlite|' \
    ${RALLY_DIR}/etc/rally/rally.conf.sample > ${RALLY_CONFIGURATION_DIR}/rally.conf
  ${VIRTUALENV_DIR}/bin/rally-manage db recreate
  chmod -R go+w ${RALLY_DATABASE_DIR}
  # Deployment descriptor for the already-running cloud.
  RALLY_CLUSTER_FILE="`mktemp`"
  cat > ${RALLY_CLUSTER_FILE} << EOF
{
"type": "ExistingCloud",
"auth_url": "http://${CONTROLLER_HOST}:5000/v2.0/",
"region_name": "RegionOne",
"endpoint_type": "public",
"admin_port": 35357,
"admin": {
"username": "admin",
"password": "admin",
"tenant_name": "admin"
}
}
EOF
  ${VIRTUALENV_DIR}/bin/rally deployment create --filename=${RALLY_CLUSTER_FILE} --name=SkyNet
  ${VIRTUALENV_DIR}/bin/rally use deployment --name SkyNet
  ${VIRTUALENV_DIR}/bin/rally deployment check
}
# Create the 'developer' account and give it ssh keys, shell/vim config,
# an OpenStack openrc, the Rally deployment state, and ownership of ${DEST}.
configure_user() {
  message "Creating and configuring user ${USER_NAME}"
  useradd -m ${USER_NAME}
  cp -r /root/.ssh ${USER_HOME}
  chown -R ${USER_NAME} ${USER_HOME}/.ssh
  chown -R ${USER_NAME} ${VIRTUALENV_DIR}
  # bashrc
  # (\$ and \` are escaped so they expand at login time, not here; bare
  # ${DEST}/${VIRTUALENV_DIR}/${USER_HOME} expand now.)
  cat > ${USER_HOME}/.bashrc <<EOF
test "\${PS1}" || return
shopt -s histappend
HISTCONTROL=ignoredups:ignorespace
HISTFILESIZE=2000
HISTSIZE=1000
export EDITOR=vi
alias ..=cd\ ..
alias ls=ls\ --color=auto
alias ll=ls\ --color=auto\ -lhap
alias vi=vim\ -XNn
alias d=df\ -hT
alias f=free\ -m
alias g=grep\ -iI
alias gr=grep\ -riI
alias l=less
alias n=netstat\ -lnptu
alias p=ps\ aux
alias u=du\ -sh
echo \${PATH} | grep ":\${HOME}/bin" >/dev/null || export PATH="\${PATH}:\${HOME}/bin"
if test \$(id -u) -eq 0
then
export PS1='\[\033[01;41m\]\u@\h:\[\033[01;44m\] \W \[\033[01;41m\] #\[\033[0m\] '
else
export PS1='\[\033[01;33m\]\u@\h\[\033[01;0m\]:\[\033[01;34m\]\W\[\033[01;0m\]$ '
fi
cd ${DEST}
. ${VIRTUALENV_DIR}/bin/activate
. ${USER_HOME}/openrc
EOF
  chown ${USER_NAME} ${USER_HOME}/.bashrc
  # vimrc
  cat > ${USER_HOME}/.vimrc <<EOF
set nocompatible
set nobackup
set nowritebackup
set noswapfile
set viminfo=
syntax on
colorscheme slate
set number
set ignorecase
set smartcase
set hlsearch
set smarttab
set expandtab
set tabstop=4
set shiftwidth=4
set softtabstop=4
filetype on
filetype plugin on
EOF
  chown ${USER_NAME} ${USER_HOME}/.vimrc
  # ssh: default to the root user when connecting to cluster nodes
  cat >> ${USER_HOME}/.ssh/config <<EOF
User root
EOF
  # openrc
  cat > ${USER_HOME}/openrc <<EOF
export OS_TENANT_NAME=${OS_TENANT_NAME}
export OS_USERNAME=${OS_USERNAME}
export OS_PASSWORD=${OS_PASSWORD}
export OS_AUTH_URL=${OS_AUTH_URL}
export OS_AUTH_URL_V3=${OS_AUTH_URL_v3}
EOF
  # copy Rally deployment openrc
  cp -r /root/.rally ${USER_HOME}
  chown -R ${USER_NAME} ${USER_HOME}/.rally
  chown -R ${USER_NAME} ${DEST}
}
# Summarize what was installed and where (reads the globals set earlier).
print_information() {
  cat <<EOF
======================================================================
Information about your installation:
 * User: ${USER_NAME}
 * Tempest: ${DEST}/tempest
 * Rally: ${DEST}/rally
 * Rally database at: ${RALLY_DATABASE_DIR}
 * Rally configuration file at: ${RALLY_CONFIGURATION_DIR}
======================================================================
EOF
}
# Top-level installation sequence; must run as root (checked first).
main() {
  check_root
  parse_arguments "$@"
  init_variables
  install_system_requirements
  install_python_27
  setup_virtualenv
  init_cluster_variables
  install_rally
  install_tempest
  configure_rally
  configure_user
  print_information
  exit 0
}
main "$@"
|
shakhat/mos-dev-tools
|
install.sh
|
Shell
|
apache-2.0
| 8,481 |
#!/bin/bash
# Set the third replica-set member (index 2, port 27003) to priority 0 so it
# can never be elected primary, then run part4() from a.js on that node.
mongo --port 27003 --eval "var cfg = rs.config(); cfg.members[2].priority = 0; rs.reconfig(cfg);"
# Give the replica set time to apply the new configuration.
sleep 15
mongo --port 27003 --eval "load('a.js'); part4();"
|
stasel/M102
|
final/final 4.sh
|
Shell
|
apache-2.0
| 172 |
#!/bin/bash
# Format firmware C files (top level only) and GroundControlStation C++
# sources in place with clang-format 4.0 inside a docker container, using
# the repo's .clang-format (-style=file).
ROOT_DIR=$(cd $(dirname $0)/../ && pwd)
# NOTE(review): HOST_USER_* env vars are presumably consumed by the image's
# entrypoint to restore file ownership on the host -- confirm.
docker run --rm -ti -v $ROOT_DIR:/src --workdir=/src -e HOST_USER_ID=$(id -u) -e HOST_USER_GROUP_ID=$(id -g) -e HOST_USER_NAME=$USER approximator/clangformat:4.0 bash -c "find /src/firmware -maxdepth 1 \( -name '*.c' -o -name '*.h' \) -print -exec clang-format-4.0 -style=file -i {} \; && find /src/GroundControlStation/src \( -name '*.cpp' -o -name '*.h' \) -print -exec clang-format-4.0 -style=file -i {} \;"
|
approximator/SimpleSINS
|
scripts/format.sh
|
Shell
|
apache-2.0
| 465 |
#!/bin/bash -eux
# "sysprep" actions adapted from source at
# https://github.com/libguestfs/libguestfs/tree/master/sysprep
#
# Scrubs instance-specific state (logs, SSH keys, shell history, DHCP
# leases, machine-id) from the image before it is packaged as a template.
echo "Removing log files and stale data"
# Shell glob patterns, one per line. They are expanded by the (intentionally
# unquoted) $lf in the loop below -- do NOT quote it.
logfiles="
/root/anaconda-ks.cfg
/root/anaconda-post.log
/root/initial-setup-ks.cfg
/root/install.log
/root/install.log.syslog
/var/cache/fontconfig/*
/var/cache/gdm/*
/var/cache/man/*
/var/lib/AccountService/users/*
/var/lib/fprint/*
/var/lib/logrotate.status
/var/log/anaconda/*
/var/log/anaconda.syslog
/var/log/apache2/*_log-*
/var/log/apache2/*_log
/var/log/apt/*
/var/log/aptitude*
/var/log/audit/*
/var/log/BackupPC/LOG
/var/log/btmp*
/var/log/ceph/*.log
/var/log/chrony/*.log
/var/log/ConsoleKit/*
/var/log/cron*
/var/log/cups/*_log*
/var/log/debug*
/var/log/dmesg*
/var/log/exim4/*
/var/log/faillog*
/var/log/firewalld*
/var/log/gdm/*
/var/log/glusterfs/*glusterd.vol.log
/var/log/glusterfs/glusterfs.log
/var/log/grubby*
/var/log/httpd/*log
/var/log/installer/*
/var/log/jetty/jetty-console.log
/var/log/journal/*
/var/log/lastlog*
/var/log/libvirt/libvirtd.log
/var/log/libvirt/libxl/*.log
/var/log/libvirt/lxc/*.log
/var/log/libvirt/qemu/*.log
/var/log/libvirt/uml/*.log
/var/log/lightdm/*
/var/log/*.log*
/var/log/mail/*
/var/log/maillog*
/var/log/messages*
/var/log/ntp
/var/log/ntpstats/*
/var/log/ppp/connect-errors
/var/log/rhsm/*
/var/log/sa/*
/var/log/secure*
/var/log/setroubleshoot/*.log
/var/log/spooler*
/var/log/squid/*.log
/var/log/syslog*
/var/log/tallylog*
/var/log/tuned/tuned.log
/var/log/wtmp*
/var/log/xferlog*
/var/named/data/named.run
"
# $lf unquoted on purpose: each pattern must glob-expand.
for lf in $logfiles; do
  sudo rm -v -rf $lf
done
# Remove these silently
sudo rm -rf /var/lib/rpm/__db.* /var/lib/yum/* /tmp/* /var/tmp/*
echo "Deleting /root/.ssh (config or keys)"
sudo rm -v -rf /root/.ssh
echo "Removing .bash_history"
sudo rm -v -rf /home/*/.bash_history
sudo rm -v -rf /root/.bash_history
echo "Removing DHCP leases data"
sudo rm -v -rf /var/lib/dhclient/*
sudo rm -v -rf /var/lib/dhcp/*
sudo rm -v -rf /var/lib/dhcpd/*
echo "Resetting machine-id"
# Truncate (not delete): an empty machine-id is regenerated on first boot.
sudo truncate --size=0 /etc/machine-id || :
echo "Removing SSH host keys"
sudo rm -rf /etc/ssh/*_host_*
# Explicitly sync disk
sync
|
norcams/himlar-base
|
centos-base/bin/post.sh
|
Shell
|
apache-2.0
| 2,145 |
#!/bin/bash
# Copyright 2014 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# The golang package that we are building.
readonly KUBE_GO_PACKAGE=k8s.io/kubernetes
readonly KUBE_GOPATH="${KUBE_OUTPUT}/go"

# Load contrib target functions
if [ -n "${KUBERNETES_CONTRIB:-}" ]; then
  # BUGFIX: KUBERNETES_CONTRIB is a space-separated list of contrib names;
  # it must word-split here. Quoting it (as the original did) forces a
  # single iteration over the whole string.
  # shellcheck disable=SC2086
  for contrib in ${KUBERNETES_CONTRIB}; do
    source "${KUBE_ROOT}/contrib/${contrib}/target.sh"
  done
fi
# The set of server targets that we are only building for Linux
# Note: if you are adding something here, you might need to add it to
# kube::build::source_targets in build/common.sh as well.
kube::golang::server_targets() {
  local targets=(
    cmd/kube-dns
    cmd/kube-proxy
    cmd/kube-apiserver
    cmd/kube-controller-manager
    cmd/kubelet
    cmd/kubemark
    cmd/hyperkube
    federation/cmd/federation-apiserver
    federation/cmd/federation-controller-manager
    plugin/cmd/kube-scheduler
  )
  if [ -n "${KUBERNETES_CONTRIB:-}" ]; then
    # BUGFIX: KUBERNETES_CONTRIB may name several contribs; intentional
    # word-split (the original's quoting collapsed them into one token).
    for contrib in ${KUBERNETES_CONTRIB}; do
      targets+=($(eval "kube::contrib::${contrib}::server_targets"))
    done
  fi
  echo "${targets[@]}"
}
# Expand the server target list once; ##*/ strips leading path components,
# leaving bare binary names.
readonly KUBE_SERVER_TARGETS=($(kube::golang::server_targets))
readonly KUBE_SERVER_BINARIES=("${KUBE_SERVER_TARGETS[@]##*/}")

# KUBE_FASTBUILD=true restricts the build matrix to linux/amd64 (plus
# darwin/amd64 when building on a Mac); otherwise the full matrix applies.
if [[ "${KUBE_FASTBUILD:-}" == "true" ]]; then
  readonly KUBE_SERVER_PLATFORMS=(linux/amd64)
  if [[ "${KUBE_BUILDER_OS:-}" == "darwin"* ]]; then
    readonly KUBE_TEST_PLATFORMS=(
      darwin/amd64
      linux/amd64
    )
    readonly KUBE_CLIENT_PLATFORMS=(
      darwin/amd64
      linux/amd64
    )
  else
    readonly KUBE_TEST_PLATFORMS=(linux/amd64)
    readonly KUBE_CLIENT_PLATFORMS=(linux/amd64)
  fi
else
  # The server platform we are building on.
  readonly KUBE_SERVER_PLATFORMS=(
    linux/amd64
    linux/arm
    linux/arm64
    linux/ppc64le # note: hyperkube is temporarily disabled due to a linking error
  )
  # If we update this we should also update the set of golang compilers we build
  # in 'build/build-image/cross/Dockerfile'. However, it's only a bit faster since go 1.5, not mandatory
  readonly KUBE_CLIENT_PLATFORMS=(
    linux/amd64
    linux/386
    linux/arm
    linux/arm64
    linux/ppc64le
    darwin/amd64
    darwin/386
    windows/amd64
    windows/386
  )
  # Which platforms we should compile test targets for. Not all client platforms need these tests
  readonly KUBE_TEST_PLATFORMS=(
    linux/amd64
    darwin/amd64
    windows/amd64
    linux/arm
  )
fi

# The set of client targets that we are building for all platforms
readonly KUBE_CLIENT_TARGETS=(
  cmd/kubectl
)
readonly KUBE_CLIENT_BINARIES=("${KUBE_CLIENT_TARGETS[@]##*/}")
# /%/.exe appends ".exe" to every element (Windows binary names).
readonly KUBE_CLIENT_BINARIES_WIN=("${KUBE_CLIENT_BINARIES[@]/%/.exe}")
# The set of test targets that we are building for all platforms
kube::golang::test_targets() {
  local targets=(
    cmd/integration
    cmd/gendocs
    cmd/genkubedocs
    cmd/genman
    cmd/genyaml
    cmd/mungedocs
    cmd/genswaggertypedocs
    cmd/linkcheck
    examples/k8petstore/web-server/src
    federation/cmd/genfeddocs
    vendor/github.com/onsi/ginkgo/ginkgo
    test/e2e/e2e.test
    test/e2e_node/e2e_node.test
  )
  if [ -n "${KUBERNETES_CONTRIB:-}" ]; then
    # BUGFIX: KUBERNETES_CONTRIB may name several contribs; intentional
    # word-split (the original's quoting collapsed them into one token).
    for contrib in ${KUBERNETES_CONTRIB}; do
      targets+=($(eval "kube::contrib::${contrib}::test_targets"))
    done
  fi
  echo "${targets[@]}"
}
readonly KUBE_TEST_TARGETS=($(kube::golang::test_targets))
readonly KUBE_TEST_BINARIES=("${KUBE_TEST_TARGETS[@]##*/}")
readonly KUBE_TEST_BINARIES_WIN=("${KUBE_TEST_BINARIES[@]/%/.exe}")
# Test artifacts shipped as-is (scripts/manifests), not compiled binaries.
readonly KUBE_TEST_PORTABLE=(
  test/e2e/testing-manifests
  test/kubemark
  hack/e2e.go
  hack/e2e-internal
  hack/get-build.sh
  hack/ginkgo-e2e.sh
  hack/federated-ginkgo-e2e.sh
  hack/lib
)

# Gigabytes desired for parallel platform builds. 11 is fairly
# arbitrary, but is a reasonable splitting point for 2015
# laptops-versus-not.
#
# If you are using boot2docker, the following seems to work (note
# that 12000 rounds to 11G):
# boot2docker down
# VBoxManage modifyvm boot2docker-vm --memory 12000
# boot2docker up
readonly KUBE_PARALLEL_BUILD_MEMORY=11

readonly KUBE_ALL_TARGETS=(
  "${KUBE_SERVER_TARGETS[@]}"
  "${KUBE_CLIENT_TARGETS[@]}"
  "${KUBE_TEST_TARGETS[@]}"
)
readonly KUBE_ALL_BINARIES=("${KUBE_ALL_TARGETS[@]##*/}")

# Binaries built statically; matched by name suffix in
# kube::golang::is_statically_linked_library.
readonly KUBE_STATIC_LIBRARIES=(
  kube-apiserver
  kube-controller-manager
  kube-dns
  kube-scheduler
  kube-proxy
  kubectl
  federation-apiserver
  federation-controller-manager
)
# Returns 0 when target path $1 ends in "/<name>" for any name listed in
# KUBE_STATIC_LIBRARIES (or in the optional KUBE_STATIC_OVERRIDES).
kube::golang::is_statically_linked_library() {
  local candidate
  for candidate in "${KUBE_STATIC_LIBRARIES[@]}"; do
    if [[ "$1" == *"/${candidate}" ]]; then
      return 0
    fi
  done
  # Allow individual overrides--e.g., so that you can get a static build of
  # kubectl for inclusion in a container.
  if [ -n "${KUBE_STATIC_OVERRIDES:+x}" ]; then
    for candidate in "${KUBE_STATIC_OVERRIDES[@]}"; do
      if [[ "$1" == *"/${candidate}" ]]; then
        return 0
      fi
    done
  fi
  return 1
}
# kube::binaries_from_targets take a list of build targets and return the
# full go package to be built
kube::golang::binaries_from_targets() {
  local tgt
  for tgt in "$@"; do
    # If the target starts with what looks like a domain name, assume it has a
    # fully-qualified package name rather than one that needs the Kubernetes
    # package prepended.
    if [[ "${tgt}" =~ ^([[:alnum:]]+".")+[[:alnum:]]+"/" ]]; then
      echo "${tgt}"
    else
      echo "${KUBE_GO_PACKAGE}/${tgt}"
    fi
  done
}
# Asks golang what it thinks the host platform is. The go tool chain does some
# slightly different things when the target platform matches the host platform.
kube::golang::host_platform() {
  # Prints e.g. "linux/amd64".
  echo "$(go env GOHOSTOS)/$(go env GOHOSTARCH)"
}
# Effective build platform: honor GOOS/GOARCH overrides when set, falling
# back to the host values reported by the go tool.
kube::golang::current_platform() {
  local target_os="${GOOS-}"
  local target_arch="${GOARCH-}"
  [[ -n "${target_os}" ]] || target_os=$(go env GOHOSTOS)
  [[ -n "${target_arch}" ]] || target_arch=$(go env GOHOSTARCH)
  echo "${target_os}/${target_arch}"
}
# Takes the platform name ($1, "os/arch") and sets the appropriate golang
# env variables for that platform.
kube::golang::set_platform_envs() {
  [[ -n ${1-} ]] || {
    kube::log::error_exit "!!! Internal error. No platform set in kube::golang::set_platform_envs"
  }
  # BUGFIX: the original validated $1 but then read a caller-scope
  # ${platform} variable; bind the argument locally instead.
  local platform="${1}"

  export GOOS=${platform%/*}
  export GOARCH=${platform##*/}

  # Do not set CC when building natively on a platform, only if cross-compiling from linux/amd64
  if [[ $(kube::golang::host_platform) == "linux/amd64" ]]; then
    # Dynamic CGO linking for other server architectures than linux/amd64 goes here
    # If you want to include support for more server platforms than these, add arch-specific gcc names here
    if [[ ${platform} == "linux/arm" ]]; then
      export CGO_ENABLED=1
      export CC=arm-linux-gnueabi-gcc
    elif [[ ${platform} == "linux/arm64" ]]; then
      export CGO_ENABLED=1
      export CC=aarch64-linux-gnu-gcc
    elif [[ ${platform} == "linux/ppc64le" ]]; then
      export CGO_ENABLED=1
      export CC=powerpc64le-linux-gnu-gcc
    fi
  fi
}
# Clear every cross-compilation override set by set_platform_envs.
kube::golang::unset_platform_envs() {
  unset GOOS GOARCH CGO_ENABLED CC
}
# Create the GOPATH tree under $KUBE_OUTPUT
kube::golang::create_gopath_tree() {
  local pkg_link="${KUBE_GOPATH}/src/${KUBE_GO_PACKAGE}"
  mkdir -p "$(dirname "${pkg_link}")"
  # Refresh the symlink so it always points at the current checkout.
  rm -f "${pkg_link}"
  # TODO: This symlink should be relative.
  ln -s "${KUBE_ROOT}" "${pkg_link}"
}
# Ensure the godep tool exists and is a viable version.
kube::golang::verify_godep_version() {
  local -a godep_version_string
  local godep_version
  local godep_min_version="63"
  if ! which godep &>/dev/null; then
    kube::log::usage_from_stdin <<EOF
Can't find 'godep' in PATH, please fix and retry.
See https://github.com/kubernetes/kubernetes/blob/master/docs/devel/development.md#godep-and-dependency-management for installation instructions.
EOF
    return 2
  fi
  # "godep version" output, word-split; word[1] is presumably "vNN" -- the
  # /v/ substitution strips the leading "v" to get a bare number.
  godep_version_string=($(godep version))
  godep_version=${godep_version_string[1]/v/}
  if ((godep_version<$godep_min_version)); then
    kube::log::usage_from_stdin <<EOF
Detected godep version: ${godep_version_string[*]}.
Kubernetes requires godep v$godep_min_version or greater.
Please update:
go get -u github.com/tools/godep
EOF
    return 2
  fi
}
# Ensure the go tool exists and is a viable version (>= 1.6, or a devel build).
kube::golang::verify_go_version() {
  if [[ -z "$(which go)" ]]; then
    kube::log::usage_from_stdin <<EOF
Can't find 'go' in PATH, please fix and retry.
See http://golang.org/doc/install for installation instructions.
EOF
    return 2
  fi

  local go_version
  go_version=($(go version))
  # Development builds are accepted outright.
  if [[ "${go_version[2]}" == "devel"* ]]; then
    return 0
  fi
  # BUGFIX: the original used a lexical string comparison ([[ ... < "go1.6" ]])
  # which misorders double-digit minors ("go1.10" sorts before "go1.6");
  # compare major/minor numerically instead.
  local version="${go_version[2]#go}"
  local major="${version%%.*}"
  local minor="${version#*.}"
  minor="${minor%%.*}"
  minor="${minor%%[^0-9]*}"   # strip rc/beta suffixes, e.g. "7rc1" -> "7"
  if (( major < 1 || ( major == 1 && ${minor:-0} < 6 ) )); then
    kube::log::usage_from_stdin <<EOF
Detected go version: ${go_version[*]}.
Kubernetes requires go version 1.6 or greater.
Please install Go version 1.6 or later.
EOF
    return 2
  fi
}
# kube::golang::setup_env will check that the `go` commands is available in
# ${PATH}. It will also check that the Go version is good enough for the
# Kubernetes build.
#
# Inputs:
# KUBE_EXTRA_GOPATH - If set, this is included in created GOPATH
#
# Outputs:
# env-var GOPATH points to our local output dir
# env-var GOBIN is unset (we want binaries in a predictable place)
# env-var GO15VENDOREXPERIMENT=1
# current directory is within GOPATH
kube::golang::setup_env() {
  kube::golang::verify_go_version
  kube::golang::create_gopath_tree

  export GOPATH=${KUBE_GOPATH}

  # Append KUBE_EXTRA_GOPATH to the GOPATH if it is defined.
  if [[ -n ${KUBE_EXTRA_GOPATH:-} ]]; then
    GOPATH="${GOPATH}:${KUBE_EXTRA_GOPATH}"
  fi

  # Change directories so that we are within the GOPATH. Some tools get really
  # upset if this is not true. We use a whole fake GOPATH here to collect the
  # resultant binaries. Go will not let us use GOBIN with `go install` and
  # cross-compiling, and `go install -o <file>` only works for a single pkg.
  local subdir
  # Path of the cwd relative to KUBE_ROOT, so we land in the equivalent
  # subdirectory inside the symlinked GOPATH tree.
  subdir=$(kube::realpath . | sed "s|$KUBE_ROOT||")
  cd "${KUBE_GOPATH}/src/${KUBE_GO_PACKAGE}/${subdir}"

  # Unset GOBIN in case it already exists in the current session.
  unset GOBIN

  # This seems to matter to some tools (godep, ugorji, ginkgo...)
  export GO15VENDOREXPERIMENT=1
}
# This will take binaries from $GOPATH/bin and copy them to the appropriate
# place in ${KUBE_OUTPUT_BINDIR}
#
# Ideally this wouldn't be necessary and we could just set GOBIN to
# KUBE_OUTPUT_BINDIR but that won't work in the face of cross compilation. 'go
# install' will place binaries that match the host platform directly in $GOBIN
# while placing cross compiled binaries into `platform_arch` subdirs. This
# complicates pretty much everything else we do around packaging and such.
kube::golang::place_bins() {
local host_platform
host_platform=$(kube::golang::host_platform)
kube::log::status "Placing binaries"
local platform
for platform in "${KUBE_CLIENT_PLATFORMS[@]}"; do
# The substitution on platform_src below will replace all slashes with
# underscores. It'll transform darwin/amd64 -> darwin_amd64.
local platform_src="/${platform//\//_}"
if [[ $platform == $host_platform ]]; then
platform_src=""
fi
local full_binpath_src="${KUBE_GOPATH}/bin${platform_src}"
if [[ -d "${full_binpath_src}" ]]; then
mkdir -p "${KUBE_OUTPUT_BINPATH}/${platform}"
# -maxdepth 1 skips per-platform subdirs; rsync -pt preserves perms
# and mtimes so unchanged binaries aren't needlessly recopied.
find "${full_binpath_src}" -maxdepth 1 -type f -exec \
rsync -pt {} "${KUBE_OUTPUT_BINPATH}/${platform}" \;
fi
done
}
# Decide whether static builds can use `go install -installsuffix cgo`.
# If the cgo-suffixed stdlib is absent and GOROOT/pkg is not writable,
# fall back to the slower `go build` path.
# Globals:
#   use_go_build - set to "true" when the fallback is needed (read by caller)
kube::golang::fallback_if_stdlib_not_installable() {
  local go_root_dir go_host_os go_host_arch cgo_pkg_dir
  go_root_dir=$(go env GOROOT)
  go_host_os=$(go env GOHOSTOS)
  go_host_arch=$(go env GOHOSTARCH)
  cgo_pkg_dir="${go_root_dir}/pkg/${go_host_os}_${go_host_arch}_cgo"

  # A prebuilt cgo-suffixed stdlib already exists; nothing to do.
  if [[ -e "${cgo_pkg_dir}" ]]; then
    return 0
  fi
  # No prebuilt stdlib, but Go can install one into GOROOT/pkg on demand.
  if [[ -w "${go_root_dir}/pkg" ]]; then
    return 0
  fi
  kube::log::status "+++ Warning: stdlib pkg with cgo flag not found."
  kube::log::status "+++ Warning: stdlib pkg cannot be rebuilt since ${go_root_dir}/pkg is not writable by $(whoami)"
  kube::log::status "+++ Warning: Make ${go_root_dir}/pkg writable for $(whoami) for a one-time stdlib install, Or"
  kube::log::status "+++ Warning: Rebuild stdlib using the command 'CGO_ENABLED=0 go install -a -installsuffix cgo std'"
  kube::log::status "+++ Falling back to go build, which is slower"
  use_go_build=true
}
# Builds the toolchain necessary for building kube. This needs to be
# built only on the host platform.
# TODO: This builds only the `teststale` binary right now. As we expand
# this function's capabilities we need to find this a right home.
# Ideally, not a shell script because testing shell scripts is painful.
# NOTE(review): relies on goflags/goldflags being set by the caller
# (kube::golang::build_binaries) before invocation.
kube::golang::build_kube_toolchain() {
local targets=(
hack/cmd/teststale
)
local binaries
binaries=($(kube::golang::binaries_from_targets "${targets[@]}"))
kube::log::status "Building the toolchain targets:" "${binaries[@]}"
go install "${goflags[@]:+${goflags[@]}}" \
-ldflags "${goldflags}" \
"${binaries[@]:+${binaries[@]}}"
}
# Mirror `go install`'s binary placement without actually invoking it:
# host-platform binaries land directly in $KUBE_GOPATH/bin, while
# cross-compiled ones go into an "<os>_<arch>" subdirectory.
# Globals:   KUBE_GOPATH, host_platform, GOOS (all read)
# Arguments: $1 - target package/binary path; $2 - platform ("os/arch")
# Outputs:   the computed output file name on stdout
kube::golang::output_filename_for_binary() {
  local target=$1
  local target_platform=$2
  local out_dir="${KUBE_GOPATH}/bin"
  # Cross-compiled binaries are segregated per platform (slashes become
  # underscores, e.g. darwin/amd64 -> darwin_amd64).
  if [[ "${target_platform}" != "${host_platform}" ]]; then
    out_dir="${out_dir}/${target_platform//\//_}"
  fi
  local file_name
  file_name=$(basename "${target}")
  # Windows executables carry an .exe suffix.
  if [[ ${GOOS} == "windows" ]]; then
    file_name="${file_name}.exe"
  fi
  echo "${out_dir}/${file_name}"
}
# Build all binaries in the (caller-provided) global `binaries` array for a
# single platform, splitting them into static, non-static and test targets.
# Globals: binaries, goflags, goldflags, KUBE_GOPATH (read); use_go_build (may
# be set by fallback_if_stdlib_not_installable)
# Arguments: $1 - platform ("os/arch"); $2 - non-empty to force `go build`
kube::golang::build_binaries_for_platform() {
local platform=$1
local use_go_build=${2-}
local -a statics=()
local -a nonstatics=()
local -a tests=()
# Partition targets: dummy hyperkube on ppc64le, *.test binaries, statically
# linked binaries, and everything else.
for binary in "${binaries[@]}"; do
# TODO(IBM): Enable hyperkube builds for ppc64le again
# The current workaround creates a text file with help text instead of a binary
# We're doing it this way so the build system isn't affected so much
if [[ "${binary}" == *"hyperkube" && "${platform}" == "linux/ppc64le" ]]; then
echo "hyperkube build for ppc64le is disabled. Creating dummy text file instead."
local outfile=$(kube::golang::output_filename_for_binary "${binary}" "${platform}")
mkdir -p $(dirname ${outfile})
echo "Not available at the moment. Please see: https://github.com/kubernetes/kubernetes/issues/25886 for more information." > ${outfile}
elif [[ "${binary}" =~ ".test"$ ]]; then
tests+=($binary)
elif kube::golang::is_statically_linked_library "${binary}"; then
statics+=($binary)
else
nonstatics+=($binary)
fi
done
# Static builds may need the slower `go build` path if the cgo-suffixed
# stdlib cannot be installed; this may set use_go_build=true.
if [[ "${#statics[@]}" != 0 ]]; then
kube::golang::fallback_if_stdlib_not_installable;
fi
if [[ -n ${use_go_build:-} ]]; then
# `go build` path: one invocation per binary, with progress dots.
kube::log::progress "    "
for binary in "${statics[@]:+${statics[@]}}"; do
local outfile=$(kube::golang::output_filename_for_binary "${binary}" "${platform}")
CGO_ENABLED=0 go build -o "${outfile}" \
"${goflags[@]:+${goflags[@]}}" \
-ldflags "${goldflags}" \
"${binary}"
kube::log::progress "*"
done
for binary in "${nonstatics[@]:+${nonstatics[@]}}"; do
local outfile=$(kube::golang::output_filename_for_binary "${binary}" "${platform}")
go build -o "${outfile}" \
"${goflags[@]:+${goflags[@]}}" \
-ldflags "${goldflags}" \
"${binary}"
kube::log::progress "*"
done
kube::log::progress "\n"
else
# Use go install.
if [[ "${#nonstatics[@]}" != 0 ]]; then
go install "${goflags[@]:+${goflags[@]}}" \
-ldflags "${goldflags}" \
"${nonstatics[@]:+${nonstatics[@]}}"
fi
if [[ "${#statics[@]}" != 0 ]]; then
CGO_ENABLED=0 go install -installsuffix cgo "${goflags[@]:+${goflags[@]}}" \
-ldflags "${goldflags}" \
"${statics[@]:+${statics[@]}}"
fi
fi
# Build test binaries, skipping any whose output is not stale.
for test in "${tests[@]:+${tests[@]}}"; do
local outfile=$(kube::golang::output_filename_for_binary "${test}" \
"${platform}")
local testpkg="$(dirname ${test})"
# Staleness check always happens on the host machine, so we don't
# have to locate the `teststale` binaries for the other platforms.
# Since we place the host binaries in `$KUBE_GOPATH/bin`, we can
# assume that the binary exists there, if it exists at all.
# Otherwise, something has gone wrong with building the `teststale`
# binary and we should safely proceed building the test binaries
# assuming that they are stale. There is no good reason to error
# out.
if test -x "${KUBE_GOPATH}/bin/teststale" && ! "${KUBE_GOPATH}/bin/teststale" -binary "${outfile}" -package "${testpkg}"
then
continue
fi
# `go test -c` below directly builds the binary. It builds the packages,
# but it never installs them. `go test -i` only installs the dependencies
# of the test, but not the test package itself. So neither `go test -c`
# nor `go test -i` installs, for example, test/e2e.a. And without that,
# doing a staleness check on k8s.io/kubernetes/test/e2e package always
# returns true (always stale). And that's why we need to install the
# test package.
go install "${goflags[@]:+${goflags[@]}}" \
-ldflags "${goldflags}" \
"${testpkg}"
mkdir -p "$(dirname ${outfile})"
go test -c \
"${goflags[@]:+${goflags[@]}}" \
-ldflags "${goldflags}" \
-o "${outfile}" \
"${testpkg}"
done
}
# Return approximate physical memory available in gigabytes.
# Falls back through several probes and prints 1 if none succeed.
kube::golang::get_physmem() {
  local mem
  # Linux kernel version >=3.14, in kb.
  # BUG FIX: the pipeline's exit status is awk's (always 0), so the original
  # `if mem=$(grep ... | awk ...)` took this branch even when grep matched
  # nothing (old kernels, or no /proc/meminfo at all), leaving ${mem} empty
  # and making the arithmetic expansion below fail. Guard on non-empty output.
  if mem=$(grep MemAvailable /proc/meminfo 2>/dev/null | awk '{ print $2 }') && [[ -n "${mem}" ]]; then
    echo $(( mem / 1048576 ))
    return
  fi
  # Linux, in kb
  if mem=$(grep MemTotal /proc/meminfo 2>/dev/null | awk '{ print $2 }') && [[ -n "${mem}" ]]; then
    echo $(( mem / 1048576 ))
    return
  fi
  # OS X, in bytes. Note that get_physmem, as used, should only ever
  # run in a Linux container (because it's only used in the multiple
  # platform case, which is a Dockerized build), but this is provided
  # for completeness.
  if mem=$(sysctl -n hw.memsize 2>/dev/null) && [[ -n "${mem}" ]]; then
    echo $(( mem / 1073741824 ))
    return
  fi
  # If we can't infer it, just give up and assume a low memory system
  echo 1
}
# Build binaries targets specified
#
# Input:
# $@ - targets and go flags. If no targets are set then all binaries targets
# are built.
# KUBE_BUILD_PLATFORMS - Incoming variable of targets to build for. If unset
# then just the host architecture is built.
kube::golang::build_binaries() {
# Create a sub-shell so that we don't pollute the outer environment
(
# Check for `go` binary and set ${GOPATH}.
kube::golang::setup_env
echo "Go version: $(go version)"
local host_platform
host_platform=$(kube::golang::host_platform)
# Use eval to preserve embedded quoted strings.
local goflags goldflags
eval "goflags=(${KUBE_GOFLAGS:-})"
goldflags="${KUBE_GOLDFLAGS:-} $(kube::version::ldflags)"
# Split arguments into go flags (leading dash), the --use_go_build switch,
# and build targets.
local use_go_build
local -a targets=()
local arg
for arg; do
if [[ "${arg}" == "--use_go_build" ]]; then
use_go_build=true
elif [[ "${arg}" == -* ]]; then
# Assume arguments starting with a dash are flags to pass to go.
goflags+=("${arg}")
else
targets+=("${arg}")
fi
done
if [[ ${#targets[@]} -eq 0 ]]; then
targets=("${KUBE_ALL_TARGETS[@]}")
fi
local -a platforms=("${KUBE_BUILD_PLATFORMS[@]:+${KUBE_BUILD_PLATFORMS[@]}}")
if [[ ${#platforms[@]} -eq 0 ]]; then
platforms=("${host_platform}")
fi
local binaries
binaries=($(kube::golang::binaries_from_targets "${targets[@]}"))
# Parallel cross-builds only when enough memory is available.
local parallel=false
if [[ ${#platforms[@]} -gt 1 ]]; then
local gigs
gigs=$(kube::golang::get_physmem)
if [[ ${gigs} -ge ${KUBE_PARALLEL_BUILD_MEMORY} ]]; then
kube::log::status "Multiple platforms requested and available ${gigs}G >= threshold ${KUBE_PARALLEL_BUILD_MEMORY}G, building platforms in parallel"
parallel=true
else
kube::log::status "Multiple platforms requested, but available ${gigs}G < threshold ${KUBE_PARALLEL_BUILD_MEMORY}G, building platforms in serial"
parallel=false
fi
fi
# First build the toolchain before building any other targets
kube::golang::build_kube_toolchain
if [[ "${parallel}" == "true" ]]; then
kube::log::status "Building go targets for ${platforms[@]} in parallel (output will appear in a burst when complete):" "${targets[@]}"
local platform
# One background subshell per platform; each logs to its own /tmp file,
# which is replayed after all jobs finish.
for platform in "${platforms[@]}"; do (
kube::golang::set_platform_envs "${platform}"
kube::log::status "${platform}: go build started"
kube::golang::build_binaries_for_platform ${platform} ${use_go_build:-}
kube::log::status "${platform}: go build finished"
) &> "/tmp//${platform//\//_}.build" &
done
# Reap all jobs and count the failures; exit status is the failure count.
local fails=0
for job in $(jobs -p); do
wait ${job} || let "fails+=1"
done
for platform in "${platforms[@]}"; do
cat "/tmp//${platform//\//_}.build"
done
exit ${fails}
else
for platform in "${platforms[@]}"; do
kube::log::status "Building go targets for ${platform}:" "${targets[@]}"
kube::golang::set_platform_envs "${platform}"
kube::golang::build_binaries_for_platform ${platform} ${use_go_build:-}
done
fi
)
}
|
gtank/kubernetes
|
hack/lib/golang.sh
|
Shell
|
apache-2.0
| 21,850 |
#!/bin/bash -e
# Copyright 2020 Google LLC
#
# Builds the firebase-cpp-sdk Android libraries with Gradle and copies the
# build outputs into a separate, initially-empty build directory.
#
# Usage: build.sh <build path> <source path> [c++|gnustl]

buildpath=$1
sourcepath=$2
stl=$3

if [[ -z "${buildpath}" || -z "${sourcepath}" ]]; then
  echo "Usage: $0 <build path> <source path> [c++|gnustl]"
  exit 1
fi
if [[ ! -d "${sourcepath}" ]]; then
  echo "Source path '${sourcepath}' not found."
  exit 2
fi

# Select the STL variant; the Gradle build reads FIREBASE_ANDROID_STL.
if [[ "${stl}" == "c++" || "${stl}" == "gnustl" ]]; then
  export FIREBASE_ANDROID_STL="${stl}"_static
elif [[ ! -z "${stl}" ]]; then
  echo "Invalid STL specified."
  echo "Valid STLs are: 'c++' (default) or 'gnustl'"
  exit 2
fi

origpath=$( pwd -P )

# The build directory must exist and be empty so outputs don't mix.
mkdir -p "${buildpath}"
cd "${buildpath}"
if [[ -n $(ls) ]]; then
  echo "Error: build path '${buildpath}' not empty."
  exit 2
fi
absbuildpath=$( pwd -P )
cd "${origpath}"

# If NDK_ROOT is not set or is the wrong version, use to the version in /tmp.
# BUG FIX: the original condition used `! $(grep -q ...)`, which substitutes
# grep's (empty) output into [[ ]] and is therefore always true, so an
# already-installed correct NDK was never honored. Run grep directly instead.
if [[ -z "${NDK_ROOT}" ]] || \
   ! grep -q "Pkg\.Revision = 16\." "${NDK_ROOT}/source.properties" 2>/dev/null; then
  if [[ ! -d /tmp/android-ndk-r16b ]]; then
    echo "Recommended NDK version r16b not present in /tmp."
    if [[ ! -z "${stl}" ]]; then
      echo "STL may only be specified if using the recommended NDK version."
      echo "Please run install_prereqs.sh script and try again."
      exit 2
    else
      echo "Please run install_prereqs.sh if you wish to use the recommended NDK version."
      echo "Continuing with default NDK..."
      sleep 2
    fi
  fi
  export NDK_ROOT=/tmp/android-ndk-r16b
  export ANDROID_NDK_HOME=/tmp/android-ndk-r16b
fi

cd "${sourcepath}"
set +e
# Retry the build up to 10 times, because the build fetches files from
# maven and elsewhere, and occasionally the GitHub runners have
# network connectivity issues that cause the download to fail.
# BUG FIX: pass each JVM/maven property as its own argument (array) rather
# than as one space-containing string.
gradleparams=(
  "-Dhttp.keepAlive=false"
  "-Dmaven.wagon.http.pool=false"
  "-Dmaven.wagon.httpconnectionManager.ttlSeconds=120"
)
for retry in {1..10} error; do
  if [[ $retry == "error" ]]; then exit 5; fi
  ./gradlew assembleRelease "${gradleparams[@]}" && break
  sleep 300
done
set -e

# Gradle puts the build output inside the source tree, in various
# "build" and ".externalNativeBuild" directories. Grab them and place
# them in the build output directory.
declare -a paths
for lib in *; do
  if [[ -d "${lib}/build" ]]; then
    paths+=("${lib}/build")
  fi
  if [[ -d "${lib}/.externalNativeBuild" ]]; then
    paths+=("${lib}/.externalNativeBuild")
  fi
  if [[ -d "${lib}/${lib}_resources/build" ]]; then
    paths+=("${lib}/${lib}_resources/build")
  fi
  if [[ -d "${lib}/${lib}_java/build" ]]; then
    paths+=("${lib}/${lib}_java/build")
  fi
done

set -x
if [[ $(uname) == "Linux" ]] || [[ $(uname) == "Darwin" ]]; then
  # Use rsync to copy the relevant paths to the destination directory,
  # preserving their relative layout (-R).
  rsync -aR "${paths[@]}" "${absbuildpath}/"
else
  # rsync has to be specifically installed on windows bash (including github runners)
  # Also, rsync with absolute destination path doesn't work on Windows.
  # Using a simple copy instead of rsync on Windows.
  cp -R --parents "${paths[@]}" "${absbuildpath}"
fi
|
firebase/firebase-cpp-sdk
|
build_scripts/android/build.sh
|
Shell
|
apache-2.0
| 3,087 |
#!/bin/bash
#
# Bootstrap a dev environment by running the fullstrap_minions fabric task
# against the given host as the given user.
#
# Usage: bootstrap.sh <host string> <username>

HOST=$1
USER=$2

usage() {
  echo "Usage: $0 [host string] [username]"
  exit 1
}

# Quote the expansions so empty/whitespace values don't break the tests.
if [ -z "$HOST" ]; then
  echo "Error: missing host string"
  usage
fi
# BUG FIX: previously an empty username made `-u` consume the task name
# ("fullstrap_minions") as its argument; validate it up front instead.
if [ -z "$USER" ]; then
  echo "Error: missing username"
  usage
fi

fab -H "$HOST" -u "$USER" fullstrap_minions
|
zheli/dev-environment
|
bootstrap.sh
|
Shell
|
apache-2.0
| 205 |
#!/bin/bash -xe
# test-setup.sh - Install required stuffs
# Used in both CI jobs and locally
#
# Install the following tools:
# * dep
# Get OS
case $(uname -s) in
Darwin)
OS=darwin
;;
Linux)
# Prefer lsb_release's codename (e.g. "xenial"); otherwise fall back to
# detecting the package-manager family.
if LSB_RELEASE=$(which lsb_release); then
OS=$($LSB_RELEASE -s -c)
else
# No lsb-release, try a hack or two
if which dpkg 1>/dev/null; then
OS=debian
elif which yum 1>/dev/null || which dnf 1>/dev/null; then
OS=redhat
else
echo "Linux distro not yet supported"
exit 1
fi
fi
;;
*)
echo "Unsupported OS"
exit 1
;;
esac
# Install dep (and, on Ubuntu, Go 1.9) per detected OS.
# NOTE(review): only darwin and xenial/zesty have install arms; other values
# of $OS (debian, redhat) fall through this case silently.
case $OS in
darwin)
if which brew 1>/dev/null; then
if ! which dep 1>/dev/null; then
brew install dep
fi
else
curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh
fi
;;
xenial|zesty)
APT_GET="DEBIAN_FRONTEND=noninteractive \
apt-get -q --option "Dpkg::Options::=--force-confold" \
--assume-yes"
if ! which add-apt-repository 1>/dev/null; then
sudo $APT_GET install software-properties-common
fi
sudo add-apt-repository --yes ppa:gophers/archive
sudo apt-get update && sudo $APT_GET install golang-1.9-go
sudo ln -sf /usr/lib/go-1.9/bin/go /usr/local/bin
sudo ln -sf /usr/lib/go-1.9/bin/gofmt /usr/local/bin
curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh
;;
esac
|
kubernetes/cloud-provider-openstack
|
tools/test-setup.sh
|
Shell
|
apache-2.0
| 1,637 |
#!/usr/bin/env bash
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
# Script to produce binary release of libtensorflow (C API, Java jars etc.).
set -ex
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# Setup environment for bazel builds
source "${SCRIPT_DIR}/bazel/common_env.sh"
source "${SCRIPT_DIR}/bazel/bazel_test_lib.sh"
# Sanity check that this is being run from the root of the git repository.
cd ${SCRIPT_DIR}/../../../..
if [ ! -e "WORKSPACE" ]; then
echo "Must run this from the root of the bazel workspace"
echo "Currently at ${PWD}, script is at ${SCRIPT_DIR}"
exit 1
fi
run_configure_for_gpu_build
# build_libtensorflow_tarball in ../builds/libtensorflow.sh
# cannot be used on Windows since it relies on pkg_tar rules.
# So we do something special here
bazel --output_user_root=${TMPDIR} build -c opt --copt=/arch:AVX --announce_rc \
tensorflow:tensorflow.dll \
tensorflow:tensorflow_dll_import_lib \
tensorflow/tools/lib_package:clicenses_generate \
tensorflow/java:tensorflow_jni.dll \
tensorflow/tools/lib_package:jnilicenses_generate
# Stage release artifacts in a fresh lib_package directory.
DIR=lib_package
rm -rf ${DIR}
mkdir -p ${DIR}
# Zip up the .dll and the LICENSE for the JNI library.
cp bazel-bin/tensorflow/java/tensorflow_jni.dll ${DIR}/tensorflow_jni.dll
zip -j ${DIR}/libtensorflow_jni-gpu-windows-$(uname -m).zip \
${DIR}/tensorflow_jni.dll \
bazel-genfiles/tensorflow/tools/lib_package/include/tensorflow/jni/LICENSE
rm -f ${DIR}/tensorflow_jni.dll
# Zip up the .dll, LICENSE and include files for the C library.
mkdir -p ${DIR}/include/tensorflow/c
mkdir -p ${DIR}/include/tensorflow/c/eager
mkdir -p ${DIR}/lib
cp bazel-bin/tensorflow/tensorflow.dll ${DIR}/lib/tensorflow.dll
cp bazel-genfiles/tensorflow/tensorflow.lib ${DIR}/lib/tensorflow.lib
cp tensorflow/c/c_api.h ${DIR}/include/tensorflow/c
cp tensorflow/c/eager/c_api.h ${DIR}/include/tensorflow/c/eager
cp bazel-genfiles/tensorflow/tools/lib_package/include/tensorflow/c/LICENSE ${DIR}/include/tensorflow/c
cd ${DIR}
zip libtensorflow-gpu-windows-$(uname -m).zip \
lib/tensorflow.dll \
lib/tensorflow.lib \
include/tensorflow/c/eager/c_api.h \
include/tensorflow/c/c_api.h \
include/tensorflow/c/LICENSE
# Remove the staging subtrees; only the two zips remain in lib_package.
rm -rf lib include
|
alsrgv/tensorflow
|
tensorflow/tools/ci_build/windows/libtensorflow_gpu.sh
|
Shell
|
apache-2.0
| 2,853 |
#!/bin/bash -e
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This test checks that the extractor will emit index packs.
# It should be run from the Kythe root.
TEST_NAME="test_index_pack"
. ./kythe/cxx/extractor/testdata/test_common.sh
. ./kythe/cxx/extractor/testdata/skip_functions.sh
# Start from a clean output directory; OUT_DIR/EXTRACTOR come from the
# sourced test_common.sh.
rm -rf -- "${OUT_DIR}"
mkdir -p "${OUT_DIR}"
KYTHE_OUTPUT_DIRECTORY="${OUT_DIR}" KYTHE_INDEX_PACK="1" \
"${EXTRACTOR}" --with_executable "/usr/bin/g++" \
-I./kythe/cxx/extractor/testdata \
./kythe/cxx/extractor/testdata/transcript_main.cc
# Storing redundant extractions is OK.
KYTHE_OUTPUT_DIRECTORY="${OUT_DIR}" KYTHE_INDEX_PACK="1" \
"${EXTRACTOR}" --with_executable "/usr/bin/g++" \
-I./kythe/cxx/extractor/testdata \
./kythe/cxx/extractor/testdata/transcript_main.cc
# An index pack must contain units/ and files/; the duplicate extraction
# above must dedupe to 3 file blobs and a single compilation unit.
test -e "${OUT_DIR}/units" || exit 1
test -e "${OUT_DIR}/files" || exit 1
[[ $(ls -1 "${OUT_DIR}"/files/*.data | wc -l) -eq 3 ]]
[[ $(ls -1 "${OUT_DIR}"/units/*.unit | wc -l) -eq 1 ]]
|
legrosbuffle/kythe
|
kythe/cxx/extractor/testdata/test_index_pack.sh
|
Shell
|
apache-2.0
| 1,519 |
#!/bin/bash
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Builds the library's javadoc and uploads it to the docs staging bucket.
# Requires: CREDENTIALS (or a Kokoro keystore), STAGING_BUCKET.
set -eo pipefail
if [[ -z "${CREDENTIALS}" ]]; then
CREDENTIALS=${KOKORO_KEYSTORE_DIR}/73713_docuploader_service_account
fi
if [[ -z "${STAGING_BUCKET}" ]]; then
echo "Need to set STAGING_BUCKET environment variable"
exit 1
fi
# work from the git root directory
pushd $(dirname "$0")/../../
# install docuploader package
python3 -m pip install gcp-docuploader
# compile all packages
mvn clean install -B -q -DskipTests=true
export NAME=google-cloud-monitoring-dashboard
# versions.txt rows look like "name:released:current"; field 3 is current.
export VERSION=$(grep ${NAME}: versions.txt | cut -d: -f3)
# build the docs
mvn site -B -q
pushd target/site/apidocs
# create metadata
python3 -m docuploader create-metadata \
--name ${NAME} \
--version ${VERSION} \
--language java
# upload docs
python3 -m docuploader upload . \
--credentials ${CREDENTIALS} \
--staging-bucket ${STAGING_BUCKET}
|
googleapis/java-monitoring-dashboards
|
.kokoro/release/publish_javadoc.sh
|
Shell
|
apache-2.0
| 1,430 |
#!/usr/bin/env bash
# Ask the Kafka Connect REST API (localhost:8083) to restart the
# mysql-source-connector instance; -i includes response headers.
restart_endpoint="localhost:8083/connectors/mysql-source-connector/restart"

curl -i -X POST \
  -H "Accept:application/json" \
  -H "Content-Type:application/json" \
  "${restart_endpoint}"
|
medvekoma/debezium-poc
|
confluent/05-restart-source-connector.sh
|
Shell
|
apache-2.0
| 169 |
#!/bin/bash -e
# Copyright 2017-2018 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Create a VM and boot stateless ESXi via cdrom/iso
set -o pipefail
# Print usage help. The heredoc delimiter is quoted, so $0 and the
# GOVC_* text are emitted literally.
usage() {
cat <<'EOF'
Usage: $0 [-d DISK_GB] [-m MEM_GB] [-i ESX_ISO] [-s] ESX_URL VM_NAME
GOVC_* environment variables also apply, see https://github.com/vmware/govmomi/tree/master/govc#usage
If GOVC_USERNAME is set, it is used to create an account on the ESX vm. Default is to use the existing root account.
If GOVC_PASSWORD is set, the account password will be set to this value. Default is to use the given ESX_URL password.
EOF
}
# Defaults: 48G disk, 16G RAM.
disk=48
mem=16
# 6.7.0d https://docs.vmware.com/en/VMware-vSphere/6.7/rn/vsphere-esxi-vcenter-server-67-release-notes.html
iso=VMware-VMvisor-6.7.0-9484548.x86_64.iso
# -d disk GB, -i iso, -m mem GB, -s standalone (single ESXi disk, no vSAN).
while getopts d:hi:m:s flag
do
case $flag in
d)
disk=$OPTARG
;;
h)
usage
exit
;;
i)
iso=$OPTARG
;;
m)
mem=$OPTARG
;;
s)
standalone=true
;;
*)
usage 1>&2
exit 1
;;
esac
done
shift $((OPTIND-1))
# Exactly two positional args required: ESX_URL and VM_NAME.
if [ $# -ne 2 ] ; then
usage
exit 1
fi
if [[ "$iso" == *"-Installer-"* ]] ; then
echo "Invalid iso name (need stateless, not installer): $iso" 1>&2
exit 1
fi
export GOVC_INSECURE=1
export GOVC_URL=$1
network=${GOVC_NETWORK:-"VM Network"}
# Capture then unset GOVC_USERNAME/GOVC_PASSWORD so they don't leak into
# the govc calls against the target host below.
username=$GOVC_USERNAME
password=$GOVC_PASSWORD
unset GOVC_USERNAME GOVC_PASSWORD
guest=${GUEST:-"vmkernel65Guest"}
if [ -z "$password" ] ; then
# extract password from $GOVC_URL
password=$(govc env GOVC_PASSWORD)
fi
shift
name=$1
shift
echo -n "Checking govc version..."
govc version -require 0.15.0
# GOVC_URL_HOST of "." selects the local VMware Fusion/Workstation path
# (vmrun + .vmx file); otherwise the VM is created on the target via govc.
if [ "$(govc env -x GOVC_URL_HOST)" = "." ] ; then
if [ "$(uname -s)" = "Darwin" ]; then
PATH="/Applications/VMware Fusion.app/Contents/Library:$PATH"
fi
dir="${name}.vmwarevm"
vmx="$dir/${name}.vmx"
# Stop and remove any previous VM of the same name before recreating it.
if [ -d "$dir" ] ; then
if vmrun list | grep -q "$vmx" ; then
vmrun stop "$vmx" hard
fi
rm -rf "$dir"
fi
mkdir "$dir"
vmware-vdiskmanager -c -s "${disk}GB" -a lsilogic -t 1 "$dir/${name}.vmdk" 2>/dev/null
cat > "$vmx" <<EOF
config.version = "8"
virtualHW.version = "11"
numvcpus = "2"
memsize = "$((mem*1024))"
displayName = "$name"
guestOS = "vmkernel6"
vhv.enable = "TRUE"
scsi0.present = "TRUE"
scsi0.virtualDev = "lsilogic"
scsi0:0.present = "TRUE"
scsi0:0.fileName = "${name}.vmdk"
ide1:0.present = "TRUE"
ide1:0.fileName = "$(realpath "$iso")"
ide1:0.deviceType = "cdrom-image"
ethernet0.present = "TRUE"
ethernet0.connectionType = "nat"
ethernet0.virtualDev = "e1000"
ethernet0.wakeOnPcktRcv = "FALSE"
ethernet0.linkStatePropagation.enable = "TRUE"
vmci0.present = "TRUE"
hpet0.present = "TRUE"
tools.syncTime = "TRUE"
pciBridge0.present = "TRUE"
pciBridge4.present = "TRUE"
pciBridge4.virtualDev = "pcieRootPort"
pciBridge4.functions = "8"
pciBridge5.present = "TRUE"
pciBridge5.virtualDev = "pcieRootPort"
pciBridge5.functions = "8"
pciBridge6.present = "TRUE"
pciBridge6.virtualDev = "pcieRootPort"
pciBridge6.functions = "8"
pciBridge7.present = "TRUE"
pciBridge7.virtualDev = "pcieRootPort"
pciBridge7.functions = "8"
EOF
vmrun start "$vmx" nogui
vm_ip=$(vmrun getGuestIPAddress "$vmx" -wait)
else
export GOVC_DATASTORE=${GOVC_DATASTORE:-$(basename "$(govc ls datastore)")}
# Nested ESXi needs promiscuous mode on the portgroup to see guest traffic.
if [ "$(govc about -json | jq -r .About.ProductLineId)" == "embeddedEsx" ] ; then
policy=$(govc host.portgroup.info -json | jq -r ".Portgroup[] | select(.Spec.Name == \"$network\") | .Spec.Policy.Security")
if [ -n "$policy" ] && [ "$(jq -r <<<"$policy" .AllowPromiscuous)" != "true" ] ; then
echo "Enabling promiscuous mode for $network on $(govc env -x GOVC_URL_HOST)..."
govc host.portgroup.change -allow-promiscuous "$network"
fi
fi
# Upload the boot iso once; reuse on subsequent runs.
boot=$(basename "$iso")
if ! govc datastore.ls "$boot" > /dev/null 2>&1 ; then
govc datastore.upload "$iso" "$boot"
fi
echo "Creating vm ${name}..."
govc vm.create -on=false -net "$network" -m $((mem*1024)) -c 2 -g "$guest" -net.adapter=vmxnet3 -disk.controller pvscsi "$name"
echo "Adding a second nic for ${name}..."
govc vm.network.add -net "$network" -net.adapter=vmxnet3 -vm "$name"
echo "Enabling nested hv for ${name}..."
govc vm.change -vm "$name" -nested-hv-enabled
echo "Enabling Mac Learning dvFilter for ${name}..."
seq 0 1 | xargs -I% govc vm.change -vm "$name" \
-e ethernet%.filter4.name=dvfilter-maclearn \
-e ethernet%.filter4.onFailure=failOpen
echo "Adding cdrom device to ${name}..."
id=$(govc device.cdrom.add -vm "$name")
echo "Inserting $boot into $name cdrom device..."
govc device.cdrom.insert -vm "$name" -device "$id" "$boot"
if [ -n "$standalone" ] ; then
echo "Creating $name disk for use by ESXi..."
govc vm.disk.create -vm "$name" -name "$name"/disk1 -size "${disk}G"
fi
echo "Powering on $name VM..."
govc vm.power -on "$name"
echo "Waiting for $name ESXi IP..."
vm_ip=$(govc vm.ip "$name")
! govc events -n 100 "vm/$name" | grep -E 'warning|error'
fi
# The freshly-booted stateless ESXi has root with an empty password.
esx_url="root:@${vm_ip}"
echo "Waiting for $name hostd (via GOVC_URL=$esx_url)..."
while true; do
if govc about -u "$esx_url" 2>/dev/null; then
break
fi
printf "."
sleep 1
done
if [ -z "$standalone" ] ; then
# Create disk for vSAN after boot so they are unclaimed
echo "Creating $name disks for use by vSAN..."
govc vm.disk.create -vm "$name" -name "$name"/vsan-cache -size "$((disk/2))G"
govc vm.disk.create -vm "$name" -name "$name"/vsan-store -size "${disk}G"
fi
# Set target to the ESXi VM
GOVC_URL="$esx_url"
if [ -z "$standalone" ] ; then
echo "Rescanning ${name} HBA for new devices..."
disk=($(govc host.storage.info -rescan | grep /vmfs/devices/disks | awk '{print $1}' | sort))
# vSAN wants one SSD (cache) and one HDD (capacity) device.
echo "Marking ${name} disk ${disk[0]} as SSD..."
govc host.storage.mark -ssd "${disk[0]}"
echo "Marking ${name} disk ${disk[1]} as HDD..."
govc host.storage.mark -ssd=false "${disk[1]}"
fi
echo "Configuring NTP for ${name}..."
govc host.date.change -server 0.pool.ntp.org
# TSM = local tech support shell, TSM-SSH = ssh access.
for id in TSM TSM-SSH ntpd ; do
printf "Enabling service %s for ${name}...\n" $id
govc host.service enable $id
govc host.service start $id
done
# No GOVC_USERNAME given: update root's password instead of creating a user.
if [ -z "$username" ] ; then
username=root
action="update"
else
action="create"
fi
echo "Disabling VSAN device monitoring for ${name}..."
govc host.esxcli system settings advanced set -o /LSOM/VSANDeviceMonitoring -i 0
# A setting of 1 means that vSwp files are created thin, with 0% Object Space Reservation
govc host.esxcli system settings advanced set -o /VSAN/SwapThickProvisionDisabled -i 1
govc host.esxcli system settings advanced set -o /VSAN/FakeSCSIReservations -i 1
echo "ESX host account $action for user $username on ${name}..."
govc host.account.$action -id $username -password "$password"
echo "Granting Admin permissions for user $username on ${name}..."
govc permissions.set -principal $username -role Admin
echo "Enabling guest ARP inspection to get vm IPs without vmtools on ${name}..."
govc host.esxcli system settings advanced set -o /Net/GuestIPHack -i 1
echo "Opening firewall for serial port traffic for ${name}..."
govc host.esxcli network firewall ruleset set -r remoteSerialPort -e true
echo "Setting hostname for ${name}..."
govc host.esxcli system hostname set -H "$name"
echo "Enabling MOB for ${name}..."
govc host.option.set Config.HostAgent.plugins.solo.enableMob true
# Best-effort: install an ssh key if sshpass and a local pubkey exist.
if which sshpass >/dev/null && [ -e ~/.ssh/id_rsa.pub ] ; then
echo "Adding ssh authorized key to ${name}..."
sshpass -p "$password" scp \
-oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null -oLogLevel=error \
~/.ssh/id_rsa.pub "root@$vm_ip:/etc/ssh/keys-root/authorized_keys"
fi
echo "Done: GOVC_URL=${username}:${password}@${vm_ip}"
|
dougm/govmomi
|
scripts/vcsa/create-esxi-vm.sh
|
Shell
|
apache-2.0
| 8,288 |
#!/usr/bin/env bash
#
# Copyright 2012-2014, Continuuity, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Print the message (surrounded by blank lines) and abort with status 1.
die() {
  printf '\n%s\n\n' "$*"
  exit 1
}
APP_NAME="loom-dummy-provisioner"
# Directory for pid files; override with PID_DIR in the environment.
PID_DIR=${PID_DIR:-/var/run/loom}
pid="${PID_DIR}/${APP_NAME}.pid"
# Refuse to start if a pid file exists and its process is still alive.
# Globals: PID_DIR, APP_NAME, p (read); pid (written)
check_before_start() {
if [ ! -d "${PID_DIR}" ] ; then
mkdir -p "${PID_DIR}"
fi
pid="${PID_DIR}/${APP_NAME}${p}.pid"
if [ -f "${pid}" ] ; then
# kill -0 only checks that the PID exists; it sends no signal.
if kill -0 `cat $pid` > /dev/null 2>&1; then
echo "$0 running as process `cat $pid`. Stop it first or use the restart function."
exit 0
fi
fi
}
# Launch the dummy provisioner JVM in the background and record its pid.
# Globals: LOOM_HOME, LOOM_LOG_DIR, PID_DIR, APP_NAME, p (read); pid (written)
start ( ) {
cd "${LOOM_HOME}"
check_before_start
echo "Starting Loom Dummy Provisioner ..."
# nice -1 lowers priority slightly; stdout+stderr go to the app log.
nohup nice -1 java -cp server/lib/*:server/conf com.continuuity.loom.runtime.MockProvisionerMain >> ${LOOM_LOG_DIR}/${APP_NAME}${p}.log 2>&1 &
pid="${PID_DIR}/${APP_NAME}${p}.pid"
echo $! > $pid
}
# Stop the provisioner recorded in the pid file, waiting up to 30s for it
# to exit before giving up (it may still be finishing a task).
# Returns: 0 on clean stop or no pid file, 1 on failure.
stop ( ) {
local failed=0
echo "Stopping Loom Dummy Provisioner ..."
pid="${PID_DIR}/${APP_NAME}${p}.pid"
if [ -f "${pid}" ] ; then
echo -n "  Stopping provisioner ${p} ..."
pidToKill=`cat $pid`
# kill -0 == see if the PID exists
if kill -0 $pidToKill > /dev/null 2>&1; then
kill $pidToKill > /dev/null 2>&1
local cnt=0
# Poll once per second until the process exits or 30s elapse.
while kill -0 $pidToKill > /dev/null 2>&1 ; do
echo -n .
sleep 1
((cnt++))
if [ ${cnt} -ge 30 ]; then
echo "  Provisioner ${p} (pid: $pidToKill) still running a task..."
break
fi
done
rm -f "${pid}"
ret=0
else
ret=$?
fi
echo
if [ ${ret} -eq 0 ] ; then
echo "Stopped successfully ..."
else
echo "ERROR: Failed stopping!"
failed=1
fi
fi
return "${failed}"
}
# Report whether the dummy provisioner is running, based on its pid file.
# Globals: PID_DIR, APP_NAME, p (read); pid, ret (written)
# Returns: 0 when running, 1 otherwise (after printing a summary line).
status() {
  local failed=0
  pid="${PID_DIR}/${APP_NAME}${p}.pid"
  if [ -f "$pid" ]; then
    pidToCheck=$(cat "$pid")
    # kill -0 == see if the PID exists
    if kill -0 "$pidToCheck" > /dev/null 2>&1; then
      echo "${APP_NAME} ${p} running as process $pidToCheck"
      ret=0
    else
      echo "${APP_NAME} ${p} pidfile exists, but process does not appear to be running"
      ret=3
    fi
  else
    echo "${APP_NAME} ${p} is not running"
    ret=2
  fi
  if [ ${ret} -ne 0 ] ; then
    failed=1
  fi
  if [ ${failed} -eq 0 ] ; then
    echo "Loom Provisioner up and running"
  else
    # BUG FIX: this branch previously tested `failed -eq 3`, which is
    # unreachable because failed is only ever 0 or 1, so the failure
    # summary was never printed.
    echo "At least one provisioner failed"
  fi
  return "${failed}"
}
# Restart = stop (which waits for any in-flight task) followed by start.
restart() {
stop
start
}
# Dispatch on the requested action; ${1} names the function to run.
case ${1} in
start)
${1}
;;
stop)
${1}
;;
status)
${1}
;;
restart)
${1}
;;
register)
# no-op for dummy, assume they are loaded already
;;
*)
echo "Usage: $0 {start|stop|status|restart|register}"
exit 1
;;
esac
# Exit with the dispatched function's return status.
exit $?
awholegunch/loom
|
bin/loom-dummy-provisioner.sh
|
Shell
|
apache-2.0
| 3,237 |
#!/bin/bash
# Provision the local DHIS2 environment via the FreeSHR playbooks.
# The yum prerequisites below are kept for reference but disabled.
#yum install -y http://dl.fedoraproject.org/pub/epel/6/x86_64/epel-release-6-8.noarch.rpm || true
#yum install -y https://github.com/SharedHealth/FreeSHR-SCM/raw/master/dist/shr_scm_utils-0.1-1.noarch.rpm || true
#yum install -y https://github.com/SharedHealth/FreeSHR-SCM/raw/master/dist/shr_scm-0.1-1.noarch.rpm || true
#Provision using ansible
sudo ansible-playbook -i ../../FreeSHR-Playbooks/dhis2 ../../FreeSHR-Playbooks/bdshr-dhis.yml --skip-tags="application"
|
SharedHealth/DHIS-Server
|
scripts/provision-local.sh
|
Shell
|
apache-2.0
| 480 |
#!/bin/bash
#
# Sets up the environment for launching a FITS instance via
# either the fits.sh launcher, or the fits-ngserver.sh Nailgun server
FITS_ENV_SCRIPT="$0"
# Resolve symlinks to this script
while [ -h "$FITS_ENV_SCRIPT" ] ; do
ls=`ls -ld "$FITS_ENV_SCRIPT"`
# Drop everything prior to ->
link=`expr "$ls" : '.*-> \(.*\)$'`
if expr "$link" : '/.*' > /dev/null; then
FITS_ENV_SCRIPT="$link"
else
FITS_ENV_SCRIPT=`dirname "$FITS_ENV_SCRIPT"`/"$link"
fi
done
FITS_HOME=`dirname $FITS_ENV_SCRIPT`
export FITS_HOME
# Uncomment the following line if you want "file utility" to dereference and follow symlinks.
# export POSIXLY_CORRECT=1
# concatenate args and use eval/exec to preserve spaces in paths, options and args
args=""
for arg in "$@" ; do
args="$args \"$arg\""
done
# Application classpath
APPCLASSPATH=""
JCPATH=${FITS_HOME}/lib
# Add on extra jar files to APPCLASSPATH
for i in "$JCPATH"/*.jar; do
  # When the directory holds no jars the glob stays literal; skip it.
  [ -e "$i" ] || continue
  # BUGFIX: the original unconditionally prepended ":", leaving an empty
  # leading classpath entry, which the JVM interprets as the current
  # directory — an unintended classpath member.
  if [ -z "$APPCLASSPATH" ]; then
    APPCLASSPATH="$i"
  else
    APPCLASSPATH="$APPCLASSPATH:$i"
  fi
done
# all subdirectories of ${FITS_HOME}/lib/ get loaded dynamically at runtime. DO NOT add here!
|
uvalib/Libra2
|
tools/fits-1.2.0/fits-env.sh
|
Shell
|
apache-2.0
| 1,081 |
#!/bin/sh -eux
# Prepare a Debian guest for base-box builds:
#   1. stop/disable apt's periodic and systemd-driven background jobs (they
#      hold the dpkg lock and break unattended provisioning),
#   2. upgrade the running kernel image and install matching headers.
# NOTE(review): shebang options are lost if the script is run as
# "sh update.sh" — confirm it is always executed directly.
# Kernel flavour suffix of the running kernel (whatever follows the
# "X.Y.Z-NN-" version prefix of `uname -r`, e.g. "amd64").
arch="`uname -r | sed 's/^.*[0-9]\{1,\}\.[0-9]\{1,\}\.[0-9]\{1,\}\(-[0-9]\{1,2\}\)-//'`"
debian_version="`lsb_release -r | awk '{print $2}'`";
major_version="`echo $debian_version | awk -F. '{print $1}'`";
apt-get update;
# Disable systemd apt timers/services
# (systemd-based apt automation exists from Debian 9 onward)
if [ "$major_version" -ge "9" ]; then
    systemctl stop apt-daily.timer;
    systemctl stop apt-daily-upgrade.timer;
    systemctl disable apt-daily.timer;
    systemctl disable apt-daily-upgrade.timer;
    systemctl mask apt-daily.service;
    systemctl mask apt-daily-upgrade.service;
    systemctl daemon-reload;
fi
# Disable periodic activities of apt
cat <<EOF >/etc/apt/apt.conf.d/10periodic;
APT::Periodic::Enable "0";
APT::Periodic::Update-Package-Lists "0";
APT::Periodic::Download-Upgradeable-Packages "0";
APT::Periodic::AutocleanInterval "0";
APT::Periodic::Unattended-Upgrade "0";
EOF
apt-get -y upgrade linux-image-$arch;
apt-get -y install linux-headers-`uname -r`;
|
juliandunn/bento
|
debian/scripts/update.sh
|
Shell
|
apache-2.0
| 948 |
#! /bin/bash
# Fail on a non-zero exit code (leaving user to cleanup)
set -e
#
# Checks to see whether any temporary instances are still hanging around
# (running instances owned by us whose key name starts with "packer"), and
# terminates each one found.
#
# Fixes: $() instead of backticks; variables quoted; the literal filter
# value 'packer*' quoted so the shell cannot glob-expand it against files
# in the current directory.
INSTANCES=$(aws ec2 describe-instances --region "${AWS_REGION}" --filters Name=owner-id,Values="${AWS_OWNER_ID}" Name=instance-state-name,Values=running 'Name=key-name,Values=packer*' | grep INSTANCES | cut -f 8)
# Word-splitting of $INSTANCES is intentional: one instance id per word.
for INSTANCE in $INSTANCES
do
  echo "Cleaning up Packer created temporary instance: $INSTANCE"
  aws ec2 terminate-instances --instance-ids "$INSTANCE"
done
|
fcrepo4-labs/fcrepo4-packer-graphite
|
scripts/cleanup-failure.sh
|
Shell
|
apache-2.0
| 519 |
#!/bin/bash
docker build -t graceful-shutdown-test-provider:latest .
|
bmwcarit/joynr
|
tests/graceful-shutdown-test/graceful-shutdown-test-provider/build_docker_image.sh
|
Shell
|
apache-2.0
| 70 |
#!/bin/bash
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright Clairvoyant 2015
exit 1
PATH=/usr/bin:/usr/sbin:/bin:/sbin:/usr/local/bin
# Function to discover basic OS details.
discover_os() {
if command -v lsb_release >/dev/null; then
# CentOS, Ubuntu, RedHatEnterpriseServer, Debian, SUSE LINUX
# shellcheck disable=SC2034
OS=$(lsb_release -is)
# CentOS= 6.10, 7.2.1511, Ubuntu= 14.04, RHEL= 6.10, 7.5, SLES= 11
# shellcheck disable=SC2034
OSVER=$(lsb_release -rs)
# 7, 14
# shellcheck disable=SC2034
OSREL=$(echo "$OSVER" | awk -F. '{print $1}')
# Ubuntu= trusty, wheezy, CentOS= Final, RHEL= Santiago, Maipo, SLES= n/a
# shellcheck disable=SC2034
OSNAME=$(lsb_release -cs)
else
if [ -f /etc/redhat-release ]; then
if [ -f /etc/centos-release ]; then
# shellcheck disable=SC2034
OS=CentOS
# 7.5.1804.4.el7.centos, 6.10.el6.centos.12.3
# shellcheck disable=SC2034
OSVER=$(rpm -qf /etc/centos-release --qf='%{VERSION}.%{RELEASE}\n' | awk -F. '{print $1"."$2}')
# shellcheck disable=SC2034
OSREL=$(rpm -qf /etc/centos-release --qf='%{VERSION}\n')
else
# shellcheck disable=SC2034
OS=RedHatEnterpriseServer
# 7.5, 6Server
# shellcheck disable=SC2034
OSVER=$(rpm -qf /etc/redhat-release --qf='%{VERSION}\n')
if [ "$OSVER" == "6Server" ]; then
# shellcheck disable=SC2034
OSVER=$(rpm -qf /etc/redhat-release --qf='%{RELEASE}\n' | awk -F. '{print $1"."$2}')
# shellcheck disable=SC2034
OSNAME=Santiago
else
# shellcheck disable=SC2034
OSNAME=Maipo
fi
# shellcheck disable=SC2034
OSREL=$(echo "$OSVER" | awk -F. '{print $1}')
fi
elif [ -f /etc/SuSE-release ]; then
if grep -q "^SUSE Linux Enterprise Server" /etc/SuSE-release; then
# shellcheck disable=SC2034
OS="SUSE LINUX"
fi
# shellcheck disable=SC2034
OSVER=$(rpm -qf /etc/SuSE-release --qf='%{VERSION}\n' | awk -F. '{print $1}')
# shellcheck disable=SC2034
OSREL=$(rpm -qf /etc/SuSE-release --qf='%{VERSION}\n' | awk -F. '{print $1}')
# shellcheck disable=SC2034
OSNAME="n/a"
fi
fi
}
echo "********************************************************************************"
echo "*** $(basename "$0")"
echo "********************************************************************************"
# Check to see if we are on a supported OS.
discover_os
if [ "$OS" != RedHatEnterpriseServer ] && [ "$OS" != CentOS ]; then
#if [ "$OS" != RedHatEnterpriseServer ] && [ "$OS" != CentOS ] && [ "$OS" != Debian ] && [ "$OS" != Ubuntu ]; then
echo "ERROR: Unsupported OS."
exit 3
fi
echo "Updating IPtables for KDC..."
if [ "$OS" == RedHatEnterpriseServer ] || [ "$OS" == CentOS ]; then
service iptables save
# shellcheck disable=SC1004
sed -i -e '/--dport 22/i\
-A INPUT -p tcp -m state --state NEW -m tcp --dport 88 -j ACCEPT\
-A INPUT -p tcp -m state --state NEW -m tcp --dport 464 -j ACCEPT\
-A INPUT -p tcp -m state --state NEW -m tcp --dport 749 -j ACCEPT\
-A INPUT -p udp -m udp --dport 88 -j ACCEPT\
-A INPUT -p udp -m udp --dport 464 -j ACCEPT\
-A INPUT -p udp -m udp --dport 749 -j ACCEPT' /etc/sysconfig/iptables
service iptables restart
elif [ "$OS" == Debian ] || [ "$OS" == Ubuntu ]; then
:
fi
|
teamclairvoyant/hadoop-deployment-bash
|
services/update_iptables_kdc.sh
|
Shell
|
apache-2.0
| 3,929 |
#!/bin/bash
#*******************************************************************************
# Copyright 2015 Hewlett Packard Enterprise Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
#*******************************************************************************
# # Bootstrap Vergireen Demo
#
# ## Author(s)
# - Ricardo Quintana <[email protected]>
#
# ## Description:
#
# This script creates a directory with all the necessary asset files that you need to run
# the Verigreen demo environment which is comprised of Verigreen container, GitLab container
# (with Verigreen Git-Hook), and a Jenkins master container.
#
# Directory that receives all bootstrapped asset files.
assets_dir="$HOME/.vg"
echo "WARNING: Bootstrapping actions are not reversible."
echo "Make sure that you backup your configuration/data before continuing."
echo "Would like to continue? (Y/y for Yes, any other key for No):"
# -r keeps backslashes in the answer literal.
read -r continue_boot
if [[ "$continue_boot" == "y" || "$continue_boot" == "Y" ]]; then
    echo "Bootstrapping in progress..."
else
    echo "Exiting bootstrap process."
    # BUGFIX: 'exit -1' is not portable — exit codes are 0-255 (bash maps -1
    # to 255; other shells reject it).  Use 1 to signal failure.
    exit 1
fi
function create_assets_dir {
echo "Creating $assets_dir"
mkdir -p "$assets_dir"
}
function bootstrap_consul {
cp -Ri assets/consul $assets_dir
echo "Finished bootstrapping for Consul!"
}
function bootstrap_vg {
cp -Ri assets/vg $assets_dir
echo "Finished bootstrapping for Verigreen!"
}
function bootstrap_jenkins {
cp -Ri assets/jenkins $assets_dir
echo "Finished bootstrapping for Jenkins!"
}
function bootstrap_gitlab {
cp -Ri assets/gitlab $assets_dir
echo "Finished bootstrapping for GitLab!"
}
function bootstrap_data {
# TODO: need to add cacheloader directory
mkdir -p $assets_dir/data/jenkins
mkdir -p $assets_dir/data/gitlab
echo "Finished bootstrapping for data directories for Jenkins and GitLab!"
}
function cleanup_data {
# TODO: need to add cacheloader directory
sudo rm -rf $assets_dir/data
echo "Finished cleaning up data directories for Jenkins and GitLab!"
}
function cleanup_assets {
rm -rf $assets_dir/consul
rm -rf $assets_dir/jenkins
rm -rf $assets_dir/gitlab
rm -rf $assets_dir/vg
echo "Finished cleaning up assets for GitLab, Verigreen, Jenkins, and Consul!"
}
function cleanup {
cleanup_data
cleanup_assets
}
function bootstrap_dot_ssh {
mkdir -p $HOME/.ssh
chmod 700 $HOME/.ssh
cp -Ri assets/vg/ssh/vg_demo $HOME/.ssh
cp -Ri assets/vg/ssh/vg_demo.pub $HOME/.ssh
[[ -z "$(cat $HOME/.ssh/config | grep -m 1 vg_demo)" ]] && echo -e "\n" >> $HOME/.ssh/config && \
cat assets/vg/ssh/config_external | tee -a $HOME/.ssh/config
chmod 600 $HOME/.ssh/config
chmod 600 $HOME/.ssh/vg_demo
chmod 600 $HOME/.ssh/vg_demo.pub
}
case $1 in
vg)
create_assets_dir
bootstrap_vg
;;
jenkins)
create_assets_dir
bootstrap_jenkins
;;
gitlab)
create_assets_dir
bootstrap_gitlab
;;
consul)
create_assets_dir
bootstrap_consul
;;
data)
create_assets_dir
bootstrap_data
;;
ssh)
bootstrap_dot_ssh
;;
cleanup_data)
cleanup_data
;;
cleanup_assets)
cleanup_assets
;;
cleanup)
cleanup
;;
*)
create_assets_dir
bootstrap_vg
bootstrap_jenkins
bootstrap_consul
bootstrap_data
bootstrap_dot_ssh
bootstrap_gitlab
;;
esac
exit 0
|
Verigreen/demo
|
bootstrap-host.sh
|
Shell
|
apache-2.0
| 3,700 |
#!/bin/bash -x
export wavelength_file=/wizards/oli-vis.wmd
export outfile=$2/$(basename -s .tif $1)_clusters
cp /wizards/tad.wiz /tmp/
cat << EOF | python
import xml.etree.ElementTree as ET
ET.register_namespace('', "https://comet.balldayton.com/standards/namespaces/2005/v1/comet.xsd")
tree = ET.parse('/wizards/tad.batchwiz')
ns={"opticks":"https://comet.balldayton.com/standards/namespaces/2005/v1/comet.xsd"}
tree.find('.//opticks:parameter[@name="Input Filename"]/opticks:value', ns).text = "file://$1"
tree.find('.//opticks:parameter[@name="Output Filename"]/opticks:value', ns).text = "file://${outfile}"
tree.write('/tmp/tad.batchwiz')
EOF
/opt/Opticks/Bin/OpticksBatch -input:/tmp/tad.batchwiz
./centroid.py ${outfile} $1 ${@:3}
|
ngageoint/scale
|
dockerfiles/examples/tad/runOpticks.sh
|
Shell
|
apache-2.0
| 742 |
#!/bin/sh
cd spark
sbt assembly && spark-submit --master 'local[2]' --class RawEventProcessing target/scala-2.10/killranalytics-assembly-1.0.jar
|
rustyrazorblade/killranalytics
|
bin/build_and_run_streaming.sh
|
Shell
|
apache-2.0
| 147 |
#!/bin/sh
# Ask the enketo/formhub bridge to render the "Patient" form as HTML.
# NOTE(review): "-ssl3" is not a single curl option — curl parses clustered
# short flags, so this is not the documented way to request SSLv3
# (that would be -3/--sslv3).  Confirm the intent; SSLv3 is deprecated.
curl -X POST -ssl3 -d "server_url=https://formhub.org/gigix&form_id=Patient" https://enketo.formhub.org/transform/get_html_form
|
gigix/shivaganda
|
village-enketo/form/formhub.sh
|
Shell
|
apache-2.0
| 138 |
#!/bin/sh
# when adding new module to maflib, please
# run this command to add the document of
# that new module.
sphinx-apidoc -f -o ./source ../maflib
|
pfi/maf
|
document/update_apidoc.sh
|
Shell
|
bsd-2-clause
| 154 |
#ssh ros@baycat 'rm -rf ~/osu-uwrt/riptide_software/src'
~/osu-uwrt/riptide_software/src/riptide_utilities/dates_scripts/date_set-baycat.sh
rsync -vrzc --delete --exclude=".*" --exclude=".*/" ~/osu-uwrt/riptide_software/src ros@baycat:~/osu-uwrt/riptide_software
ssh ros@baycat 'chmod 700 ~/osu-uwrt/riptide_software/src/riptide_utilities/*.sh'
ssh ros@baycat 'find ~/osu-uwrt/riptide_software/src/ -type f -iname "*.py" -exec chmod +x {} \;'
|
tsender/riptide_software
|
riptide_utilities/xfer-baycat.sh
|
Shell
|
bsd-2-clause
| 445 |
#
# only init if installed.
fasd_cache="$HOME/.fasd-init-bash"
if [ "$(command -v fasd)" -nt "$fasd_cache" -o ! -s "$fasd_cache" ]; then
eval "$(fasd --init posix-alias zsh-hook zsh-ccomp zsh-ccomp-install zsh-wcomp zsh-wcomp-install)" >| "$fasd_cache"
fi
source "$fasd_cache"
unset fasd_cache
# jump to recently used items
alias a='fasd -a' # any
#alias s='fasd -si' # show / search / select
alias d='fasd -d' # directory
alias f='fasd -f' # file
alias z='fasd_cd -d' # cd, same functionality as j in autojump
alias zz='fasd_cd -d -i' # interactive directory jump
|
ay65535/yadr
|
zsh/fasd.zsh
|
Shell
|
bsd-2-clause
| 569 |
#!/bin/sh
# Launcher for ksar: locate ksar.jar next to this script and run it with the
# JVM from JAVA_HOME (when set) or from PATH.
DIRNAME=`dirname "$0"`
# Setup the JVM
if [ "x$JAVA_HOME" != "x" ]; then
  JAVA="$JAVA_HOME/bin/java"
else
  JAVA="java"
fi
if [ ! -f "$DIRNAME/ksar.jar" ] ; then
  echo "Unable to find ksar.jar"
  exit 1;
fi
# BUGFIX: quote "$@" so user arguments containing spaces survive, and quote
# the java binary and jar path for the same reason.  $JAVA_OPT is left
# unquoted on purpose so it may carry several whitespace-separated options.
exec "$JAVA" $JAVA_OPT -jar "$DIRNAME/ksar.jar" "$@"
|
benbenw/ksar
|
src/main/bin/run.sh
|
Shell
|
bsd-3-clause
| 276 |
# Capitalize the first letter of every word read from the file given as $1.
# A word starts after a space; every other character passes through as-is.
# -r keeps backslashes in the input literal; the `|| [ -n "$line" ]` clause
# still processes a final line lacking a trailing newline.
while read -r line || [ -n "$line" ]; do
    f=1
    for ((i=0; i<${#line}; i++)); do
        a="${line:$i:1}"
        if [ -z "${a//[a-zA-Z]}" ] && [ $f -eq 1 ]; then
            # BUGFIX: never use data as the printf *format* string — a
            # character such as '%' or '-' would be misinterpreted.
            printf '%s' "${a^^}"
            f=0
        elif [ -z "${a//[ ]}" ]; then
            printf '%s' "$a"
            f=1
        else
            printf '%s' "$a"
            f=0
        fi
    done
    echo
done < "$1"
|
nikai3d/ce-challenges
|
easy/capitalize_words.bash
|
Shell
|
bsd-3-clause
| 377 |
#!/bin/sh
COVERAGE="`which python-coverage`"
if [ -z "$COVERAGE" ]
then
COVERAGE="`which coverage`"
fi
cd $CI_HOME/conpaas-client
python setup.py install
cd $CI_HOME/conpaas-director
# Create fake files/directories not really needed for unit testing
touch ConPaaS.tar.gz
mkdir conpaas
cp -a ../conpaas-services/config conpaas
cp -a ../conpaas-services/scripts conpaas
mkdir -p cpsdirectorconf/certs
# We cannot use system-wide directories
sed -i s#/etc/cpsdirector#$PWD/cpsdirectorconf# director.cfg.example
python setup.py install
# Create certificates
DIRECTOR_TESTING=true python cpsconf.py localhost
# Fake tarball
touch cpsdirectorconf/ConPaaS.tar.gz
$COVERAGE run --source=cpsdirector test.py
$COVERAGE report -m
cd ../conpaas-services/src/tests/
$COVERAGE run --source=conpaas run_tests.py
$COVERAGE report -m
|
mihaisoloi/conpaas
|
travis.sh
|
Shell
|
bsd-3-clause
| 835 |
# Usage:
# source scripts/make-env-vex.sh [env name=repo folder name by default]
repo=$(git rev-parse --show-toplevel)
envname=$1
if [ -z "$envname" ]; then
envname=$(basename $repo)
fi
echo ~~~ Building temporary vex environment called \"$envname\" for repo \"$repo\".
rmvirtualenv $envname
vex -m --python python2.7 $envname pip install "git+https://github.com/level12/wheelhouse#egg=Wheelhouse"
vex $envname wheelhouse install -- -r $repo/requirements/dev-env.txt
vex $envname pip install -e .
echo ~~~ Run \"exit\" to leave and destroy temporary environment.
vex -r $envname
|
level12/keg-bouncer
|
scripts/make-env-vex.sh
|
Shell
|
bsd-3-clause
| 585 |
#!/bin/bash
set -ev
# Run the test suite for the current $JOB_TYPE.
#   $1 - optional extra lerna flag (e.g. --since to limit to changed packages)
# The three near-identical invocation blocks below were deduplicated into
# this helper; behavior is unchanged.
run_suite() {
  if [ $JOB_TYPE == 'rest' ]
  then
    yarn lint
    yarn test
    # $1 intentionally unquoted: it vanishes when empty.
    npx lerna run test --stream $1 -- -- --ci --bail --coverage
  else
    npx lerna run test:$JOB_TYPE --stream $1 -- -- --ci --bail --coverage
  fi
}
# Branch builds (not master, not a PR): test only changed packages, publish
# coverage.
if [ $TRAVIS_BRANCH != 'master' ] && [ $TRAVIS_PULL_REQUEST = 'false' ]
then
  run_suite --since
  echo "Publishing branch changes to coveralls for PR"
  yarn coverage:publish
  exit 0
fi
# Master builds: full test run, no coverage publish here.
if [ $TRAVIS_PULL_REQUEST = 'false' ]
then
  echo "Running all the tests for master"
  run_suite
  exit 0
fi
# Pull requests: full test run with no publish.
echo "Running all the tests for the PR with no publish"
run_suite
|
newsuk/times-components
|
lib/run_jobs.sh
|
Shell
|
bsd-3-clause
| 972 |
brew install node
npm install -g gulp
sudo gem install sass
npm install
mkdir keys
touch keys/mc.json
echo -e "{\n\t\"apiKey\":\"\"\n}" >> ./keys/mc.json
|
Jewelbots/www
|
bootstrap.sh
|
Shell
|
bsd-3-clause
| 154 |
#!/bin/bash
#
# Copyright (C) 2012 Brian Aker
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#
# * The names of its contributors may not be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
die() { echo "$@"; exit 1; }
# True when $1 resolves to something runnable (binary, builtin, function,
# or alias); all diagnostic output is discarded.
command_exists () {
  type "$1" > /dev/null 2>&1
}
determine_target_platform () {
if [ $(uname) = "Darwin" ]; then
PLATFORM="darwin"
elif [[ -f "/etc/fedora-release" ]]; then
PLATFORM="fedora"
elif [[ -f "/etc/lsb-release" ]]; then
debian_version=`cat /etc/lsb-release | grep DISTRIB_CODENAME | awk -F= ' { print $2 } '`
case $debian_version in
precise)
PLATFORM="precise"
;;
*)
;;
esac
fi
PLATFORM_VERSION=`uname -r`
if [ "$PLATFORM" == "unknown" ]; then
PLATFORM=`uname -s`
fi
TARGET_PLATFORM="$PLATFORM-$PLATFORM_VERSION"
}
configure_target_platform () {
# If we are executing on OSX use CLANG, otherwise only use it if we find it in the ENV
case $TARGET_PLATFORM in
darwin-*)
CC=clang CXX=clang++ ./configure $DEBUG_ARG $ASSERT_ARG $PREFIX_ARG || die "Cannot execute CC=clang CXX=clang++ configure $DEBUG_ARG $ASSERT_ARG $PREFIX_ARG"
;;
*)
./configure $DEBUG_ARG $ASSERT_ARG $PREFIX_ARG || die "Cannot execute configure $DEBUG_ARG $ASSERT_ARG $PREFIX_ARG"
;;
esac
}
# Build a batch-mode gdb invocation (in the global GDB_COMMAND) that runs the
# program once, logs all thread backtraces, and exits.  The command file is
# written to a fresh temp file whose path lands in the global GDB_TMPFILE.
setup_gdb_command () {
  GDB_TMPFILE=$(mktemp /tmp/gdb.XXXXXXXXXX)
  {
    printf '%s\n' "set logging overwrite on"
    printf '%s\n' "set logging on"
    printf '%s\n' "set environment LIBTEST_IN_GDB=1"
    printf '%s\n' "run"
    printf '%s\n' "thread apply all bt"
    printf '%s\n' "quit"
  } > "$GDB_TMPFILE"
  GDB_COMMAND="gdb -f -batch -x $GDB_TMPFILE"
}
# Compose the valgrind wrapper used for "make check": full leak checking,
# poisoned alloc/free fill patterns, fd tracking, and a non-zero exit code
# whenever valgrind itself detects an error.
setup_valgrind_command () {
  local opts='--error-exitcode=1 --leak-check=yes --show-reachable=yes'
  opts="$opts --track-fds=yes --malloc-fill=A5 --free-fill=DE"
  VALGRIND_COMMAND="valgrind $opts"
}
make_valgrind () {
  # Run the test suite under valgrind, or under Darwin's malloc debugging on
  # OSX.  The current TESTS_ENVIRONMENT is saved, replaced by the valgrind
  # (plus libtool, when present) wrapper for the duration of "make check",
  # then restored.
  if [ "$PLATFORM" = "darwin" ]; then
    make_darwin_malloc
  else
    if command_exists valgrind; then
      if [ -n "$TESTS_ENVIRONMENT" ]; then
        OLD_TESTS_ENVIRONMENT=$TESTS_ENVIRONMENT
        export -n TESTS_ENVIRONMENT
      fi
      # Set ENV VALGRIND_COMMAND
      if [ -z "$VALGRIND_COMMAND" ]; then
        setup_valgrind_command
      fi
      # Tests built with libtool must run through "libtool --mode=execute".
      if [[ -f libtool ]]; then
        TESTS_ENVIRONMENT="$LIBTOOL_COMMAND $VALGRIND_COMMAND"
      else
        TESTS_ENVIRONMENT="$VALGRIND_COMMAND"
      fi
      # Cleanup: the export below was duplicated on two consecutive lines in
      # the original; exporting once is sufficient and behavior-identical.
      export TESTS_ENVIRONMENT
      make_target check
      export -n TESTS_ENVIRONMENT
      if [ -n "$OLD_TESTS_ENVIRONMENT" ]; then
        TESTS_ENVIRONMENT=$OLD_TESTS_ENVIRONMENT
        export TESTS_ENVIRONMENT
      fi
    fi
  fi
}
make_install_system () {
make_distclean
INSTALL_LOCATION=$(mktemp -d /tmp/XXXXXXXXXX)
PREFIX_ARG="--prefix=$INSTALL_LOCATION"
configure_target_platform
if [ -n "$TESTS_ENVIRONMENT" ]; then
OLD_TESTS_ENVIRONMENT=$TESTS_ENVIRONMENT
export -n TESTS_ENVIRONMENT
fi
make_target all
make_target "install"
make_target "installcheck"
make_target "uninstall"
export -n TESTS_ENVIRONMENT
if [ -n "$OLD_TESTS_ENVIRONMENT" ]; then
TESTS_ENVIRONMENT=$OLD_TESTS_ENVIRONMENT
export TESTS_ENVIRONMENT
fi
rm -r -f $INSTALL_LOCATION
}
make_darwin_malloc () {
MallocGuardEdges=1
MallocErrorAbort=1
MallocScribble=1
export MallocGuardEdges MallocErrorAbort MallocScribble
make_check
export -n MallocGuardEdges MallocErrorAbort MallocScribble
}
make_local () {
case $TARGET_PLATFORM in
darwin-*)
make_distcheck
make_valgrind
;;
*)
make_target_platform
;;
esac
}
make_target_platform () {
case $TARGET_PLATFORM in
fedora-*)
# make rpm includes "make distcheck"
if [ -f rpm.am ]; then
make_rpm
else
make_distcheck
fi
;;
precise-*)
make_distcheck
make_valgrind
make_gdb
;;
unknown-*)
make_all
;;
*)
make_all
;;
esac
make_install_system
make_distclean
}
make_gdb () {
if command_exists gdb; then
if [ -n "$TESTS_ENVIRONMENT" ]; then
OLD_TESTS_ENVIRONMENT=$TESTS_ENVIRONMENT
export -n TESTS_ENVIRONMENT
fi
# Set ENV GDB_COMMAND
if [ -z "$GDB_COMMAND" ]; then
setup_gdb_command
fi
if [[ -f libtool ]]; then
TESTS_ENVIRONMENT="$LIBTOOL_COMMAND $GDB_COMMAND"
else
TESTS_ENVIRONMENT="$GDB_COMMAND"
fi
export TESTS_ENVIRONMENT
make_target check
export -n TESTS_ENVIRONMENT
MAKE_TARGET=
if [[ -f gdb.txt ]]; then
rm -f gdb.txt
fi
if [ -n "$OLD_TESTS_ENVIRONMENT" ]; then
TESTS_ENVIRONMENT=$OLD_TESTS_ENVIRONMENT
export TESTS_ENVIRONMENT
fi
fi
}
make_target () {
  # Run "$MAKE <target>", temporarily publishing the target name in the
  # global MAKE_TARGET and attempting to restore the previous value after.
  if [ -n "$MAKE_TARGET" ]; then
    OLD_MAKE_TARGET=$MAKE_TARGET
  fi
  MAKE_TARGET=$1
  run $MAKE $MAKE_TARGET || die "Cannot execute $MAKE $MAKE_TARGET"
  # NOTE(review): this test reads the *new* MAKE_TARGET (non-empty whenever
  # $1 was given), not whether a previous value was saved, so
  # OLD_MAKE_TARGET — possibly stale or unset — is copied back almost
  # unconditionally.  Confirm whether [ -n "$OLD_MAKE_TARGET" ] was intended.
  if [ -n "$MAKE_TARGET" ]; then
    MAKE_TARGET=$OLD_MAKE_TARGET
  fi
}
make_distcheck () {
make_target distcheck
}
make_rpm () {
make_target "rpm"
}
make_distclean () {
make_target distclean
}
make_maintainer_clean () {
make_target maintainer-clean
}
make_check () {
make_target check
}
make_all () {
make_target all
}
run() {
  # Echo the command line (plus any global $ARGS), then execute it, first
  # surfacing TESTS_ENVIRONMENT when one is active.
  if [ -n "$TESTS_ENVIRONMENT" ]; then
    echo "TESTS_ENVIRONMENT=$TESTS_ENVIRONMENT"
  fi
  echo "\`$@' $ARGS"
  # NOTE(review): $@ and $ARGS are left unquoted so multi-word commands and
  # option strings split into separate words — confirm before "fixing" the
  # quoting here.
  $@ $ARGS
}
parse_command_line_options() {
if ! options=$(getopt -o c --long configure -n 'bootstrap' -- "$@"); then
exit 1
fi
eval set -- "$options"
while [ $# -gt 0 ]; do
case $1 in
-c | --configure )
CONFIGURE_OPTION="yes" ; shift;;
-- )
shift; break;;
-* )
echo "$0: error - unrecognized option $1" 1>&2; exit 1;;
*)
break;;
esac
done
}
bootstrap() {
parse_command_line_options $@
determine_target_platform
DEFAULT_DEV_AUTORECONF_FLAGS="--install --force --verbose -Wall -Werror"
DEFAULT_AUTORECONF_FLAGS="--install --force --verbose -Wall"
if [ -d .git ]; then
AUTORECONF_FLAGS=$DEFAULT_DEV_AUTORECONF_FLAGS
VCS_CHECKOUT=git
elif [ -d .bzr ]; then
AUTORECONF_FLAGS=$DEFAULT_DEV_AUTORECONF_FLAGS
VCS_CHECKOUT=bzr
elif [ -d .svn ]; then
AUTORECONF_FLAGS=$DEFAULT_DEV_AUTORECONF_FLAGS
VCS_CHECKOUT=svn
elif [ -d .hg ]; then
AUTORECONF_FLAGS=$DEFAULT_DEV_AUTORECONF_FLAGS
VCS_CHECKOUT=hg
else
AUTORECONF_FLAGS=$DEFAULT_AUTORECONF_FLAGS
fi
if [ -z "$LIBTOOLIZE_FLAGS" ]; then
LIBTOOLIZE_FLAGS="--force --verbose --install"
fi
if [ "$PLATFORM" = "darwin" ]; then
LIBTOOLIZE=glibtoolize
elif [ -z "$LIBTOOLIZE" ]; then
LIBTOOLIZE=libtoolize
fi
AUTORECONF=autoreconf
# Set ENV DEBUG in order to enable debugging
if [ -n "$DEBUG" ]; then
DEBUG_ARG="--enable-debug"
fi
# Set ENV ASSERT in order to enable assert
if [ -n "$ASSERT" ]; then
ASSERT_ARG="--enable-assert"
fi
# Set ENV MAKE in order to override "make"
if [ -z "$MAKE" ]; then
MAKE="make"
fi
# Set ENV PREFIX in order to set --prefix for ./configure
if [ -n "$PREFIX" ]; then
PREFIX_ARG="--prefix=$PREFIX"
fi
if [ -f Makefile ]; then
make_maintainer_clean
rm -f Makefile.in
rm -f aminclude.am
fi
run $LIBTOOLIZE $LIBTOOLIZE_FLAGS || die "Cannot execute $LIBTOOLIZE $LIBTOOLIZE_FLAGS"
run $AUTORECONF $AUTORECONF_FLAGS || die "Cannot execute $AUTORECONF $AUTORECONF_FLAGS"
configure_target_platform
if [ "$CONFIGURE_OPTION" == "yes" ]; then
exit
fi
# Backwards compatibility
if [ -n "$VALGRIND" ]; then
MAKE_TARGET="valgrind"
fi
# Setup LIBTOOL_COMMAND if we need it
if [[ -f libtool ]]; then
LIBTOOL_COMMAND="./libtool --mode=execute"
fi
if [ -f docs/conf.py ]; then
if command_exists sphinx-build; then
make_target "man"
fi
fi
# If we are running under Jenkins we predetermine what tests we will run against
if [[ -n "$JENKINS_HOME" ]]; then
make_target_platform
elif [ "$MAKE_TARGET" == "gdb" ]; then
make_gdb
elif [ "$MAKE_TARGET" == "valgrind" ]; then
make_valgrind
elif [ "$MAKE_TARGET" == "jenkins" ]; then
# Set ENV MAKE_TARGET in order to override default of "all"
make_target_platform
elif [ -z "$MAKE_TARGET" ]; then
make_local
else
make_target $MAKE_TARGET
fi
}
export -n VCS_CHECKOUT
export -n PLATFORM
export -n TARGET_PLATFORM
CONFIGURE_OPTION=no
VCS_CHECKOUT=
PLATFORM=unknown
TARGET_PLATFORM=unknown
bootstrap $@
|
tony2001/libmemcached
|
bootstrap.sh
|
Shell
|
bsd-3-clause
| 9,830 |
# Remove all stored database artifacts (point clouds, poses, transforms).
# BUGFIX: use rm -f so an already-empty directory (where the glob stays a
# literal pattern) does not make rm report an error.
rm -f database/cvfh/*.pcd
rm -f database/pointCloud/*.pcd
rm -f database/poseArm/*.txt
rm -f database/poseObject/*.txt
rm -f database/transform/*.txt
|
jpmerc/perception3d
|
cleanDatabase.sh
|
Shell
|
bsd-3-clause
| 135 |
#!/bin/sh
export DESTDIR="${HOME}/prefix"
export PATH="$DESTDIR/usr/bin:$PATH"
export LD_LIBRARY_PATH="$DESTDIR/usr/lib" #FIXME should be lib64 for a 64bit build
export DYLD_LIBRARY_PATH="$LD_LIBRARY_PATH" # OSX
export PYTHONPATH="../python:$DESTDIR/var/libcrange/python"
#rm python/*.pyc
cd t
for i in *.t; do echo "Testing: $i"; ./$i || exit 1; done
|
eam/libcrange
|
source/testit.sh
|
Shell
|
bsd-3-clause
| 354 |
cmake .. \
-DCMAKE_C_COMPILER="/opt/cray/craype/2.2.1/bin/cc" \
-DCMAKE_CXX_COMPILER="/opt/cray/craype/2.2.1/bin/CC" \
-DCMAKE_C_FLAGS="-g -O2" \
-DCMAKE_CXX_FLAGS="-g -O2" \
-DENABLE_THREADS=ON \
-DENABLE_ZOLTAN=ON \
-DIS_TESTING=OFF \
-DZOLTAN_INCLUDE_DIR="/global/project/projectdirs/mp288/edison/petsc-3.5.4-complex/cray-mpich-7.2/include" \
-DZOLTAN_LIBRARY="/global/project/projectdirs/mp288/edison/petsc-3.5.4-complex/cray-mpich-7.2/lib/libzoltan.a" \
-DPARMETIS_INCLUDE_DIR="/global/project/projectdirs/mp288/edison/petsc-3.5.4-complex/cray-mpich-7.2/include" \
-DPARMETIS_LIBRARY="/global/project/projectdirs/mp288/edison/petsc-3.5.4-complex/cray-mpich-7.2/lib/libparmetis.a" \
-DMETIS_LIBRARY="/global/project/projectdirs/mp288/edison/petsc-3.5.4-complex/cray-mpich-7.2/lib/libmetis.a" \
-DCMAKE_INSTALL_PREFIX="/global/project/projectdirs/mp288/edison/scorec/Oct2015" \
-DCMAKE_BUILD_TYPE=Debug
|
SCOREC/m3dc1_scorec
|
config-files/core-edison-config.sh
|
Shell
|
bsd-3-clause
| 934 |
#!/bin/sh -e
#
# Copyright (c) 2015 Wi-Fi Alliance
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER
# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE
# USE OR PERFORMANCE OF THIS SOFTWARE.
#
# File: WTGService.sh - The script to control Wi-Fi Test Suite WTG Service
# This Service start PC-Endpoint and all the required control agents(for Testbed STAs)
# based on the configuration file - WTG.conf
#
#
CONF_FILE=/etc/WTG.conf
SERVICE=/usr/bin/WTG
case "$1" in
start)
echo "Starting Wi-Fi Test Suite WTG Service"
# $SERVICE $CONF_FILE 2>&1 >/dev/null&
$SERVICE $CONF_FILE 2>&1 &
;;
restart|reload|force-reload)
echo "Stopping Wi-Fi Test Suite WTG Service"
/usr/bin/killall -9 $SERVICE wfa_dut wfa_ca >/dev/null 2>&1
echo "Starting Wi-Fi Test Suite WTG Service"
$SERVICE $CONF_FILE >/dev/null 2>&1 &
exit 3
;;
stop)
echo "Stopping Wi-Fi Test Suite WTG Service"
/usr/bin/killall -9 $SERVICE wfa_dut wfa_ca >/dev/null 2>&1
;;
*)
# This case is called by /etc/init.d/rc.local on system bootup
echo "Starting Wi-Fi Test Suite WTG Service..."
$SERVICE $CONF_FILE >/dev/null 2>&1 &
exit 0
;;
esac
exit 0
|
liyinsg/Wi-FiTestSuite-Linux-DUT
|
WTGService/WTGService.sh
|
Shell
|
isc
| 1,809 |
#!/bin/bash
#
# Author: Florian Pelgrim
# Email: [email protected]
# URL: https://github.com/craneworks/pki-manager
# License: MIT (see LICENSE for more informations)
# Copyright 2013
#
# Helper script to generate a crl from given config file
#
# Script vars
SCRIPT=`basename $0`
source ./etc/vars
main() {
CRLFILE="$DIR/$CRLDIR/$CA$CRLEXT"
PUBCRLFILE="$DIR/$PUBDIR/$CA$CRLEXT"
gen
export_der
}
gen() {
echo "Init crl file"
openssl ca -gencrl \
-config $CFG \
$PASSWORD \
-out $CRLFILE
check $?
}
export_der() {
echo "Export crl into public format der"
openssl crl \
-in $CRLFILE \
-out $PUBCRLFILE \
-outform der
check $?
}
# Abort the script when the previous command's exit status (passed as $1)
# indicates failure; a status of 0 falls through silently.
check() {
  if [ "$1" -gt 0 ]; then
    echo "An error occured"
    echo "Return code was $1"
    exit 1
  fi
}
help() {
echo "
Usage: $SCRIPT [ARGS] CFG CA
Helper script to generate a CRL from given config file
CFG Path to config file
CA Name of the CA
-h, --help Shows up this help
--password-file Password file to decrypt the key
"
}
# Parse options, then treat the first two non-option arguments as CFG and CA
# and hand off to main.
while :
do
    case $1 in
        -h|--help)
            help
            exit 0
            ;;
        --password-file)
            PASSWORD="-passin file:$2"
            shift 2
            ;;
        --password-file=*)
            # BUGFIX: was "file:{$1#*=}" — the missing $ before the brace
            # produced the literal text "{--password-file=...#*=}" instead
            # of the file name after '='.
            PASSWORD="-passin file:${1#*=}"
            shift
            ;;
        *)
            CFG=$1
            CA=$2
            main
            exit 0
            ;;
    esac
done
|
craneworks/pki-manager
|
bin/helpers/create-crl.sh
|
Shell
|
mit
| 1,629 |
#!/usr/bin/env sh
# generated from catkin/cmake/templates/env.sh.in
if [ $# -eq 0 ] ; then
/bin/echo "Entering environment at '/home/meka/mekabot/m3meka/ros/shm_led_mouth/catkin_generated', type 'exit' to leave"
. "/home/meka/mekabot/m3meka/ros/shm_led_mouth/catkin_generated/setup_cached.sh"
"$SHELL" -i
/bin/echo "Exiting environment at '/home/meka/mekabot/m3meka/ros/shm_led_mouth/catkin_generated'"
else
. "/home/meka/mekabot/m3meka/ros/shm_led_mouth/catkin_generated/setup_cached.sh"
exec "$@"
fi
|
CentralLabFacilities/m3meka
|
ros/shm_led_mouth/catkin_generated/env_cached.sh
|
Shell
|
mit
| 515 |
#!/bin/bash
if [[ $TRAVIS_BRANCH == 'master' ]]
then
echo "Building Site Configs"
grunt build-config
# echo "Building Chrome Extension"
# grunt chrome-extension
fi
|
waltzio/waltz
|
travis-before.sh
|
Shell
|
mit
| 169 |
#!/bin/bash
FN="AffymetrixDataTestFiles_0.26.0.tar.gz"
URLS=(
"https://bioconductor.org/packages/3.11/data/experiment/src/contrib/AffymetrixDataTestFiles_0.26.0.tar.gz"
"https://bioarchive.galaxyproject.org/AffymetrixDataTestFiles_0.26.0.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-affymetrixdatatestfiles/bioconductor-affymetrixdatatestfiles_0.26.0_src_all.tar.gz"
)
MD5="03e0b770e79cd9a5e83452b692805efd"
# Use a staging area in the conda dir rather than temp dirs, both to avoid
# permission issues as well as to have things downloaded in a predictable
# manner.
STAGING=$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM
mkdir -p $STAGING
TARBALL=$STAGING/$FN
SUCCESS=0
# Try each mirror in turn until one yields a tarball with the expected md5.
for URL in "${URLS[@]}"; do
  curl "$URL" > "$TARBALL"
  [[ $? == 0 ]] || continue
  # Platform-specific md5sum checks.
  # Cleanup: the original "else if ... fi fi" nesting is flattened to elif;
  # expansions are quoted.  Behavior is unchanged.
  if [[ $(uname -s) == "Linux" ]]; then
    if md5sum -c <<<"$MD5 $TARBALL"; then
      SUCCESS=1
      break
    fi
  elif [[ $(uname -s) == "Darwin" ]]; then
    if [[ $(md5 "$TARBALL" | cut -f4 -d " ") == "$MD5" ]]; then
      SUCCESS=1
      break
    fi
  fi
done
if [[ $SUCCESS != 1 ]]; then
echo "ERROR: post-link.sh was unable to download any of the following URLs with the md5sum $MD5:"
printf '%s\n' "${URLS[@]}"
exit 1
fi
# Install and clean up
R CMD INSTALL --library=$PREFIX/lib/R/library $TARBALL
rm $TARBALL
rmdir $STAGING
|
roryk/recipes
|
recipes/bioconductor-affymetrixdatatestfiles/post-link.sh
|
Shell
|
mit
| 1,367 |
#!/usr/bin/env bash
set -Eeuxo pipefail
# Force-push master upstream, then nudge the go-argcvapis mirror with an
# empty commit so its CI regenerates against the updated upstream.
# NOTE(review): set -x traces every command and ${GH_TOKEN} is embedded in
# the push URLs — confirm the CI provider masks the token in build logs.
git push https://${GH_TOKEN}@github.com/argcv/argcv.git master --force
cd /tmp
git clone https://github.com/argcv/go-argcvapis.git
cd go-argcvapis
git commit --allow-empty -m "Upstream is updated"
git push https://${GH_TOKEN}@github.com/argcv/go-argcvapis.git master
|
yuikns/argcv
|
.github/travis/deploy.sh
|
Shell
|
mit
| 310 |
#!/usr/bin/env sh
# -*- Mode: sh -*-
# tar-world.sh --- tar up important configuration files
# Copyright (C) 2015-2020 Dan Harms (dharms)
# Author: Dan Harms <[email protected]>
# Created: Friday, May 29, 2015
# Version: 1.0
# Modified Time-stamp: <2020-09-24 10:10:47 dharms>
# Modified by: Dan Harms
# Keywords: configuration
tar=$TAR
user=$(id -nu)
os=$(uname)
host=$(hostname -s)
site="$SITE"
dest=world.tar
verbose=
if [ -z "$tar" ]; then
tar=$(which tar)
echo "Using $tar"
fi
if [ -z "$tar" ]; then
echo "! no tar available; quitting"
exit 1
fi
if [ $# -gt 0 ] ; then
dest=$1
shift
fi
echo user is "$user"
echo os is "$os"
echo host is "$host"
echo site is "$site"
echo Generating "$dest"...
if [ -f "$dest" ] ; then
rm -f "$dest"
fi
$tar c"$verbose"f "$dest" config doc src .gnupg .fonts .config .terminfo .proviso.d
$tar u"$verbose"f "$dest" --exclude=*.elc .emacs.d
$tar u"$verbose"f "$dest" --transform=s%ext%.emacs.d/ext% ext
$tar u"$verbose"f "$dest" --transform=s/scripts/bin/ scripts
$tar u"$verbose"f "$dest" --transform=s/dotfiles\\/// dotfiles
$tar u"$verbose"f "$dest" --transform=s/bash\\/// bash
$tar u"$verbose"f "$dest" --transform=s/tcsh\\/// tcsh
$tar u"$verbose"f "$dest" --transform=s%user/"$user"\\/%% user/"$user"
$tar u"$verbose"f "$dest" --transform=s%os/"$os"\\/%% os/"$os"
$tar u"$verbose"f "$dest" --transform=s%host/"$host"\\/%% host/"$host"
$tar u"$verbose"f "$dest" --transform=s%site/"$site"\\/%% site/"$site"
# if [ -d site/xr ]; then
# echo "Also transferring select settings from site xr"
# $tar u"$verbose"f "$dest" --transform=s%site/xr\\/%% site/xr/.emacs.d/custom
# $tar u"$verbose"f "$dest" --transform=s%site/xr\\/%% site/xr/.emacs.d/settings/host/hosts
# $tar u"$verbose"f "$dest" --transform=s%site/xr\\/%% site/xr/.emacs.d/settings/site/xr
# $tar u"$verbose"f "$dest" --transform=s%site/xr\\/%% site/xr/.emacs.d/xr.bmk
# $tar u"$verbose"f "$dest" --transform=s%site/xr\\/%% site/xr/.proviso.d
# fi
echo ...done generating "$dest"
# tar-world.sh ends here
|
articuluxe/harmsway
|
host/cathode/bin/tar-world.sh
|
Shell
|
mit
| 2,063 |
#!/usr/bin/env bash
# Go toolchain environment (profile.d snippet).
# Resulting PATH layout: <GOROOT>/bin : <previous PATH> : <GOPATH>/bin
export GOROOT="/usr/local/src/go"
export GOPATH="/go"
export PATH="${GOROOT}/bin:${PATH}:${GOPATH}/bin"
|
viljaste/docker-registry
|
src/registry/build/modules/registry/files/etc/profile.d/go.sh
|
Shell
|
mit
| 150 |
#!/bin/sh
# Delete a Kafka topic via the local ZooKeeper ensemble.
# Usage: delete-topic.sh <Topic Name>
# Check Command-Line Arguments.
USAGE="usage: ${0##*/} <Topic Name>"
if [ $# -eq 0 ] ; then
   echo "No arguments supplied - ${USAGE}"
   exit 1
fi
if [ $# -ne 1 ] ; then
   echo "Incorrect # of arguments - ${USAGE}"
   exit 1
fi
# Quote the topic name: a name supplied as one quoted argument containing
# spaces or glob characters must reach kafka-topics as a single word
# (previously the unquoted $1 was re-split/glob-expanded by the shell).
kafka-topics --zookeeper localhost:2181 --delete --topic "$1"
|
tmarrs/json-at-work
|
chapter-10/scripts/delete-topic.sh
|
Shell
|
mit
| 300 |
# Install the Vagrant plugins this environment relies on.
# Project pages:
#   https://github.com/dotless-de/vagrant-vbguest
#   https://github.com/dergachev/vagrant-vbox-snapshot
#   https://github.com/fgrehm/vagrant-pristine
#   https://github.com/nickryand/vagrant-multi-putty
#   https://github.com/fgrehm/vagrant-cachier
for plugin in \
  vagrant-vbguest \
  vagrant-vbox-snapshot \
  vagrant-pristine \
  vagrant-multi-putty \
  vagrant-cachier
do
  vagrant plugin install "$plugin"
done
|
DieterReuter/meanstack-ide
|
vagrant-plugins.sh
|
Shell
|
mit
| 453 |
#!/bin/bash
# Time one FC-net MNIST training run (dlbench harness).
# Required environment: deviceId, batch_size, epochs.
start=$(date +%s.%N)
# -p: a scratch dir left behind by an aborted previous run must not make
# mkdir fail and skew the measurement (plain mkdir errored on reruns).
mkdir -p multigpu-trained
CUDA_VISIBLE_DEVICES="$deviceId" python fcn5_mnist.py --batch_size="$batch_size" --epochs="$epochs" --device_id="$deviceId"
end=$(date +%s.%N)
# bc handles the fractional-second subtraction that shell $(( )) cannot.
runtime=$( echo "$end - $start" | bc -l )
echo "finished with execute time: ${runtime}"
rm -rf multigpu-trained
|
hclhkbu/dlbench
|
tools/tensorflow/fc/t.sh
|
Shell
|
mit
| 303 |
#!/bin/sh
set -eu
# No args.
# Example usage: sh winetricks-update.sh
# Remove existing winetricks.
sudo aptdcon -r winetricks
# Fetch and install winetricks.
# -O pins the output filename: without it, a pre-existing ./winetricks
# (e.g. from an interrupted earlier run) makes wget save to winetricks.1,
# and the chmod/mv below would then install the stale file.
wget -O winetricks https://raw.githubusercontent.com/Winetricks/winetricks/master/src/winetricks
chmod +x winetricks
sudo mv winetricks /usr/local/bin
# Update winetricks.
sudo winetricks --self-update
|
iterami/AutomationScripts
|
wine/winetricks-update.sh
|
Shell
|
cc0-1.0
| 351 |
#!/bin/bash
# Compute order-parameter data for the sn-1 (G1) and sn-2 (G2) acyl chains
# of POPC at low hydration from a published trajectory (Zenodo record 13945).
# Each output line: carbon index, then the value computed for each of the
# carbon's two hydrogens (H1, H2).
wget https://zenodo.org/record/13945/files/popcRUN4.tpr
wget https://zenodo.org/record/13945/files/popcRUN4.trr
tprname=popcRUN4.tpr
trajname=popcRUN4.trr
trajgroname=analTMP.gro
sn1outname=OrderParamSN1lowHYD.dat
sn2outname=OrderParamSN2lowHYD.dat
# Translates generic atom names (M_..._M) into the force-field-specific
# atom names present in the trajectory.
mappingFILE=../MAPPING/mappingPOPCcharmm.txt
analFILE=../../nmrlipids.blogspot.fi/scripts/gro_OP.awk
# Dump the whole "System" group to a text .gro trajectory, with molecules
# made whole per residue (-pbc res), starting from t=0.
echo System | /home/ollilas1/gromacs/gromacs465/bin/trjconv -f $trajname -s $tprname -o $trajgroname -pbc res -b 0
# Chain G1 (sn-1): carbons 3..16.
for(( j = 3 ; j <= 16; j=j+1 ))
do
# Resolve force-field names for carbon j and its two hydrogens.
Cname=$(grep M_G1C"$j"_M $mappingFILE | awk '{printf "%5s\n",$2}')
H1name=$(grep M_G1C"$j"H1_M $mappingFILE | awk '{printf "%5s\n",$2}')
H2name=$(grep M_G1C"$j"H2_M $mappingFILE | awk '{printf "%5s\n",$2}')
# gro_OP.awk evaluates one C-H pair over the whole .gro trajectory.
H1op=$(awk -v Cname="$Cname" -v Hname="$H1name" -f $analFILE $trajgroname)
H2op=$(awk -v Cname="$Cname" -v Hname="$H2name" -f $analFILE $trajgroname)
echo $j $H1op $H2op >> $sn1outname
done
# Chain G2 (sn-2): carbons 3..18.
for(( j = 3 ; j <= 18; j=j+1 ))
do
Cname=$(grep M_G2C"$j"_M $mappingFILE | awk '{printf "%5s\n",$2}')
H1name=$(grep M_G2C"$j"H1_M $mappingFILE | awk '{printf "%5s\n",$2}')
H2name=$(grep M_G2C"$j"H2_M $mappingFILE | awk '{printf "%5s\n",$2}')
H1op=$(awk -v Cname="$Cname" -v Hname="$H1name" -f $analFILE $trajgroname)
H2op=$(awk -v Cname="$Cname" -v Hname="$H2name" -f $analFILE $trajgroname)
echo $j $H1op $H2op >> $sn2outname
done
|
jmelcr/NmrLipidsCholXray
|
scratch/POPCcharmm/calcORDPchainsLOWhydration.sh
|
Shell
|
gpl-2.0
| 1,397 |
#!/usr/bin/env bash
# Author: Jon Schipp
########
# Examples:
# 1.) Return critical if stat does not exit successfully
# $ ./check_filesystem_errors.sh -p /mnt -d 2
#
# Nagios plug-in: stat() every directory under a starting point, down to a
# configurable depth, and go CRITICAL if any stat fails — surfacing
# filesystem input/output errors (e.g. failing disk, wedged mount).
# Nagios Exit Codes
OK=0
WARNING=1
CRITICAL=2
UNKNOWN=3

# Print command-line help.
usage()
{
cat <<EOF
Nagios plug-in that recursively checks for filesystem input/output
errors by directory using stat.
Options:
-p <dir> Directory or mountpoint to begin
-d <int> Depth i.e. level of subdirectories to check (def: 1)
EOF
}

# Exit with a hint unless at least $1 arguments were supplied.
argcheck() {
  if [ "$ARGC" -lt "$1" ]; then
    echo "Please specify an argument!, try $0 -h for more information"
    exit 1
  fi
}

DEPTH=1
CHECK=0
COUNT=0
ARGC=$#

# Print warning and exit if less than n arguments specified
argcheck 1

# option and argument handling
while getopts "hp:d:" OPTION
do
  case $OPTION in
    h)
      usage
      exit $UNKNOWN
      ;;
    p)
      CHECK=1
      DIR=$OPTARG
      ;;
    d)
      DEPTH=$OPTARG
      ;;
    *)
      exit $UNKNOWN
      ;;
  esac
done

if [ $CHECK -eq 1 ]; then
  # "$DIR" / "$DEPTH" are quoted so a starting path containing spaces or
  # glob characters reaches find intact (they were unquoted: a real bug).
  # -print0 / xargs -0 keep odd directory names whole; the inner shell
  # exits 2 on a failed stat, which makes xargs return non-zero below.
  find "$DIR" -maxdepth "$DEPTH" -type d -print0 | xargs -0 -I file sh -c 'stat "file" 1>/dev/null 2>/dev/null || (echo "Error file" && exit 2)'
  if [ $? -gt 0 ]; then
    echo "CRITICAL: Found filesystem errors"
    exit $CRITICAL
  else
    echo "OK: No filesystem errors found"
    exit $OK
  fi
fi
|
zBMNForks/nagios-plugins
|
check_filesystem_stat.sh
|
Shell
|
gpl-2.0
| 1,342 |
run_bench() {
	# Delegate to the shellpack saladfork benchmark driver; the iteration
	# count comes from the MMTests config variable SALADFORK_ITERATIONS.
	$SHELLPACK_INCLUDE/shellpack-bench-saladfork \
		--iterations $SALADFORK_ITERATIONS
	# Propagate the benchmark's exit status to the caller.
	return $?
}
|
kdave/mmtests
|
drivers/driver-saladfork.sh
|
Shell
|
gpl-2.0
| 112 |
# Copyright: 2016 Masatake YAMATO
# License: GPL-2
# Tmain test driver; presumably the harness passes the path of the ctags
# binary under test as $1 — confirm against the Tmain runner.
CTAGS=$1
# Force C parsing of input.c while --languages simultaneously disables C:
# exercises the interaction between --language-force and --languages.
${CTAGS} --quiet --options=NONE --language-force=C --languages=-C input.c
|
lizh06/ctags
|
Tmain/languages-and-language-force-options.d/run.sh
|
Shell
|
gpl-2.0
| 135 |
# 400_restore_with_dp.sh
# Purpose: Restore script to restore file systems with Data Protector
#
# Restores every filesystem listed in $TMP_DIR/dp_list_of_fs_objects via
# Data Protector's omnir, restricted to filesystems present in the
# recovery layout ($VAR_DIR/recovery/mountpoint_device). Any omnir failure
# creates the $TMP_DIR/DP_GUI_RESTORE flag and stops the loop so a manual
# GUI restore can take over.
#
# Sample omnidb session detail for reference:
# /opt/omni/bin/omnidb -filesystem test.internal.it3.be:/ '/' -latest -detail
#
#SessionID : 2008/12/24-1
# Started : wo 24 dec 2008 11:42:21 CET
# Finished : wo 24 dec 2008 11:54:52 CET
# Object status : Completed
# Object size : 2043947 KB
# Backup type : Full
# Protection : Protected for 2 days
# Catalog retention : Protected permanently
# Access : Private
# Number of warnings : 3
# Number of errors : 0
# Device name : DDS4
# Backup ID : n/a
# Copy ID : 20 (Orig)
# The list of file systems to restore is listed in file $TMP_DIR/list_of_fs_objects
# per line we have something like: test.internal.it3.be:/ '/'
[ -f $TMP_DIR/DP_GUI_RESTORE ] && return # GUI restore explicitly requested
# we will loop over all objects listed in $TMP_DIR/dp_list_of_fs_objects
cat $TMP_DIR/dp_list_of_fs_objects | while read object
do
    # Per the sample above, a line looks like "host:/fs '/label'":
    # host_fs = "host:/fs", fs = "/fs", label = text between single quotes.
    host_fs=`echo ${object} | awk '{print $1}'`
    fs=`echo ${object} | awk '{print $1}' | cut -d: -f 2`
    label=`echo "${object}" | cut -d"'" -f 2`
    # only retain the latest backup which was completed successfully
    if grep -q "^${fs} " ${VAR_DIR}/recovery/mountpoint_device; then
        LogPrint "Restore filesystem ${object}"
        SessionID=`cat $TMP_DIR/dp_recovery_session`
        # Take the last device the session used to read the backup media.
        Device=`/opt/omni/bin/omnidb -session ${SessionID} -detail | grep Device | sort -u | tail -n 1 | awk '{print $4}'`
        # Restore the tree beneath the new root ($TARGET_FS_ROOT); output
        # goes to file descriptor 8.
        /opt/omni/bin/omnir -filesystem ${host_fs} "${label}" -full -session ${SessionID} -tree ${fs} -into $TARGET_FS_ROOT -sparse -device ${Device} -target `hostname` -log >&8
        # omnir exit status: 0 = success, 10 = finished with warnings.
        case $? in
            0) Log "Restore of ${fs} was successful." ;;
            10) Log "Restore of ${fs} finished with warnings." ;;
            *) LogPrint "Restore of ${fs} failed."
               > $TMP_DIR/DP_GUI_RESTORE
               break # get out of the loop
               ;;
        esac
    fi # if grep "^${fs}
done
|
terreActive/rear
|
usr/share/rear/restore/DP/default/400_restore_with_dp.sh
|
Shell
|
gpl-3.0
| 1,936 |
#!/bin/sh
#
# srecord - Manipulate EPROM load files
# Copyright (C) 2013 Peter Miller
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Regression test: srec_info must summarize a Tektronix Extended input
# whose records relate to the "read -tek-ex csum bug".
TEST_SUBJECT="read -tek-ex csum bug"
. test_prelude
# NOTE(review): this first test.in is dead — it is overwritten by the next
# here-document before anything reads it; looks like template boilerplate.
cat > test.in << 'fubar'
Hello, World!
fubar
if test $? -ne 0; then no_result; fi
# Actual Tektronix Extended input records for the test.
cat > test.in << 'fubar'
%4E6E280000000054495041635953580D5953580600030001000E00050200000300000001595358
%4E664800000020C0168311C0BB000077367735F7317726C4353C055A00BF0746674E5628008001
%0E81E800000000
fubar
if test $? -ne 0; then no_result; fi
# Expected srec_info summary for the input above.
cat > test.ok << 'fubar'
Format: Tektronix Extended
Execution Start Address: 00000000
Data: 0000 - 003F
fubar
if test $? -ne 0; then no_result; fi
srec_info test.in -tex > test.out
if test $? -ne 0; then fail; fi
diff test.ok test.out
if test $? -ne 0; then fail; fi
#
# The things tested here, worked.
# No other guarantees are made.
#
pass
# vim: set ts=8 sw=4 et :
|
freyc/SRecord
|
test/02/t0231a.sh
|
Shell
|
gpl-3.0
| 1,479 |
#!/bin/sh
# Emit a Debian control file for the ScalienDB package.
# usage:
# mkcontrol.sh target-control-file package-name version arch
PACKAGE=$2
VERSION=$3
ARCH=$4
# "$1" is quoted: an unquoted redirect target is word-split, so a path
# containing spaces (or an empty argument) produced an "ambiguous
# redirect" instead of writing the control file / a clear error.
cat > "$1" << EOF
Package: $PACKAGE
Version: $VERSION
Section: database
Priority: optional
Depends: libstdc++6
Architecture: $ARCH
Installed-Size: 1024
Maintainer: Scalien Software ([email protected])
Source: $PACKAGE
Description: ScalienDB
 ScalienDB is a consistently replicated, scalable key-value database.
EOF
|
timoc/scaliendb
|
script/debian/mkcontrol.sh
|
Shell
|
agpl-3.0
| 429 |
#!/usr/bin/env bash
# "pro" prompt theme.
# NOTE(review): the color variables (${red}, ${green}, ${blue},
# ${reset_color}) and the helpers git_prompt_vars / scm_prompt_info are
# not defined here — presumably provided by the framework (bash-it style)
# that sources this theme; confirm against the loading setup.
# Branch-state markers and SCM segment delimiters.
SCM_THEME_PROMPT_DIRTY=" ${red}✗"
SCM_THEME_PROMPT_CLEAN=" ${green}✓"
SCM_THEME_PROMPT_PREFIX=" ${blue}scm:( "
SCM_THEME_PROMPT_SUFFIX="${blue} )"
GIT_THEME_PROMPT_DIRTY=" ${red}✗"
GIT_THEME_PROMPT_CLEAN=" ${green}✓"
GIT_THEME_PROMPT_PREFIX="${green}git:( "
GIT_THEME_PROMPT_SUFFIX="${green} )"
# Render the SCM prompt segment from the SCM_* variables that
# git_prompt_vars populates.
function git_prompt_info {
  git_prompt_vars
  echo -e "$SCM_PREFIX$SCM_BRANCH$SCM_STATE$SCM_SUFFIX"
}
# Rebuild PS1: host, basename of cwd, SCM segment, "$".
function prompt() {
  PS1="\h: \W $(scm_prompt_info)${reset_color} $ "
}
# Re-run before every prompt.
PROMPT_COMMAND=prompt
|
huang-qiao/Tools
|
dot_files/bash/themes/pro/pro.theme.bash
|
Shell
|
apache-2.0
| 526 |
#!/bin/sh
# IDE test fixture (BashSupport): <caret> marks the cursor position where
# variable-name completion is invoked on the prefix $PW.
# NOTE(review): presumably PWD_MINE (declared before the caret) is the
# expected completion while PWD_MINE_NO_OK is declared only after it —
# confirm against the test case that loads this file.
PWD_MINE=
echo $PW<caret>
export PWD_MINE_NO_OK=
|
jansorg/BashSupport
|
testData/codeInsight/completion/variableNameCompletion/globalCompletionInvocationTwo.bash
|
Shell
|
apache-2.0
| 61 |
#!/bin/bash
# Sweep the OCR evaluator over several iteration counts and four training
# sets (F5/F10/F15/F20), averaging 100 runs per configuration into data.txt.
# printf is used for the tab-separated output: the original `echo "\t"`
# wrote literal backslash-t sequences, because bash's echo does not
# interpret escapes without -e.
printf '#ITS\t5\t10\t15\t20\n' > data.txt
folder=$(pwd)
for i in 10 20 30 40 50 60 70 80 90 100 120 150 200 500
do
    s5=0
    s10=0
    s15=0
    s20=0
    for j in {1..100}
    do
        echo "$i $j"
        # Accumulate each run's score with bc (fractional arithmetic).
        a=$("$folder/build/eval_ocr" "$i" TrainingDataF5)
        s5=$(echo "scale=4; $s5+$a" | bc -q 2>/dev/null)
        a=$("$folder/build/eval_ocr" "$i" TrainingDataF10)
        s10=$(echo "scale=4; $s10+$a" | bc -q 2>/dev/null)
        a=$("$folder/build/eval_ocr" "$i" TrainingDataF15)
        s15=$(echo "scale=4; $s15+$a" | bc -q 2>/dev/null)
        a=$("$folder/build/eval_ocr" "$i" TrainingDataF20)
        s20=$(echo "scale=4; $s20+$a" | bc -q 2>/dev/null)
    done
    printf '%s\t%s\t%s\t%s\t%s\n' "$i" "$s5" "$s10" "$s15" "$s20"
    printf '%s\t%s\t%s\t%s\t%s\n' "$i" "$s5" "$s10" "$s15" "$s20" >> data.txt
done
|
johmathe/computer_vision
|
PlateRecognition/utils/eval.sh
|
Shell
|
apache-2.0
| 719 |
# ltmain.sh - Provide generalized library-building support services.
# NOTE: Changing this file will not affect anything until you rerun configure.
#
# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005
# Free Software Foundation, Inc.
# Originally by Gordon Matzigkeit <[email protected]>, 1996
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# As a special exception to the GNU General Public License, if you
# distribute this file as part of a program that contains a
# configuration script generated by Autoconf, you may include it under
# the same distribution terms that you use for the rest of that program.
basename="s,^.*/,,g"
# Work around backward compatibility issue on IRIX 6.5. On IRIX 6.4+, sh
# is ksh but when the shell is invoked as "sh" and the current value of
# the _XPG environment variable is not equal to 1 (one), the special
# positional parameter $0, within a function call, is the name of the
# function.
progpath="$0"
# The name of this program:
progname=`echo "$progpath" | $SED $basename`
modename="$progname"
# Global variables:
EXIT_SUCCESS=0
EXIT_FAILURE=1
PROGRAM=ltmain.sh
PACKAGE=libtool
VERSION=1.5.18
TIMESTAMP=" (1.1220.2.245 2005/05/16 08:55:27)"
# See if we are running on zsh, and set the options which allow our
# commands through without removal of \ escapes.
if test -n "${ZSH_VERSION+set}" ; then
setopt NO_GLOB_SUBST
fi
# Check that we have a working $echo.
if test "X$1" = X--no-reexec; then
# Discard the --no-reexec flag, and continue.
shift
elif test "X$1" = X--fallback-echo; then
# Avoid inline document here, it may be left over
:
elif test "X`($echo '\t') 2>/dev/null`" = 'X\t'; then
# Yippee, $echo works!
:
else
# Restart under the correct shell, and then maybe $echo will work.
exec $SHELL "$progpath" --no-reexec ${1+"$@"}
fi
if test "X$1" = X--fallback-echo; then
# used as fallback echo
shift
cat <<EOF
$*
EOF
exit $EXIT_SUCCESS
fi
default_mode=
help="Try \`$progname --help' for more information."
magic="%%%MAGIC variable%%%"
mkdir="mkdir"
mv="mv -f"
rm="rm -f"
# Sed substitution that helps us do robust quoting. It backslashifies
# metacharacters that are still active within double-quoted strings.
Xsed="${SED}"' -e 1s/^X//'
sed_quote_subst='s/\([\\`\\"$\\\\]\)/\\\1/g'
# test EBCDIC or ASCII
case `echo A|tr A '\301'` in
A) # EBCDIC based system
SP2NL="tr '\100' '\n'"
NL2SP="tr '\r\n' '\100\100'"
;;
*) # Assume ASCII based system
SP2NL="tr '\040' '\012'"
NL2SP="tr '\015\012' '\040\040'"
;;
esac
# NLS nuisances.
# Only set LANG and LC_ALL to C if already set.
# These must not be set unconditionally because not all systems understand
# e.g. LANG=C (notably SCO).
# We save the old values to restore during execute mode.
if test "${LC_ALL+set}" = set; then
save_LC_ALL="$LC_ALL"; LC_ALL=C; export LC_ALL
fi
if test "${LANG+set}" = set; then
save_LANG="$LANG"; LANG=C; export LANG
fi
# Make sure IFS has a sensible default
lt_nl='
'
IFS=" $lt_nl"
if test "$build_libtool_libs" != yes && test "$build_old_libs" != yes; then
$echo "$modename: not configured to build any kind of library" 1>&2
$echo "Fatal configuration error. See the $PACKAGE docs for more information." 1>&2
exit $EXIT_FAILURE
fi
# Global variables.
mode=$default_mode
nonopt=
prev=
prevopt=
run=
show="$echo"
show_help=
execute_dlfiles=
lo2o="s/\\.lo\$/.${objext}/"
o2lo="s/\\.${objext}\$/.lo/"
quote_scanset='[[~#^*{};<>?'"'"' ]'
#####################################
# Shell function definitions:
# This seems to be the best place for them
# func_win32_libid arg
# return the library type of file 'arg'
#
# Need a lot of goo to handle *both* DLLs and import libs
# Has to be a shell function in order to 'eat' the argument
# that is supplied when $file_magic_command is called.
# Classification is driven by file(1) output, refined for ar archives via
# objdump/nm; prints one of: "x86 archive import", "x86 archive static",
# "x86 DLL", "unknown".
func_win32_libid ()
{
  win32_libid_type="unknown"
  win32_fileres=`file -L $1 2>/dev/null`
  case $win32_fileres in
  *ar\ archive\ import\ library*) # definitely import
    win32_libid_type="x86 archive import"
    ;;
  *ar\ archive*) # could be an import, or static
    # A PE-format archive whose first symbols are all imports is treated
    # as an import library; otherwise as a static archive.
    if eval $OBJDUMP -f $1 | $SED -e '10q' 2>/dev/null | \
      $EGREP -e 'file format pe-i386(.*architecture: i386)?' >/dev/null ; then
      win32_nmres=`eval $NM -f posix -A $1 | \
	sed -n -e '1,100{/ I /{x;/import/!{s/^/import/;h;p;};x;};}'`
      if test "X$win32_nmres" = "Ximport" ; then
        win32_libid_type="x86 archive import"
      else
        win32_libid_type="x86 archive static"
      fi
    fi
    ;;
  *DLL*)
    win32_libid_type="x86 DLL"
    ;;
  *executable*) # but shell scripts are "executable" too...
    case $win32_fileres in
    *MS\ Windows\ PE\ Intel*)
      win32_libid_type="x86 DLL"
      ;;
    esac
    ;;
  esac
  $echo $win32_libid_type
}
# func_infer_tag arg
# Infer tagged configuration to use if any are available and
# if one wasn't chosen via the "--tag" command line option.
# Only attempt this if the compiler in the base compile
# command doesn't match the default compiler.
# arg is usually of the form 'gcc ...'
# Side effects: sets $tagname (and, for a matched tag, evaluates that
# tag's configuration out of $progpath); exits with EXIT_FAILURE when no
# tag can be inferred.
func_infer_tag ()
{
  # Set the commonly-used compilers for some tags.
  tag_compilers_CC="*cc *xlc"
  tag_compilers_CXX="*++ *CC"
  tag_compilers_F77="*77 *fort"
  base_compiler=`set -- "$@"; $echo $1`
  # If $tagname still isn't set, then try to infer if the default
  # "CC" tag applies by matching up the base compile command to some
  # compilers commonly used for the "CC" tag.
  if test -z "$tagname"; then
    z=CC
    eval "tag_compilers=\$tag_compilers_${z}"
    if test -n "$tag_compilers"; then
      for zp in $tag_compilers; do
        case $base_compiler in
          $zp)
            # The compiler in the base compile command matches
            # one of the common compilers for the current tag.
            # Assume this is the tagged configuration we want.
            tagname=$z
            break
            ;;
        esac
      done
    fi
  fi
  if test -n "$available_tags" && test -z "$tagname"; then
    CC_quoted=
    for arg in $CC; do
      # Double-quote args containing shell metacharacters.
      case $arg in
        *$quote_scanset* | *]* | *\|* | *\&* | *\(* | *\)* | "")
          arg="\"$arg\""
          ;;
      esac
      CC_quoted="$CC_quoted $arg"
    done
    case $@ in
      # Blanks in the command may have been stripped by the calling shell,
      # but not from the CC environment variable when configure was run.
      " $CC "* | "$CC "* | " `$echo $CC` "* | "`$echo $CC` "* | " $CC_quoted"* | "$CC_quoted "* | " `$echo $CC_quoted` "* | "`$echo $CC_quoted` "*) ;;
      # Blanks at the start of $base_compile will cause this to fail
      # if we don't check for them as well.
      *)
        for z in $available_tags; do
          if grep "^# ### BEGIN LIBTOOL TAG CONFIG: $z$" < "$progpath" > /dev/null; then
            # Evaluate the configuration.
            eval "`${SED} -n -e '/^# ### BEGIN LIBTOOL TAG CONFIG: '$z'$/,/^# ### END LIBTOOL TAG CONFIG: '$z'$/p' < $progpath`"
            CC_quoted=
            for arg in $CC; do
              # Double-quote args containing other shell metacharacters.
              case $arg in
                *$quote_scanset* | *]* | *\|* | *\&* | *\(* | *\)* | "")
                  arg="\"$arg\""
                  ;;
              esac
              CC_quoted="$CC_quoted $arg"
            done
            case "$@ " in
              " $CC "* | "$CC "* | " `$echo $CC` "* | "`$echo $CC` "* | " $CC_quoted"* | "$CC_quoted "* | " `$echo $CC_quoted` "* | "`$echo $CC_quoted` "*)
                # The compiler in the base compile command matches
                # the one in the tagged configuration.
                # Assume this is the tagged configuration we want.
                tagname=$z
                break
                ;;
            esac
            # If $tagname still isn't set, then try to infer it by
            # matching up the base compile command to some compilers
            # commonly used for certain tags.
            base_compiler=`set -- "$@"; $echo $1`
            eval "tag_compilers=\$tag_compilers_${z}"
            if test -n "$tag_compilers"; then
              for zp in $tag_compilers; do
                case $base_compiler in
                  $zp)
                    # The compiler in the base compile command matches
                    # one of the common compilers for the current tag.
                    # Assume this is the tagged configuration we want.
                    tagname=$z
                    break
                    ;;
                esac
              done
              if test -n "$tagname"; then
                break
              fi
            fi
          fi
        done
        # If $tagname still isn't set, then no tagged configuration
        # was found and let the user know that the "--tag" command
        # line option must be used.
        if test -z "$tagname"; then
          $echo "$modename: unable to infer tagged configuration"
          $echo "$modename: specify a tag with \`--tag'" 1>&2
          exit $EXIT_FAILURE
        # else
        #   $echo "$modename: using $tagname tagged configuration"
        fi
        ;;
    esac
  fi
}
# func_extract_an_archive dir oldlib
# Unpack static archive 'oldlib' inside directory 'dir' with "$AR x",
# then verify that no two archive members shared a name — duplicate
# member names would silently overwrite each other during extraction.
func_extract_an_archive ()
{
  f_ex_an_ar_dir="$1"; shift
  f_ex_an_ar_oldlib="$1"
  $show "(cd $f_ex_an_ar_dir && $AR x $f_ex_an_ar_oldlib)"
  $run eval "(cd \$f_ex_an_ar_dir && $AR x \$f_ex_an_ar_oldlib)" || exit $?
  # sort -uc fails iff the sorted member listing contains duplicates.
  if ($AR t "$f_ex_an_ar_oldlib" | sort | sort -uc >/dev/null 2>&1); then
   :
  else
    $echo "$modename: ERROR: object name conflicts: $f_ex_an_ar_dir/$f_ex_an_ar_oldlib" 1>&2
    exit $EXIT_FAILURE
  fi
}
# func_extract_archives gentop oldlib ...
# Extract each archive named after 'gentop' into its own scratch
# subdirectory under gentop, and collect all resulting object files in
# $func_extract_archives_result. On Darwin, fat (multi-architecture)
# archives are split per-arch with lipo, extracted, and the thin objects
# re-fused into fat objects.
func_extract_archives ()
{
  my_gentop="$1"; shift
  my_oldlibs=${1+"$@"}
  my_oldobjs=""
  my_xlib=""
  my_xabs=""
  my_xdir=""
  my_status=""
  # Recreate the top-level scratch directory from a clean slate.
  $show "${rm}r $my_gentop"
  $run ${rm}r "$my_gentop"
  $show "$mkdir $my_gentop"
  $run $mkdir "$my_gentop"
  my_status=$?
  if test "$my_status" -ne 0 && test ! -d "$my_gentop"; then
    exit $my_status
  fi
  for my_xlib in $my_oldlibs; do
    # Extract the objects.
    # Absolute vs relative archive path (also matches DOS drive letters).
    case $my_xlib in
      [\\/]* | [A-Za-z]:[\\/]*) my_xabs="$my_xlib" ;;
      *) my_xabs=`pwd`"/$my_xlib" ;;
    esac
    my_xlib=`$echo "X$my_xlib" | $Xsed -e 's%^.*/%%'`
    my_xdir="$my_gentop/$my_xlib"
    $show "${rm}r $my_xdir"
    $run ${rm}r "$my_xdir"
    $show "$mkdir $my_xdir"
    $run $mkdir "$my_xdir"
    status=$?
    if test "$status" -ne 0 && test ! -d "$my_xdir"; then
      exit $status
    fi
    case $host in
    *-darwin*)
      $show "Extracting $my_xabs"
      # Do not bother doing anything if just a dry run
      if test -z "$run"; then
	darwin_orig_dir=`pwd`
	cd $my_xdir || exit $?
	darwin_archive=$my_xabs
	darwin_curdir=`pwd`
	darwin_base_archive=`$echo "X$darwin_archive" | $Xsed -e 's%^.*/%%'`
	# lipo -info mentions "Architectures" only for fat archives.
	darwin_arches=`lipo -info "$darwin_archive" 2>/dev/null | $EGREP Architectures 2>/dev/null`
	if test -n "$darwin_arches"; then
	  darwin_arches=`echo "$darwin_arches" | $SED -e 's/.*are://'`
	  darwin_arch=
	  $show "$darwin_base_archive has multiple architectures $darwin_arches"
	  # Split out and extract each architecture's thin archive.
	  for darwin_arch in $darwin_arches ; do
	    mkdir -p "unfat-$$/${darwin_base_archive}-${darwin_arch}"
	    lipo -thin $darwin_arch -output "unfat-$$/${darwin_base_archive}-${darwin_arch}/${darwin_base_archive}" "${darwin_archive}"
	    cd "unfat-$$/${darwin_base_archive}-${darwin_arch}"
	    func_extract_an_archive "`pwd`" "${darwin_base_archive}"
	    cd "$darwin_curdir"
	    $rm "unfat-$$/${darwin_base_archive}-${darwin_arch}/${darwin_base_archive}"
	  done # $darwin_arches
	  ## Okay now we have a bunch of thin objects, gotta fatten them up :)
	  darwin_filelist=`find unfat-$$ -type f -name \*.o -print -o -name \*.lo -print| xargs basename | sort -u | $NL2SP`
	  darwin_file=
	  darwin_files=
	  for darwin_file in $darwin_filelist; do
	    darwin_files=`find unfat-$$ -name $darwin_file -print | $NL2SP`
	    # Re-fuse the per-arch thin objects into a single fat object.
	    lipo -create -output "$darwin_file" $darwin_files
	  done # $darwin_filelist
	  ${rm}r unfat-$$
	  cd "$darwin_orig_dir"
	else
	  cd "$darwin_orig_dir"
	  func_extract_an_archive "$my_xdir" "$my_xabs"
	fi # $darwin_arches
      fi # $run
    ;;
    *)
      func_extract_an_archive "$my_xdir" "$my_xabs"
      ;;
    esac
    # Accumulate the extracted .o/.lo files for the caller.
    my_oldobjs="$my_oldobjs "`find $my_xdir -name \*.$objext -print -o -name \*.lo -print | $NL2SP`
  done
  func_extract_archives_result="$my_oldobjs"
}
# End of Shell function definitions
#####################################
# Darwin sucks
eval std_shrext=\"$shrext_cmds\"
# Parse our command line options once, thoroughly.
while test "$#" -gt 0
do
arg="$1"
shift
case $arg in
-*=*) optarg=`$echo "X$arg" | $Xsed -e 's/[-_a-zA-Z0-9]*=//'` ;;
*) optarg= ;;
esac
# If the previous option needs an argument, assign it.
if test -n "$prev"; then
case $prev in
execute_dlfiles)
execute_dlfiles="$execute_dlfiles $arg"
;;
tag)
tagname="$arg"
preserve_args="${preserve_args}=$arg"
# Check whether tagname contains only valid characters
case $tagname in
*[!-_A-Za-z0-9,/]*)
$echo "$progname: invalid tag name: $tagname" 1>&2
exit $EXIT_FAILURE
;;
esac
case $tagname in
CC)
# Don't test for the "default" C tag, as we know, it's there, but
# not specially marked.
;;
*)
if grep "^# ### BEGIN LIBTOOL TAG CONFIG: $tagname$" < "$progpath" > /dev/null; then
taglist="$taglist $tagname"
# Evaluate the configuration.
eval "`${SED} -n -e '/^# ### BEGIN LIBTOOL TAG CONFIG: '$tagname'$/,/^# ### END LIBTOOL TAG CONFIG: '$tagname'$/p' < $progpath`"
else
$echo "$progname: ignoring unknown tag $tagname" 1>&2
fi
;;
esac
;;
*)
eval "$prev=\$arg"
;;
esac
prev=
prevopt=
continue
fi
# Have we seen a non-optional argument yet?
case $arg in
--help)
show_help=yes
;;
--version)
$echo "$PROGRAM (GNU $PACKAGE) $VERSION$TIMESTAMP"
$echo
$echo "Copyright (C) 2005 Free Software Foundation, Inc."
$echo "This is free software; see the source for copying conditions. There is NO"
$echo "warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
exit $?
;;
--config)
${SED} -e '1,/^# ### BEGIN LIBTOOL CONFIG/d' -e '/^# ### END LIBTOOL CONFIG/,$d' $progpath
# Now print the configurations for the tags.
for tagname in $taglist; do
${SED} -n -e "/^# ### BEGIN LIBTOOL TAG CONFIG: $tagname$/,/^# ### END LIBTOOL TAG CONFIG: $tagname$/p" < "$progpath"
done
exit $?
;;
--debug)
$echo "$progname: enabling shell trace mode"
set -x
preserve_args="$preserve_args $arg"
;;
--dry-run | -n)
run=:
;;
--features)
$echo "host: $host"
if test "$build_libtool_libs" = yes; then
$echo "enable shared libraries"
else
$echo "disable shared libraries"
fi
if test "$build_old_libs" = yes; then
$echo "enable static libraries"
else
$echo "disable static libraries"
fi
exit $?
;;
--finish) mode="finish" ;;
--mode) prevopt="--mode" prev=mode ;;
--mode=*) mode="$optarg" ;;
--preserve-dup-deps) duplicate_deps="yes" ;;
--quiet | --silent)
show=:
preserve_args="$preserve_args $arg"
;;
--tag) prevopt="--tag" prev=tag ;;
--tag=*)
set tag "$optarg" ${1+"$@"}
shift
prev=tag
preserve_args="$preserve_args --tag"
;;
-dlopen)
prevopt="-dlopen"
prev=execute_dlfiles
;;
-*)
$echo "$modename: unrecognized option \`$arg'" 1>&2
$echo "$help" 1>&2
exit $EXIT_FAILURE
;;
*)
nonopt="$arg"
break
;;
esac
done
if test -n "$prevopt"; then
$echo "$modename: option \`$prevopt' requires an argument" 1>&2
$echo "$help" 1>&2
exit $EXIT_FAILURE
fi
# If this variable is set in any of the actions, the command in it
# will be execed at the end. This prevents here-documents from being
# left over by shells.
exec_cmd=
if test -z "$show_help"; then
# Infer the operation mode.
if test -z "$mode"; then
$echo "*** Warning: inferring the mode of operation is deprecated." 1>&2
$echo "*** Future versions of Libtool will require --mode=MODE be specified." 1>&2
case $nonopt in
*cc | cc* | *++ | gcc* | *-gcc* | g++* | xlc* | *CC)
mode=link
for arg
do
case $arg in
-c)
mode=compile
break
;;
esac
done
;;
*db | *dbx | *strace | *truss)
mode=execute
;;
*install*|cp|mv)
mode=install
;;
*rm)
mode=uninstall
;;
*)
# If we have no mode, but dlfiles were specified, then do execute mode.
test -n "$execute_dlfiles" && mode=execute
# Just use the default operation mode.
if test -z "$mode"; then
if test -n "$nonopt"; then
$echo "$modename: warning: cannot infer operation mode from \`$nonopt'" 1>&2
else
$echo "$modename: warning: cannot infer operation mode without MODE-ARGS" 1>&2
fi
fi
;;
esac
fi
# Only execute mode is allowed to have -dlopen flags.
if test -n "$execute_dlfiles" && test "$mode" != execute; then
$echo "$modename: unrecognized option \`-dlopen'" 1>&2
$echo "$help" 1>&2
exit $EXIT_FAILURE
fi
# Change the help message to a mode-specific one.
generic_help="$help"
help="Try \`$modename --help --mode=$mode' for more information."
# These modes are in order of execution frequency so that they run quickly.
case $mode in
# libtool compile mode
compile)
modename="$modename: compile"
# Get the compilation command and the source file.
base_compile=
srcfile="$nonopt" # always keep a non-empty value in "srcfile"
suppress_opt=yes
suppress_output=
arg_mode=normal
libobj=
later=
for arg
do
case "$arg_mode" in
arg )
# do not "continue". Instead, add this to base_compile
lastarg="$arg"
arg_mode=normal
;;
target )
libobj="$arg"
arg_mode=normal
continue
;;
normal )
# Accept any command-line options.
case $arg in
-o)
if test -n "$libobj" ; then
$echo "$modename: you cannot specify \`-o' more than once" 1>&2
exit $EXIT_FAILURE
fi
arg_mode=target
continue
;;
-static | -prefer-pic | -prefer-non-pic)
later="$later $arg"
continue
;;
-no-suppress)
suppress_opt=no
continue
;;
-Xcompiler)
arg_mode=arg # the next one goes into the "base_compile" arg list
continue # The current "srcfile" will either be retained or
;; # replaced later. I would guess that would be a bug.
-Wc,*)
args=`$echo "X$arg" | $Xsed -e "s/^-Wc,//"`
lastarg=
save_ifs="$IFS"; IFS=','
for arg in $args; do
IFS="$save_ifs"
# Double-quote args containing other shell metacharacters.
# Many Bourne shells cannot handle close brackets correctly
# in scan sets, so we specify it separately.
case $arg in
*$quote_scanset* | *]* | *\|* | *\&* | *\(* | *\)* | "")
arg="\"$arg\""
;;
esac
lastarg="$lastarg $arg"
done
IFS="$save_ifs"
lastarg=`$echo "X$lastarg" | $Xsed -e "s/^ //"`
# Add the arguments to base_compile.
base_compile="$base_compile $lastarg"
continue
;;
* )
# Accept the current argument as the source file.
# The previous "srcfile" becomes the current argument.
#
lastarg="$srcfile"
srcfile="$arg"
;;
esac # case $arg
;;
esac # case $arg_mode
# Aesthetically quote the previous argument.
lastarg=`$echo "X$lastarg" | $Xsed -e "$sed_quote_subst"`
case $lastarg in
# Double-quote args containing other shell metacharacters.
# Many Bourne shells cannot handle close brackets correctly
# in scan sets, and some SunOS ksh mistreat backslash-escaping
# in scan sets (worked around with variable expansion),
# and furthermore cannot handle '|' '&' '(' ')' in scan sets
# at all, so we specify them separately.
*$quote_scanset* | *]* | *\|* | *\&* | *\(* | *\)* | "")
lastarg="\"$lastarg\""
;;
esac
base_compile="$base_compile $lastarg"
done # for arg
case $arg_mode in
arg)
$echo "$modename: you must specify an argument for -Xcompile"
exit $EXIT_FAILURE
;;
target)
$echo "$modename: you must specify a target with \`-o'" 1>&2
exit $EXIT_FAILURE
;;
*)
# Get the name of the library object.
[ -z "$libobj" ] && libobj=`$echo "X$srcfile" | $Xsed -e 's%^.*/%%'`
;;
esac
# Recognize several different file suffixes.
# If the user specifies -o file.o, it is replaced with file.lo
xform='[cCFSifmso]'
case $libobj in
*.ada) xform=ada ;;
*.adb) xform=adb ;;
*.ads) xform=ads ;;
*.asm) xform=asm ;;
*.c++) xform=c++ ;;
*.cc) xform=cc ;;
*.ii) xform=ii ;;
*.class) xform=class ;;
*.cpp) xform=cpp ;;
*.cxx) xform=cxx ;;
*.f90) xform=f90 ;;
*.for) xform=for ;;
*.java) xform=java ;;
esac
libobj=`$echo "X$libobj" | $Xsed -e "s/\.$xform$/.lo/"`
case $libobj in
*.lo) obj=`$echo "X$libobj" | $Xsed -e "$lo2o"` ;;
*)
$echo "$modename: cannot determine name of library object from \`$libobj'" 1>&2
exit $EXIT_FAILURE
;;
esac
func_infer_tag $base_compile
for arg in $later; do
case $arg in
-static)
build_old_libs=yes
continue
;;
-prefer-pic)
pic_mode=yes
continue
;;
-prefer-non-pic)
pic_mode=no
continue
;;
esac
done
qlibobj=`$echo "X$libobj" | $Xsed -e "$sed_quote_subst"`
case $qlibobj in
*$quote_scanset* | *]* | *\|* | *\&* | *\(* | *\)* | "")
qlibobj="\"$qlibobj\"" ;;
esac
if test "X$libobj" != "X$qlibobj"; then
$echo "$modename: libobj name \`$libobj' may not contain shell special characters."
exit $EXIT_FAILURE
fi
objname=`$echo "X$obj" | $Xsed -e 's%^.*/%%'`
xdir=`$echo "X$obj" | $Xsed -e 's%/[^/]*$%%'`
if test "X$xdir" = "X$obj"; then
xdir=
else
xdir=$xdir/
fi
lobj=${xdir}$objdir/$objname
if test -z "$base_compile"; then
$echo "$modename: you must specify a compilation command" 1>&2
$echo "$help" 1>&2
exit $EXIT_FAILURE
fi
# Delete any leftover library objects.
if test "$build_old_libs" = yes; then
removelist="$obj $lobj $libobj ${libobj}T"
else
removelist="$lobj $libobj ${libobj}T"
fi
$run $rm $removelist
trap "$run $rm $removelist; exit $EXIT_FAILURE" 1 2 15
# On Cygwin there's no "real" PIC flag so we must build both object types
case $host_os in
cygwin* | mingw* | pw32* | os2*)
pic_mode=default
;;
esac
if test "$pic_mode" = no && test "$deplibs_check_method" != pass_all; then
# non-PIC code in shared libraries is not supported
pic_mode=default
fi
# Calculate the filename of the output object if compiler does
# not support -o with -c
if test "$compiler_c_o" = no; then
output_obj=`$echo "X$srcfile" | $Xsed -e 's%^.*/%%' -e 's%\.[^.]*$%%'`.${objext}
lockfile="$output_obj.lock"
removelist="$removelist $output_obj $lockfile"
trap "$run $rm $removelist; exit $EXIT_FAILURE" 1 2 15
else
output_obj=
need_locks=no
lockfile=
fi
# Lock this critical section if it is needed
# We use this script file to make the link, it avoids creating a new file
if test "$need_locks" = yes; then
# Hard locking: spin until we can hard-link this script to the lockfile.
# ln fails atomically while another process still holds the lock.
until $run ln "$progpath" "$lockfile" 2>/dev/null; do
$show "Waiting for $lockfile to be removed"
sleep 2
done
elif test "$need_locks" = warn; then
# Advisory mode: we cannot really lock, so if a lockfile already exists
# another compilation is (or was) using the same temporary object file.
# Abort loudly rather than risk corrupting its output.
if test -f "$lockfile"; then
$echo "\
*** ERROR, $lockfile exists and contains:
`cat $lockfile 2>/dev/null`
This indicates that another process is trying to use the same
temporary object file, and libtool could not work around it because
your compiler does not support \`-c' and \`-o' together. If you
repeat this compilation, it may succeed, by chance, but you had better
avoid parallel builds (make -j) in this platform, or get a better
compiler."
$run $rm $removelist
exit $EXIT_FAILURE
fi
# Record which source file owns the lock; the post-compile check
# compares the lockfile contents against $srcfile to detect races.
$echo "$srcfile" > "$lockfile"
fi
if test -n "$fix_srcfile_path"; then
eval srcfile=\"$fix_srcfile_path\"
fi
qsrcfile=`$echo "X$srcfile" | $Xsed -e "$sed_quote_subst"`
case $qsrcfile in
*$quote_scanset* | *]* | *\|* | *\&* | *\(* | *\)* | "")
qsrcfile="\"$qsrcfile\"" ;;
esac
$run $rm "$libobj" "${libobj}T"
# Create a libtool object file (analogous to a ".la" file),
# but don't create it if we're doing a dry run.
test -z "$run" && cat > ${libobj}T <<EOF
# $libobj - a libtool object file
# Generated by $PROGRAM - GNU $PACKAGE $VERSION$TIMESTAMP
#
# Please DO NOT delete this file!
# It is necessary for linking the library.
# Name of the PIC object.
EOF
# Only build a PIC object if we are building libtool libraries.
if test "$build_libtool_libs" = yes; then
# Without this assignment, base_compile gets emptied.
fbsd_hideous_sh_bug=$base_compile
if test "$pic_mode" != no; then
command="$base_compile $qsrcfile $pic_flag"
else
# Don't build PIC code
command="$base_compile $qsrcfile"
fi
if test ! -d "${xdir}$objdir"; then
$show "$mkdir ${xdir}$objdir"
$run $mkdir ${xdir}$objdir
status=$?
if test "$status" -ne 0 && test ! -d "${xdir}$objdir"; then
exit $status
fi
fi
if test -z "$output_obj"; then
# Place PIC objects in $objdir
command="$command -o $lobj"
fi
$run $rm "$lobj" "$output_obj"
$show "$command"
if $run eval "$command"; then :
else
test -n "$output_obj" && $run $rm $removelist
exit $EXIT_FAILURE
fi
if test "$need_locks" = warn &&
test "X`cat $lockfile 2>/dev/null`" != "X$srcfile"; then
$echo "\
*** ERROR, $lockfile contains:
`cat $lockfile 2>/dev/null`
but it should contain:
$srcfile
This indicates that another process is trying to use the same
temporary object file, and libtool could not work around it because
your compiler does not support \`-c' and \`-o' together. If you
repeat this compilation, it may succeed, by chance, but you had better
avoid parallel builds (make -j) in this platform, or get a better
compiler."
$run $rm $removelist
exit $EXIT_FAILURE
fi
# Just move the object if needed, then go on to compile the next one
if test -n "$output_obj" && test "X$output_obj" != "X$lobj"; then
$show "$mv $output_obj $lobj"
if $run $mv $output_obj $lobj; then :
else
error=$?
$run $rm $removelist
exit $error
fi
fi
# Append the name of the PIC object to the libtool object file.
test -z "$run" && cat >> ${libobj}T <<EOF
pic_object='$objdir/$objname'
EOF
# Allow error messages only from the first compilation.
if test "$suppress_opt" = yes; then
suppress_output=' >/dev/null 2>&1'
fi
else
# No PIC object so indicate it doesn't exist in the libtool
# object file.
test -z "$run" && cat >> ${libobj}T <<EOF
pic_object=none
EOF
fi
# Only build a position-dependent object if we build old libraries.
if test "$build_old_libs" = yes; then
if test "$pic_mode" != yes; then
# Don't build PIC code
command="$base_compile $qsrcfile"
else
command="$base_compile $qsrcfile $pic_flag"
fi
if test "$compiler_c_o" = yes; then
command="$command -o $obj"
fi
# Suppress compiler output if we already did a PIC compilation.
command="$command$suppress_output"
$run $rm "$obj" "$output_obj"
$show "$command"
if $run eval "$command"; then :
else
$run $rm $removelist
exit $EXIT_FAILURE
fi
if test "$need_locks" = warn &&
test "X`cat $lockfile 2>/dev/null`" != "X$srcfile"; then
$echo "\
*** ERROR, $lockfile contains:
`cat $lockfile 2>/dev/null`
but it should contain:
$srcfile
This indicates that another process is trying to use the same
temporary object file, and libtool could not work around it because
your compiler does not support \`-c' and \`-o' together. If you
repeat this compilation, it may succeed, by chance, but you had better
avoid parallel builds (make -j) in this platform, or get a better
compiler."
$run $rm $removelist
exit $EXIT_FAILURE
fi
# Just move the object if needed
if test -n "$output_obj" && test "X$output_obj" != "X$obj"; then
$show "$mv $output_obj $obj"
if $run $mv $output_obj $obj; then :
else
error=$?
$run $rm $removelist
exit $error
fi
fi
# Append the name of the non-PIC object to the libtool object file.
# Only append if the libtool object file exists.
test -z "$run" && cat >> ${libobj}T <<EOF
# Name of the non-PIC object.
non_pic_object='$objname'
EOF
else
# Append the name of the non-PIC object to the libtool object file.
# Only append if the libtool object file exists.
test -z "$run" && cat >> ${libobj}T <<EOF
# Name of the non-PIC object.
non_pic_object=none
EOF
fi
$run $mv "${libobj}T" "${libobj}"
# Unlock the critical section if it was locked
if test "$need_locks" != no; then
$run $rm "$lockfile"
fi
exit $EXIT_SUCCESS
;;
# libtool link mode
link | relink)
modename="$modename: link"
case $host in
*-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2*)
# It is impossible to link a dll without this setting, and
# we shouldn't force the makefile maintainer to figure out
# which system we are compiling for in order to pass an extra
# flag for every libtool invocation.
# allow_undefined=no
# FIXME: Unfortunately, there are problems with the above when trying
# to make a dll which has undefined symbols, in which case not
# even a static library is built. For now, we need to specify
# -no-undefined on the libtool link line when we can be certain
# that all symbols are satisfied, otherwise we get a static library.
allow_undefined=yes
;;
*)
allow_undefined=yes
;;
esac
libtool_args="$nonopt"
base_compile="$nonopt $@"
compile_command="$nonopt"
finalize_command="$nonopt"
compile_rpath=
finalize_rpath=
compile_shlibpath=
finalize_shlibpath=
convenience=
old_convenience=
deplibs=
old_deplibs=
compiler_flags=
linker_flags=
dllsearchpath=
lib_search_path=`pwd`
inst_prefix_dir=
avoid_version=no
dlfiles=
dlprefiles=
dlself=no
export_dynamic=no
export_symbols=
export_symbols_regex=
generated=
libobjs=
ltlibs=
module=no
no_install=no
objs=
non_pic_objects=
precious_files_regex=
prefer_static_libs=no
preload=no
prev=
prevarg=
release=
rpath=
xrpath=
perm_rpath=
temp_rpath=
thread_safe=no
vinfo=
vinfo_number=no
func_infer_tag $base_compile
# We need to know -static, to get the right output filenames.
# (This is a pre-scan of the argument list: the main parsing loop below
# relies on build_libtool_libs/build_old_libs being settled already.)
for arg
do
case $arg in
-all-static | -static)
if test "X$arg" = "X-all-static"; then
# -all-static: link everything statically, including system libraries.
if test "$build_libtool_libs" = yes && test -z "$link_static_flag"; then
$echo "$modename: warning: complete static linking is impossible in this configuration" 1>&2
fi
if test -n "$link_static_flag"; then
dlopen_self=$dlopen_self_static
fi
else
# Plain -static: only avoid linking against shared libtool libraries.
if test -z "$pic_flag" && test -n "$link_static_flag"; then
dlopen_self=$dlopen_self_static
fi
fi
# Either flavour disables shared-library output for this invocation,
# so only the first occurrence matters; stop scanning.
build_libtool_libs=no
build_old_libs=yes
prefer_static_libs=yes
break
;;
esac
done
# See if our shared archives depend on static archives.
test -n "$old_archive_from_new_cmds" && build_old_libs=yes
# Go through the arguments, transforming them on the way.
while test "$#" -gt 0; do
arg="$1"
shift
case $arg in
*$quote_scanset* | *]* | *\|* | *\&* | *\(* | *\)* | "")
qarg=\"`$echo "X$arg" | $Xsed -e "$sed_quote_subst"`\" ### testsuite: skip nested quoting test
;;
*) qarg=$arg ;;
esac
libtool_args="$libtool_args $qarg"
# If the previous option needs an argument, assign it.
if test -n "$prev"; then
case $prev in
output)
compile_command="$compile_command @OUTPUT@"
finalize_command="$finalize_command @OUTPUT@"
;;
esac
case $prev in
dlfiles|dlprefiles)
if test "$preload" = no; then
# Add the symbol object into the linking commands.
compile_command="$compile_command @SYMFILE@"
finalize_command="$finalize_command @SYMFILE@"
preload=yes
fi
case $arg in
*.la | *.lo) ;; # We handle these cases below.
force)
if test "$dlself" = no; then
dlself=needless
export_dynamic=yes
fi
prev=
continue
;;
self)
if test "$prev" = dlprefiles; then
dlself=yes
elif test "$prev" = dlfiles && test "$dlopen_self" != yes; then
dlself=yes
else
dlself=needless
export_dynamic=yes
fi
prev=
continue
;;
*)
if test "$prev" = dlfiles; then
dlfiles="$dlfiles $arg"
else
dlprefiles="$dlprefiles $arg"
fi
prev=
continue
;;
esac
;;
expsyms)
export_symbols="$arg"
if test ! -f "$arg"; then
$echo "$modename: symbol file \`$arg' does not exist"
exit $EXIT_FAILURE
fi
prev=
continue
;;
expsyms_regex)
export_symbols_regex="$arg"
prev=
continue
;;
inst_prefix)
inst_prefix_dir="$arg"
prev=
continue
;;
precious_regex)
precious_files_regex="$arg"
prev=
continue
;;
release)
release="-$arg"
prev=
continue
;;
objectlist)
if test -f "$arg"; then
save_arg=$arg
moreargs=
for fil in `cat $save_arg`
do
# moreargs="$moreargs $fil"
arg=$fil
# A libtool-controlled object.
# Check to see that this really is a libtool object.
if (${SED} -e '2q' $arg | grep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then
pic_object=
non_pic_object=
# Read the .lo file
# If there is no directory component, then add one.
case $arg in
*/* | *\\*) . $arg ;;
*) . ./$arg ;;
esac
if test -z "$pic_object" || \
test -z "$non_pic_object" ||
test "$pic_object" = none && \
test "$non_pic_object" = none; then
$echo "$modename: cannot find name of object for \`$arg'" 1>&2
exit $EXIT_FAILURE
fi
# Extract subdirectory from the argument.
xdir=`$echo "X$arg" | $Xsed -e 's%/[^/]*$%%'`
if test "X$xdir" = "X$arg"; then
xdir=
else
xdir="$xdir/"
fi
if test "$pic_object" != none; then
# Prepend the subdirectory the object is found in.
pic_object="$xdir$pic_object"
if test "$prev" = dlfiles; then
if test "$build_libtool_libs" = yes && test "$dlopen_support" = yes; then
dlfiles="$dlfiles $pic_object"
prev=
continue
else
# If libtool objects are unsupported, then we need to preload.
prev=dlprefiles
fi
fi
# CHECK ME: I think I busted this. -Ossama
if test "$prev" = dlprefiles; then
# Preload the old-style object.
dlprefiles="$dlprefiles $pic_object"
prev=
fi
# A PIC object.
libobjs="$libobjs $pic_object"
arg="$pic_object"
fi
# Non-PIC object.
if test "$non_pic_object" != none; then
# Prepend the subdirectory the object is found in.
non_pic_object="$xdir$non_pic_object"
# A standard non-PIC object
non_pic_objects="$non_pic_objects $non_pic_object"
if test -z "$pic_object" || test "$pic_object" = none ; then
arg="$non_pic_object"
fi
fi
else
# Only an error if not doing a dry-run.
if test -z "$run"; then
$echo "$modename: \`$arg' is not a valid libtool object" 1>&2
exit $EXIT_FAILURE
else
# Dry-run case.
# Extract subdirectory from the argument.
xdir=`$echo "X$arg" | $Xsed -e 's%/[^/]*$%%'`
if test "X$xdir" = "X$arg"; then
xdir=
else
xdir="$xdir/"
fi
pic_object=`$echo "X${xdir}${objdir}/${arg}" | $Xsed -e "$lo2o"`
non_pic_object=`$echo "X${xdir}${arg}" | $Xsed -e "$lo2o"`
libobjs="$libobjs $pic_object"
non_pic_objects="$non_pic_objects $non_pic_object"
fi
fi
done
else
$echo "$modename: link input file \`$save_arg' does not exist"
exit $EXIT_FAILURE
fi
arg=$save_arg
prev=
continue
;;
rpath | xrpath)
# Argument following -rpath (install directory) or -R (extra
# run-path entry).
# We need an absolute path.
case $arg in
[\\/]* | [A-Za-z]:[\\/]*) ;;
*)
$echo "$modename: only absolute run-paths are allowed" 1>&2
exit $EXIT_FAILURE
;;
esac
# Canonicalise the pathname: re-run the sed passes until a fixed
# point is reached, collapsing `dir/..' pairs, `/./' components,
# repeated slashes and any trailing slash.
tmp=""
while test "$arg" != "$tmp"
do
tmp=$arg
arg=`$echo "X$arg" | $Xsed -e 's%[^/.][^/.]*/\.\.%%g' -e 's%/\./%/%g' -e 's%//*%/%g' -e 's%/$%%g'`
done
# Append to $rpath (-rpath) or $xrpath (-R), skipping duplicates.
if test "$prev" = rpath; then
case "$rpath " in
*" $arg "*) ;;
*) rpath="$rpath $arg" ;;
esac
else
case "$xrpath " in
*" $arg "*) ;;
*) xrpath="$xrpath $arg" ;;
esac
fi
prev=
continue
;;
xcompiler)
compiler_flags="$compiler_flags $qarg"
prev=
compile_command="$compile_command $qarg"
finalize_command="$finalize_command $qarg"
continue
;;
xlinker)
linker_flags="$linker_flags $qarg"
compiler_flags="$compiler_flags $wl$qarg"
prev=
compile_command="$compile_command $wl$qarg"
finalize_command="$finalize_command $wl$qarg"
continue
;;
xcclinker)
linker_flags="$linker_flags $qarg"
compiler_flags="$compiler_flags $qarg"
prev=
compile_command="$compile_command $qarg"
finalize_command="$finalize_command $qarg"
continue
;;
shrext)
shrext_cmds="$arg"
prev=
continue
;;
darwin_framework)
compiler_flags="$compiler_flags $arg"
compile_command="$compile_command $arg"
finalize_command="$finalize_command $arg"
prev=
continue
;;
*)
eval "$prev=\"\$arg\""
prev=
continue
;;
esac
fi # test -n "$prev"
prevarg="$arg"
case $arg in
-all-static)
if test -n "$link_static_flag"; then
compile_command="$compile_command $link_static_flag"
finalize_command="$finalize_command $link_static_flag"
fi
continue
;;
-allow-undefined)
# FIXME: remove this flag sometime in the future.
$echo "$modename: \`-allow-undefined' is deprecated because it is the default" 1>&2
continue
;;
-avoid-version)
avoid_version=yes
continue
;;
-dlopen)
prev=dlfiles
continue
;;
-dlpreopen)
prev=dlprefiles
continue
;;
-export-dynamic)
export_dynamic=yes
continue
;;
-export-symbols | -export-symbols-regex)
  # Only one symbol-export specification (file or regex) may be given
  # per link; a second occurrence is a fatal usage error.
  if test -n "$export_symbols" || test -n "$export_symbols_regex"; then
    # Fixed diagnostic: the options are named -export-symbols /
    # -export-symbols-regex (the old text said "-exported-symbols"),
    # and fatal messages go to stderr like everywhere else here.
    $echo "$modename: more than one -export-symbols argument is not allowed" 1>&2
    exit $EXIT_FAILURE
  fi
  # The option's argument is consumed on the next loop iteration
  # through $prev.
  if test "X$arg" = "X-export-symbols"; then
    prev=expsyms
  else
    prev=expsyms_regex
  fi
  continue
  ;;
-framework)
prev=darwin_framework
compiler_flags="$compiler_flags $arg"
compile_command="$compile_command $arg"
finalize_command="$finalize_command $arg"
continue
;;
-inst-prefix-dir)
prev=inst_prefix
continue
;;
# The native IRIX linker understands -LANG:*, -LIST:* and -LNO:*
# so, if we see these flags be careful not to treat them like -L
-L[A-Z][A-Z]*:*)
case $with_gcc/$host in
no/*-*-irix* | /*-*-irix*)
compile_command="$compile_command $arg"
finalize_command="$finalize_command $arg"
;;
esac
continue
;;
-L*)
dir=`$echo "X$arg" | $Xsed -e 's/^-L//'`
# We need an absolute path.
case $dir in
[\\/]* | [A-Za-z]:[\\/]*) ;;
*)
absdir=`cd "$dir" && pwd`
if test -z "$absdir"; then
$echo "$modename: cannot determine absolute directory name of \`$dir'" 1>&2
exit $EXIT_FAILURE
fi
dir="$absdir"
;;
esac
case "$deplibs " in
*" -L$dir "*) ;;
*)
deplibs="$deplibs -L$dir"
lib_search_path="$lib_search_path $dir"
;;
esac
case $host in
*-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2*)
case :$dllsearchpath: in
*":$dir:"*) ;;
*) dllsearchpath="$dllsearchpath:$dir";;
esac
;;
esac
continue
;;
-l*)
# -lNAME: normally recorded in $deplibs, except for host-specific
# cases where -lc/-lm/-lc_r must not be passed through.
if test "X$arg" = "X-lc" || test "X$arg" = "X-lm"; then
case $host in
*-*-cygwin* | *-*-pw32* | *-*-beos*)
# These systems don't actually have a C or math library (as such)
continue
;;
*-*-mingw* | *-*-os2*)
# These systems don't actually have a C library (as such)
test "X$arg" = "X-lc" && continue
;;
*-*-openbsd* | *-*-freebsd* | *-*-dragonfly*)
# Do not include libc due to us having libc/libc_r.
test "X$arg" = "X-lc" && continue
;;
*-*-rhapsody* | *-*-darwin1.[012])
# Rhapsody C and math libraries are in the System framework
deplibs="$deplibs -framework System"
continue
esac
elif test "X$arg" = "X-lc_r"; then
case $host in
*-*-openbsd* | *-*-freebsd* | *-*-dragonfly*)
# Do not include libc_r directly, use -pthread flag.
continue
;;
esac
fi
# Anything else (including -lc/-lm on ordinary hosts) is a plain
# dependency library.
deplibs="$deplibs $arg"
continue
;;
# Tru64 UNIX uses -model [arg] to determine the layout of C++
# classes, name mangling, and exception handling.
-model)
compile_command="$compile_command $arg"
compiler_flags="$compiler_flags $arg"
finalize_command="$finalize_command $arg"
prev=xcompiler
continue
;;
-mt|-mthreads|-kthread|-Kthread|-pthread|-pthreads|--thread-safe)
compiler_flags="$compiler_flags $arg"
compile_command="$compile_command $arg"
finalize_command="$finalize_command $arg"
case $host:$arg in
*-*-dragonfly*:-pthread)
# pkgsrc hack to use -pthread in .la file for final linking
deplibs="$deplibs $arg"
;;
esac
continue
;;
-module)
module=yes
continue
;;
# -64, -mips[0-9] enable 64-bit mode on the SGI compiler
# -r[0-9][0-9]* specifies the processor on the SGI compiler
# -xarch=*, -xtarget=* enable 64-bit mode on the Sun compiler
# +DA*, +DD* enable 64-bit mode on the HP compiler
# -q* pass through compiler args for the IBM compiler
# -m* pass through architecture-specific compiler args for GCC
-64|-mips[0-9]|-r[0-9][0-9]*|-xarch=*|-xtarget=*|+DA*|+DD*|-q*|-m*)
# Unknown arguments in both finalize_command and compile_command need
# to be aesthetically quoted because they are evaled later.
arg=`$echo "X$arg" | $Xsed -e "$sed_quote_subst"`
case $arg in
*$quote_scanset* | *]* | *\|* | *\&* | *\(* | *\)* | "")
arg="\"$arg\""
;;
esac
compile_command="$compile_command $arg"
finalize_command="$finalize_command $arg"
if test "$with_gcc" = "yes" ; then
compiler_flags="$compiler_flags $arg"
fi
continue
;;
-shrext)
prev=shrext
continue
;;
-no-fast-install)
fast_install=no
continue
;;
-no-install)
case $host in
*-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2*)
# The PATH hackery in wrapper scripts is required on Windows
# in order for the loader to find any dlls it needs.
$echo "$modename: warning: \`-no-install' is ignored for $host" 1>&2
$echo "$modename: warning: assuming \`-no-fast-install' instead" 1>&2
fast_install=no
;;
*) no_install=yes ;;
esac
continue
;;
-no-undefined)
allow_undefined=no
continue
;;
-objectlist)
prev=objectlist
continue
;;
-o) prev=output ;;
-precious-files-regex)
prev=precious_regex
continue
;;
-release)
prev=release
continue
;;
-rpath)
prev=rpath
continue
;;
-R)
prev=xrpath
continue
;;
-R*)
dir=`$echo "X$arg" | $Xsed -e 's/^-R//'`
# We need an absolute path.
case $dir in
[\\/]* | [A-Za-z]:[\\/]*) ;;
*)
$echo "$modename: only absolute run-paths are allowed" 1>&2
exit $EXIT_FAILURE
;;
esac
case "$xrpath " in
*" $dir "*) ;;
*) xrpath="$xrpath $dir" ;;
esac
continue
;;
-static)
# The effects of -static are defined in a previous loop.
# We used to do the same as -all-static on platforms that
# didn't have a PIC flag, but the assumption that the effects
# would be equivalent was wrong. It would break on at least
# Digital Unix and AIX.
continue
;;
-thread-safe)
thread_safe=yes
continue
;;
-version-info)
prev=vinfo
continue
;;
-version-number)
prev=vinfo
vinfo_number=yes
continue
;;
-Wc,*)
args=`$echo "X$arg" | $Xsed -e "$sed_quote_subst" -e 's/^-Wc,//'`
arg=
save_ifs="$IFS"; IFS=','
for flag in $args; do
IFS="$save_ifs"
case $flag in
*$quote_scanset* | *]* | *\|* | *\&* | *\(* | *\)* | "")
flag="\"$flag\""
;;
esac
arg="$arg $wl$flag"
compiler_flags="$compiler_flags $flag"
done
IFS="$save_ifs"
arg=`$echo "X$arg" | $Xsed -e "s/^ //"`
;;
-Wl,*)
args=`$echo "X$arg" | $Xsed -e "$sed_quote_subst" -e 's/^-Wl,//'`
arg=
save_ifs="$IFS"; IFS=','
for flag in $args; do
IFS="$save_ifs"
case $flag in
*$quote_scanset* | *]* | *\|* | *\&* | *\(* | *\)* | "")
flag="\"$flag\""
;;
esac
arg="$arg $wl$flag"
compiler_flags="$compiler_flags $wl$flag"
linker_flags="$linker_flags $flag"
done
IFS="$save_ifs"
arg=`$echo "X$arg" | $Xsed -e "s/^ //"`
;;
-Xcompiler)
prev=xcompiler
continue
;;
-Xlinker)
prev=xlinker
continue
;;
-XCClinker)
prev=xcclinker
continue
;;
# Some other compiler flag.
-* | +*)
# Unknown arguments in both finalize_command and compile_command need
# to be aesthetically quoted because they are evaled later.
arg=`$echo "X$arg" | $Xsed -e "$sed_quote_subst"`
case $arg in
*$quote_scanset* | *]* | *\|* | *\&* | *\(* | *\)* | "")
arg="\"$arg\""
;;
esac
;;
*.$objext)
# A standard object.
objs="$objs $arg"
;;
*.lo)
# A libtool-controlled object.
# Check to see that this really is a libtool object.
if (${SED} -e '2q' $arg | grep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then
pic_object=
non_pic_object=
# Read the .lo file
# If there is no directory component, then add one.
case $arg in
*/* | *\\*) . $arg ;;
*) . ./$arg ;;
esac
if test -z "$pic_object" || \
test -z "$non_pic_object" ||
test "$pic_object" = none && \
test "$non_pic_object" = none; then
$echo "$modename: cannot find name of object for \`$arg'" 1>&2
exit $EXIT_FAILURE
fi
# Extract subdirectory from the argument.
xdir=`$echo "X$arg" | $Xsed -e 's%/[^/]*$%%'`
if test "X$xdir" = "X$arg"; then
xdir=
else
xdir="$xdir/"
fi
if test "$pic_object" != none; then
# Prepend the subdirectory the object is found in.
pic_object="$xdir$pic_object"
if test "$prev" = dlfiles; then
if test "$build_libtool_libs" = yes && test "$dlopen_support" = yes; then
dlfiles="$dlfiles $pic_object"
prev=
continue
else
# If libtool objects are unsupported, then we need to preload.
prev=dlprefiles
fi
fi
# CHECK ME: I think I busted this. -Ossama
if test "$prev" = dlprefiles; then
# Preload the old-style object.
dlprefiles="$dlprefiles $pic_object"
prev=
fi
# A PIC object.
libobjs="$libobjs $pic_object"
arg="$pic_object"
fi
# Non-PIC object.
if test "$non_pic_object" != none; then
# Prepend the subdirectory the object is found in.
non_pic_object="$xdir$non_pic_object"
# A standard non-PIC object
non_pic_objects="$non_pic_objects $non_pic_object"
if test -z "$pic_object" || test "$pic_object" = none ; then
arg="$non_pic_object"
fi
fi
else
# Only an error if not doing a dry-run.
if test -z "$run"; then
$echo "$modename: \`$arg' is not a valid libtool object" 1>&2
exit $EXIT_FAILURE
else
# Dry-run case.
# Extract subdirectory from the argument.
xdir=`$echo "X$arg" | $Xsed -e 's%/[^/]*$%%'`
if test "X$xdir" = "X$arg"; then
xdir=
else
xdir="$xdir/"
fi
pic_object=`$echo "X${xdir}${objdir}/${arg}" | $Xsed -e "$lo2o"`
non_pic_object=`$echo "X${xdir}${arg}" | $Xsed -e "$lo2o"`
libobjs="$libobjs $pic_object"
non_pic_objects="$non_pic_objects $non_pic_object"
fi
fi
;;
*.$libext)
# An archive.
deplibs="$deplibs $arg"
old_deplibs="$old_deplibs $arg"
continue
;;
*.la)
# A libtool-controlled library.
if test "$prev" = dlfiles; then
# This library was specified with -dlopen.
dlfiles="$dlfiles $arg"
prev=
elif test "$prev" = dlprefiles; then
# The library was specified with -dlpreopen.
dlprefiles="$dlprefiles $arg"
prev=
else
deplibs="$deplibs $arg"
fi
continue
;;
# Some other compiler argument.
*)
# Unknown arguments in both finalize_command and compile_command need
# to be aesthetically quoted because they are evaled later.
arg=`$echo "X$arg" | $Xsed -e "$sed_quote_subst"`
case $arg in
*$quote_scanset* | *]* | *\|* | *\&* | *\(* | *\)* | "")
arg="\"$arg\""
;;
esac
;;
esac # arg
# Now actually substitute the argument into the commands.
if test -n "$arg"; then
compile_command="$compile_command $arg"
finalize_command="$finalize_command $arg"
fi
done # argument parsing loop
if test -n "$prev"; then
$echo "$modename: the \`$prevarg' option requires an argument" 1>&2
$echo "$help" 1>&2
exit $EXIT_FAILURE
fi
if test "$export_dynamic" = yes && test -n "$export_dynamic_flag_spec"; then
eval arg=\"$export_dynamic_flag_spec\"
compile_command="$compile_command $arg"
finalize_command="$finalize_command $arg"
fi
oldlibs=
# calculate the name of the file, without its directory
outputname=`$echo "X$output" | $Xsed -e 's%^.*/%%'`
libobjs_save="$libobjs"
if test -n "$shlibpath_var"; then
# get the directories listed in $shlibpath_var
eval shlib_search_path=\`\$echo \"X\${$shlibpath_var}\" \| \$Xsed -e \'s/:/ /g\'\`
else
shlib_search_path=
fi
eval sys_lib_search_path=\"$sys_lib_search_path_spec\"
eval sys_lib_dlsearch_path=\"$sys_lib_dlsearch_path_spec\"
output_objdir=`$echo "X$output" | $Xsed -e 's%/[^/]*$%%'`
if test "X$output_objdir" = "X$output"; then
output_objdir="$objdir"
else
output_objdir="$output_objdir/$objdir"
fi
# Create the object directory.
if test ! -d "$output_objdir"; then
$show "$mkdir $output_objdir"
$run $mkdir $output_objdir
status=$?
if test "$status" -ne 0 && test ! -d "$output_objdir"; then
exit $status
fi
fi
# Determine the type of output
case $output in
"")
$echo "$modename: you must specify an output file" 1>&2
$echo "$help" 1>&2
exit $EXIT_FAILURE
;;
*.$libext) linkmode=oldlib ;;
*.lo | *.$objext) linkmode=obj ;;
*.la) linkmode=lib ;;
*) linkmode=prog ;; # Anything else should be a program.
esac
case $host in
*cygwin* | *mingw* | *pw32*)
# don't eliminate duplications in $postdeps and $predeps
duplicate_compiler_generated_deps=yes
;;
*)
duplicate_compiler_generated_deps=$duplicate_deps
;;
esac
specialdeplibs=
libs=
# Find all interdependent deplibs by searching for libraries
# that are linked more than once (e.g. -la -lb -la)
for deplib in $deplibs; do
if test "X$duplicate_deps" = "Xyes" ; then
case "$libs " in
*" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;;
esac
fi
libs="$libs $deplib"
done
# When producing a libtool library, the compiler-generated dependency
# lists ($predeps/$postdeps) and the compiler's own library search path
# take part in the interdependency scan as well.
if test "$linkmode" = lib; then
  libs="$predeps $libs $compiler_lib_search_path $postdeps"

  # Compute libraries that are listed more than once in $predeps
  # $postdeps and mark them as special (i.e., whose duplicates are
  # not to be eliminated).
  pre_post_deps=
  if test "X$duplicate_compiler_generated_deps" = "Xyes" ; then
    for pre_post_dep in $predeps $postdeps; do
      case "$pre_post_deps " in
      # Bug fix: mark only the duplicated entry itself ($pre_post_dep).
      # The old code appended the whole accumulated $pre_post_deps list,
      # flagging every previously seen library as special.
      *" $pre_post_dep "*) specialdeplibs="$specialdeplibs $pre_post_dep" ;;
      esac
      pre_post_deps="$pre_post_deps $pre_post_dep"
    done
  fi
  pre_post_deps=
fi
deplibs=
newdependency_libs=
newlib_search_path=
need_relink=no # whether we're linking any uninstalled libtool libraries
notinst_deplibs= # not-installed libtool libraries
notinst_path= # paths that contain not-installed libtool libraries
case $linkmode in
lib)
passes="conv link"
for file in $dlfiles $dlprefiles; do
case $file in
*.la) ;;
*)
$echo "$modename: libraries can \`-dlopen' only libtool libraries: $file" 1>&2
exit $EXIT_FAILURE
;;
esac
done
;;
prog)
compile_deplibs=
finalize_deplibs=
alldeplibs=no
newdlfiles=
newdlprefiles=
passes="conv scan dlopen dlpreopen link"
;;
*) passes="conv"
;;
esac
for pass in $passes; do
if test "$linkmode,$pass" = "lib,link" ||
test "$linkmode,$pass" = "prog,scan"; then
libs="$deplibs"
deplibs=
fi
if test "$linkmode" = prog; then
case $pass in
dlopen) libs="$dlfiles" ;;
dlpreopen) libs="$dlprefiles" ;;
link) libs="$deplibs %DEPLIBS% $dependency_libs" ;;
esac
fi
if test "$pass" = dlopen; then
# Collect dlpreopened libraries
save_deplibs="$deplibs"
deplibs=
fi
for deplib in $libs; do
lib=
found=no
case $deplib in
-mt|-mthreads|-kthread|-Kthread|-pthread|-pthreads|--thread-safe)
if test "$linkmode,$pass" = "prog,link"; then
compile_deplibs="$deplib $compile_deplibs"
finalize_deplibs="$deplib $finalize_deplibs"
else
compiler_flags="$compiler_flags $deplib"
fi
case $host:$deplib in
*-*-dragonfly*:-pthread)
# pkgsrc hack to use -pthread in .la file for final linking
case $linkmode in
lib)
deplibs="$deplib $deplibs"
test "$pass" = conv && continue
newdependency_libs="$deplib $newdependency_libs"
;;
prog)
if test "$pass" = conv; then
deplibs="$deplib $deplibs"
continue
fi
if test "$pass" = scan; then
deplibs="$deplib $deplibs"
else
compile_deplibs="$deplib $compile_deplibs"
finalize_deplibs="$deplib $finalize_deplibs"
fi
;;
esac
;;
esac
continue
;;
-l*)
if test "$linkmode" != lib && test "$linkmode" != prog; then
$echo "$modename: warning: \`-l' is ignored for archives/objects" 1>&2
continue
fi
name=`$echo "X$deplib" | $Xsed -e 's/^-l//'`
for searchdir in $newlib_search_path $lib_search_path $sys_lib_search_path $shlib_search_path; do
for search_ext in .la $std_shrext .so .a; do
# Search the libtool library
lib="$searchdir/lib${name}${search_ext}"
if test -f "$lib"; then
if test "$search_ext" = ".la"; then
found=yes
else
found=no
fi
break 2
fi
done
done
if test "$found" != yes; then
# deplib doesn't seem to be a libtool library
if test "$linkmode,$pass" = "prog,link"; then
compile_deplibs="$deplib $compile_deplibs"
finalize_deplibs="$deplib $finalize_deplibs"
else
deplibs="$deplib $deplibs"
test "$linkmode" = lib && newdependency_libs="$deplib $newdependency_libs"
fi
continue
else # deplib is a libtool library
# If $allow_libtool_libs_with_static_runtimes && $deplib is a stdlib,
# We need to do some special things here, and not later.
if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then
case " $predeps $postdeps " in
*" $deplib "*)
if (${SED} -e '2q' $lib |
grep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then
library_names=
old_library=
case $lib in
*/* | *\\*) . $lib ;;
*) . ./$lib ;;
esac
for l in $old_library $library_names; do
ll="$l"
done
if test "X$ll" = "X$old_library" ; then # only static version available
found=no
ladir=`$echo "X$lib" | $Xsed -e 's%/[^/]*$%%'`
test "X$ladir" = "X$lib" && ladir="."
lib=$ladir/$old_library
if test "$linkmode,$pass" = "prog,link"; then
compile_deplibs="$deplib $compile_deplibs"
finalize_deplibs="$deplib $finalize_deplibs"
else
deplibs="$deplib $deplibs"
test "$linkmode" = lib && newdependency_libs="$deplib $newdependency_libs"
fi
continue
fi
fi
;;
*) ;;
esac
fi
fi
;; # -l
-L*)
case $linkmode in
lib)
deplibs="$deplib $deplibs"
test "$pass" = conv && continue
newdependency_libs="$deplib $newdependency_libs"
newlib_search_path="$newlib_search_path "`$echo "X$deplib" | $Xsed -e 's/^-L//'`
;;
prog)
if test "$pass" = conv; then
deplibs="$deplib $deplibs"
continue
fi
if test "$pass" = scan; then
deplibs="$deplib $deplibs"
else
compile_deplibs="$deplib $compile_deplibs"
finalize_deplibs="$deplib $finalize_deplibs"
fi
newlib_search_path="$newlib_search_path "`$echo "X$deplib" | $Xsed -e 's/^-L//'`
;;
*)
$echo "$modename: warning: \`-L' is ignored for archives/objects" 1>&2
;;
esac # linkmode
continue
;; # -L
-R*)
if test "$pass" = link; then
dir=`$echo "X$deplib" | $Xsed -e 's/^-R//'`
# Make sure the xrpath contains only unique directories.
case "$xrpath " in
*" $dir "*) ;;
*) xrpath="$xrpath $dir" ;;
esac
fi
deplibs="$deplib $deplibs"
continue
;;
*.la) lib="$deplib" ;;
*.$libext)
if test "$pass" = conv; then
deplibs="$deplib $deplibs"
continue
fi
case $linkmode in
lib)
valid_a_lib=no
case $deplibs_check_method in
match_pattern*)
set dummy $deplibs_check_method
match_pattern_regex=`expr "$deplibs_check_method" : "$2 \(.*\)"`
if eval $echo \"$deplib\" 2>/dev/null \
| $SED 10q \
| $EGREP "$match_pattern_regex" > /dev/null; then
valid_a_lib=yes
fi
;;
pass_all)
valid_a_lib=yes
;;
esac
if test "$valid_a_lib" != yes; then
$echo
$echo "*** Warning: Trying to link with static lib archive $deplib."
$echo "*** I have the capability to make that library automatically link in when"
$echo "*** you link to this library. But I can only do this if you have a"
$echo "*** shared version of the library, which you do not appear to have"
$echo "*** because the file extensions .$libext of this argument makes me believe"
$echo "*** that it is just a static archive that I should not used here."
else
$echo
$echo "*** Warning: Linking the shared library $output against the"
$echo "*** static library $deplib is not portable!"
deplibs="$deplib $deplibs"
fi
continue
;;
prog)
if test "$pass" != link; then
deplibs="$deplib $deplibs"
else
compile_deplibs="$deplib $compile_deplibs"
finalize_deplibs="$deplib $finalize_deplibs"
fi
continue
;;
esac # linkmode
;; # *.$libext
*.lo | *.$objext)
if test "$pass" = conv; then
deplibs="$deplib $deplibs"
elif test "$linkmode" = prog; then
if test "$pass" = dlpreopen || test "$dlopen_support" != yes || test "$build_libtool_libs" = no; then
# If there is no dlopen support or we're linking statically,
# we need to preload.
newdlprefiles="$newdlprefiles $deplib"
compile_deplibs="$deplib $compile_deplibs"
finalize_deplibs="$deplib $finalize_deplibs"
else
newdlfiles="$newdlfiles $deplib"
fi
fi
continue
;;
%DEPLIBS%)
alldeplibs=yes
continue
;;
esac # case $deplib
if test "$found" = yes || test -f "$lib"; then :
else
$echo "$modename: cannot find the library \`$lib'" 1>&2
exit $EXIT_FAILURE
fi
# Check to see that this really is a libtool archive.
if (${SED} -e '2q' $lib | grep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then :
else
$echo "$modename: \`$lib' is not a valid libtool archive" 1>&2
exit $EXIT_FAILURE
fi
ladir=`$echo "X$lib" | $Xsed -e 's%/[^/]*$%%'`
test "X$ladir" = "X$lib" && ladir="."
dlname=
dlopen=
dlpreopen=
libdir=
library_names=
old_library=
# If the library was installed with an old release of libtool, its
# .la file will not redefine the variables `installed' or
# `shouldnotlink'; keep the defaults set here in that case.
installed=yes
shouldnotlink=no
avoidtemprpath=
# Read the .la file
case $lib in
*/* | *\\*) . $lib ;;
*) . ./$lib ;;
esac
if test "$linkmode,$pass" = "lib,link" ||
test "$linkmode,$pass" = "prog,scan" ||
{ test "$linkmode" != prog && test "$linkmode" != lib; }; then
test -n "$dlopen" && dlfiles="$dlfiles $dlopen"
test -n "$dlpreopen" && dlprefiles="$dlprefiles $dlpreopen"
fi
if test "$pass" = conv; then
# Only check for convenience libraries
deplibs="$lib $deplibs"
if test -z "$libdir"; then
if test -z "$old_library"; then
$echo "$modename: cannot find name of link library for \`$lib'" 1>&2
exit $EXIT_FAILURE
fi
# It is a libtool convenience library, so add in its objects.
convenience="$convenience $ladir/$objdir/$old_library"
old_convenience="$old_convenience $ladir/$objdir/$old_library"
tmp_libs=
for deplib in $dependency_libs; do
deplibs="$deplib $deplibs"
if test "X$duplicate_deps" = "Xyes" ; then
case "$tmp_libs " in
*" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;;
esac
fi
tmp_libs="$tmp_libs $deplib"
done
elif test "$linkmode" != prog && test "$linkmode" != lib; then
$echo "$modename: \`$lib' is not a convenience library" 1>&2
exit $EXIT_FAILURE
fi
continue
fi # $pass = conv
# Get the name of the library we link against.
linklib=
for l in $old_library $library_names; do
linklib="$l"
done
if test -z "$linklib"; then
$echo "$modename: cannot find name of link library for \`$lib'" 1>&2
exit $EXIT_FAILURE
fi
# This library was specified with -dlopen.
if test "$pass" = dlopen; then
if test -z "$libdir"; then
$echo "$modename: cannot -dlopen a convenience library: \`$lib'" 1>&2
exit $EXIT_FAILURE
fi
if test -z "$dlname" ||
test "$dlopen_support" != yes ||
test "$build_libtool_libs" = no; then
# If there is no dlname, no dlopen support or we're linking
# statically, we need to preload. We also need to preload any
# dependent libraries so libltdl's deplib preloader doesn't
# bomb out in the load deplibs phase.
dlprefiles="$dlprefiles $lib $dependency_libs"
else
newdlfiles="$newdlfiles $lib"
fi
continue
fi # $pass = dlopen
# We need an absolute path.
case $ladir in
[\\/]* | [A-Za-z]:[\\/]*) abs_ladir="$ladir" ;;
*)
abs_ladir=`cd "$ladir" && pwd`
if test -z "$abs_ladir"; then
$echo "$modename: warning: cannot determine absolute directory name of \`$ladir'" 1>&2
$echo "$modename: passing it literally to the linker, although it might fail" 1>&2
abs_ladir="$ladir"
fi
;;
esac
laname=`$echo "X$lib" | $Xsed -e 's%^.*/%%'`
# Find the relevant object directory and library name.
if test "X$installed" = Xyes; then
if test ! -f "$libdir/$linklib" && test -f "$abs_ladir/$linklib"; then
$echo "$modename: warning: library \`$lib' was moved." 1>&2
dir="$ladir"
absdir="$abs_ladir"
libdir="$abs_ladir"
else
dir="$libdir"
absdir="$libdir"
fi
test "X$hardcode_automatic" = Xyes && avoidtemprpath=yes
else
if test ! -f "$ladir/$objdir/$linklib" && test -f "$abs_ladir/$linklib"; then
dir="$ladir"
absdir="$abs_ladir"
# Remove this search path later
notinst_path="$notinst_path $abs_ladir"
else
dir="$ladir/$objdir"
absdir="$abs_ladir/$objdir"
# Remove this search path later
notinst_path="$notinst_path $abs_ladir"
fi
fi # $installed = yes
name=`$echo "X$laname" | $Xsed -e 's/\.la$//' -e 's/^lib//'`
# This library was specified with -dlpreopen.
if test "$pass" = dlpreopen; then
if test -z "$libdir"; then
$echo "$modename: cannot -dlpreopen a convenience library: \`$lib'" 1>&2
exit $EXIT_FAILURE
fi
# Prefer using a static library (so that no silly _DYNAMIC symbols
# are required to link).
if test -n "$old_library"; then
newdlprefiles="$newdlprefiles $dir/$old_library"
# Otherwise, use the dlname, so that lt_dlopen finds it.
elif test -n "$dlname"; then
newdlprefiles="$newdlprefiles $dir/$dlname"
else
newdlprefiles="$newdlprefiles $dir/$linklib"
fi
fi # $pass = dlpreopen
if test -z "$libdir"; then
# Link the convenience library
if test "$linkmode" = lib; then
deplibs="$dir/$old_library $deplibs"
elif test "$linkmode,$pass" = "prog,link"; then
compile_deplibs="$dir/$old_library $compile_deplibs"
finalize_deplibs="$dir/$old_library $finalize_deplibs"
else
deplibs="$lib $deplibs" # used for prog,scan pass
fi
continue
fi
if test "$linkmode" = prog && test "$pass" != link; then
newlib_search_path="$newlib_search_path $ladir"
deplibs="$lib $deplibs"
linkalldeplibs=no
if test "$link_all_deplibs" != no || test -z "$library_names" ||
test "$build_libtool_libs" = no; then
linkalldeplibs=yes
fi
tmp_libs=
for deplib in $dependency_libs; do
case $deplib in
-L*) newlib_search_path="$newlib_search_path "`$echo "X$deplib" | $Xsed -e 's/^-L//'`;; ### testsuite: skip nested quoting test
esac
# Need to link against all dependency_libs?
if test "$linkalldeplibs" = yes; then
deplibs="$deplib $deplibs"
else
# Need to hardcode shared library paths
# or/and link against static libraries
newdependency_libs="$deplib $newdependency_libs"
fi
if test "X$duplicate_deps" = "Xyes" ; then
case "$tmp_libs " in
*" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;;
esac
fi
tmp_libs="$tmp_libs $deplib"
done # for deplib
continue
fi # $linkmode = prog...
if test "$linkmode,$pass" = "prog,link"; then
if test -n "$library_names" &&
{ test "$prefer_static_libs" = no || test -z "$old_library"; }; then
# We need to hardcode the library path
if test -n "$shlibpath_var" && test -z "$avoidtemprpath" ; then
# Make sure the rpath contains only unique directories.
case "$temp_rpath " in
*" $dir "*) ;;
*" $absdir "*) ;;
*) temp_rpath="$temp_rpath $dir" ;;
esac
fi
# Hardcode the library path.
# Skip directories that are in the system default run-time
# search path.
case " $sys_lib_dlsearch_path " in
*" $absdir "*) ;;
*)
case "$compile_rpath " in
*" $absdir "*) ;;
*) compile_rpath="$compile_rpath $absdir"
esac
;;
esac
case " $sys_lib_dlsearch_path " in
*" $libdir "*) ;;
*)
case "$finalize_rpath " in
*" $libdir "*) ;;
*) finalize_rpath="$finalize_rpath $libdir"
esac
;;
esac
fi # $linkmode,$pass = prog,link...
if test "$alldeplibs" = yes &&
{ test "$deplibs_check_method" = pass_all ||
{ test "$build_libtool_libs" = yes &&
test -n "$library_names"; }; }; then
# We only need to search for static libraries
continue
fi
fi
link_static=no # Whether the deplib will be linked statically
if test -n "$library_names" &&
{ test "$prefer_static_libs" = no || test -z "$old_library"; }; then
if test "$installed" = no; then
notinst_deplibs="$notinst_deplibs $lib"
need_relink=yes
fi
# This is a shared library
# Warn about portability, can't link against -module's on
# some systems (darwin)
if test "$shouldnotlink" = yes && test "$pass" = link ; then
$echo
if test "$linkmode" = prog; then
$echo "*** Warning: Linking the executable $output against the loadable module"
else
$echo "*** Warning: Linking the shared library $output against the loadable module"
fi
$echo "*** $linklib is not portable!"
fi
if test "$linkmode" = lib &&
test "$hardcode_into_libs" = yes; then
# Hardcode the library path.
# Skip directories that are in the system default run-time
# search path.
case " $sys_lib_dlsearch_path " in
*" $absdir "*) ;;
*)
case "$compile_rpath " in
*" $absdir "*) ;;
*) compile_rpath="$compile_rpath $absdir"
esac
;;
esac
case " $sys_lib_dlsearch_path " in
*" $libdir "*) ;;
*)
case "$finalize_rpath " in
*" $libdir "*) ;;
*) finalize_rpath="$finalize_rpath $libdir"
esac
;;
esac
fi
if test -n "$old_archive_from_expsyms_cmds"; then
# figure out the soname
set dummy $library_names
realname="$2"
shift; shift
libname=`eval \\$echo \"$libname_spec\"`
# use dlname if we got it. it's perfectly good, no?
if test -n "$dlname"; then
soname="$dlname"
elif test -n "$soname_spec"; then
# bleh windows
case $host in
*cygwin* | mingw*)
major=`expr $current - $age`
versuffix="-$major"
;;
esac
eval soname=\"$soname_spec\"
else
soname="$realname"
fi
# Make a new name for the extract_expsyms_cmds to use
soroot="$soname"
soname=`$echo $soroot | ${SED} -e 's/^.*\///'`
newlib="libimp-`$echo $soname | ${SED} 's/^lib//;s/\.dll$//'`.a"
# If the library has no export list, then create one now
if test -f "$output_objdir/$soname-def"; then :
else
$show "extracting exported symbol list from \`$soname'"
save_ifs="$IFS"; IFS='~'
cmds=$extract_expsyms_cmds
for cmd in $cmds; do
IFS="$save_ifs"
eval cmd=\"$cmd\"
$show "$cmd"
$run eval "$cmd" || exit $?
done
IFS="$save_ifs"
fi
# Create $newlib
if test -f "$output_objdir/$newlib"; then :; else
$show "generating import library for \`$soname'"
save_ifs="$IFS"; IFS='~'
cmds=$old_archive_from_expsyms_cmds
for cmd in $cmds; do
IFS="$save_ifs"
eval cmd=\"$cmd\"
$show "$cmd"
$run eval "$cmd" || exit $?
done
IFS="$save_ifs"
fi
# make sure the library variables are pointing to the new library
dir=$output_objdir
linklib=$newlib
fi # test -n "$old_archive_from_expsyms_cmds"
if test "$linkmode" = prog || test "$mode" != relink; then
add_shlibpath=
add_dir=
add=
lib_linked=yes
case $hardcode_action in
immediate | unsupported)
if test "$hardcode_direct" = no; then
add="$dir/$linklib"
case $host in
*-*-sco3.2v5* ) add_dir="-L$dir" ;;
*-*-darwin* )
# if the lib is a module then we can not link against
# it, someone is ignoring the new warnings I added
if /usr/bin/file -L $add 2> /dev/null | $EGREP "bundle" >/dev/null ; then
$echo "** Warning, lib $linklib is a module, not a shared library"
if test -z "$old_library" ; then
$echo
$echo "** And there doesn't seem to be a static archive available"
$echo "** The link will probably fail, sorry"
else
add="$dir/$old_library"
fi
fi
esac
elif test "$hardcode_minus_L" = no; then
case $host in
*-*-sunos*) add_shlibpath="$dir" ;;
esac
add_dir="-L$dir"
add="-l$name"
elif test "$hardcode_shlibpath_var" = no; then
add_shlibpath="$dir"
add="-l$name"
else
lib_linked=no
fi
;;
relink)
if test "$hardcode_direct" = yes; then
add="$dir/$linklib"
elif test "$hardcode_minus_L" = yes; then
add_dir="-L$dir"
# Try looking first in the location we're being installed to.
if test -n "$inst_prefix_dir"; then
case "$libdir" in
[\\/]*)
add_dir="$add_dir -L$inst_prefix_dir$libdir"
;;
esac
fi
add="-l$name"
elif test "$hardcode_shlibpath_var" = yes; then
add_shlibpath="$dir"
add="-l$name"
else
lib_linked=no
fi
;;
*) lib_linked=no ;;
esac
if test "$lib_linked" != yes; then
$echo "$modename: configuration error: unsupported hardcode properties"
exit $EXIT_FAILURE
fi
if test -n "$add_shlibpath"; then
case :$compile_shlibpath: in
*":$add_shlibpath:"*) ;;
*) compile_shlibpath="$compile_shlibpath$add_shlibpath:" ;;
esac
fi
if test "$linkmode" = prog; then
test -n "$add_dir" && compile_deplibs="$add_dir $compile_deplibs"
test -n "$add" && compile_deplibs="$add $compile_deplibs"
else
test -n "$add_dir" && deplibs="$add_dir $deplibs"
test -n "$add" && deplibs="$add $deplibs"
if test "$hardcode_direct" != yes && \
test "$hardcode_minus_L" != yes && \
test "$hardcode_shlibpath_var" = yes; then
case :$finalize_shlibpath: in
*":$libdir:"*) ;;
*) finalize_shlibpath="$finalize_shlibpath$libdir:" ;;
esac
fi
fi
fi
if test "$linkmode" = prog || test "$mode" = relink; then
add_shlibpath=
add_dir=
add=
# Finalize command for both is simple: just hardcode it.
if test "$hardcode_direct" = yes; then
add="$libdir/$linklib"
elif test "$hardcode_minus_L" = yes; then
add_dir="-L$libdir"
add="-l$name"
elif test "$hardcode_shlibpath_var" = yes; then
case :$finalize_shlibpath: in
*":$libdir:"*) ;;
*) finalize_shlibpath="$finalize_shlibpath$libdir:" ;;
esac
add="-l$name"
elif test "$hardcode_automatic" = yes; then
if test -n "$inst_prefix_dir" &&
test -f "$inst_prefix_dir$libdir/$linklib" ; then
add="$inst_prefix_dir$libdir/$linklib"
else
add="$libdir/$linklib"
fi
else
# We cannot seem to hardcode it, guess we'll fake it.
add_dir="-L$libdir"
# Try looking first in the location we're being installed to.
if test -n "$inst_prefix_dir"; then
case "$libdir" in
[\\/]*)
add_dir="$add_dir -L$inst_prefix_dir$libdir"
;;
esac
fi
add="-l$name"
fi
if test "$linkmode" = prog; then
test -n "$add_dir" && finalize_deplibs="$add_dir $finalize_deplibs"
test -n "$add" && finalize_deplibs="$add $finalize_deplibs"
else
test -n "$add_dir" && deplibs="$add_dir $deplibs"
test -n "$add" && deplibs="$add $deplibs"
fi
fi
elif test "$linkmode" = prog; then
# Here we assume that one of hardcode_direct or hardcode_minus_L
# is not unsupported. This is valid on all known static and
# shared platforms.
if test "$hardcode_direct" != unsupported; then
test -n "$old_library" && linklib="$old_library"
compile_deplibs="$dir/$linklib $compile_deplibs"
finalize_deplibs="$dir/$linklib $finalize_deplibs"
else
compile_deplibs="-l$name -L$dir $compile_deplibs"
finalize_deplibs="-l$name -L$dir $finalize_deplibs"
fi
elif test "$build_libtool_libs" = yes; then
# Not a shared library
if test "$deplibs_check_method" != pass_all; then
# We're trying link a shared library against a static one
# but the system doesn't support it.
# Just print a warning and add the library to dependency_libs so
# that the program can be linked against the static library.
$echo
$echo "*** Warning: This system can not link to static lib archive $lib."
$echo "*** I have the capability to make that library automatically link in when"
$echo "*** you link to this library. But I can only do this if you have a"
$echo "*** shared version of the library, which you do not appear to have."
if test "$module" = yes; then
$echo "*** But as you try to build a module library, libtool will still create "
$echo "*** a static module, that should work as long as the dlopening application"
$echo "*** is linked with the -dlopen flag to resolve symbols at runtime."
if test -z "$global_symbol_pipe"; then
$echo
$echo "*** However, this would only work if libtool was able to extract symbol"
$echo "*** lists from a program, using \`nm' or equivalent, but libtool could"
$echo "*** not find such a program. So, this module is probably useless."
$echo "*** \`nm' from GNU binutils and a full rebuild may help."
fi
if test "$build_old_libs" = no; then
build_libtool_libs=module
build_old_libs=yes
else
build_libtool_libs=no
fi
fi
else
convenience="$convenience $dir/$old_library"
old_convenience="$old_convenience $dir/$old_library"
deplibs="$dir/$old_library $deplibs"
link_static=yes
fi
fi # link shared/static library?
if test "$linkmode" = lib; then
if test -n "$dependency_libs" &&
{ test "$hardcode_into_libs" != yes ||
test "$build_old_libs" = yes ||
test "$link_static" = yes; }; then
# Extract -R from dependency_libs
temp_deplibs=
for libdir in $dependency_libs; do
case $libdir in
-R*) temp_xrpath=`$echo "X$libdir" | $Xsed -e 's/^-R//'`
case " $xrpath " in
*" $temp_xrpath "*) ;;
*) xrpath="$xrpath $temp_xrpath";;
esac;;
*) temp_deplibs="$temp_deplibs $libdir";;
esac
done
dependency_libs="$temp_deplibs"
fi
newlib_search_path="$newlib_search_path $absdir"
# Link against this library
test "$link_static" = no && newdependency_libs="$abs_ladir/$laname $newdependency_libs"
# ... and its dependency_libs
tmp_libs=
for deplib in $dependency_libs; do
newdependency_libs="$deplib $newdependency_libs"
if test "X$duplicate_deps" = "Xyes" ; then
case "$tmp_libs " in
*" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;;
esac
fi
tmp_libs="$tmp_libs $deplib"
done
if test "$link_all_deplibs" != no; then
# Add the search paths of all dependency libraries
for deplib in $dependency_libs; do
case $deplib in
-L*) path="$deplib" ;;
*.la)
dir=`$echo "X$deplib" | $Xsed -e 's%/[^/]*$%%'`
test "X$dir" = "X$deplib" && dir="."
# We need an absolute path.
case $dir in
[\\/]* | [A-Za-z]:[\\/]*) absdir="$dir" ;;
*)
absdir=`cd "$dir" && pwd`
if test -z "$absdir"; then
$echo "$modename: warning: cannot determine absolute directory name of \`$dir'" 1>&2
absdir="$dir"
fi
;;
esac
if grep "^installed=no" $deplib > /dev/null; then
path="$absdir/$objdir"
else
eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $deplib`
if test -z "$libdir"; then
$echo "$modename: \`$deplib' is not a valid libtool archive" 1>&2
exit $EXIT_FAILURE
fi
if test "$absdir" != "$libdir"; then
$echo "$modename: warning: \`$deplib' seems to be moved" 1>&2
fi
path="$absdir"
fi
depdepl=
case $host in
*-*-darwin*)
# we do not want to link against static libs,
# but need to link against shared
eval deplibrary_names=`${SED} -n -e 's/^library_names=\(.*\)$/\1/p' $deplib`
if test -n "$deplibrary_names" ; then
for tmp in $deplibrary_names ; do
depdepl=$tmp
done
if test -f "$path/$depdepl" ; then
depdepl="$path/$depdepl"
fi
# do not add paths which are already there
case " $newlib_search_path " in
*" $path "*) ;;
*) newlib_search_path="$newlib_search_path $path";;
esac
fi
path=""
;;
*)
path="-L$path"
;;
esac
;;
-l*)
case $host in
*-*-darwin*)
# Again, we only want to link against shared libraries
eval tmp_libs=`$echo "X$deplib" | $Xsed -e "s,^\-l,,"`
for tmp in $newlib_search_path ; do
if test -f "$tmp/lib$tmp_libs.dylib" ; then
eval depdepl="$tmp/lib$tmp_libs.dylib"
break
fi
done
path=""
;;
*) continue ;;
esac
;;
*) continue ;;
esac
case " $deplibs " in
*" $path "*) ;;
*) deplibs="$path $deplibs" ;;
esac
case " $deplibs " in
*" $depdepl "*) ;;
*) deplibs="$depdepl $deplibs" ;;
esac
done
fi # link_all_deplibs != no
fi # linkmode = lib
done # for deplib in $libs
dependency_libs="$newdependency_libs"
if test "$pass" = dlpreopen; then
# Link the dlpreopened libraries before other libraries
for deplib in $save_deplibs; do
deplibs="$deplib $deplibs"
done
fi
if test "$pass" != dlopen; then
if test "$pass" != conv; then
# Make sure lib_search_path contains only unique directories.
lib_search_path=
for dir in $newlib_search_path; do
case "$lib_search_path " in
*" $dir "*) ;;
*) lib_search_path="$lib_search_path $dir" ;;
esac
done
newlib_search_path=
fi
if test "$linkmode,$pass" != "prog,link"; then
vars="deplibs"
else
vars="compile_deplibs finalize_deplibs"
fi
for var in $vars dependency_libs; do
# Add libraries to $var in reverse order
eval tmp_libs=\"\$$var\"
new_libs=
for deplib in $tmp_libs; do
# FIXME: Pedantically, this is the right thing to do, so
# that some nasty dependency loop isn't accidentally
# broken:
#new_libs="$deplib $new_libs"
# Pragmatically, this seems to cause very few problems in
# practice:
case $deplib in
-L*) new_libs="$deplib $new_libs" ;;
-R*) ;;
*)
# And here is the reason: when a library appears more
# than once as an explicit dependence of a library, or
# is implicitly linked in more than once by the
# compiler, it is considered special, and multiple
# occurrences thereof are not removed. Compare this
# with having the same library being listed as a
# dependency of multiple other libraries: in this case,
# we know (pedantically, we assume) the library does not
# need to be listed more than once, so we keep only the
# last copy. This is not always right, but it is rare
# enough that we require users that really mean to play
# such unportable linking tricks to link the library
# using -Wl,-lname, so that libtool does not consider it
# for duplicate removal.
case " $specialdeplibs " in
*" $deplib "*) new_libs="$deplib $new_libs" ;;
*)
case " $new_libs " in
*" $deplib "*) ;;
*) new_libs="$deplib $new_libs" ;;
esac
;;
esac
;;
esac
done
tmp_libs=
for deplib in $new_libs; do
case $deplib in
-L*)
case " $tmp_libs " in
*" $deplib "*) ;;
*) tmp_libs="$tmp_libs $deplib" ;;
esac
;;
*) tmp_libs="$tmp_libs $deplib" ;;
esac
done
eval $var=\"$tmp_libs\"
done # for var
fi
# Last step: remove runtime libs from dependency_libs
# (they stay in deplibs)
tmp_libs=
for i in $dependency_libs ; do
case " $predeps $postdeps $compiler_lib_search_path " in
*" $i "*)
i=""
;;
esac
if test -n "$i" ; then
tmp_libs="$tmp_libs $i"
fi
done
dependency_libs=$tmp_libs
done # for pass
if test "$linkmode" = prog; then
dlfiles="$newdlfiles"
dlprefiles="$newdlprefiles"
fi
case $linkmode in
oldlib)
if test -n "$deplibs"; then
$echo "$modename: warning: \`-l' and \`-L' are ignored for archives" 1>&2
fi
if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then
$echo "$modename: warning: \`-dlopen' is ignored for archives" 1>&2
fi
if test -n "$rpath"; then
$echo "$modename: warning: \`-rpath' is ignored for archives" 1>&2
fi
if test -n "$xrpath"; then
$echo "$modename: warning: \`-R' is ignored for archives" 1>&2
fi
if test -n "$vinfo"; then
$echo "$modename: warning: \`-version-info/-version-number' is ignored for archives" 1>&2
fi
if test -n "$release"; then
$echo "$modename: warning: \`-release' is ignored for archives" 1>&2
fi
if test -n "$export_symbols" || test -n "$export_symbols_regex"; then
$echo "$modename: warning: \`-export-symbols' is ignored for archives" 1>&2
fi
# Now set the variables for building old libraries.
build_libtool_libs=no
oldlibs="$output"
objs="$objs$old_deplibs"
;;
lib)
# Make sure we only generate libraries of the form `libNAME.la'.
case $outputname in
lib*)
name=`$echo "X$outputname" | $Xsed -e 's/\.la$//' -e 's/^lib//'`
eval shared_ext=\"$shrext_cmds\"
eval libname=\"$libname_spec\"
;;
*)
if test "$module" = no; then
$echo "$modename: libtool library \`$output' must begin with \`lib'" 1>&2
$echo "$help" 1>&2
exit $EXIT_FAILURE
fi
if test "$need_lib_prefix" != no; then
# Add the "lib" prefix for modules if required
name=`$echo "X$outputname" | $Xsed -e 's/\.la$//'`
eval shared_ext=\"$shrext_cmds\"
eval libname=\"$libname_spec\"
else
libname=`$echo "X$outputname" | $Xsed -e 's/\.la$//'`
fi
;;
esac
if test -n "$objs"; then
if test "$deplibs_check_method" != pass_all; then
$echo "$modename: cannot build libtool library \`$output' from non-libtool objects on this host:$objs" 2>&1
exit $EXIT_FAILURE
else
$echo
$echo "*** Warning: Linking the shared library $output against the non-libtool"
$echo "*** objects $objs is not portable!"
libobjs="$libobjs $objs"
fi
fi
if test "$dlself" != no; then
$echo "$modename: warning: \`-dlopen self' is ignored for libtool libraries" 1>&2
fi
set dummy $rpath
if test "$#" -gt 2; then
$echo "$modename: warning: ignoring multiple \`-rpath's for a libtool library" 1>&2
fi
install_libdir="$2"
oldlibs=
if test -z "$rpath"; then
if test "$build_libtool_libs" = yes; then
# Building a libtool convenience library.
# Some compilers have problems with a `.al' extension so
# convenience libraries should have the same extension an
# archive normally would.
oldlibs="$output_objdir/$libname.$libext $oldlibs"
build_libtool_libs=convenience
build_old_libs=yes
fi
if test -n "$vinfo"; then
$echo "$modename: warning: \`-version-info/-version-number' is ignored for convenience libraries" 1>&2
fi
if test -n "$release"; then
$echo "$modename: warning: \`-release' is ignored for convenience libraries" 1>&2
fi
else
# Parse the version information argument.
save_ifs="$IFS"; IFS=':'
set dummy $vinfo 0 0 0
IFS="$save_ifs"
if test -n "$8"; then
$echo "$modename: too many parameters to \`-version-info'" 1>&2
$echo "$help" 1>&2
exit $EXIT_FAILURE
fi
# convert absolute version numbers to libtool ages
# this retains compatibility with .la files and attempts
# to make the code below a bit more comprehensible
case $vinfo_number in
yes)
number_major="$2"
number_minor="$3"
number_revision="$4"
#
# There are really only two kinds -- those that
# use the current revision as the major version
# and those that subtract age and use age as
# a minor version. But, then there is irix
# which has an extra 1 added just for fun
#
case $version_type in
darwin|linux|osf|windows)
current=`expr $number_major + $number_minor`
age="$number_minor"
revision="$number_revision"
;;
freebsd-aout|freebsd-elf|sunos)
current="$number_major"
revision="$number_minor"
age="0"
;;
irix|nonstopux)
current=`expr $number_major + $number_minor - 1`
age="$number_minor"
revision="$number_minor"
;;
esac
;;
no)
current="$2"
revision="$3"
age="$4"
;;
esac
# Check that each of the things are valid numbers.
case $current in
0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;;
*)
$echo "$modename: CURRENT \`$current' must be a nonnegative integer" 1>&2
$echo "$modename: \`$vinfo' is not valid version information" 1>&2
exit $EXIT_FAILURE
;;
esac
case $revision in
0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;;
*)
$echo "$modename: REVISION \`$revision' must be a nonnegative integer" 1>&2
$echo "$modename: \`$vinfo' is not valid version information" 1>&2
exit $EXIT_FAILURE
;;
esac
case $age in
0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;;
*)
$echo "$modename: AGE \`$age' must be a nonnegative integer" 1>&2
$echo "$modename: \`$vinfo' is not valid version information" 1>&2
exit $EXIT_FAILURE
;;
esac
if test "$age" -gt "$current"; then
$echo "$modename: AGE \`$age' is greater than the current interface number \`$current'" 1>&2
$echo "$modename: \`$vinfo' is not valid version information" 1>&2
exit $EXIT_FAILURE
fi
# Calculate the version variables.
major=
versuffix=
versuffix2=
verstring=
case $version_type in
none) ;;
darwin)
# Like Linux, but with the current version available in
# verstring for coding it into the library header
major=.`expr $current - $age`
versuffix="$major.$age.$revision"
# Darwin ld doesn't like 0 for these options...
minor_current=`expr $current + 1`
verstring="${wl}-compatibility_version ${wl}$minor_current ${wl}-current_version ${wl}$minor_current.$revision"
;;
freebsd-aout)
major=".$current"
versuffix=".$current.$revision";
;;
freebsd-elf)
major=".$current"
versuffix=".$current";
;;
irix | nonstopux)
major=`expr $current - $age + 1`
case $version_type in
nonstopux) verstring_prefix=nonstopux ;;
*) verstring_prefix=sgi ;;
esac
verstring="$verstring_prefix$major.$revision"
# Add in all the interfaces that we are compatible with.
loop=$revision
while test "$loop" -ne 0; do
iface=`expr $revision - $loop`
loop=`expr $loop - 1`
verstring="$verstring_prefix$major.$iface:$verstring"
done
# Before this point, $major must not contain `.'.
major=.$major
versuffix="$major.$revision"
;;
linux)
major=.`expr $current - $age`
versuffix="$major.$age.$revision"
versuffix2="$major.$age"
;;
osf)
major=.`expr $current - $age`
versuffix=".$current.$age.$revision"
verstring="$current.$age.$revision"
# Add in all the interfaces that we are compatible with.
loop=$age
while test "$loop" -ne 0; do
iface=`expr $current - $loop`
loop=`expr $loop - 1`
verstring="$verstring:${iface}.0"
done
# Make executables depend on our current version.
verstring="$verstring:${current}.0"
;;
sunos)
major=".$current"
versuffix=".$current.$revision"
;;
windows)
# Use '-' rather than '.', since we only want one
# extension on DOS 8.3 filesystems.
major=`expr $current - $age`
versuffix="-$major"
;;
*)
$echo "$modename: unknown library version type \`$version_type'" 1>&2
$echo "Fatal configuration error. See the $PACKAGE docs for more information." 1>&2
exit $EXIT_FAILURE
;;
esac
# Clear the version info if we defaulted, and they specified a release.
if test -z "$vinfo" && test -n "$release"; then
major=
case $version_type in
darwin)
# we can't check for "0.0" in archive_cmds due to quoting
# problems, so we reset it completely
verstring=
;;
*)
verstring="0.0"
;;
esac
if test "$need_version" = no; then
versuffix=
else
versuffix=".0.0"
fi
fi
# Remove version info from name if versioning should be avoided
if test "$avoid_version" = yes && test "$need_version" = no; then
major=
versuffix=
versuffix2=
verstring=""
fi
# Check to see if the archive will have undefined symbols.
if test "$allow_undefined" = yes; then
if test "$allow_undefined_flag" = unsupported; then
$echo "$modename: warning: undefined symbols not allowed in $host shared libraries" 1>&2
build_libtool_libs=no
build_old_libs=yes
fi
else
# Don't allow undefined symbols.
allow_undefined_flag="$no_undefined_flag"
fi
fi
if test "$mode" != relink; then
# Remove our outputs, but don't remove object files since they
# may have been created when compiling PIC objects.
removelist=
tempremovelist=`$echo "$output_objdir/*"`
for p in $tempremovelist; do
case $p in
*.$objext)
;;
$output_objdir/$outputname | $output_objdir/$libname.* | $output_objdir/${libname}${release}.*)
if test "X$precious_files_regex" != "X"; then
if echo $p | $EGREP -e "$precious_files_regex" >/dev/null 2>&1
then
continue
fi
fi
removelist="$removelist $p"
;;
*) ;;
esac
done
if test -n "$removelist"; then
$show "${rm}r $removelist"
$run ${rm}r $removelist
fi
fi
# Now set the variables for building old libraries.
if test "$build_old_libs" = yes && test "$build_libtool_libs" != convenience ; then
oldlibs="$oldlibs $output_objdir/$libname.$libext"
# Transform .lo files to .o files.
oldobjs="$objs "`$echo "X$libobjs" | $SP2NL | $Xsed -e '/\.'${libext}'$/d' -e "$lo2o" | $NL2SP`
fi
# Eliminate all temporary directories.
for path in $notinst_path; do
lib_search_path=`$echo "$lib_search_path " | ${SED} -e 's% $path % %g'`
deplibs=`$echo "$deplibs " | ${SED} -e 's% -L$path % %g'`
dependency_libs=`$echo "$dependency_libs " | ${SED} -e 's% -L$path % %g'`
done
if test -n "$xrpath"; then
# If the user specified any rpath flags, then add them.
temp_xrpath=
for libdir in $xrpath; do
temp_xrpath="$temp_xrpath -R$libdir"
case "$finalize_rpath " in
*" $libdir "*) ;;
*) finalize_rpath="$finalize_rpath $libdir" ;;
esac
done
if test "$hardcode_into_libs" != yes || test "$build_old_libs" = yes; then
dependency_libs="$temp_xrpath $dependency_libs"
fi
fi
# Make sure dlfiles contains only unique files that won't be dlpreopened
old_dlfiles="$dlfiles"
dlfiles=
for lib in $old_dlfiles; do
case " $dlprefiles $dlfiles " in
*" $lib "*) ;;
*) dlfiles="$dlfiles $lib" ;;
esac
done
# Make sure dlprefiles contains only unique files
old_dlprefiles="$dlprefiles"
dlprefiles=
for lib in $old_dlprefiles; do
case "$dlprefiles " in
*" $lib "*) ;;
*) dlprefiles="$dlprefiles $lib" ;;
esac
done
if test "$build_libtool_libs" = yes; then
if test -n "$rpath"; then
case $host in
*-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-*-beos*)
# these systems don't actually have a c library (as such)!
;;
*-*-rhapsody* | *-*-darwin1.[012])
# Rhapsody C library is in the System framework
deplibs="$deplibs -framework System"
;;
*-*-netbsd*)
# Don't link with libc until the a.out ld.so is fixed.
;;
*-*-openbsd* | *-*-freebsd* | *-*-dragonfly*)
# Do not include libc due to us having libc/libc_r.
test "X$arg" = "X-lc" && continue
;;
*)
# Add libc to deplibs on all other systems if necessary.
if test "$build_libtool_need_lc" = "yes"; then
deplibs="$deplibs -lc"
fi
;;
esac
fi
# Transform deplibs into only deplibs that can be linked in shared.
name_save=$name
libname_save=$libname
release_save=$release
versuffix_save=$versuffix
major_save=$major
# I'm not sure if I'm treating the release correctly. I think
# release should show up in the -l (ie -lgmp5) so we don't want to
# add it in twice. Is that correct?
release=""
versuffix=""
major=""
newdeplibs=
droppeddeps=no
case $deplibs_check_method in
pass_all)
# Don't check for shared/static. Everything works.
# This might be a little naive. We might want to check
# whether the library exists or not. But this is on
# osf3 & osf4 and I'm not really sure... Just
# implementing what was already the behavior.
newdeplibs=$deplibs
;;
test_compile)
# This code stresses the "libraries are programs" paradigm to its
# limits. Maybe even breaks it. We compile a program, linking it
# against the deplibs as a proxy for the library. Then we can check
# whether they linked in statically or dynamically with ldd.
$rm conftest.c
cat > conftest.c <<EOF
int main() { return 0; }
EOF
$rm conftest
$LTCC -o conftest conftest.c $deplibs
if test "$?" -eq 0 ; then
ldd_output=`ldd conftest`
for i in $deplibs; do
name="`expr $i : '-l\(.*\)'`"
# If $name is empty we are operating on a -L argument.
# Use a string comparison here: $name is a library *name*, not an
# integer, so "test -ne" would abort with "integer expression
# expected" for any non-numeric name and spuriously fail the
# condition.  The salvage branch below and the file_magic /
# match_pattern arms already compare with !=.
if test "$name" != "" && test "$name" != "0"; then
if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then
# Compiler-internal pre/post dependencies are accepted as-is.
case " $predeps $postdeps " in
*" $i "*)
newdeplibs="$newdeplibs $i"
i=""
;;
esac
fi
if test -n "$i" ; then
# Expand the platform naming specs for this -l name and check
# whether ldd reports the shared library was actually used.
libname=`eval \\$echo \"$libname_spec\"`
deplib_matches=`eval \\$echo \"$library_names_spec\"`
set dummy $deplib_matches
deplib_match=$2
if test `expr "$ldd_output" : ".*$deplib_match"` -ne 0 ; then
newdeplibs="$newdeplibs $i"
else
droppeddeps=yes
$echo
$echo "*** Warning: dynamic linker does not accept needed library $i."
$echo "*** I have the capability to make that library automatically link in when"
$echo "*** you link to this library. But I can only do this if you have a"
$echo "*** shared version of the library, which I believe you do not have"
$echo "*** because a test_compile did reveal that the linker did not use it for"
$echo "*** its dynamic dependency list that programs get resolved with at runtime."
fi
fi
else
newdeplibs="$newdeplibs $i"
fi
done
else
# Error occurred in the first compile. Let's try to salvage
# the situation: Compile a separate program for each library.
for i in $deplibs; do
name="`expr $i : '-l\(.*\)'`"
# If $name is empty we are operating on a -L argument.
if test "$name" != "" && test "$name" != "0"; then
$rm conftest
$LTCC -o conftest conftest.c $i
# Did it work?
if test "$?" -eq 0 ; then
ldd_output=`ldd conftest`
if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then
case " $predeps $postdeps " in
*" $i "*)
newdeplibs="$newdeplibs $i"
i=""
;;
esac
fi
if test -n "$i" ; then
libname=`eval \\$echo \"$libname_spec\"`
deplib_matches=`eval \\$echo \"$library_names_spec\"`
set dummy $deplib_matches
deplib_match=$2
if test `expr "$ldd_output" : ".*$deplib_match"` -ne 0 ; then
newdeplibs="$newdeplibs $i"
else
droppeddeps=yes
$echo
$echo "*** Warning: dynamic linker does not accept needed library $i."
$echo "*** I have the capability to make that library automatically link in when"
$echo "*** you link to this library. But I can only do this if you have a"
$echo "*** shared version of the library, which you do not appear to have"
$echo "*** because a test_compile did reveal that the linker did not use this one"
$echo "*** as a dynamic dependency that programs can get resolved with at runtime."
fi
fi
else
droppeddeps=yes
$echo
$echo "*** Warning! Library $i is needed by this library but I was not able to"
$echo "*** make it link in! You will probably need to install it or some"
$echo "*** library that it depends on before this library will be fully"
$echo "*** functional. Installing it before continuing would be even better."
fi
else
newdeplibs="$newdeplibs $i"
fi
done
fi
;;
# file_magic: accept a -l dependency only when a candidate file found on
# the linker search paths matches the configured magic-number regex.
file_magic*)
set dummy $deplibs_check_method
file_magic_regex=`expr "$deplibs_check_method" : "$2 \(.*\)"`
for a_deplib in $deplibs; do
name="`expr $a_deplib : '-l\(.*\)'`"
# If $name is empty we are operating on a -L argument.
if test "$name" != "" && test "$name" != "0"; then
if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then
case " $predeps $postdeps " in
*" $a_deplib "*)
newdeplibs="$newdeplibs $a_deplib"
a_deplib=""
;;
esac
fi
if test -n "$a_deplib" ; then
libname=`eval \\$echo \"$libname_spec\"`
for i in $lib_search_path $sys_lib_search_path $shlib_search_path; do
potential_libs=`ls $i/$libname[.-]* 2>/dev/null`
for potent_lib in $potential_libs; do
# Follow soft links.
if ls -lLd "$potent_lib" 2>/dev/null \
| grep " -> " >/dev/null; then
continue
fi
# The statement above tries to avoid entering an
# endless loop below, in case of cyclic links.
# We might still enter an endless loop, since a link
# loop can be closed while we follow links,
# but so what?
potlib="$potent_lib"
while test -h "$potlib" 2>/dev/null; do
potliblink=`ls -ld $potlib | ${SED} 's/.* -> //'`
case $potliblink in
[\\/]* | [A-Za-z]:[\\/]*) potlib="$potliblink";;
*) potlib=`$echo "X$potlib" | $Xsed -e 's,[^/]*$,,'`"$potliblink";;
esac
done
# Accept if the first 10 lines of file-magic output match.
if eval $file_magic_cmd \"\$potlib\" 2>/dev/null \
| ${SED} 10q \
| $EGREP "$file_magic_regex" > /dev/null; then
newdeplibs="$newdeplibs $a_deplib"
a_deplib=""
break 2
fi
done
done
fi
if test -n "$a_deplib" ; then
droppeddeps=yes
$echo
$echo "*** Warning: linker path does not have real file for library $a_deplib."
$echo "*** I have the capability to make that library automatically link in when"
$echo "*** you link to this library. But I can only do this if you have a"
$echo "*** shared version of the library, which you do not appear to have"
$echo "*** because I did check the linker path looking for a file starting"
if test -z "$potlib" ; then
$echo "*** with $libname but no candidates were found. (...for file magic test)"
else
$echo "*** with $libname and none of the candidates passed a file format test"
$echo "*** using a file magic. Last file checked: $potlib"
fi
fi
else
# Add a -L argument.
newdeplibs="$newdeplibs $a_deplib"
fi
done # Gone through all deplibs.
;;
# match_pattern: accept a -l dependency when a candidate file *name*
# (not its contents) matches the configured regex.
match_pattern*)
set dummy $deplibs_check_method
match_pattern_regex=`expr "$deplibs_check_method" : "$2 \(.*\)"`
for a_deplib in $deplibs; do
name="`expr $a_deplib : '-l\(.*\)'`"
# If $name is empty we are operating on a -L argument.
if test -n "$name" && test "$name" != "0"; then
if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then
case " $predeps $postdeps " in
*" $a_deplib "*)
newdeplibs="$newdeplibs $a_deplib"
a_deplib=""
;;
esac
fi
if test -n "$a_deplib" ; then
libname=`eval \\$echo \"$libname_spec\"`
for i in $lib_search_path $sys_lib_search_path $shlib_search_path; do
potential_libs=`ls $i/$libname[.-]* 2>/dev/null`
for potent_lib in $potential_libs; do
potlib="$potent_lib" # see symlink-check above in file_magic test
if eval $echo \"$potent_lib\" 2>/dev/null \
| ${SED} 10q \
| $EGREP "$match_pattern_regex" > /dev/null; then
newdeplibs="$newdeplibs $a_deplib"
a_deplib=""
break 2
fi
done
done
fi
if test -n "$a_deplib" ; then
droppeddeps=yes
$echo
$echo "*** Warning: linker path does not have real file for library $a_deplib."
$echo "*** I have the capability to make that library automatically link in when"
$echo "*** you link to this library. But I can only do this if you have a"
$echo "*** shared version of the library, which you do not appear to have"
$echo "*** because I did check the linker path looking for a file starting"
if test -z "$potlib" ; then
$echo "*** with $libname but no candidates were found. (...for regex pattern test)"
else
$echo "*** with $libname and none of the candidates passed a file format test"
$echo "*** using a regex pattern. Last file checked: $potlib"
fi
fi
else
# Add a -L argument.
newdeplibs="$newdeplibs $a_deplib"
fi
done # Gone through all deplibs.
;;
# none/unknown (and any unrecognized method): drop every inter-library
# dependency except plain -lc and -L/-R flags, warning if anything real
# had to be discarded.
none | unknown | *)
newdeplibs=""
tmp_deplibs=`$echo "X $deplibs" | $Xsed -e 's/ -lc$//' \
-e 's/ -[LR][^ ]*//g'`
if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then
for i in $predeps $postdeps ; do
# can't use Xsed below, because $i might contain '/'
tmp_deplibs=`$echo "X $tmp_deplibs" | ${SED} -e "1s,^X,," -e "s,$i,,"`
done
fi
if $echo "X $tmp_deplibs" | $Xsed -e 's/[ ]//g' \
| grep . >/dev/null; then
$echo
if test "X$deplibs_check_method" = "Xnone"; then
$echo "*** Warning: inter-library dependencies are not supported in this platform."
else
$echo "*** Warning: inter-library dependencies are not known to be supported."
fi
$echo "*** All declared inter-library dependencies are being dropped."
droppeddeps=yes
fi
;;
esac
# Restore the library naming variables saved before the deplib probe.
versuffix=$versuffix_save
major=$major_save
release=$release_save
libname=$libname_save
name=$name_save
case $host in
*-*-rhapsody* | *-*-darwin1.[012])
# On Rhapsody replace the C library is the System framework
newdeplibs=`$echo "X $newdeplibs" | $Xsed -e 's/ -lc / -framework System /'`
;;
esac
# If any dependency was dropped, either degrade to a static module
# (modules can rely on the dlopening application) or, when undefined
# symbols are forbidden, fall back to building only a static library.
if test "$droppeddeps" = yes; then
if test "$module" = yes; then
$echo
$echo "*** Warning: libtool could not satisfy all declared inter-library"
$echo "*** dependencies of module $libname. Therefore, libtool will create"
$echo "*** a static module, that should work as long as the dlopening"
$echo "*** application is linked with the -dlopen flag."
if test -z "$global_symbol_pipe"; then
$echo
$echo "*** However, this would only work if libtool was able to extract symbol"
$echo "*** lists from a program, using \`nm' or equivalent, but libtool could"
$echo "*** not find such a program. So, this module is probably useless."
$echo "*** \`nm' from GNU binutils and a full rebuild may help."
fi
if test "$build_old_libs" = no; then
oldlibs="$output_objdir/$libname.$libext"
build_libtool_libs=module
build_old_libs=yes
else
build_libtool_libs=no
fi
else
$echo "*** The inter-library dependencies that have been dropped here will be"
$echo "*** automatically added whenever a program is linked with this library"
$echo "*** or is declared to -dlopen it."
if test "$allow_undefined" = no; then
$echo
$echo "*** Since this library must not contain undefined symbols,"
$echo "*** because either the platform does not support them or"
$echo "*** it was explicitly requested with -no-undefined,"
$echo "*** libtool will only create a static version of it."
if test "$build_old_libs" = no; then
oldlibs="$output_objdir/$libname.$libext"
build_libtool_libs=module
build_old_libs=yes
else
build_libtool_libs=no
fi
fi
fi
fi
# Done checking deplibs!
deplibs=$newdeplibs
fi
# All the library-specific variables (install_libdir is set above).
# Reset them; they are (re)computed below if the shared lib is built.
library_names=
old_library=
dlname=
# Test again, we may have decided not to build it any more
if test "$build_libtool_libs" = yes; then
if test "$hardcode_into_libs" = yes; then
# Hardcode the library paths
hardcode_libdirs=
dep_rpath=
rpath="$finalize_rpath"
test "$mode" != relink && rpath="$compile_rpath$rpath"
for libdir in $rpath; do
if test -n "$hardcode_libdir_flag_spec"; then
if test -n "$hardcode_libdir_separator"; then
if test -z "$hardcode_libdirs"; then
hardcode_libdirs="$libdir"
else
# Just accumulate the unique libdirs.
case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in
*"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*)
;;
*)
hardcode_libdirs="$hardcode_libdirs$hardcode_libdir_separator$libdir"
;;
esac
fi
else
eval flag=\"$hardcode_libdir_flag_spec\"
dep_rpath="$dep_rpath $flag"
fi
elif test -n "$runpath_var"; then
# No linker flag available; collect dirs for the runtime path var.
case "$perm_rpath " in
*" $libdir "*) ;;
*) perm_rpath="$perm_rpath $libdir" ;;
esac
fi
done
# Substitute the hardcoded libdirs into the rpath.
if test -n "$hardcode_libdir_separator" &&
test -n "$hardcode_libdirs"; then
libdir="$hardcode_libdirs"
if test -n "$hardcode_libdir_flag_spec_ld"; then
eval dep_rpath=\"$hardcode_libdir_flag_spec_ld\"
else
eval dep_rpath=\"$hardcode_libdir_flag_spec\"
fi
fi
if test -n "$runpath_var" && test -n "$perm_rpath"; then
# We should set the runpath_var.
rpath=
for dir in $perm_rpath; do
rpath="$rpath$dir:"
done
eval "$runpath_var='$rpath\$$runpath_var'; export $runpath_var"
fi
test -n "$dep_rpath" && deplibs="$dep_rpath $deplibs"
fi
# Export the shared-library search path variable if one is needed.
shlibpath="$finalize_shlibpath"
test "$mode" != relink && shlibpath="$compile_shlibpath$shlibpath"
if test -n "$shlibpath"; then
eval "$shlibpath_var='$shlibpath\$$shlibpath_var'; export $shlibpath_var"
fi
# Get the real and link names of the library.
eval shared_ext=\"$shrext_cmds\"
eval library_names=\"$library_names_spec\"
set dummy $library_names
realname="$2"
shift; shift
if test -n "$soname_spec"; then
eval soname=\"$soname_spec\"
else
soname="$realname"
fi
if test -z "$dlname"; then
dlname=$soname
fi
lib="$output_objdir/$realname"
# Remaining positional params are the alias names to symlink later.
for link
do
linknames="$linknames $link"
done
# Use standard objects if they are pic
test -z "$pic_flag" && libobjs=`$echo "X$libobjs" | $SP2NL | $Xsed -e "$lo2o" | $NL2SP`
# Prepare the list of exported symbols
if test -z "$export_symbols"; then
if test "$always_export_symbols" = yes || test -n "$export_symbols_regex"; then
$show "generating symbol list for \`$libname.la'"
export_symbols="$output_objdir/$libname.exp"
$run $rm $export_symbols
cmds=$export_symbols_cmds
# Commands are '~'-separated; run each with normal IFS restored.
save_ifs="$IFS"; IFS='~'
for cmd in $cmds; do
IFS="$save_ifs"
eval cmd=\"$cmd\"
if len=`expr "X$cmd" : ".*"` &&
test "$len" -le "$max_cmd_len" || test "$max_cmd_len" -le -1; then
$show "$cmd"
$run eval "$cmd" || exit $?
skipped_export=false
else
# The command line is too long to execute in one step.
$show "using reloadable object file for export list..."
skipped_export=:
fi
done
IFS="$save_ifs"
if test -n "$export_symbols_regex"; then
$show "$EGREP -e \"$export_symbols_regex\" \"$export_symbols\" > \"${export_symbols}T\""
$run eval '$EGREP -e "$export_symbols_regex" "$export_symbols" > "${export_symbols}T"'
$show "$mv \"${export_symbols}T\" \"$export_symbols\""
$run eval '$mv "${export_symbols}T" "$export_symbols"'
fi
fi
fi
if test -n "$export_symbols" && test -n "$include_expsyms"; then
$run eval '$echo "X$include_expsyms" | $SP2NL >> "$export_symbols"'
fi
# Remove convenience libraries from deplibs; their objects are linked
# in directly (via whole-archive flags or by extraction) below.
tmp_deplibs=
for test_deplib in $deplibs; do
case " $convenience " in
*" $test_deplib "*) ;;
*)
tmp_deplibs="$tmp_deplibs $test_deplib"
;;
esac
done
deplibs="$tmp_deplibs"
if test -n "$convenience"; then
if test -n "$whole_archive_flag_spec"; then
save_libobjs=$libobjs
eval libobjs=\"\$libobjs $whole_archive_flag_spec\"
else
gentop="$output_objdir/${outputname}x"
generated="$generated $gentop"
func_extract_archives $gentop $convenience
libobjs="$libobjs $func_extract_archives_result"
fi
fi
if test "$thread_safe" = yes && test -n "$thread_safe_flag_spec"; then
eval flag=\"$thread_safe_flag_spec\"
linker_flags="$linker_flags $flag"
fi
# Make a backup of the uninstalled library when relinking
if test "$mode" = relink; then
$run eval '(cd $output_objdir && $rm ${realname}U && $mv $realname ${realname}U)' || exit $?
fi
# Do each of the archive commands.
# Pick the link command set: module vs. library, with or without an
# explicit export-symbol list.  $test_cmds is used only to measure the
# resulting command-line length.
if test "$module" = yes && test -n "$module_cmds" ; then
if test -n "$export_symbols" && test -n "$module_expsym_cmds"; then
eval test_cmds=\"$module_expsym_cmds\"
cmds=$module_expsym_cmds
else
eval test_cmds=\"$module_cmds\"
cmds=$module_cmds
fi
else
if test -n "$export_symbols" && test -n "$archive_expsym_cmds"; then
eval test_cmds=\"$archive_expsym_cmds\"
cmds=$archive_expsym_cmds
else
eval test_cmds=\"$archive_cmds\"
cmds=$archive_cmds
fi
fi
# If the full link command fits in the OS command-line limit, proceed
# normally; otherwise link piecewise through reloadable object files.
if test "X$skipped_export" != "X:" && len=`expr "X$test_cmds" : ".*"` &&
test "$len" -le "$max_cmd_len" || test "$max_cmd_len" -le -1; then
:
else
# The command line is too long to link in one step, link piecewise.
$echo "creating reloadable object files..."
# Save the value of $output and $libobjs because we want to
# use them later. If we have whole_archive_flag_spec, we
# want to use save_libobjs as it was before
# whole_archive_flag_spec was expanded, because we can't
# assume the linker understands whole_archive_flag_spec.
# This may have to be revisited, in case too many
# convenience libraries get linked in and end up exceeding
# the spec.
if test -z "$convenience" || test -z "$whole_archive_flag_spec"; then
save_libobjs=$libobjs
fi
save_output=$output
output_la=`$echo "X$output" | $Xsed -e "$basename"`
# Clear the reloadable object creation command queue and
# initialize k to one.
test_cmds=
concat_cmds=
objlist=
delfiles=
last_robj=
k=1
output=$output_objdir/$output_la-${k}.$objext
# Loop over the list of objects to be linked.
# Greedily pack objects into $objlist until the reload command would
# exceed $max_cmd_len, then queue that command and start a new batch;
# each batch links in the previous batch's output ($last_robj).
for obj in $save_libobjs
do
eval test_cmds=\"$reload_cmds $objlist $last_robj\"
if test "X$objlist" = X ||
{ len=`expr "X$test_cmds" : ".*"` &&
test "$len" -le "$max_cmd_len"; }; then
objlist="$objlist $obj"
else
# The command $test_cmds is almost too long, add a
# command to the queue.
if test "$k" -eq 1 ; then
# The first file doesn't have a previous command to add.
eval concat_cmds=\"$reload_cmds $objlist $last_robj\"
else
# All subsequent reloadable object files will link in
# the last one created.
eval concat_cmds=\"\$concat_cmds~$reload_cmds $objlist $last_robj\"
fi
last_robj=$output_objdir/$output_la-${k}.$objext
k=`expr $k + 1`
output=$output_objdir/$output_la-${k}.$objext
objlist=$obj
len=1
fi
done
# Handle the remaining objects by creating one last
# reloadable object file. All subsequent reloadable object
# files will link in the last one created.
test -z "$concat_cmds" || concat_cmds=$concat_cmds~
eval concat_cmds=\"\${concat_cmds}$reload_cmds $objlist $last_robj\"
if ${skipped_export-false}; then
$show "generating symbol list for \`$libname.la'"
export_symbols="$output_objdir/$libname.exp"
$run $rm $export_symbols
libobjs=$output
# Append the command to create the export file.
eval concat_cmds=\"\$concat_cmds~$export_symbols_cmds\"
fi
# Set up a command to remove the reloadable object files
# after they are used.
i=0
while test "$i" -lt "$k"
do
i=`expr $i + 1`
delfiles="$delfiles $output_objdir/$output_la-${i}.$objext"
done
$echo "creating a temporary reloadable object file: $output"
# Loop through the commands generated above and execute them.
save_ifs="$IFS"; IFS='~'
for cmd in $concat_cmds; do
IFS="$save_ifs"
$show "$cmd"
$run eval "$cmd" || exit $?
done
IFS="$save_ifs"
libobjs=$output
# Restore the value of output.
output=$save_output
if test -n "$convenience" && test -n "$whole_archive_flag_spec"; then
eval libobjs=\"\$libobjs $whole_archive_flag_spec\"
fi
# Expand the library linking commands again to reset the
# value of $libobjs for piecewise linking.
# Do each of the archive commands.
if test "$module" = yes && test -n "$module_cmds" ; then
if test -n "$export_symbols" && test -n "$module_expsym_cmds"; then
cmds=$module_expsym_cmds
else
cmds=$module_cmds
fi
else
if test -n "$export_symbols" && test -n "$archive_expsym_cmds"; then
cmds=$archive_expsym_cmds
else
cmds=$archive_cmds
fi
fi
# Append the command to remove the reloadable object files
# to the just-reset $cmds.
eval cmds=\"\$cmds~\$rm $delfiles\"
fi
# Execute the '~'-separated archive/link commands chosen above.
save_ifs="$IFS"; IFS='~'
for cmd in $cmds; do
IFS="$save_ifs"
eval cmd=\"$cmd\"
$show "$cmd"
$run eval "$cmd" || exit $?
done
IFS="$save_ifs"
# Restore the uninstalled library and exit
if test "$mode" = relink; then
$run eval '(cd $output_objdir && $rm ${realname}T && $mv $realname ${realname}T && $mv "$realname"U $realname)' || exit $?
exit $EXIT_SUCCESS
fi
# Create links to the real library.
for linkname in $linknames; do
if test "$realname" != "$linkname"; then
$show "(cd $output_objdir && $rm $linkname && $LN_S $realname $linkname)"
$run eval '(cd $output_objdir && $rm $linkname && $LN_S $realname $linkname)' || exit $?
fi
done
# If -module or -export-dynamic was specified, set the dlname.
if test "$module" = yes || test "$export_dynamic" = yes; then
# On all known operating systems, these are identical.
dlname="$soname"
fi
fi
;;
# Output is a (libtool) object file: warn about ignored link flags,
# build the non-PIC object via $reload_cmds, then (if shared libs are
# enabled) build the PIC libtool object.
obj)
if test -n "$deplibs"; then
$echo "$modename: warning: \`-l' and \`-L' are ignored for objects" 1>&2
fi
if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then
$echo "$modename: warning: \`-dlopen' is ignored for objects" 1>&2
fi
if test -n "$rpath"; then
$echo "$modename: warning: \`-rpath' is ignored for objects" 1>&2
fi
if test -n "$xrpath"; then
$echo "$modename: warning: \`-R' is ignored for objects" 1>&2
fi
if test -n "$vinfo"; then
$echo "$modename: warning: \`-version-info' is ignored for objects" 1>&2
fi
if test -n "$release"; then
$echo "$modename: warning: \`-release' is ignored for objects" 1>&2
fi
case $output in
*.lo)
if test -n "$objs$old_deplibs"; then
$echo "$modename: cannot build library object \`$output' from non-libtool objects" 1>&2
exit $EXIT_FAILURE
fi
libobj="$output"
obj=`$echo "X$output" | $Xsed -e "$lo2o"`
;;
*)
libobj=
obj="$output"
;;
esac
# Delete the old objects.
$run $rm $obj $libobj
# Objects from convenience libraries. This assumes
# single-version convenience libraries. Whenever we create
# different ones for PIC/non-PIC, this we'll have to duplicate
# the extraction.
reload_conv_objs=
gentop=
# reload_cmds runs $LD directly, so let us get rid of
# -Wl from whole_archive_flag_spec
wl=
if test -n "$convenience"; then
if test -n "$whole_archive_flag_spec"; then
eval reload_conv_objs=\"\$reload_objs $whole_archive_flag_spec\"
else
gentop="$output_objdir/${obj}x"
generated="$generated $gentop"
func_extract_archives $gentop $convenience
reload_conv_objs="$reload_objs $func_extract_archives_result"
fi
fi
# Create the old-style object.
reload_objs="$objs$old_deplibs "`$echo "X$libobjs" | $SP2NL | $Xsed -e '/\.'${libext}$'/d' -e '/\.lib$/d' -e "$lo2o" | $NL2SP`" $reload_conv_objs" ### testsuite: skip nested quoting test
output="$obj"
cmds=$reload_cmds
save_ifs="$IFS"; IFS='~'
for cmd in $cmds; do
IFS="$save_ifs"
eval cmd=\"$cmd\"
$show "$cmd"
$run eval "$cmd" || exit $?
done
IFS="$save_ifs"
# Exit if we aren't doing a library object file.
if test -z "$libobj"; then
if test -n "$gentop"; then
$show "${rm}r $gentop"
$run ${rm}r $gentop
fi
exit $EXIT_SUCCESS
fi
if test "$build_libtool_libs" != yes; then
if test -n "$gentop"; then
$show "${rm}r $gentop"
$run ${rm}r $gentop
fi
# Create an invalid libtool object if no PIC, so that we don't
# accidentally link it into a program.
# $show "echo timestamp > $libobj"
# $run eval "echo timestamp > $libobj" || exit $?
exit $EXIT_SUCCESS
fi
if test -n "$pic_flag" || test "$pic_mode" != default; then
# Only do commands if we really have different PIC objects.
reload_objs="$libobjs $reload_conv_objs"
output="$libobj"
cmds=$reload_cmds
save_ifs="$IFS"; IFS='~'
for cmd in $cmds; do
IFS="$save_ifs"
eval cmd=\"$cmd\"
$show "$cmd"
$run eval "$cmd" || exit $?
done
IFS="$save_ifs"
fi
if test -n "$gentop"; then
$show "${rm}r $gentop"
$run ${rm}r $gentop
fi
exit $EXIT_SUCCESS
;;
# Output is a program: normalize the .exe suffix on cygwin, then warn
# about library-only flags that are ignored for programs.
prog)
case $host in
*cygwin*) output=`$echo $output | ${SED} -e 's,.exe$,,;s,$,.exe,'` ;;
esac
if test -n "$vinfo"; then
$echo "$modename: warning: \`-version-info' is ignored for programs" 1>&2
fi
if test -n "$release"; then
$echo "$modename: warning: \`-release' is ignored for programs" 1>&2
fi
if test "$preload" = yes; then
if test "$dlopen_support" = unknown && test "$dlopen_self" = unknown &&
test "$dlopen_self_static" = unknown; then
$echo "$modename: warning: \`AC_LIBTOOL_DLOPEN' not used. Assuming no dlopen support."
fi
fi
# Per-host fixups for the program's dependency lists.
case $host in
*-*-rhapsody* | *-*-darwin1.[012])
# On Rhapsody replace the C library is the System framework
compile_deplibs=`$echo "X $compile_deplibs" | $Xsed -e 's/ -lc / -framework System /'`
finalize_deplibs=`$echo "X $finalize_deplibs" | $Xsed -e 's/ -lc / -framework System /'`
;;
esac
case $host in
*darwin*)
# Don't allow lazy linking, it breaks C++ global constructors
if test "$tagname" = CXX ; then
compile_command="$compile_command ${wl}-bind_at_load"
finalize_command="$finalize_command ${wl}-bind_at_load"
fi
;;
esac
# Append deplibs, then merge user-supplied -rpath/-R dirs (deduped)
# into finalize_rpath.
compile_command="$compile_command $compile_deplibs"
finalize_command="$finalize_command $finalize_deplibs"
if test -n "$rpath$xrpath"; then
# If the user specified any rpath flags, then add them.
for libdir in $rpath $xrpath; do
# This is the magic to use -rpath.
case "$finalize_rpath " in
*" $libdir "*) ;;
*) finalize_rpath="$finalize_rpath $libdir" ;;
esac
done
fi
# Now hardcode the library paths
# Build $compile_rpath from both rpath lists: turn each libdir into a
# linker flag (or accumulate into runpath_var / dllsearchpath).
rpath=
hardcode_libdirs=
for libdir in $compile_rpath $finalize_rpath; do
if test -n "$hardcode_libdir_flag_spec"; then
if test -n "$hardcode_libdir_separator"; then
if test -z "$hardcode_libdirs"; then
hardcode_libdirs="$libdir"
else
# Just accumulate the unique libdirs.
case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in
*"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*)
;;
*)
hardcode_libdirs="$hardcode_libdirs$hardcode_libdir_separator$libdir"
;;
esac
fi
else
eval flag=\"$hardcode_libdir_flag_spec\"
rpath="$rpath $flag"
fi
elif test -n "$runpath_var"; then
case "$perm_rpath " in
*" $libdir "*) ;;
*) perm_rpath="$perm_rpath $libdir" ;;
esac
fi
case $host in
*-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2*)
case :$dllsearchpath: in
*":$libdir:"*) ;;
*) dllsearchpath="$dllsearchpath:$libdir";;
esac
;;
esac
done
# Substitute the hardcoded libdirs into the rpath.
if test -n "$hardcode_libdir_separator" &&
test -n "$hardcode_libdirs"; then
libdir="$hardcode_libdirs"
eval rpath=\" $hardcode_libdir_flag_spec\"
fi
compile_rpath="$rpath"
# Same transformation again, but for the finalize (installed) rpath.
rpath=
hardcode_libdirs=
for libdir in $finalize_rpath; do
if test -n "$hardcode_libdir_flag_spec"; then
if test -n "$hardcode_libdir_separator"; then
if test -z "$hardcode_libdirs"; then
hardcode_libdirs="$libdir"
else
# Just accumulate the unique libdirs.
case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in
*"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*)
;;
*)
hardcode_libdirs="$hardcode_libdirs$hardcode_libdir_separator$libdir"
;;
esac
fi
else
eval flag=\"$hardcode_libdir_flag_spec\"
rpath="$rpath $flag"
fi
elif test -n "$runpath_var"; then
case "$finalize_perm_rpath " in
*" $libdir "*) ;;
*) finalize_perm_rpath="$finalize_perm_rpath $libdir" ;;
esac
fi
done
# Substitute the hardcoded libdirs into the rpath.
if test -n "$hardcode_libdir_separator" &&
test -n "$hardcode_libdirs"; then
libdir="$hardcode_libdirs"
eval rpath=\" $hardcode_libdir_flag_spec\"
fi
finalize_rpath="$rpath"
# When also building old libs, link against the standard (.o) objects.
if test -n "$libobjs" && test "$build_old_libs" = yes; then
# Transform all the library objects into standard objects.
compile_command=`$echo "X$compile_command" | $SP2NL | $Xsed -e "$lo2o" | $NL2SP`
finalize_command=`$echo "X$finalize_command" | $SP2NL | $Xsed -e "$lo2o" | $NL2SP`
fi
# Decide whether a dlsym-emulation symbol table source file is needed.
dlsyms=
if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then
if test -n "$NM" && test -n "$global_symbol_pipe"; then
dlsyms="${outputname}S.c"
else
$echo "$modename: not configured to extract global symbols from dlpreopened files" 1>&2
fi
fi
# Generate, compile and splice in the lt_preloaded_symbols table used
# for dlopen emulation of dlpreopened files.  NOTE: the multi-line
# quoted strings below are emitted verbatim into the generated C file;
# do not edit or comment inside them.
if test -n "$dlsyms"; then
case $dlsyms in
"") ;;
*.c)
# Discover the nlist of each of the dlfiles.
nlist="$output_objdir/${outputname}.nm"
$show "$rm $nlist ${nlist}S ${nlist}T"
$run $rm "$nlist" "${nlist}S" "${nlist}T"
# Parse the name list into a source file.
$show "creating $output_objdir/$dlsyms"
test -z "$run" && $echo > "$output_objdir/$dlsyms" "\
/* $dlsyms - symbol resolution table for \`$outputname' dlsym emulation. */
/* Generated by $PROGRAM - GNU $PACKAGE $VERSION$TIMESTAMP */
#ifdef __cplusplus
extern \"C\" {
#endif
/* Prevent the only kind of declaration conflicts we can make. */
#define lt_preloaded_symbols some_other_symbol
/* External symbol declarations for the compiler. */\
"
if test "$dlself" = yes; then
$show "generating symbol list for \`$output'"
test -z "$run" && $echo ': @PROGRAM@ ' > "$nlist"
# Add our own program objects to the symbol list.
progfiles=`$echo "X$objs$old_deplibs" | $SP2NL | $Xsed -e "$lo2o" | $NL2SP`
for arg in $progfiles; do
$show "extracting global C symbols from \`$arg'"
$run eval "$NM $arg | $global_symbol_pipe >> '$nlist'"
done
if test -n "$exclude_expsyms"; then
$run eval '$EGREP -v " ($exclude_expsyms)$" "$nlist" > "$nlist"T'
$run eval '$mv "$nlist"T "$nlist"'
fi
if test -n "$export_symbols_regex"; then
$run eval '$EGREP -e "$export_symbols_regex" "$nlist" > "$nlist"T'
$run eval '$mv "$nlist"T "$nlist"'
fi
# Prepare the list of exported symbols
if test -z "$export_symbols"; then
export_symbols="$output_objdir/$outputname.exp"
$run $rm $export_symbols
$run eval "${SED} -n -e '/^: @PROGRAM@$/d' -e 's/^.* \(.*\)$/\1/p' "'< "$nlist" > "$export_symbols"'
else
$run eval "${SED} -e 's/\([ ][.*^$]\)/\\\1/g' -e 's/^/ /' -e 's/$/$/'"' < "$export_symbols" > "$output_objdir/$outputname.exp"'
$run eval 'grep -f "$output_objdir/$outputname.exp" < "$nlist" > "$nlist"T'
$run eval 'mv "$nlist"T "$nlist"'
fi
fi
# Append symbols from each dlpreopened file, tagged with its name.
for arg in $dlprefiles; do
$show "extracting global C symbols from \`$arg'"
name=`$echo "$arg" | ${SED} -e 's%^.*/%%'`
$run eval '$echo ": $name " >> "$nlist"'
$run eval "$NM $arg | $global_symbol_pipe >> '$nlist'"
done
if test -z "$run"; then
# Make sure we have at least an empty file.
test -f "$nlist" || : > "$nlist"
if test -n "$exclude_expsyms"; then
$EGREP -v " ($exclude_expsyms)$" "$nlist" > "$nlist"T
$mv "$nlist"T "$nlist"
fi
# Try sorting and uniquifying the output.
if grep -v "^: " < "$nlist" |
if sort -k 3 </dev/null >/dev/null 2>&1; then
sort -k 3
else
sort +2
fi |
uniq > "$nlist"S; then
:
else
grep -v "^: " < "$nlist" > "$nlist"S
fi
if test -f "$nlist"S; then
eval "$global_symbol_to_cdecl"' < "$nlist"S >> "$output_objdir/$dlsyms"'
else
$echo '/* NONE */' >> "$output_objdir/$dlsyms"
fi
$echo >> "$output_objdir/$dlsyms" "\
#undef lt_preloaded_symbols
#if defined (__STDC__) && __STDC__
# define lt_ptr void *
#else
# define lt_ptr char *
# define const
#endif
/* The mapping between symbol names and symbols. */
"
case $host in
*cygwin* | *mingw* )
$echo >> "$output_objdir/$dlsyms" "\
/* DATA imports from DLLs on WIN32 can't be const, because
runtime relocations are performed -- see ld's documentation
on pseudo-relocs */
struct {
"
;;
* )
$echo >> "$output_objdir/$dlsyms" "\
const struct {
"
;;
esac
$echo >> "$output_objdir/$dlsyms" "\
const char *name;
lt_ptr address;
}
lt_preloaded_symbols[] =
{\
"
eval "$global_symbol_to_c_name_address" < "$nlist" >> "$output_objdir/$dlsyms"
$echo >> "$output_objdir/$dlsyms" "\
{0, (lt_ptr) 0}
};
/* This works around a problem in FreeBSD linker */
#ifdef FREEBSD_WORKAROUND
static const void *lt_preloaded_setup() {
return lt_preloaded_symbols;
}
#endif
#ifdef __cplusplus
}
#endif\
"
fi
pic_flag_for_symtable=
case $host in
# compiling the symbol table file with pic_flag works around
# a FreeBSD bug that causes programs to crash when -lm is
# linked before any other PIC object. But we must not use
# pic_flag when linking with -static. The problem exists in
# FreeBSD 2.2.6 and is fixed in FreeBSD 3.1.
*-*-freebsd2*|*-*-freebsd3.0*|*-*-freebsdelf3.0*)
case "$compile_command " in
*" -static "*) ;;
*) pic_flag_for_symtable=" $pic_flag -DFREEBSD_WORKAROUND";;
esac;;
*-*-hpux*)
case "$compile_command " in
*" -static "*) ;;
*) pic_flag_for_symtable=" $pic_flag";;
esac
esac
# Now compile the dynamic symbol file.
$show "(cd $output_objdir && $LTCC -c$no_builtin_flag$pic_flag_for_symtable \"$dlsyms\")"
$run eval '(cd $output_objdir && $LTCC -c$no_builtin_flag$pic_flag_for_symtable "$dlsyms")' || exit $?
# Clean up the generated files.
$show "$rm $output_objdir/$dlsyms $nlist ${nlist}S ${nlist}T"
$run $rm "$output_objdir/$dlsyms" "$nlist" "${nlist}S" "${nlist}T"
# Transform the symbol file into the correct name.
compile_command=`$echo "X$compile_command" | $Xsed -e "s%@SYMFILE@%$output_objdir/${outputname}S.${objext}%"`
finalize_command=`$echo "X$finalize_command" | $Xsed -e "s%@SYMFILE@%$output_objdir/${outputname}S.${objext}%"`
;;
*)
$echo "$modename: unknown suffix for \`$dlsyms'" 1>&2
exit $EXIT_FAILURE
;;
esac
else
# We keep going just in case the user didn't refer to
# lt_preloaded_symbols. The linker will fail if global_symbol_pipe
# really was required.
# Nullify the symbol file.
compile_command=`$echo "X$compile_command" | $Xsed -e "s% @SYMFILE@%%"`
finalize_command=`$echo "X$finalize_command" | $Xsed -e "s% @SYMFILE@%%"`
fi
# Fast path: if no relink step will be needed at install time (or we are
# not building libtool libraries at all), link the executable directly to
# its final name now and exit -- no wrapper script is required.
if test "$need_relink" = no || test "$build_libtool_libs" != yes; then
# Replace the output file specification.
compile_command=`$echo "X$compile_command" | $Xsed -e 's%@OUTPUT@%'"$output"'%g'`
link_command="$compile_command$compile_rpath"
# We have no uninstalled library dependencies, so finalize right now.
$show "$link_command"
$run eval "$link_command"
status=$?
# Delete the generated files.
# (The S-object is the compiled dlsym symbol table, only present when
# dlpreopening was requested.)
if test -n "$dlsyms"; then
$show "$rm $output_objdir/${outputname}S.${objext}"
$run $rm "$output_objdir/${outputname}S.${objext}"
fi
# Propagate the linker's exit status.
exit $status
fi
# Build the runtime library search path (e.g. LD_LIBRARY_PATH) that the
# wrapper script must export so the uninstalled executable can find its
# uninstalled shared libraries.
if test -n "$shlibpath_var"; then
# We should set the shlibpath_var
rpath=
for dir in $temp_rpath; do
case $dir in
[\\/]* | [A-Za-z]:[\\/]*)
# Absolute path.
rpath="$rpath$dir:"
;;
*)
# Relative path: add a thisdir entry.
# (\$thisdir is expanded later, inside the generated wrapper script.)
rpath="$rpath\$thisdir/$dir:"
;;
esac
done
temp_rpath="$rpath"
fi
# Prefix the link commands with a one-shot shlibpath assignment so the
# compiler/linker itself runs with the right search path.
if test -n "$compile_shlibpath$finalize_shlibpath"; then
compile_command="$shlibpath_var=\"$compile_shlibpath$finalize_shlibpath\$$shlibpath_var\" $compile_command"
fi
if test -n "$finalize_shlibpath"; then
finalize_command="$shlibpath_var=\"$finalize_shlibpath\$$shlibpath_var\" $finalize_command"
fi
# Build one-shot environment prefixes ("VAR=... ") that carry the
# linker's runpath variable for the compile-time and install-time link
# commands respectively.
compile_var=
finalize_var=
if test -n "$runpath_var"; then
if test -n "$perm_rpath"; then
# We should set the runpath_var.
rpath=
for dir in $perm_rpath; do
rpath="$rpath$dir:"
done
compile_var="$runpath_var=\"$rpath\$$runpath_var\" "
fi
if test -n "$finalize_perm_rpath"; then
# We should set the runpath_var.
rpath=
for dir in $finalize_perm_rpath; do
rpath="$rpath$dir:"
done
finalize_var="$runpath_var=\"$rpath\$$runpath_var\" "
fi
fi
# -no-install was given: the program will never be installed, so link it
# straight to $output (no $objdir copy, no wrapper script) and exit.
if test "$no_install" = yes; then
# We don't need to create a wrapper script.
link_command="$compile_var$compile_command$compile_rpath"
# Replace the output file specification.
link_command=`$echo "X$link_command" | $Xsed -e 's%@OUTPUT@%'"$output"'%g'`
# Delete the old output file.
$run $rm $output
# Link the executable and exit
$show "$link_command"
$run eval "$link_command" || exit $?
exit $EXIT_SUCCESS
fi
# Decide how the program gets from the build tree to its installed
# location: either it must be fully relinked at install time
# (hardcode_action=relink), or fast-install can reuse/relink it lazily.
if test "$hardcode_action" = relink; then
# Fast installation is not supported
link_command="$compile_var$compile_command$compile_rpath"
relink_command="$finalize_var$finalize_command$finalize_rpath"
$echo "$modename: warning: this platform does not like uninstalled shared libraries" 1>&2
$echo "$modename: \`$output' will be relinked during installation" 1>&2
else
if test "$fast_install" != no; then
# Link now against the final (install) rpaths ...
link_command="$finalize_var$compile_command$finalize_rpath"
if test "$fast_install" = yes; then
# ... and keep a relink command the wrapper can run on demand.
# (@OUTPUT@ is redirected into $progdir/$file inside the wrapper.)
relink_command=`$echo "X$compile_var$compile_command$compile_rpath" | $Xsed -e 's%@OUTPUT@%\$progdir/\$file%g'`
else
# fast_install is set to needless
relink_command=
fi
else
link_command="$compile_var$compile_command$compile_rpath"
relink_command="$finalize_var$finalize_command$finalize_rpath"
fi
fi
# Perform the real link into $objdir, then shell-quote the relink
# command and the $echo implementation so both can be embedded safely
# inside the generated wrapper script.
# Replace the output file specification.
link_command=`$echo "X$link_command" | $Xsed -e 's%@OUTPUT@%'"$output_objdir/$outputname"'%g'`
# Delete the old output files.
$run $rm $output $output_objdir/$outputname $output_objdir/lt-$outputname
$show "$link_command"
$run eval "$link_command" || exit $?
# Now create the wrapper script.
$show "creating $output"
# Quote the relink command for shipping.
if test -n "$relink_command"; then
# Preserve any variables that may affect compiler behavior
for var in $variables_saved_for_relink; do
if eval test -z \"\${$var+set}\"; then
# Variable is currently unset: make sure it is also unset (or at
# least empty) when the relink command runs later.
relink_command="{ test -z \"\${$var+set}\" || unset $var || { $var=; export $var; }; }; $relink_command"
elif eval var_value=\$$var; test -z "$var_value"; then
relink_command="$var=; export $var; $relink_command"
else
# Re-export the variable with its current (quoted) value.
var_value=`$echo "X$var_value" | $Xsed -e "$sed_quote_subst"`
relink_command="$var=\"$var_value\"; export $var; $relink_command"
fi
done
# Anchor the relink command in the current build directory, then quote
# the whole thing once more for embedding in the wrapper.
relink_command="(cd `pwd`; $relink_command)"
relink_command=`$echo "X$relink_command" | $Xsed -e "$sed_quote_subst"`
fi
# Quote $echo for shipping.
# If $echo is libtool's own --fallback-echo re-invocation, make its
# script path absolute so the wrapper works from any directory.
if test "X$echo" = "X$SHELL $progpath --fallback-echo"; then
case $progpath in
[\\/]* | [A-Za-z]:[\\/]*) qecho="$SHELL $progpath --fallback-echo";;
*) qecho="$SHELL `pwd`/$progpath --fallback-echo";;
esac
qecho=`$echo "X$qecho" | $Xsed -e "$sed_quote_subst"`
else
qecho=`$echo "X$echo" | $Xsed -e "$sed_quote_subst"`
fi
# Only actually do things if our run command is non-null.
if test -z "$run"; then
# win32 will think the script is a binary if it has
# a .exe suffix, so we strip it off here.
case $output in
*.exe) output=`$echo $output|${SED} 's,.exe$,,'` ;;
esac
# test for cygwin because mv fails w/o .exe extensions
case $host in
*cygwin*)
exeext=.exe
outputname=`$echo $outputname|${SED} 's,.exe$,,'` ;;
*) exeext= ;;
esac
case $host in
*cygwin* | *mingw* )
cwrappersource=`$echo ${objdir}/lt-${outputname}.c`
cwrapper=`$echo ${output}.exe`
$rm $cwrappersource $cwrapper
trap "$rm $cwrappersource $cwrapper; exit $EXIT_FAILURE" 1 2 15
cat > $cwrappersource <<EOF
/* $cwrappersource - temporary wrapper executable for $objdir/$outputname
Generated by $PROGRAM - GNU $PACKAGE $VERSION$TIMESTAMP
The $output program cannot be directly executed until all the libtool
libraries that it depends on are installed.
This wrapper executable should never be moved out of the build directory.
If it is, it will not operate correctly.
Currently, it simply execs the wrapper *script* "/bin/sh $output",
but could eventually absorb all of the scripts functionality and
exec $objdir/$outputname directly.
*/
EOF
cat >> $cwrappersource<<"EOF"
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <malloc.h>
#include <stdarg.h>
#include <assert.h>
#if defined(PATH_MAX)
# define LT_PATHMAX PATH_MAX
#elif defined(MAXPATHLEN)
# define LT_PATHMAX MAXPATHLEN
#else
# define LT_PATHMAX 1024
#endif
#ifndef DIR_SEPARATOR
#define DIR_SEPARATOR '/'
#endif
#if defined (_WIN32) || defined (__MSDOS__) || defined (__DJGPP__) || \
defined (__OS2__)
#define HAVE_DOS_BASED_FILE_SYSTEM
#ifndef DIR_SEPARATOR_2
#define DIR_SEPARATOR_2 '\\'
#endif
#endif
#ifndef DIR_SEPARATOR_2
# define IS_DIR_SEPARATOR(ch) ((ch) == DIR_SEPARATOR)
#else /* DIR_SEPARATOR_2 */
# define IS_DIR_SEPARATOR(ch) \
(((ch) == DIR_SEPARATOR) || ((ch) == DIR_SEPARATOR_2))
#endif /* DIR_SEPARATOR_2 */
#define XMALLOC(type, num) ((type *) xmalloc ((num) * sizeof(type)))
#define XFREE(stale) do { \
if (stale) { free ((void *) stale); stale = 0; } \
} while (0)
const char *program_name = NULL;
void * xmalloc (size_t num);
char * xstrdup (const char *string);
char * basename (const char *name);
char * fnqualify(const char *path);
char * strendzap(char *str, const char *pat);
void lt_fatal (const char *message, ...);
int
main (int argc, char *argv[])
{
char **newargz;
int i;
program_name = (char *) xstrdup ((char *) basename (argv[0]));
newargz = XMALLOC(char *, argc+2);
EOF
cat >> $cwrappersource <<EOF
newargz[0] = "$SHELL";
EOF
cat >> $cwrappersource <<"EOF"
newargz[1] = fnqualify(argv[0]);
/* we know the script has the same name, without the .exe */
/* so make sure newargz[1] doesn't end in .exe */
strendzap(newargz[1],".exe");
for (i = 1; i < argc; i++)
newargz[i+1] = xstrdup(argv[i]);
newargz[argc+1] = NULL;
EOF
cat >> $cwrappersource <<EOF
execv("$SHELL",newargz);
EOF
cat >> $cwrappersource <<"EOF"
}
void *
xmalloc (size_t num)
{
void * p = (void *) malloc (num);
if (!p)
lt_fatal ("Memory exhausted");
return p;
}
char *
xstrdup (const char *string)
{
return string ? strcpy ((char *) xmalloc (strlen (string) + 1), string) : NULL
;
}
char *
basename (const char *name)
{
const char *base;
#if defined (HAVE_DOS_BASED_FILE_SYSTEM)
/* Skip over the disk name in MSDOS pathnames. */
if (isalpha (name[0]) && name[1] == ':')
name += 2;
#endif
for (base = name; *name; name++)
if (IS_DIR_SEPARATOR (*name))
base = name + 1;
return (char *) base;
}
char *
fnqualify(const char *path)
{
size_t size;
char *p;
char tmp[LT_PATHMAX + 1];
assert(path != NULL);
/* Is it qualified already? */
#if defined (HAVE_DOS_BASED_FILE_SYSTEM)
if (isalpha (path[0]) && path[1] == ':')
return xstrdup (path);
#endif
if (IS_DIR_SEPARATOR (path[0]))
return xstrdup (path);
/* prepend the current directory */
/* doesn't handle '~' */
if (getcwd (tmp, LT_PATHMAX) == NULL)
lt_fatal ("getcwd failed");
size = strlen(tmp) + 1 + strlen(path) + 1; /* +2 for '/' and '\0' */
p = XMALLOC(char, size);
sprintf(p, "%s%c%s", tmp, DIR_SEPARATOR, path);
return p;
}
char *
strendzap(char *str, const char *pat)
{
size_t len, patlen;
assert(str != NULL);
assert(pat != NULL);
len = strlen(str);
patlen = strlen(pat);
if (patlen <= len)
{
str += len - patlen;
if (strcmp(str, pat) == 0)
*str = '\0';
}
return str;
}
static void
lt_error_core (int exit_status, const char * mode,
const char * message, va_list ap)
{
fprintf (stderr, "%s: %s: ", program_name, mode);
vfprintf (stderr, message, ap);
fprintf (stderr, ".\n");
if (exit_status >= 0)
exit (exit_status);
}
void
lt_fatal (const char *message, ...)
{
va_list ap;
va_start (ap, message);
lt_error_core (EXIT_FAILURE, "FATAL", message, ap);
va_end (ap);
}
EOF
# we should really use a build-platform specific compiler
# here, but OTOH, the wrappers (shell script and this C one)
# are only useful if you want to execute the "real" binary.
# Since the "real" binary is built for $host, then this
# wrapper might as well be built for $host, too.
$run $LTCC -s -o $cwrapper $cwrappersource
;;
esac
$rm $output
trap "$rm $output; exit $EXIT_FAILURE" 1 2 15
$echo > $output "\
#! $SHELL
# $output - temporary wrapper script for $objdir/$outputname
# Generated by $PROGRAM - GNU $PACKAGE $VERSION$TIMESTAMP
#
# The $output program cannot be directly executed until all the libtool
# libraries that it depends on are installed.
#
# This wrapper script should never be moved out of the build directory.
# If it is, it will not operate correctly.
# Sed substitution that helps us do robust quoting. It backslashifies
# metacharacters that are still active within double-quoted strings.
Xsed='${SED} -e 1s/^X//'
sed_quote_subst='$sed_quote_subst'
# The HP-UX ksh and POSIX shell print the target directory to stdout
# if CDPATH is set.
(unset CDPATH) >/dev/null 2>&1 && unset CDPATH
relink_command=\"$relink_command\"
# This environment variable determines our operation mode.
if test \"\$libtool_install_magic\" = \"$magic\"; then
# install mode needs the following variable:
notinst_deplibs='$notinst_deplibs'
else
# When we are sourced in execute mode, \$file and \$echo are already set.
if test \"\$libtool_execute_magic\" != \"$magic\"; then
echo=\"$qecho\"
file=\"\$0\"
# Make sure echo works.
if test \"X\$1\" = X--no-reexec; then
# Discard the --no-reexec flag, and continue.
shift
elif test \"X\`(\$echo '\t') 2>/dev/null\`\" = 'X\t'; then
# Yippee, \$echo works!
:
else
# Restart under the correct shell, and then maybe \$echo will work.
exec $SHELL \"\$0\" --no-reexec \${1+\"\$@\"}
fi
fi\
"
$echo >> $output "\
# Find the directory that this script lives in.
thisdir=\`\$echo \"X\$file\" | \$Xsed -e 's%/[^/]*$%%'\`
test \"x\$thisdir\" = \"x\$file\" && thisdir=.
# Follow symbolic links until we get to the real thisdir.
file=\`ls -ld \"\$file\" | ${SED} -n 's/.*-> //p'\`
while test -n \"\$file\"; do
destdir=\`\$echo \"X\$file\" | \$Xsed -e 's%/[^/]*\$%%'\`
# If there was a directory component, then change thisdir.
if test \"x\$destdir\" != \"x\$file\"; then
case \"\$destdir\" in
[\\\\/]* | [A-Za-z]:[\\\\/]*) thisdir=\"\$destdir\" ;;
*) thisdir=\"\$thisdir/\$destdir\" ;;
esac
fi
file=\`\$echo \"X\$file\" | \$Xsed -e 's%^.*/%%'\`
file=\`ls -ld \"\$thisdir/\$file\" | ${SED} -n 's/.*-> //p'\`
done
# Try to get the absolute directory name.
absdir=\`cd \"\$thisdir\" && pwd\`
test -n \"\$absdir\" && thisdir=\"\$absdir\"
"
if test "$fast_install" = yes; then
$echo >> $output "\
program=lt-'$outputname'$exeext
progdir=\"\$thisdir/$objdir\"
if test ! -f \"\$progdir/\$program\" || \\
{ file=\`ls -1dt \"\$progdir/\$program\" \"\$progdir/../\$program\" 2>/dev/null | ${SED} 1q\`; \\
test \"X\$file\" != \"X\$progdir/\$program\"; }; then
file=\"\$\$-\$program\"
if test ! -d \"\$progdir\"; then
$mkdir \"\$progdir\"
else
$rm \"\$progdir/\$file\"
fi"
$echo >> $output "\
# relink executable if necessary
if test -n \"\$relink_command\"; then
if relink_command_output=\`eval \$relink_command 2>&1\`; then :
else
$echo \"\$relink_command_output\" >&2
$rm \"\$progdir/\$file\"
exit $EXIT_FAILURE
fi
fi
$mv \"\$progdir/\$file\" \"\$progdir/\$program\" 2>/dev/null ||
{ $rm \"\$progdir/\$program\";
$mv \"\$progdir/\$file\" \"\$progdir/\$program\"; }
$rm \"\$progdir/\$file\"
fi"
else
$echo >> $output "\
program='$outputname'
progdir=\"\$thisdir/$objdir\"
"
fi
$echo >> $output "\
if test -f \"\$progdir/\$program\"; then"
# Export our shlibpath_var if we have one.
if test "$shlibpath_overrides_runpath" = yes && test -n "$shlibpath_var" && test -n "$temp_rpath"; then
$echo >> $output "\
# Add our own library path to $shlibpath_var
$shlibpath_var=\"$temp_rpath\$$shlibpath_var\"
# Some systems cannot cope with colon-terminated $shlibpath_var
# The second colon is a workaround for a bug in BeOS R4 sed
$shlibpath_var=\`\$echo \"X\$$shlibpath_var\" | \$Xsed -e 's/::*\$//'\`
export $shlibpath_var
"
fi
# fixup the dll searchpath if we need to.
if test -n "$dllsearchpath"; then
$echo >> $output "\
# Add the dll search path components to the executable PATH
PATH=$dllsearchpath:\$PATH
"
fi
$echo >> $output "\
if test \"\$libtool_execute_magic\" != \"$magic\"; then
# Run the actual program with our arguments.
"
case $host in
# Backslashes separate directories on plain windows
*-*-mingw | *-*-os2*)
$echo >> $output "\
exec \$progdir\\\\\$program \${1+\"\$@\"}
"
;;
*)
$echo >> $output "\
exec \$progdir/\$program \${1+\"\$@\"}
"
;;
esac
$echo >> $output "\
\$echo \"\$0: cannot exec \$program \${1+\"\$@\"}\"
exit $EXIT_FAILURE
fi
else
# The program doesn't exist.
\$echo \"\$0: error: \$progdir/\$program does not exist\" 1>&2
\$echo \"This script is just a wrapper for \$program.\" 1>&2
$echo \"See the $PACKAGE documentation for more information.\" 1>&2
exit $EXIT_FAILURE
fi
fi\
"
chmod +x $output
fi
exit $EXIT_SUCCESS
;;
esac
# See if we need to build an old-fashioned archive.
# For each requested static (".a") archive: gather the objects (possibly
# extracting convenience libraries), rename duplicate basenames, and run
# $old_archive_cmds -- splitting into multiple ar invocations if the
# command line would exceed $max_cmd_len.
for oldlib in $oldlibs; do
if test "$build_libtool_libs" = convenience; then
oldobjs="$libobjs_save"
addlibs="$convenience"
build_libtool_libs=no
else
if test "$build_libtool_libs" = module; then
oldobjs="$libobjs_save"
build_libtool_libs=no
else
oldobjs="$old_deplibs $non_pic_objects"
fi
addlibs="$old_convenience"
fi
# Unpack any convenience archives into a scratch dir and add their
# members to the object list.
if test -n "$addlibs"; then
gentop="$output_objdir/${outputname}x"
generated="$generated $gentop"
func_extract_archives $gentop $addlibs
oldobjs="$oldobjs $func_extract_archives_result"
fi
# Do each command in the archive commands.
if test -n "$old_archive_from_new_cmds" && test "$build_libtool_libs" = yes; then
cmds=$old_archive_from_new_cmds
else
# POSIX demands no paths to be encoded in archives. We have
# to avoid creating archives with duplicate basenames if we
# might have to extract them afterwards, e.g., when creating a
# static archive out of a convenience library, or when linking
# the entirety of a libtool archive into another (currently
# not supported by libtool).
# "sort | sort -uc" succeeds only if all basenames are unique.
if (for obj in $oldobjs
do
$echo "X$obj" | $Xsed -e 's%^.*/%%'
done | sort | sort -uc >/dev/null 2>&1); then
:
else
$echo "copying selected object files to avoid basename conflicts..."
if test -z "$gentop"; then
gentop="$output_objdir/${outputname}x"
generated="$generated $gentop"
$show "${rm}r $gentop"
$run ${rm}r "$gentop"
$show "$mkdir $gentop"
$run $mkdir "$gentop"
status=$?
if test "$status" -ne 0 && test ! -d "$gentop"; then
exit $status
fi
fi
# Rebuild the object list, giving each clashing basename a unique
# "ltN-" prefixed copy in $gentop.
save_oldobjs=$oldobjs
oldobjs=
counter=1
for obj in $save_oldobjs
do
objbase=`$echo "X$obj" | $Xsed -e 's%^.*/%%'`
case " $oldobjs " in
" ") oldobjs=$obj ;;
*[\ /]"$objbase "*)
while :; do
# Make sure we don't pick an alternate name that also
# overlaps.
newobj=lt$counter-$objbase
counter=`expr $counter + 1`
case " $oldobjs " in
*[\ /]"$newobj "*) ;;
*) if test ! -f "$gentop/$newobj"; then break; fi ;;
esac
done
# Hard-link if possible, fall back to copying.
$show "ln $obj $gentop/$newobj || cp $obj $gentop/$newobj"
$run ln "$obj" "$gentop/$newobj" ||
$run cp "$obj" "$gentop/$newobj"
oldobjs="$oldobjs $gentop/$newobj"
;;
*) oldobjs="$oldobjs $obj" ;;
esac
done
fi
eval cmds=\"$old_archive_cmds\"
# If the fully-expanded command fits the platform limit (or the limit
# is unlimited, i.e. <= -1), run it in one shot.
if len=`expr "X$cmds" : ".*"` &&
test "$len" -le "$max_cmd_len" || test "$max_cmd_len" -le -1; then
cmds=$old_archive_cmds
else
# the command line is too long to link in one step, link in parts
# Disable RANLIB for all but the last partial command, then chain the
# partial archive commands with '~' separators.
$echo "using piecewise archive linking..."
save_RANLIB=$RANLIB
RANLIB=:
objlist=
concat_cmds=
save_oldobjs=$oldobjs
# Is there a better way of finding the last object in the list?
for obj in $save_oldobjs
do
last_oldobj=$obj
done
for obj in $save_oldobjs
do
oldobjs="$objlist $obj"
objlist="$objlist $obj"
eval test_cmds=\"$old_archive_cmds\"
if len=`expr "X$test_cmds" : ".*"` &&
test "$len" -le "$max_cmd_len"; then
:
else
# the above command should be used before it gets too long
oldobjs=$objlist
if test "$obj" = "$last_oldobj" ; then
RANLIB=$save_RANLIB
fi
test -z "$concat_cmds" || concat_cmds=$concat_cmds~
eval concat_cmds=\"\${concat_cmds}$old_archive_cmds\"
objlist=
fi
done
RANLIB=$save_RANLIB
oldobjs=$objlist
if test "X$oldobjs" = "X" ; then
eval cmds=\"\$concat_cmds\"
else
eval cmds=\"\$concat_cmds~\$old_archive_cmds\"
fi
fi
fi
# Execute the (possibly '~'-chained) archive commands one at a time.
save_ifs="$IFS"; IFS='~'
for cmd in $cmds; do
eval cmd=\"$cmd\"
IFS="$save_ifs"
$show "$cmd"
$run eval "$cmd" || exit $?
done
IFS="$save_ifs"
done
# Remove any scratch extraction directories created above.
if test -n "$generated"; then
$show "${rm}r$generated"
$run ${rm}r$generated
fi
# Now create the libtool archive.
case $output in
*.la)
old_library=
test "$build_old_libs" = yes && old_library="$libname.$libext"
$show "creating $output"
# Preserve any variables that may affect compiler behavior
for var in $variables_saved_for_relink; do
if eval test -z \"\${$var+set}\"; then
relink_command="{ test -z \"\${$var+set}\" || unset $var || { $var=; export $var; }; }; $relink_command"
elif eval var_value=\$$var; test -z "$var_value"; then
relink_command="$var=; export $var; $relink_command"
else
var_value=`$echo "X$var_value" | $Xsed -e "$sed_quote_subst"`
relink_command="$var=\"$var_value\"; export $var; $relink_command"
fi
done
# Quote the link command for shipping.
relink_command="(cd `pwd`; $SHELL $progpath $preserve_args --mode=relink $libtool_args @inst_prefix_dir@)"
relink_command=`$echo "X$relink_command" | $Xsed -e "$sed_quote_subst"`
if test "$hardcode_automatic" = yes ; then
relink_command=
fi
# Only create the output if not a dry run.
if test -z "$run"; then
for installed in no yes; do
if test "$installed" = yes; then
if test -z "$install_libdir"; then
break
fi
output="$output_objdir/$outputname"i
# Replace all uninstalled libtool libraries with the installed ones
newdependency_libs=
for deplib in $dependency_libs; do
case $deplib in
*.la)
name=`$echo "X$deplib" | $Xsed -e 's%^.*/%%'`
eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $deplib`
if test -z "$libdir"; then
$echo "$modename: \`$deplib' is not a valid libtool archive" 1>&2
exit $EXIT_FAILURE
fi
newdependency_libs="$newdependency_libs $libdir/$name"
;;
*) newdependency_libs="$newdependency_libs $deplib" ;;
esac
done
dependency_libs="$newdependency_libs"
newdlfiles=
for lib in $dlfiles; do
name=`$echo "X$lib" | $Xsed -e 's%^.*/%%'`
eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $lib`
if test -z "$libdir"; then
$echo "$modename: \`$lib' is not a valid libtool archive" 1>&2
exit $EXIT_FAILURE
fi
newdlfiles="$newdlfiles $libdir/$name"
done
dlfiles="$newdlfiles"
newdlprefiles=
for lib in $dlprefiles; do
name=`$echo "X$lib" | $Xsed -e 's%^.*/%%'`
eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $lib`
if test -z "$libdir"; then
$echo "$modename: \`$lib' is not a valid libtool archive" 1>&2
exit $EXIT_FAILURE
fi
newdlprefiles="$newdlprefiles $libdir/$name"
done
dlprefiles="$newdlprefiles"
else
newdlfiles=
for lib in $dlfiles; do
case $lib in
[\\/]* | [A-Za-z]:[\\/]*) abs="$lib" ;;
*) abs=`pwd`"/$lib" ;;
esac
newdlfiles="$newdlfiles $abs"
done
dlfiles="$newdlfiles"
newdlprefiles=
for lib in $dlprefiles; do
case $lib in
[\\/]* | [A-Za-z]:[\\/]*) abs="$lib" ;;
*) abs=`pwd`"/$lib" ;;
esac
newdlprefiles="$newdlprefiles $abs"
done
dlprefiles="$newdlprefiles"
fi
$rm $output
# place dlname in correct position for cygwin
tdlname=$dlname
case $host,$output,$installed,$module,$dlname in
*cygwin*,*lai,yes,no,*.dll | *mingw*,*lai,yes,no,*.dll) tdlname=../bin/$dlname ;;
esac
$echo > $output "\
# $outputname - a libtool library file
# Generated by $PROGRAM - GNU $PACKAGE $VERSION$TIMESTAMP
#
# Please DO NOT delete this file!
# It is necessary for linking the library.
# The name that we can dlopen(3).
dlname='$tdlname'
# Names of this library.
library_names='$library_names'
# The name of the static archive.
old_library='$old_library'
# Libraries that this one depends upon.
dependency_libs='$dependency_libs'
# Version information for $libname.
current=$current
age=$age
revision=$revision
# Is this an already installed library?
installed=$installed
# Should we warn about portability when linking against -modules?
shouldnotlink=$module
# Files to dlopen/dlpreopen
dlopen='$dlfiles'
dlpreopen='$dlprefiles'
# Directory that this library needs to be installed in:
libdir='$install_libdir'"
if test "$installed" = no && test "$need_relink" = yes; then
$echo >> $output "\
relink_command=\"$relink_command\""
fi
done
fi
# Do a symbolic link so that the libtool archive can be found in
# LD_LIBRARY_PATH before the program is installed.
$show "(cd $output_objdir && $rm $outputname && $LN_S ../$outputname $outputname)"
$run eval '(cd $output_objdir && $rm $outputname && $LN_S ../$outputname $outputname)' || exit $?
;;
esac
exit $EXIT_SUCCESS
;;
# libtool install mode
# libtool install mode
# Entry point for "libtool --mode=install INSTALL-PROG [flags] files dest".
install)
modename="$modename: install"
# There may be an optional sh(1) argument at the beginning of
# install_prog (especially on Windows NT).
if test "$nonopt" = "$SHELL" || test "$nonopt" = /bin/sh ||
# Allow the use of GNU shtool's install command.
$echo "X$nonopt" | $Xsed | grep shtool > /dev/null; then
# Aesthetically quote it.
arg=`$echo "X$nonopt" | $Xsed -e "$sed_quote_subst"`
case $arg in
*$quote_scanset* | *]* | *\|* | *\&* | *\(* | *\)* | "")
arg="\"$arg\""
;;
esac
# Keep the shell prefix and consume the next argument as the real
# install program.
install_prog="$arg "
arg="$1"
shift
else
install_prog=
arg="$nonopt"
fi
# The real first argument should be the name of the installation program.
# Aesthetically quote it.
arg=`$echo "X$arg" | $Xsed -e "$sed_quote_subst"`
case $arg in
*$quote_scanset* | *]* | *\|* | *\&* | *\(* | *\)* | "")
arg="\"$arg\""
;;
esac
install_prog="$install_prog$arg"
# We need to accept at least all the BSD install flags.
# Scan the remaining arguments: collect install(1) options into
# $install_prog, and treat trailing non-option words as files, with the
# final one as the destination.
dest=
files=
opts=
prev=
install_type=
isdir=no
stripme=
for arg
do
if test -n "$dest"; then
# A later non-option word was found: the previous candidate
# destination was actually a source file.
files="$files $dest"
dest="$arg"
continue
fi
case $arg in
-d) isdir=yes ;;
-f) prev="-f" ;;
-g) prev="-g" ;;
-m) prev="-m" ;;
-o) prev="-o" ;;
-s)
stripme=" -s"
continue
;;
-*) ;;
*)
# If the previous option needed an argument, then skip it.
if test -n "$prev"; then
prev=
else
dest="$arg"
continue
fi
;;
esac
# Aesthetically quote the argument.
arg=`$echo "X$arg" | $Xsed -e "$sed_quote_subst"`
case $arg in
*$quote_scanset* | *]* | *\|* | *\&* | *\(* | *\)* | "")
arg="\"$arg\""
;;
esac
install_prog="$install_prog $arg"
done
# Sanity checks: an install program must have been named, and no option
# may be left waiting for its argument.
if test -z "$install_prog"; then
$echo "$modename: you must specify an install program" 1>&2
$echo "$help" 1>&2
exit $EXIT_FAILURE
fi
if test -n "$prev"; then
$echo "$modename: the \`$prev' option requires an argument" 1>&2
$echo "$help" 1>&2
exit $EXIT_FAILURE
fi
# Require both source files and a destination.
if test -z "$files"; then
if test -z "$dest"; then
$echo "$modename: no file or destination specified" 1>&2
else
$echo "$modename: you must specify a destination" 1>&2
fi
$echo "$help" 1>&2
exit $EXIT_FAILURE
fi
# Canonicalise the pathname:
# - remove foo/../
# - replace //
# - remove /./
# - strip any trailing /
# Loop until the sed passes reach a fixed point.
tmp=""
while test "$dest" != "$tmp"; do
tmp=$dest
dest=`$echo "X$dest" | $Xsed -e 's%[^/.][^/.]*/\.\.%%g' -e 's%/\./%/%g' -e 's%//*%/%g' -e 's%/$%%g'`
done
# Check to see that the destination is a directory.
test -d "$dest" && isdir=yes
if test "$isdir" = yes; then
destdir="$dest"
destname=
else
# Destination is a file: split it into directory + basename.
destdir=`$echo "X$dest" | $Xsed -e 's%/[^/]*$%%'`
test "X$destdir" = "X$dest" && destdir=.
destname=`$echo "X$dest" | $Xsed -e 's%^.*/%%'`
# Not a directory, so check to see that there is only one file specified.
# ("set dummy" makes $# = number of files + 1.)
set dummy $files
if test "$#" -gt 2; then
$echo "$modename: \`$dest' is not a directory" 1>&2
$echo "$help" 1>&2
exit $EXIT_FAILURE
fi
fi
# Installing libraries/programs requires an absolute destination
# directory; only plain .lo objects are exempt.
case $destdir in
[\\/]* | [A-Za-z]:[\\/]*) ;;
*)
for file in $files; do
case $file in
*.lo) ;;
*)
$echo "$modename: \`$destdir' must be an absolute directory name" 1>&2
$echo "$help" 1>&2
exit $EXIT_FAILURE
;;
esac
done
;;
esac
# This variable tells wrapper scripts just to set variables rather
# than running their programs.
libtool_install_magic="$magic"
# Accumulators for the per-file install loop that follows:
# static archives installed last, and libdirs already/not-yet finished.
staticlibs=
future_libdirs=
current_libdirs=
for file in $files; do
# Do each installation.
case $file in
*.$libext)
# Do the static libraries later.
staticlibs="$staticlibs $file"
;;
*.la)
# Check to see that this really is a libtool archive.
if (${SED} -e '2q' $file | grep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then :
else
$echo "$modename: \`$file' is not a valid libtool archive" 1>&2
$echo "$help" 1>&2
exit $EXIT_FAILURE
fi
library_names=
old_library=
relink_command=
# If there is no directory component, then add one.
case $file in
*/* | *\\*) . $file ;;
*) . ./$file ;;
esac
# Add the libdir to current_libdirs if it is the destination.
if test "X$destdir" = "X$libdir"; then
case "$current_libdirs " in
*" $libdir "*) ;;
*) current_libdirs="$current_libdirs $libdir" ;;
esac
else
# Note the libdir as a future libdir.
case "$future_libdirs " in
*" $libdir "*) ;;
*) future_libdirs="$future_libdirs $libdir" ;;
esac
fi
dir=`$echo "X$file" | $Xsed -e 's%/[^/]*$%%'`/
test "X$dir" = "X$file/" && dir=
dir="$dir$objdir"
if test -n "$relink_command"; then
# Determine the prefix the user has applied to our future dir.
inst_prefix_dir=`$echo "$destdir" | $SED "s%$libdir\$%%"`
# Don't allow the user to place us outside of our expected
# location b/c this prevents finding dependent libraries that
# are installed to the same prefix.
# At present, this check doesn't affect windows .dll's that
# are installed into $libdir/../bin (currently, that works fine)
# but it's something to keep an eye on.
if test "$inst_prefix_dir" = "$destdir"; then
$echo "$modename: error: cannot install \`$file' to a directory not ending in $libdir" 1>&2
exit $EXIT_FAILURE
fi
if test -n "$inst_prefix_dir"; then
# Stick the inst_prefix_dir data into the link command.
relink_command=`$echo "$relink_command" | $SED "s%@inst_prefix_dir@%-inst-prefix-dir $inst_prefix_dir%"`
else
relink_command=`$echo "$relink_command" | $SED "s%@inst_prefix_dir@%%"`
fi
$echo "$modename: warning: relinking \`$file'" 1>&2
$show "$relink_command"
if $run eval "$relink_command"; then :
else
$echo "$modename: error: relink \`$file' with the above command before installing it" 1>&2
exit $EXIT_FAILURE
fi
fi
# See the names of the shared library.
set dummy $library_names
if test -n "$2"; then
realname="$2"
shift
shift
srcname="$realname"
test -n "$relink_command" && srcname="$realname"T
# Install the shared library and build the symlinks.
$show "$install_prog $dir/$srcname $destdir/$realname"
$run eval "$install_prog $dir/$srcname $destdir/$realname" || exit $?
if test -n "$stripme" && test -n "$striplib"; then
$show "$striplib $destdir/$realname"
$run eval "$striplib $destdir/$realname" || exit $?
fi
if test "$#" -gt 0; then
# Delete the old symlinks, and create new ones.
for linkname
do
if test "$linkname" != "$realname"; then
$show "(cd $destdir && $rm $linkname && $LN_S $realname $linkname)"
$run eval "(cd $destdir && $rm $linkname && $LN_S $realname $linkname)"
fi
done
fi
# Do each command in the postinstall commands.
lib="$destdir/$realname"
cmds=$postinstall_cmds
save_ifs="$IFS"; IFS='~'
for cmd in $cmds; do
IFS="$save_ifs"
eval cmd=\"$cmd\"
$show "$cmd"
$run eval "$cmd" || exit $?
done
IFS="$save_ifs"
fi
# Install the pseudo-library for information purposes.
name=`$echo "X$file" | $Xsed -e 's%^.*/%%'`
instname="$dir/$name"i
$show "$install_prog $instname $destdir/$name"
$run eval "$install_prog $instname $destdir/$name" || exit $?
# Maybe install the static library, too.
test -n "$old_library" && staticlibs="$staticlibs $dir/$old_library"
;;
*.lo)
# Install (i.e. copy) a libtool object.
# Figure out destination file name, if it wasn't already specified.
if test -n "$destname"; then
destfile="$destdir/$destname"
else
destfile=`$echo "X$file" | $Xsed -e 's%^.*/%%'`
destfile="$destdir/$destfile"
fi
# Deduce the name of the destination old-style object file.
case $destfile in
*.lo)
staticdest=`$echo "X$destfile" | $Xsed -e "$lo2o"`
;;
*.$objext)
staticdest="$destfile"
destfile=
;;
*)
$echo "$modename: cannot copy a libtool object to \`$destfile'" 1>&2
$echo "$help" 1>&2
exit $EXIT_FAILURE
;;
esac
# Install the libtool object if requested.
if test -n "$destfile"; then
$show "$install_prog $file $destfile"
$run eval "$install_prog $file $destfile" || exit $?
fi
# Install the old object if enabled.
if test "$build_old_libs" = yes; then
# Deduce the name of the old-style object file.
staticobj=`$echo "X$file" | $Xsed -e "$lo2o"`
$show "$install_prog $staticobj $staticdest"
$run eval "$install_prog \$staticobj \$staticdest" || exit $?
fi
exit $EXIT_SUCCESS
;;
*)
# Figure out destination file name, if it wasn't already specified.
if test -n "$destname"; then
destfile="$destdir/$destname"
else
destfile=`$echo "X$file" | $Xsed -e 's%^.*/%%'`
destfile="$destdir/$destfile"
fi
# If the file is missing, and there is a .exe on the end, strip it
# because it is most likely a libtool script we actually want to
# install
stripped_ext=""
case $file in
*.exe)
if test ! -f "$file"; then
file=`$echo $file|${SED} 's,.exe$,,'`
stripped_ext=".exe"
fi
;;
esac
# Do a test to see if this is really a libtool program.
case $host in
*cygwin*|*mingw*)
wrapper=`$echo $file | ${SED} -e 's,.exe$,,'`
;;
*)
wrapper=$file
;;
esac
if (${SED} -e '4q' $wrapper | grep "^# Generated by .*$PACKAGE")>/dev/null 2>&1; then
notinst_deplibs=
relink_command=
# To insure that "foo" is sourced, and not "foo.exe",
# finese the cygwin/MSYS system by explicitly sourcing "foo."
# which disallows the automatic-append-.exe behavior.
case $build in
*cygwin* | *mingw*) wrapperdot=${wrapper}. ;;
*) wrapperdot=${wrapper} ;;
esac
# If there is no directory component, then add one.
case $file in
*/* | *\\*) . ${wrapperdot} ;;
*) . ./${wrapperdot} ;;
esac
# Check the variables that should have been set.
if test -z "$notinst_deplibs"; then
$echo "$modename: invalid libtool wrapper script \`$wrapper'" 1>&2
exit $EXIT_FAILURE
fi
finalize=yes
for lib in $notinst_deplibs; do
# Check to see that each library is installed.
libdir=
if test -f "$lib"; then
# If there is no directory component, then add one.
case $lib in
*/* | *\\*) . $lib ;;
*) . ./$lib ;;
esac
fi
libfile="$libdir/"`$echo "X$lib" | $Xsed -e 's%^.*/%%g'` ### testsuite: skip nested quoting test
if test -n "$libdir" && test ! -f "$libfile"; then
$echo "$modename: warning: \`$lib' has not been installed in \`$libdir'" 1>&2
finalize=no
fi
done
relink_command=
# To insure that "foo" is sourced, and not "foo.exe",
# finese the cygwin/MSYS system by explicitly sourcing "foo."
# which disallows the automatic-append-.exe behavior.
case $build in
*cygwin* | *mingw*) wrapperdot=${wrapper}. ;;
*) wrapperdot=${wrapper} ;;
esac
# If there is no directory component, then add one.
case $file in
*/* | *\\*) . ${wrapperdot} ;;
*) . ./${wrapperdot} ;;
esac
outputname=
if test "$fast_install" = no && test -n "$relink_command"; then
if test "$finalize" = yes && test -z "$run"; then
tmpdir="/tmp"
test -n "$TMPDIR" && tmpdir="$TMPDIR"
tmpdir="$tmpdir/libtool-$$"
save_umask=`umask`
umask 0077
if $mkdir "$tmpdir"; then
umask $save_umask
else
umask $save_umask
$echo "$modename: error: cannot create temporary directory \`$tmpdir'" 1>&2
continue
fi
file=`$echo "X$file$stripped_ext" | $Xsed -e 's%^.*/%%'`
outputname="$tmpdir/$file"
# Replace the output file specification.
relink_command=`$echo "X$relink_command" | $Xsed -e 's%@OUTPUT@%'"$outputname"'%g'`
$show "$relink_command"
if $run eval "$relink_command"; then :
else
$echo "$modename: error: relink \`$file' with the above command before installing it" 1>&2
${rm}r "$tmpdir"
continue
fi
file="$outputname"
else
$echo "$modename: warning: cannot relink \`$file'" 1>&2
fi
else
# Install the binary that we compiled earlier.
file=`$echo "X$file$stripped_ext" | $Xsed -e "s%\([^/]*\)$%$objdir/\1%"`
fi
fi
# remove .exe since cygwin /usr/bin/install will append another
# one anyways
case $install_prog,$host in
*/usr/bin/install*,*cygwin*)
case $file:$destfile in
*.exe:*.exe)
# this is ok
;;
*.exe:*)
destfile=$destfile.exe
;;
*:*.exe)
destfile=`$echo $destfile | ${SED} -e 's,.exe$,,'`
;;
esac
;;
esac
$show "$install_prog$stripme $file $destfile"
$run eval "$install_prog\$stripme \$file \$destfile" || exit $?
test -n "$outputname" && ${rm}r "$tmpdir"
;;
esac
done
for file in $staticlibs; do
name=`$echo "X$file" | $Xsed -e 's%^.*/%%'`
# Set up the ranlib parameters.
oldlib="$destdir/$name"
$show "$install_prog $file $oldlib"
$run eval "$install_prog \$file \$oldlib" || exit $?
if test -n "$stripme" && test -n "$old_striplib"; then
$show "$old_striplib $oldlib"
$run eval "$old_striplib $oldlib" || exit $?
fi
# Do each command in the postinstall commands.
cmds=$old_postinstall_cmds
save_ifs="$IFS"; IFS='~'
for cmd in $cmds; do
IFS="$save_ifs"
eval cmd=\"$cmd\"
$show "$cmd"
$run eval "$cmd" || exit $?
done
IFS="$save_ifs"
done
if test -n "$future_libdirs"; then
$echo "$modename: warning: remember to run \`$progname --finish$future_libdirs'" 1>&2
fi
if test -n "$current_libdirs"; then
# Maybe just do a dry run.
test -n "$run" && current_libdirs=" -n$current_libdirs"
exec_cmd='$SHELL $progpath $preserve_args --finish$current_libdirs'
else
exit $EXIT_SUCCESS
fi
;;
# libtool finish mode
finish)
modename="$modename: finish"
libdirs="$nonopt"
admincmds=
if test -n "$finish_cmds$finish_eval" && test -n "$libdirs"; then
for dir
do
libdirs="$libdirs $dir"
done
for libdir in $libdirs; do
if test -n "$finish_cmds"; then
# Do each command in the finish commands.
cmds=$finish_cmds
save_ifs="$IFS"; IFS='~'
for cmd in $cmds; do
IFS="$save_ifs"
eval cmd=\"$cmd\"
$show "$cmd"
$run eval "$cmd" || admincmds="$admincmds
$cmd"
done
IFS="$save_ifs"
fi
if test -n "$finish_eval"; then
# Do the single finish_eval.
eval cmds=\"$finish_eval\"
$run eval "$cmds" || admincmds="$admincmds
$cmds"
fi
done
fi
# Exit here if they wanted silent mode.
test "$show" = : && exit $EXIT_SUCCESS
$echo "----------------------------------------------------------------------"
$echo "Libraries have been installed in:"
for libdir in $libdirs; do
$echo " $libdir"
done
$echo
$echo "If you ever happen to want to link against installed libraries"
$echo "in a given directory, LIBDIR, you must either use libtool, and"
$echo "specify the full pathname of the library, or use the \`-LLIBDIR'"
$echo "flag during linking and do at least one of the following:"
if test -n "$shlibpath_var"; then
$echo " - add LIBDIR to the \`$shlibpath_var' environment variable"
$echo " during execution"
fi
if test -n "$runpath_var"; then
$echo " - add LIBDIR to the \`$runpath_var' environment variable"
$echo " during linking"
fi
if test -n "$hardcode_libdir_flag_spec"; then
libdir=LIBDIR
eval flag=\"$hardcode_libdir_flag_spec\"
$echo " - use the \`$flag' linker flag"
fi
if test -n "$admincmds"; then
$echo " - have your system administrator run these commands:$admincmds"
fi
if test -f /etc/ld.so.conf; then
$echo " - have your system administrator add LIBDIR to \`/etc/ld.so.conf'"
fi
$echo
$echo "See any operating system documentation about shared libraries for"
$echo "more information, such as the ld(1) and ld.so(8) manual pages."
$echo "----------------------------------------------------------------------"
exit $EXIT_SUCCESS
;;
# libtool execute mode
execute)
modename="$modename: execute"
# The first argument is the command name.
cmd="$nonopt"
if test -z "$cmd"; then
$echo "$modename: you must specify a COMMAND" 1>&2
$echo "$help"
exit $EXIT_FAILURE
fi
# Handle -dlopen flags immediately.
for file in $execute_dlfiles; do
if test ! -f "$file"; then
$echo "$modename: \`$file' is not a file" 1>&2
$echo "$help" 1>&2
exit $EXIT_FAILURE
fi
dir=
case $file in
*.la)
# Check to see that this really is a libtool archive.
if (${SED} -e '2q' $file | grep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then :
else
$echo "$modename: \`$lib' is not a valid libtool archive" 1>&2
$echo "$help" 1>&2
exit $EXIT_FAILURE
fi
# Read the libtool library.
dlname=
library_names=
# If there is no directory component, then add one.
case $file in
*/* | *\\*) . $file ;;
*) . ./$file ;;
esac
# Skip this library if it cannot be dlopened.
if test -z "$dlname"; then
# Warn if it was a shared library.
test -n "$library_names" && $echo "$modename: warning: \`$file' was not linked with \`-export-dynamic'"
continue
fi
dir=`$echo "X$file" | $Xsed -e 's%/[^/]*$%%'`
test "X$dir" = "X$file" && dir=.
if test -f "$dir/$objdir/$dlname"; then
dir="$dir/$objdir"
else
$echo "$modename: cannot find \`$dlname' in \`$dir' or \`$dir/$objdir'" 1>&2
exit $EXIT_FAILURE
fi
;;
*.lo)
# Just add the directory containing the .lo file.
dir=`$echo "X$file" | $Xsed -e 's%/[^/]*$%%'`
test "X$dir" = "X$file" && dir=.
;;
*)
$echo "$modename: warning \`-dlopen' is ignored for non-libtool libraries and objects" 1>&2
continue
;;
esac
# Get the absolute pathname.
absdir=`cd "$dir" && pwd`
test -n "$absdir" && dir="$absdir"
# Now add the directory to shlibpath_var.
if eval "test -z \"\$$shlibpath_var\""; then
eval "$shlibpath_var=\"\$dir\""
else
eval "$shlibpath_var=\"\$dir:\$$shlibpath_var\""
fi
done
# This variable tells wrapper scripts just to set shlibpath_var
# rather than running their programs.
libtool_execute_magic="$magic"
# Check if any of the arguments is a wrapper script.
args=
for file
do
case $file in
-*) ;;
*)
# Do a test to see if this is really a libtool program.
if (${SED} -e '4q' $file | grep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then
# If there is no directory component, then add one.
case $file in
*/* | *\\*) . $file ;;
*) . ./$file ;;
esac
# Transform arg to wrapped name.
file="$progdir/$program"
fi
;;
esac
# Quote arguments (to preserve shell metacharacters).
file=`$echo "X$file" | $Xsed -e "$sed_quote_subst"`
args="$args \"$file\""
done
if test -z "$run"; then
if test -n "$shlibpath_var"; then
# Export the shlibpath_var.
eval "export $shlibpath_var"
fi
# Restore saved environment variables
if test "${save_LC_ALL+set}" = set; then
LC_ALL="$save_LC_ALL"; export LC_ALL
fi
if test "${save_LANG+set}" = set; then
LANG="$save_LANG"; export LANG
fi
# Now prepare to actually exec the command.
exec_cmd="\$cmd$args"
else
# Display what would be done.
if test -n "$shlibpath_var"; then
eval "\$echo \"\$shlibpath_var=\$$shlibpath_var\""
$echo "export $shlibpath_var"
fi
$echo "$cmd$args"
exit $EXIT_SUCCESS
fi
;;
# libtool clean and uninstall mode
clean | uninstall)
modename="$modename: $mode"
rm="$nonopt"
files=
rmforce=
exit_status=0
# This variable tells wrapper scripts just to set variables rather
# than running their programs.
libtool_install_magic="$magic"
for arg
do
case $arg in
-f) rm="$rm $arg"; rmforce=yes ;;
-*) rm="$rm $arg" ;;
*) files="$files $arg" ;;
esac
done
if test -z "$rm"; then
$echo "$modename: you must specify an RM program" 1>&2
$echo "$help" 1>&2
exit $EXIT_FAILURE
fi
rmdirs=
origobjdir="$objdir"
for file in $files; do
dir=`$echo "X$file" | $Xsed -e 's%/[^/]*$%%'`
if test "X$dir" = "X$file"; then
dir=.
objdir="$origobjdir"
else
objdir="$dir/$origobjdir"
fi
name=`$echo "X$file" | $Xsed -e 's%^.*/%%'`
test "$mode" = uninstall && objdir="$dir"
# Remember objdir for removal later, being careful to avoid duplicates
if test "$mode" = clean; then
case " $rmdirs " in
*" $objdir "*) ;;
*) rmdirs="$rmdirs $objdir" ;;
esac
fi
# Don't error if the file doesn't exist and rm -f was used.
if (test -L "$file") >/dev/null 2>&1 \
|| (test -h "$file") >/dev/null 2>&1 \
|| test -f "$file"; then
:
elif test -d "$file"; then
exit_status=1
continue
elif test "$rmforce" = yes; then
continue
fi
rmfiles="$file"
case $name in
*.la)
# Possibly a libtool archive, so verify it.
if (${SED} -e '2q' $file | grep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then
. $dir/$name
# Delete the libtool libraries and symlinks.
for n in $library_names; do
rmfiles="$rmfiles $objdir/$n"
done
test -n "$old_library" && rmfiles="$rmfiles $objdir/$old_library"
test "$mode" = clean && rmfiles="$rmfiles $objdir/$name $objdir/${name}i"
if test "$mode" = uninstall; then
if test -n "$library_names"; then
# Do each command in the postuninstall commands.
cmds=$postuninstall_cmds
save_ifs="$IFS"; IFS='~'
for cmd in $cmds; do
IFS="$save_ifs"
eval cmd=\"$cmd\"
$show "$cmd"
$run eval "$cmd"
if test "$?" -ne 0 && test "$rmforce" != yes; then
exit_status=1
fi
done
IFS="$save_ifs"
fi
if test -n "$old_library"; then
# Do each command in the old_postuninstall commands.
cmds=$old_postuninstall_cmds
save_ifs="$IFS"; IFS='~'
for cmd in $cmds; do
IFS="$save_ifs"
eval cmd=\"$cmd\"
$show "$cmd"
$run eval "$cmd"
if test "$?" -ne 0 && test "$rmforce" != yes; then
exit_status=1
fi
done
IFS="$save_ifs"
fi
# FIXME: should reinstall the best remaining shared library.
fi
fi
;;
*.lo)
# Possibly a libtool object, so verify it.
if (${SED} -e '2q' $file | grep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then
# Read the .lo file
. $dir/$name
# Add PIC object to the list of files to remove.
if test -n "$pic_object" \
&& test "$pic_object" != none; then
rmfiles="$rmfiles $dir/$pic_object"
fi
# Add non-PIC object to the list of files to remove.
if test -n "$non_pic_object" \
&& test "$non_pic_object" != none; then
rmfiles="$rmfiles $dir/$non_pic_object"
fi
fi
;;
*)
if test "$mode" = clean ; then
noexename=$name
case $file in
*.exe)
file=`$echo $file|${SED} 's,.exe$,,'`
noexename=`$echo $name|${SED} 's,.exe$,,'`
# $file with .exe has already been added to rmfiles,
# add $file without .exe
rmfiles="$rmfiles $file"
;;
esac
# Do a test to see if this is a libtool program.
if (${SED} -e '4q' $file | grep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then
relink_command=
. $dir/$noexename
# note $name still contains .exe if it was in $file originally
# as does the version of $file that was added into $rmfiles
rmfiles="$rmfiles $objdir/$name $objdir/${name}S.${objext}"
if test "$fast_install" = yes && test -n "$relink_command"; then
rmfiles="$rmfiles $objdir/lt-$name"
fi
if test "X$noexename" != "X$name" ; then
rmfiles="$rmfiles $objdir/lt-${noexename}.c"
fi
fi
fi
;;
esac
$show "$rm $rmfiles"
$run $rm $rmfiles || exit_status=1
done
objdir="$origobjdir"
# Try to remove the ${objdir}s in the directories where we deleted files
for dir in $rmdirs; do
if test -d "$dir"; then
$show "rmdir $dir"
$run rmdir $dir >/dev/null 2>&1
fi
done
exit $exit_status
;;
"")
$echo "$modename: you must specify a MODE" 1>&2
$echo "$generic_help" 1>&2
exit $EXIT_FAILURE
;;
esac
if test -z "$exec_cmd"; then
$echo "$modename: invalid operation mode \`$mode'" 1>&2
$echo "$generic_help" 1>&2
exit $EXIT_FAILURE
fi
fi # test -z "$show_help"
if test -n "$exec_cmd"; then
eval exec $exec_cmd
exit $EXIT_FAILURE
fi
# We need to display help for each of the modes.
case $mode in
"") $echo \
"Usage: $modename [OPTION]... [MODE-ARG]...
Provide generalized library-building support services.
--config show all configuration variables
--debug enable verbose shell tracing
-n, --dry-run display commands without modifying any files
--features display basic configuration information and exit
--finish same as \`--mode=finish'
--help display this help message and exit
--mode=MODE use operation mode MODE [default=inferred from MODE-ARGS]
--quiet same as \`--silent'
--silent don't print informational messages
--tag=TAG use configuration variables from tag TAG
--version print version information
MODE must be one of the following:
clean remove files from the build directory
compile compile a source file into a libtool object
execute automatically set library path, then run a program
finish complete the installation of libtool libraries
install install libraries or executables
link create a library or an executable
uninstall remove libraries from an installed directory
MODE-ARGS vary depending on the MODE. Try \`$modename --help --mode=MODE' for
a more detailed description of MODE.
Report bugs to <[email protected]>."
exit $EXIT_SUCCESS
;;
clean)
$echo \
"Usage: $modename [OPTION]... --mode=clean RM [RM-OPTION]... FILE...
Remove files from the build directory.
RM is the name of the program to use to delete files associated with each FILE
(typically \`/bin/rm'). RM-OPTIONS are options (such as \`-f') to be passed
to RM.
If FILE is a libtool library, object or program, all the files associated
with it are deleted. Otherwise, only FILE itself is deleted using RM."
;;
compile)
$echo \
"Usage: $modename [OPTION]... --mode=compile COMPILE-COMMAND... SOURCEFILE
Compile a source file into a libtool library object.
This mode accepts the following additional options:
-o OUTPUT-FILE set the output file name to OUTPUT-FILE
-prefer-pic try to building PIC objects only
-prefer-non-pic try to building non-PIC objects only
-static always build a \`.o' file suitable for static linking
COMPILE-COMMAND is a command to be used in creating a \`standard' object file
from the given SOURCEFILE.
The output file name is determined by removing the directory component from
SOURCEFILE, then substituting the C source code suffix \`.c' with the
library object suffix, \`.lo'."
;;
execute)
$echo \
"Usage: $modename [OPTION]... --mode=execute COMMAND [ARGS]...
Automatically set library path, then run a program.
This mode accepts the following additional options:
-dlopen FILE add the directory containing FILE to the library path
This mode sets the library path environment variable according to \`-dlopen'
flags.
If any of the ARGS are libtool executable wrappers, then they are translated
into their corresponding uninstalled binary, and any of their required library
directories are added to the library path.
Then, COMMAND is executed, with ARGS as arguments."
;;
finish)
$echo \
"Usage: $modename [OPTION]... --mode=finish [LIBDIR]...
Complete the installation of libtool libraries.
Each LIBDIR is a directory that contains libtool libraries.
The commands that this mode executes may require superuser privileges. Use
the \`--dry-run' option if you just want to see what would be executed."
;;
install)
$echo \
"Usage: $modename [OPTION]... --mode=install INSTALL-COMMAND...
Install executables or libraries.
INSTALL-COMMAND is the installation command. The first component should be
either the \`install' or \`cp' program.
The rest of the components are interpreted as arguments to that command (only
BSD-compatible install options are recognized)."
;;
link)
$echo \
"Usage: $modename [OPTION]... --mode=link LINK-COMMAND...
Link object files or libraries together to form another library, or to
create an executable program.
LINK-COMMAND is a command using the C compiler that you would use to create
a program from several object files.
The following components of LINK-COMMAND are treated specially:
-all-static do not do any dynamic linking at all
-avoid-version do not add a version suffix if possible
-dlopen FILE \`-dlpreopen' FILE if it cannot be dlopened at runtime
-dlpreopen FILE link in FILE and add its symbols to lt_preloaded_symbols
-export-dynamic allow symbols from OUTPUT-FILE to be resolved with dlsym(3)
-export-symbols SYMFILE
try to export only the symbols listed in SYMFILE
-export-symbols-regex REGEX
try to export only the symbols matching REGEX
-LLIBDIR search LIBDIR for required installed libraries
-lNAME OUTPUT-FILE requires the installed library libNAME
-module build a library that can dlopened
-no-fast-install disable the fast-install mode
-no-install link a not-installable executable
-no-undefined declare that a library does not refer to external symbols
-o OUTPUT-FILE create OUTPUT-FILE from the specified objects
-objectlist FILE Use a list of object files found in FILE to specify objects
-precious-files-regex REGEX
don't remove output files matching REGEX
-release RELEASE specify package release information
-rpath LIBDIR the created library will eventually be installed in LIBDIR
-R[ ]LIBDIR add LIBDIR to the runtime path of programs and libraries
-static do not do any dynamic linking of libtool libraries
-version-info CURRENT[:REVISION[:AGE]]
specify library version info [each variable defaults to 0]
All other options (arguments beginning with \`-') are ignored.
Every other argument is treated as a filename. Files ending in \`.la' are
treated as uninstalled libtool libraries, other files are standard or library
object files.
If the OUTPUT-FILE ends in \`.la', then a libtool library is created,
only library objects (\`.lo' files) may be specified, and \`-rpath' is
required, except when creating a convenience library.
If OUTPUT-FILE ends in \`.a' or \`.lib', then a standard library is created
using \`ar' and \`ranlib', or on Windows using \`lib'.
If OUTPUT-FILE ends in \`.lo' or \`.${objext}', then a reloadable object file
is created, otherwise an executable program is created."
;;
uninstall)
$echo \
"Usage: $modename [OPTION]... --mode=uninstall RM [RM-OPTION]... FILE...
Remove libraries from an installation directory.
RM is the name of the program to use to delete files associated with each FILE
(typically \`/bin/rm'). RM-OPTIONS are options (such as \`-f') to be passed
to RM.
If FILE is a libtool library, all the files associated with it are deleted.
Otherwise, only FILE itself is deleted using RM."
;;
*)
$echo "$modename: invalid operation mode \`$mode'" 1>&2
$echo "$help" 1>&2
exit $EXIT_FAILURE
;;
esac
$echo
$echo "Try \`$modename --help' for more information about other modes."
exit $?
# The TAGs below are defined such that we never get into a situation
# in which we disable both kinds of libraries. Given conflicting
# choices, we go for a static library, that is the most portable,
# since we can't tell whether shared libraries were disabled because
# the user asked for that or because the platform doesn't support
# them. This is particularly important on AIX, because we don't
# support having both static and shared libraries enabled at the same
# time on that platform, so we default to a shared-only configuration.
# If a disable-shared tag is given, we'll fallback to a static-only
# configuration. But we'll never go from static-only to shared-only.
# ### BEGIN LIBTOOL TAG CONFIG: disable-shared
build_libtool_libs=no
build_old_libs=yes
# ### END LIBTOOL TAG CONFIG: disable-shared
# ### BEGIN LIBTOOL TAG CONFIG: disable-static
build_old_libs=`case $build_libtool_libs in yes) $echo no;; *) $echo yes;; esac`
# ### END LIBTOOL TAG CONFIG: disable-static
# Local Variables:
# mode:shell-script
# sh-indentation:2
# End:
|
opf-attic/ref
|
tools/file/file-5.00/ltmain.sh
|
Shell
|
apache-2.0
| 188,708 |
# Source this in your test scripts to parse command line arguments correctly.
#
# Recognized options: -c, -g, -j, -s, -e, -h (see help text below).
# On return, CORE_CT, GEODB, JOBDIR, START, and END hold the parsed values.

CORE_CT_DEFAULT=1
START_DEFAULT=2012-01-25
END_DEFAULT=2013-01-24

# GNU sed spells extended-regex mode -r; BSD/macOS sed spells it -E.
sedopt='-r'
if [ "$(uname)" = "Darwin" ]; then
  sedopt='-E'
fi

# default job dir is named after script and timestamp
JOBDIR=$TWEPI_JOBBASE/$(basename "$0" | sed $sedopt 's/\.[a-z]+\.sh$//')_$(date +'%Y%m%d-%H%M')
CORE_CT=$CORE_CT_DEFAULT
GEODB=$TWEPI_GEODB
START=$START_DEFAULT
END=$END_DEFAULT

# BUG FIX: the optstring previously read ':c:g:j:', so the -s, -e, and -h
# options handled below always fell through to the invalid-option branch.
# The leading colon selects silent error reporting (the ':' and '\?' cases).
while getopts ':c:g:j:s:e:h' opt; do
  case $opt in
    c)
      CORE_CT=$OPTARG
      ;;
    g)
      GEODB=$OPTARG
      ;;
    j)
      if [[ "$OPTARG" = /* || "$OPTARG" = "." || "$OPTARG" = ~* ]]; then
        # absolute path, dot, or tilde - leave it alone
        JOBDIR=$OPTARG
      else
        # relative; prefix the default
        JOBDIR=$TWEPI_JOBBASE/$OPTARG
      fi
      ;;
    s)
      START=$OPTARG
      ;;
    e)
      END=$OPTARG
      ;;
    h|\?|:)
      # -h, invalid option, or missing argument... print help.
      # BUG FIX: redirect cat itself; a bare ">&2" on its own line after
      # the heredoc redirected nothing, so help went to stdout.
      cat >&2 <<EOF
These files are shell scripts that run or set up experiments. They take the
following arguments:
  -c N          number of cores per process (default $CORE_CT_DEFAULT)
  -g FILE       path to geodb (default $TWEPI_GEODB)
  -j DIR        job directory (under $TWEPI_JOBBASE if relative)
  -s TIMESTAMP  start time (default $START_DEFAULT)
  -e TIMESTAMP  end time (default $END_DEFAULT)
  -h            show this help text
Conventions:
 * scripts named .jex use jexplode to create a job, while scripts named
   .imm execute the experiment immediately.
 * comment at the top of the script says a little about the experiment.
 * put options one per line in the order specified by model-test --help.
Note: model-test and jexplode must be in your \$PATH.
EOF
      exit 1
      ;;
  esac
done

# set these here for convenience
set -e
set -x
|
casmlab/quac
|
experiments/2014_CSCW_Tweetlocating/tests/parseargs.sh
|
Shell
|
apache-2.0
| 2,003 |
#!/bin/bash
# Analyze the TSan runtime entry points (__tsan_read*/write*/func_*) in the
# tsan_test binary: dump each function's disassembly to asm_<name>.s and
# print its size and a rough instruction mix (loads, stores, pushes, ...).

set -e
set -u

# Extract the disassembly of the function matching tsan_$1 from
# libtsan.objdump: from the symbol's label up to the next function label.
get_asm() {
  grep "tsan_$1.:" -A 10000 libtsan.objdump | \
    awk "/[^:]$/ {print;} />:/ {c++; if (c == 2) {exit}}"
}

# Entry points to analyze.
list="write1 \
  write2 \
  write4 \
  write8 \
  read1 \
  read2 \
  read4 \
  read8 \
  func_entry \
  func_exit"

BIN=$(dirname "$0")/tsan_test

# Disassembly and symbol table are produced once and reused for every
# function below.
objdump -d "$BIN" > libtsan.objdump
nm -S "$BIN" | grep "__tsan_" > libtsan.nm

for f in $list; do
  file=asm_$f.s
  get_asm "$f" > "$file"
  tot=$(wc -l < "$file")
  # nm -S prints the size in hex; --non-decimal-data lets GNU awk convert
  # the "0x"-prefixed value numerically (GNU awk extension).
  size=$(grep "${f}\$" libtsan.nm | awk --non-decimal-data '{print ("0x"$2)+0}')
  # Instruction-mix counters. grep -c replaces `grep | wc -l` (one process
  # fewer); `|| true` keeps set -e happy when a count is zero.
  rsp=$(grep -c '(%rsp)' "$file" || true)
  push=$(grep -c 'push' "$file" || true)
  pop=$(grep -c 'pop' "$file" || true)
  call=$(grep -c 'call' "$file" || true)
  load=$(grep -Ec 'mov .*\,.*\(.*\)|cmp .*\,.*\(.*\)' "$file" || true)
  store=$(grep -Ec 'mov .*\(.*\),' "$file" || true)
  mov=$(grep -c 'mov' "$file" || true)
  lea=$(grep -c 'lea' "$file" || true)
  sh=$(grep -c 'shr\|shl' "$file" || true)
  cmp=$(grep -c 'cmp\|test' "$file" || true)
  printf "%10s tot %3d; size %4d; rsp %d; push %d; pop %d; call %d; load %2d; store %2d; sh %3d; mov %3d; lea %3d; cmp %3d\n" \
    "$f" "$tot" "$size" "$rsp" "$push" "$pop" "$call" "$load" "$store" "$sh" "$mov" "$lea" "$cmp"
done
|
jeltz/rust-debian-package
|
src/llvm/projects/compiler-rt/lib/tsan/analyze_libtsan.sh
|
Shell
|
apache-2.0
| 1,167 |
#!/bin/bash
# Run the project's Go unit tests, optionally with race detection and
# per-package coverage output.
#
# Usage: test-go.sh [PACKAGE] [extra `go test` args...]
# Env:   STI_COVER, STI_RACE, STI_TIMEOUT, OUTPUT_COVERAGE, TRAVIS_GO_VERSION

# Fail fast on errors, unset variables, and failed pipeline stages.
set -o errexit
set -o nounset
set -o pipefail

STI_ROOT=$(dirname "${BASH_SOURCE}")/..
# common.sh provides sti::build::setup_env and STI_GO_PACKAGE (among others).
source "${STI_ROOT}/hack/common.sh"

# Go to the top of the tree.
cd "${STI_ROOT}"

sti::build::setup_env

# Print every directory containing *_test.go files as a full Go import path
# (one per line), pruning vendored, output, and VCS trees.
find_test_dirs() {
  cd "${STI_ROOT}"
  find . -not \( \
      \( \
        -wholename './Godeps' \
        -o -wholename './release' \
        -o -wholename './target' \
        -o -wholename '*/Godeps/*' \
        -o -wholename '*/_output/*' \
        -o -wholename './.git' \
      \) -prune \
    \) -name '*_test.go' -print0 | xargs -0n1 dirname | sort -u | xargs -n1 printf "${STI_GO_PACKAGE}/%s\n"
}

# there is currently a race in the coverage code in tip. Remove this when it is fixed
# see https://code.google.com/p/go/issues/detail?id=8630 for details.
if [ "${TRAVIS_GO_VERSION-}" == "tip" ]; then
  STI_COVER=""
else
  # -covermode=atomic becomes default with -race in Go >=1.3
  # ${STI_COVER+x} tests "is set" so callers may export STI_COVER="" to
  # disable coverage entirely.
  if [ -z ${STI_COVER+x} ]; then
    STI_COVER="-cover -covermode=atomic"
  fi
fi
STI_TIMEOUT=${STI_TIMEOUT:--timeout 30s}
if [ -z ${STI_RACE+x} ]; then
  STI_RACE="-race"
fi

# An explicit first argument restricts the run to that single package;
# otherwise test every package that has tests.
if [ "${1-}" != "" ]; then
  test_packages="$STI_GO_PACKAGE/$1"
else
  test_packages=`find_test_dirs`
fi

OUTPUT_COVERAGE=${OUTPUT_COVERAGE:-""}

if [[ -n "${STI_COVER}" && -n "${OUTPUT_COVERAGE}" ]]; then
  # Iterate over packages to run coverage
  # (a single `go test` cannot write one profile per package).
  # Intentionally unquoted: split the newline-separated list into an array.
  test_packages=( $test_packages )
  for test_package in "${test_packages[@]}"
  do
    mkdir -p "$OUTPUT_COVERAGE/$test_package"
    STI_COVER_PROFILE="-coverprofile=$OUTPUT_COVERAGE/$test_package/profile.out"
    # $STI_RACE/$STI_TIMEOUT/$STI_COVER are deliberately unquoted so each
    # flag word splits into its own argument; "${@:2}" forwards extras.
    go test $STI_RACE $STI_TIMEOUT $STI_COVER "$STI_COVER_PROFILE" "$test_package" "${@:2}"
    if [ -f "${OUTPUT_COVERAGE}/$test_package/profile.out" ]; then
      # Render the profile to browsable HTML next to it.
      go tool cover "-html=${OUTPUT_COVERAGE}/$test_package/profile.out" -o "${OUTPUT_COVERAGE}/$test_package/coverage.html"
      echo "coverage: ${OUTPUT_COVERAGE}/$test_package/coverage.html"
    fi
  done
else
  # Single invocation over all packages when no per-package coverage output
  # was requested.
  go test $STI_RACE $STI_TIMEOUT $STI_COVER "${@:2}" $test_packages
fi
|
jhadvig/source-to-image
|
hack/test-go.sh
|
Shell
|
apache-2.0
| 1,981 |
#!/bin/bash
# Verify the three deployment flavors (cluster-wide, single-namespace,
# specific-namespaces) stay in sync: every flavor must ship the same file
# set, and the per-flavor differences must match the golden diffs committed
# under deploy/.diffs. Any mismatch makes a diff below exit >1 or the final
# `diff -r` fail, aborting via set -e.

set -e
set -u
set -o pipefail

# For comparing dirs we need to make sure files are sorted in consistent order
export LC_COLLATE=C

script_full_path=$(readlink -f $0)
script_dir=$(dirname ${script_full_path})

# Work from the repository root (the script's parent directory).
pushd ${script_dir}/.. 1>/dev/null

# Generated diffs land here; defaults to a temp dir unless $1 is given.
outdir=${1:-$(mktemp -d)}

# Space-separated file lists per flavor (GNU find -printf '%P '), excluding
# the role manifest that legitimately differs between flavors
# (clusterrole.yaml vs role.yaml).
cluster_wide_common_files=$(find ./deploy/cluster-wide -type f -not -path ./deploy/cluster-wide/clusterrole.yaml -printf '%P ')
single_namespace_common_files=$(find ./deploy/single-namespace -type f -not -path ./deploy/single-namespace/role.yaml -printf '%P ')
specific_namespaces_common_files=$(find ./deploy/specific-namespaces -type f -not -path ./deploy/specific-namespaces/role.yaml -printf '%P ')

# The flavors must contain identical file names; diff exits non-zero (and
# set -e aborts) otherwise.
diff <(echo "${cluster_wide_common_files}") <(echo "${single_namespace_common_files}")

# Record each file's diff. `diff` exits 1 when contents differ, which is
# expected here; `|| [[ $? == 1 ]]` tolerates exactly that while still
# failing on real errors (exit status 2).
for file in ${cluster_wide_common_files}; do
  diff deploy/cluster-wide/${file} deploy/single-namespace/${file} > ${outdir}/single_namespace-${file}.diff || [[ "$?" == 1 ]]
done
diff deploy/cluster-wide/clusterrole.yaml deploy/single-namespace/role.yaml > ${outdir}/single_namespace-roles.diff || [[ $? == 1 ]]

# Same checks for the specific-namespaces flavor.
diff <(echo "${cluster_wide_common_files}") <(echo "${specific_namespaces_common_files}")
for file in ${cluster_wide_common_files}; do
  diff deploy/cluster-wide/${file} deploy/specific-namespaces/${file} > ${outdir}/specific_namespaces-${file}.diff || [[ "$?" == 1 ]]
done
diff deploy/cluster-wide/clusterrole.yaml deploy/specific-namespaces/role.yaml > ${outdir}/specific_namespaces-roles.diff || [[ $? == 1 ]]

# Finally, the freshly generated diffs must match the committed golden ones.
diff -r deploy/.diffs ${outdir}
|
tnozicka/openshift-acme
|
hack/diff-deploy-files.sh
|
Shell
|
apache-2.0
| 1,537 |
#!/bin/bash
# Import any extra CA certificates found in /data/scripts/extra-certs into
# the system-wide Java truststore so Java processes will trust them.
# Always exits 0 so a missing cert directory does not fail container startup.

if [ -d /data/scripts/extra-certs ]
then
    echo "Importing extra certs from /data/scripts/extra-certs"
    # Guard the cd: without it a failure would import from the wrong CWD.
    cd /data/scripts/extra-certs || exit 0
    for file in *
    do
        # An empty directory leaves the glob as a literal '*'; skip it.
        [ -e "$file" ] || continue
        echo "Importing $file"
        # BUG FIX: quote "$file" so certificate file names containing
        # whitespace work. 'changeit' is the JRE's stock truststore password.
        /usr/bin/keytool -importcert -file "$file" -alias "$file" -keystore /etc/pki/ca-trust/extracted/java/cacerts -storepass changeit -noprompt
    done
    echo "Import extra certs finished."
fi

exit 0
|
bmwcarit/joynr
|
docker/joynr-runtime-environment-base/scripts/docker/setup-extra-certs.sh
|
Shell
|
apache-2.0
| 408 |
#!/bin/bash
# Run every unit-test module under neuralmonkey/tests via `python3 -m`.

set -ex

# Print a red failure banner if any test module exits non-zero.
trap 'echo -e "\033[1;31mSome unit tests have failed!\033[0m"' ERR

# BUG FIX: glob the test files directly instead of word-splitting the
# output of `cd … && echo *.py`, which broke on paths with whitespace.
for path in neuralmonkey/tests/*.py; do
    # An unmatched glob stays literal; skip it so a missing or empty test
    # directory is a no-op (matching the old behavior).
    [ -e "$path" ] || continue
    file=${path##*/}
    python3 -m "neuralmonkey.tests.${file%.py}"
done
|
bastings/neuralmonkey
|
tests/unit-tests_run.sh
|
Shell
|
bsd-3-clause
| 194 |
#!/bin/bash
# Travis CI build driver. On macOS it runs the CTest dashboard build; on
# Linux it runs style checks (clang-format, flake8) and the acquisition
# package's pytest suite, split between Python 2 and Python 3 jobs.

set -e

if [[ $TRAVIS_OS_NAME == 'osx' ]]; then
  which python
  # Expose the locally built googletest and the Homebrew Python 3 to CTest.
  export DYLD_LIBRARY_PATH=/Users/travis/googletest-install/lib:$DYLD_LIBRARY_PATH
  export TOMVIZ_TEST_PYTHON_EXECUTABLE=/usr/local/bin/python3
  ctest -VV -S $TRAVIS_BUILD_DIR/cmake/TravisContinuous.cmake
else
  cd $TRAVIS_BUILD_DIR
  # PY2/PY3 flags select which checks run in which interpreter's job.
  if [ ${TRAVIS_PYTHON_VERSION:0:1} == "3" ]; then export PY3="true"; else export PY2="true"; fi
  # clang-format diff check runs only once, in the Python 2 job.
  if [ -n "${PY2}" ]; then ./scripts/travis/run_clang_format_diff.sh master $TRAVIS_COMMIT; fi
  # Check out the PR head so flake8 and the tests see the proposed code.
  git checkout $TRAVIS_PULL_REQUEST_SHA
  if [ -n "${PY3}" ]; then flake8 --config=flake8.cfg .; fi
  cd "${TRAVIS_BUILD_DIR}/acquisition"
  # Binary wheels only for numpy/scipy to avoid slow source builds on CI.
  pip install --upgrade pip setuptools wheel
  pip install --only-binary=numpy,scipy numpy scipy
  pip install -r requirements-dev.txt
  if [ -n "${PY2}" ]; then pytest -s; fi
  # Skip FEI test for Python 3
  if [ -n "${PY3}" ]; then pytest -s -k "not fei"; fi
fi
|
OpenChemistry/tomviz
|
scripts/travis/build.sh
|
Shell
|
bsd-3-clause
| 938 |
# Create a Datadog dashboard ("Average Memory Free Shell") with a single
# timeseries graph of avg:system.mem.free across all hosts, via the v1
# dashboards REST API.
# NOTE(review): API/application keys are hard-coded here — these appear to
# be documentation sample keys; real credentials should come from the
# environment, never from source control.
api_key=9775a026f1ca7d1c6c5af9d94d9595a4
app_key=87ce4a24b5553d2e482ea8a8500e71b8ad4554ff
curl -X POST -H "Content-type: application/json" \
-d '{
      "graphs" : [{
          "title": "Average Memory Free",
          "definition": {
              "events": [],
              "requests": [
                  {"q": "avg:system.mem.free{*}"}
              ]
          },
          "viz": "timeseries"
      }],
      "title" : "Average Memory Free Shell",
      "description" : "A dashboard with memory info.",
      "template_variables": [{
          "name": "host1",
          "prefix": "host",
          "default": "host:my-host"
      }]
  }' \
"https://app.datadoghq.com/api/v1/dash?api_key=${api_key}&application_key=${app_key}"
|
macobo/documentation
|
code_snippets/api-dashboard-create.sh
|
Shell
|
bsd-3-clause
| 738 |
#!/bin/sh
# Build the TSOS ramdisk image:
#   1. copy the FAT filesystem kernel module into the ramdisk tree,
#   2. strip macOS AppleDouble (._*) files from the tree,
#   3. pack the tree with the mkramdisk tool.
# Abort on the first failure instead of silently packing a broken image.
set -e
cp modules/fs_fat/fs_fat.kmod ramdisk/
dot_clean -m ramdisk/
./tool/mkramdisk ramdisk
|
tristanseifert/TSOS
|
build_ramdisk.sh
|
Shell
|
bsd-3-clause
| 97 |
#!/bin/bash
# Copyright 2019 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Don't run this script standalone. Instead, run from the repository root:
# ./tools/run_tests/run_tests.py -l objc
set -ev
cd $(dirname $0)
BINDIR=../../../bins/$CONFIG
# Sanity check: run_tests.py must have built the interop test server.
[ -f $BINDIR/interop_server ] || {
    echo >&2 "Can't find the test server. Make sure run_tests.py is making" \
        "interop_server before calling this script."
    exit 1
}
# Sanity check: the port server hands out free TCP ports on :32766.
[ -z "$(ps aux |egrep 'port_server\.py.*-p\s32766')" ] && {
    echo >&2 "Can't find the port server. Start port server with tools/run_tests/start_port_server.py."
    exit 1
}
# Reserve one port for the plaintext server and one for the TLS server.
PLAIN_PORT=$(curl localhost:32766/get)
TLS_PORT=$(curl localhost:32766/get)
# Start both interop servers in the background; the EXIT trap kills them
# whenever this script terminates, pass or fail.
$BINDIR/interop_server --port=$PLAIN_PORT --max_send_message_size=8388608 &
$BINDIR/interop_server --port=$TLS_PORT --max_send_message_size=8388608 --use_tls &
trap 'kill -9 `jobs -p` ; echo "EXIT TIME: $(date)"' EXIT
# pipefail keeps xcodebuild's exit status even though its output is piped
# through the noise filters below.
set -o pipefail
XCODEBUILD_FILTER='(^CompileC |^Ld |^ *[^ ]*clang |^ *cd |^ *export |^Libtool |^ *[^ ]*libtool |^CpHeader |^ *builtin-copy )'
# Pick the xcodebuild destination from $PLATFORM (default: iOS simulator).
if [ -z $PLATFORM ]; then
  DESTINATION='name=iPhone 8'
elif [ $PLATFORM == ios ]; then
  DESTINATION='name=iPhone 8'
elif [ $PLATFORM == macos ]; then
  DESTINATION='platform=macOS'
fi
xcodebuild \
    -workspace Tests.xcworkspace \
    -scheme $SCHEME \
    -destination "$DESTINATION" \
    HOST_PORT_LOCALSSL=localhost:$TLS_PORT \
    HOST_PORT_LOCAL=localhost:$PLAIN_PORT \
    HOST_PORT_REMOTE=grpc-test.sandbox.googleapis.com \
    test \
    | egrep -v "$XCODEBUILD_FILTER" \
    | egrep -v '^$' \
    | egrep -v "(GPBDictionary|GPBArray)" -
|
endlessm/chromium-browser
|
third_party/grpc/src/src/objective-c/tests/run_one_test.sh
|
Shell
|
bsd-3-clause
| 2,114 |
#!/bin/bash
# Run the test suite with the "noy" nose plugin, forwarding any extra
# command-line arguments (e.g. a specific test path) to nosetests.
# "$@" is quoted so arguments containing spaces are passed through intact.
nosetests --with-noy "$@"
|
realestate-com-au/dashmat
|
test.sh
|
Shell
|
mit
| 36 |
#!/bin/bash
# set -x
set -e
# Script used to build Ubuntu base vagrant-lxc containers
#
# USAGE:
#   $ cd boxes && sudo ./build-ubuntu-box.sh UBUNTU_RELEASE BOX_ARCH
#
# To enable Chef or any other configuration management tool pass '1' to the
# corresponding env var:
#   $ CHEF=1 sudo -E ./build-ubuntu-box.sh UBUNTU_RELEASE BOX_ARCH
#   $ PUPPET=1 sudo -E ./build-ubuntu-box.sh UBUNTU_RELEASE BOX_ARCH
#   $ SALT=1 sudo -E ./build-ubuntu-box.sh UBUNTU_RELEASE BOX_ARCH
#   $ BABUSHKA=1 sudo -E ./build-ubuntu-box.sh UBUNTU_RELEASE BOX_ARCH
##################################################################################
# 0 - Initial setup and sanity checks
TODAY=$(date -u +"%Y-%m-%d")
NOW=$(date -u)
RELEASE=${1:-"raring"}
ARCH=${2:-"amd64"}
PKG=vagrant-lxc-${RELEASE}-${ARCH}-${TODAY}.box
WORKING_DIR=/tmp/vagrant-lxc-${RELEASE}
# The well-known Vagrant "insecure" public key; Vagrant replaces it with a
# per-box key on first boot.
VAGRANT_KEY="ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA6NF8iallvQVp22WDkTkyrtvp9eWW6A8YVr+kz4TjGYe7gHzIw+niNltGEFHzD8+v1I2YJ6oXevct1YeS0o9HZyN1Q9qgCgzUFtdOKLv6IedplqoPkcmF0aYet2PkEDo3MlTBckFXPITAMzF8dJSIFo9D8HfdOV0IAdx4O7PtixWKn5y2hMNG0zQPyUecp4pzC6kivAIhyfHilFR61RGL+GPXQ2MWZWFYbAGjyiYJnAmCP3NOTd0jMZEnDkbUvxhMmBYSdETk1rRgm+R4LOzFUGaHqHDLKLX+FIPKcF96hrucXzcWyLbIbEgE98OHlnVYCzRdK8jlqm8tehUc9c9WhQ== vagrant insecure public key"
ROOTFS=/var/lib/lxc/${RELEASE}-base/rootfs
# Providing '1' will enable these tools
CHEF=${CHEF:-0}
PUPPET=${PUPPET:-0}
SALT=${SALT:-0}
BABUSHKA=${BABUSHKA:-0}
# Path to files bundled with the box
CWD=`readlink -f .`
LXC_TEMPLATE=${CWD}/common/lxc-template
LXC_CONF=${CWD}/common/lxc.conf
# NOTE(review): "METATADA" typo is preserved — the variable is referenced
# with the same spelling below.
METATADA_JSON=${CWD}/common/metadata.json
# Set up a working dir
mkdir -p $WORKING_DIR
if [ -f "${WORKING_DIR}/${PKG}" ]; then
  echo "Found a box on ${WORKING_DIR}/${PKG} already!"
  exit 1
fi
##################################################################################
# 1 - Create the base container
if $(lxc-ls | grep -q "${RELEASE}-base"); then
  echo "Base container already exists, please remove it with \`lxc-destroy -n ${RELEASE}-base\`!"
  exit 1
else
  lxc-create -n ${RELEASE}-base -t ubuntu -- --release ${RELEASE} --arch ${ARCH}
fi
# Fixes some networking issues
# See https://github.com/fgrehm/vagrant-lxc/issues/91 for more info
echo 'ff02::3 ip6-allhosts' >> ${ROOTFS}/etc/hosts
##################################################################################
# 2 - Prepare vagrant user
# Rename the stock 'ubuntu' user/group to 'vagrant' and set its password.
mv ${ROOTFS}/home/{ubuntu,vagrant}
chroot ${ROOTFS} usermod -l vagrant -d /home/vagrant ubuntu
chroot ${ROOTFS} groupmod -n vagrant ubuntu
echo -n 'vagrant:vagrant' | chroot ${ROOTFS} chpasswd
##################################################################################
# 3 - Setup SSH access and passwordless sudo
# Configure SSH access
mkdir -p ${ROOTFS}/home/vagrant/.ssh
echo $VAGRANT_KEY > ${ROOTFS}/home/vagrant/.ssh/authorized_keys
chroot ${ROOTFS} chown -R vagrant: /home/vagrant/.ssh
# Enable passwordless sudo for users under the "sudo" group
cp ${ROOTFS}/etc/sudoers{,.orig}
sed -i -e \
  's/%sudo\s\+ALL=(ALL\(:ALL\)\?)\s\+ALL/%sudo ALL=NOPASSWD:ALL/g' \
  ${ROOTFS}/etc/sudoers
##################################################################################
# 4 - Add some goodies and update packages
PACKAGES=(vim curl wget man-db bash-completion)
chroot ${ROOTFS} apt-get install ${PACKAGES[*]} -y --force-yes
chroot ${ROOTFS} apt-get upgrade -y --force-yes
##################################################################################
# 5 - Configuration management tools
if [ $CHEF = 1 ]; then
  ./common/install-chef $ROOTFS
fi
if [ $PUPPET = 1 ]; then
  ./common/install-puppet $ROOTFS
fi
if [ $SALT = 1 ]; then
  ./common/install-salt $ROOTFS
fi
if [ $BABUSHKA = 1 ]; then
  ./common/install-babushka $ROOTFS
fi
##################################################################################
# 6 - Free up some disk space
rm -rf ${ROOTFS}/tmp/*
chroot ${ROOTFS} apt-get clean
##################################################################################
# 7 - Build box package
# Compress container's rootfs
cd $(dirname $ROOTFS)
tar --numeric-owner -czf /tmp/vagrant-lxc-${RELEASE}/rootfs.tar.gz ./rootfs/*
# Prepare package contents
cd $WORKING_DIR
cp $LXC_TEMPLATE .
cp $LXC_CONF .
cp $METATADA_JSON .
chmod +x lxc-template
sed -i "s/<TODAY>/${NOW}/" metadata.json
# Vagrant box!
tar -czf $PKG ./*
chmod +rw ${WORKING_DIR}/${PKG}
mkdir -p ${CWD}/output
mv ${WORKING_DIR}/${PKG} ${CWD}/output
# Clean up after ourselves
rm -rf ${WORKING_DIR}
lxc-destroy -n ${RELEASE}-base
echo "The base box was built successfully to ${CWD}/output/${PKG}"
|
fpletz/vagrant-lxc
|
boxes/build-ubuntu-box.sh
|
Shell
|
mit
| 4,592 |
#! /bin/sh
# Copyright (C) 2011-2014 Free Software Foundation, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
# Tests that we can recover from deleted headers generated by 'yacc -d'.
required='cc yacc'
# test-init.sh (Automake test-suite harness) sets up a scratch directory
# and enables exit-on-error, so each bare 'test' below is an assertion.
. test-init.sh
cat >> configure.ac << 'END'
AC_PROG_CC
AC_PROG_YACC
AC_OUTPUT
END
cat > Makefile.am <<'END'
bin_PROGRAMS = p1 p2 p3 p4
# The order in which files are listed in the p*_SOURCES variables
# below is significant, since it causes make failures whenever
# the proper definition of BUILT_SOURCES or the declaration of
# extra dependencies for 'main3.o' are removed.
p1_SOURCES = main1.c parse1.y
p2_SOURCES = main2.c parse2.y
p3_SOURCES = main3.c parse3.y parse3.h
p4_SOURCES = parse4.y
AM_YFLAGS = -d
p2_YFLAGS = -d
BUILT_SOURCES = parse1.h p2-parse2.h
# When we know which files include a yacc-generated header, we
# should be able to just declare dependencies directly instead
# of relying on the BUILT_SOURCES hack, and things should still
# work correctly.
main3.@OBJEXT@ parse3.@OBJEXT@: parse3.h
.PHONY: clean-p3 build-p3
build-p3: p3$(EXEEXT)
clean-p3:
	rm -f p3$(EXEEXT)
END
cat > parse1.y << 'END'
%{
#include "parse1.h"
int yylex () { return 0; }
void yyerror (char *s) { return; }
%}
%token ZARDOZ
%%
x : 'x' {};
%%
END
cat > main1.c << 'END'
#include "parse1.h"
int main (void)
{
  return ZARDOZ + yyparse ();
}
END
# parse2/main2 use the per-program (p2_YFLAGS) header name, parse3/main3
# the plain AM_YFLAGS one; both are derived from the p1 sources.
sed 's/"parse1\.h"/"p2-parse2.h"/' parse1.y > parse2.y
sed 's/"parse1\.h"/"p2-parse2.h"/' main1.c > main2.c
sed 's/"parse1\.h"/"parse3.h"/' parse1.y > parse3.y
sed 's/"parse1\.h"/"parse3.h"/' main1.c > main3.c
cat > parse4.y << 'END'
%{
int yylex () { return 0; }
void yyerror (char *s) { return; }
%}
%%
x : 'x' {};
%%
int main (void)
{
  return 0;
}
END
$ACLOCAL
$AUTOCONF
$AUTOMAKE -a
./configure
$MAKE
headers='parse1.h p2-parse2.h parse3.h parse4.h'
# Check that we remake only the necessary headers.
rm -f $headers
$MAKE parse1.h
test -f parse1.h
test ! -e p2-parse2.h
test ! -e parse3.h
test ! -e parse4.h
rm -f $headers
$MAKE p2-parse2.h
test ! -e parse1.h
test -f p2-parse2.h
test ! -e parse3.h
test ! -e parse4.h
rm -f $headers
$MAKE parse3.h
test ! -e parse1.h
test ! -e p2-parse2.h
test -f parse3.h
test ! -e parse4.h
# Since we declared parse3.h into $(p3_SOURCES), make should be
# able to rebuild it automatically before remaking 'p3'.
rm -f $headers
$MAKE clean-p3
test ! -e parse3.h # Sanity check.
$MAKE build-p3
test -f parse3.h
$MAKE
rm -f $headers
$MAKE parse4.h
test ! -e parse1.h
test ! -e p2-parse2.h
test ! -e parse3.h
test -f parse4.h
# Now remake all the headers together.
rm -f $headers
$MAKE $headers
test -f parse1.h
test -f p2-parse2.h
test -f parse3.h
test -f parse4.h
# Most headers should be remade by "make all".
rm -f $headers
$MAKE all
test -f parse1.h
test -f p2-parse2.h
test -f parse3.h
# parse4.h is not declared in any *_SOURCES variable, nor #included
# by any C source file, so it shouldn't be rebuilt by "make all".
test ! -e parse4.h
:
|
kuym/openocd
|
tools/automake-1.15/t/yacc-deleted-headers.sh
|
Shell
|
gpl-2.0
| 3,527 |
#!/bin/sh
#
# Copyright (c) Authors: http://www.armbian.com/authors
#
# This file is licensed under the terms of the GNU General Public
# License version 2. This program is licensed "as is" without any
# warranty of any kind, whether express or implied.
# Sourced from /etc/profile.d on login: when the shell is interactive and
# a kernel update has flagged /var/run/.reboot_required, remind the user
# to reboot.  Non-interactive shells stay silent.
case "$-" in
*i*)
	printf "\n"
	if [ -f "/var/run/.reboot_required" ]; then
		printf "[\e[0;91m Kernel was updated, please reboot\x1B[0m ]\n\n"
	fi
	;;
esac
|
lipro-armbian/lib
|
packages/bsp/common/etc/profile.d/armbian-check-first-login-reboot.sh
|
Shell
|
gpl-2.0
| 456 |
#! /bin/sh
# This file has been automatically generated. DO NOT EDIT BY HAND!
# Wrapper: run t/tap-planskip-whitespace.sh with the Perl TAP driver.
# In the spirit of VPATH, a copy in the build tree is preferred over the
# one in the source tree.
. test-lib.sh
am_tap_implementation=perl
for candidate_dir in . "$am_top_srcdir"; do
  wrapped="$candidate_dir/t/tap-planskip-whitespace.sh"
  if test -f "$wrapped"; then
    echo "$0: will source $wrapped"
    . "$wrapped"; exit $?
  fi
done
echo "$0: cannot find wrapped test 't/tap-planskip-whitespace.sh'" >&2
exit 99
|
DDTChen/CookieVLC
|
vlc/extras/tools/automake/t/tap-planskip-whitespace-w.sh
|
Shell
|
gpl-2.0
| 503 |
#!/usr/bin/env bash
# Usage:
# In a CMS or LMS container,
# from the directory /edx/app/edxapp/edx-platform, run:
# ./scripts/update-assets-dev.sh
#
# This file is an experimental re-implementation of the asset compilation process
# defined by the pavelib.assets:update_assets task in
# https://github.com/edx/edx-platform/blob/master/pavelib/assets.py.
# As the script name implies, it is only suited to compile assets for usage
# in a development environment, NOT for production.
#
# It was written as part of the effort to move our dev tools off of Ansible and
# Paver, described here: https://github.com/edx/devstack/pull/866
# TODO: If the effort described above is abandoned, then this script should
# probably be deleted.
set -xeuo pipefail
# Compile assets for baked-in XBlocks that still use the old
# XModule asset pipeline.
# (reimplementing pavelib.assets:process_xmodule_assets)
# `xmodule_assets` complains if `DJANGO_SETTINGS_MODULE` is already set,
# so we set it to empty just for this one invocation.
DJANGO_SETTINGS_MODULE='' xmodule_assets common/static/xmodule
# Create JS and CSS vendor directories.
# (reimplementing pavelib.assets:process_npm_assets)
mkdir -p common/static/common/js/vendor
mkdir -p common/static/common/css/vendor
# Copy studio-frontend CSS and JS into vendor directory.
# (reimplementing pavelib.assets:process_npm_assets)
find node_modules/@edx/studio-frontend/dist -type f \( -name \*.css -o -name \*.css.map \) | \
    xargs cp --target-directory=common/static/common/css/vendor
find node_modules/@edx/studio-frontend/dist -type f \! -name \*.css \! -name \*.css.map | \
    xargs cp --target-directory=common/static/common/js/vendor
# Copy certain NPM JS into vendor directory.
# (reimplementing pavelib.assets:process_npm_assets)
cp -f --target-directory=common/static/common/js/vendor \
    node_modules/backbone.paginator/lib/backbone.paginator.js \
    node_modules/backbone/backbone.js \
    node_modules/bootstrap/dist/js/bootstrap.bundle.js \
    node_modules/hls.js/dist/hls.js \
    node_modules/jquery-migrate/dist/jquery-migrate.js \
    node_modules/jquery.scrollto/jquery.scrollTo.js \
    node_modules/jquery/dist/jquery.js \
    node_modules/moment-timezone/builds/moment-timezone-with-data.js \
    node_modules/moment/min/moment-with-locales.js \
    node_modules/picturefill/dist/picturefill.js \
    node_modules/requirejs/require.js \
    node_modules/underscore.string/dist/underscore.string.js \
    node_modules/underscore/underscore.js \
    node_modules/which-country/index.js \
    node_modules/sinon/pkg/sinon.js \
    node_modules/squirejs/src/Squire.js
# Run webpack.
# (reimplementing pavelib.assets:webpack)
# `$(npm bin)` resolves the locally-installed webpack binary.
NODE_ENV=development \
STATIC_ROOT_LMS=/edx/var/edxapp/staticfiles \
STATIC_ROOT_CMS=/edx/var/edxapp/staticfiles/studio \
JS_ENV_EXTRA_CONFIG="{}" \
$(npm bin)/webpack --config=webpack.dev.config.js
# Compile SASS for LMS and CMS.
# (reimplementing pavelib.assets:execute_compile_sass)
./manage.py lms compile_sass lms
./manage.py cms compile_sass cms
# Collect static assets for LMS and CMS.
# (reimplementing pavelib.assets:collect_assets)
./manage.py lms collectstatic --noinput \
    --ignore "fixtures" \
    --ignore "karma_*.js" \
    --ignore "spec" \
    --ignore "spec_helpers" \
    --ignore "spec-helpers" \
    --ignore "xmodule_js" \
    --ignore "geoip" \
    --ignore "sass"
./manage.py cms collectstatic --noinput \
    --ignore "fixtures" \
    --ignore "karma_*.js" \
    --ignore "spec" \
    --ignore "spec_helpers" \
    --ignore "spec-helpers" \
    --ignore "xmodule_js" \
    --ignore "geoip" \
    --ignore "sass"
|
eduNEXT/edx-platform
|
scripts/update-assets-dev.sh
|
Shell
|
agpl-3.0
| 3,547 |
#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
# Build the Arrow C++ library, build the minimal example against it, and
# run the resulting binary.  Intended to run inside the build container.
set -e

cd /io

export ARROW_BUILD_DIR=/build/arrow
export EXAMPLE_BUILD_DIR=/build/example

# Print the five-line section banner used between build phases.
banner() {
  echo
  echo "=="
  echo "== $1"
  echo "=="
  echo
}

banner "Building Arrow C++ library"
./build_arrow.sh

banner "Building example project using Arrow C++ library"
./build_example.sh

banner "Running example project"
${EXAMPLE_BUILD_DIR}/arrow-example
|
apache/arrow
|
cpp/examples/minimal_build/run.sh
|
Shell
|
apache-2.0
| 1,195 |
#!/bin/bash
set -e
# Publish npm packages for selected locations
# NPM_AUTH_TOKEN must be set by the build environment
# The published version is derived from the most recent git tag.
readonly GIT_VERSION=$(git describe --tags)
readonly PUBLISHED_NPM_PACKAGES=(
  # Published Fontello is used by all builds, and is critical to publish
  girder/web_client/fontello
  # These lint configs are used by downstream plugins
  girder/web_client/eslint-config
  girder/web_client/pug-lint-config
  # The raw JS source is used by some downstream 'external builds'
  girder/web_client/src
  # These plugins were published to support downstream external builds, and should be kept updated
  plugins/jobs/girder_jobs/web_client
  plugins/oauth/girder_oauth/web_client
  plugins/gravatar/girder_gravatar/web_client
)
for directory in "${PUBLISHED_NPM_PACKAGES[@]}"; do
  pushd "$directory"
  # Trying to set the auth token via 'npm_config_' environment variables does not work
  # The single-quoted ${NPM_AUTH_TOKEN} is written literally; npm expands
  # it from the environment when it reads .npmrc.  The throwaway .npmrc
  # is removed again right after publishing.
  echo '//registry.npmjs.org/:_authToken=${NPM_AUTH_TOKEN}' > ./.npmrc
  # --allow-same-version keeps re-runs of this script idempotent.
  npm version --allow-same-version --no-git-tag-version "$GIT_VERSION"
  npm publish --access public
  rm --interactive=never ./.npmrc
  popd
done
|
Kitware/girder
|
.circleci/publish_npm.sh
|
Shell
|
apache-2.0
| 1,118 |
filter_first () {
	# Emit only the first input line; empty input yields no output.
	sed 1q || return 0
}
filter_not_first () {
	# Drop the first input line and emit the rest.
	tail -n +2 || return 0
}
filter_last () {
	# Emit only the last input line; empty input yields no output.
	sed -n '$p' || return 0
}
filter_not_last () {
	# Drop the last input line: hold each line back one record and only
	# print it once a successor has been seen.
	awk 'NR > 1 { print prev } { prev = $0 }' || return 0
}
filter_matching () {
	# Print only the input lines matching an awk ERE pattern.
	# $1 (pattern): spliced into the awk program between /.../ delimiters,
	# with any '/' escaped so the pattern text cannot terminate the regex.
	# expect_args is a bashmenot helper defined elsewhere; it binds "$@"
	# into the named local variable.
	local pattern
	expect_args pattern -- "$@"
	awk '/'"${pattern//\//\\/}"'/ { print }' || return 0
}
filter_not_matching () {
	# Print only the input lines NOT matching an awk ERE pattern.
	# $1 (pattern): spliced into the awk program between /.../ delimiters,
	# with any '/' escaped so the pattern text cannot terminate the regex.
	# expect_args is a bashmenot helper defined elsewhere; it binds "$@"
	# into the named local variable.
	local pattern
	expect_args pattern -- "$@"
	awk '!/'"${pattern//\//\\/}"'/ { print }' || return 0
}
match_at_most_one () {
	# Succeed, echoing the single line, when the input has at most one
	# line; fail (printing nothing) when it has two or more.
	# Fix: the line is now emitted through a "%s" format instead of being
	# used as the awk printf FORMAT string itself, so input containing
	# '%' specifiers is reproduced verbatim instead of being mangled.
	awk '	NR == 1 { saved = $0; have = 1 }
		NR == 2 { have = 0; exit 1 }
		END { if (have) printf "%s\n", saved }' || return 1
}
match_at_least_one () {
	# Pass through the non-empty input lines; fail when none were seen.
	awk 'length($0) > 0 { print; found = 1 }
	     END { exit !found }' || return 1
}
match_exactly_one () {
	# Succeed, echoing the line, only when the input is a single
	# non-empty line; any other input fails with status 1.
	if match_at_most_one | match_at_least_one; then
		return 0
	fi
	return 1
}
strip_trailing_newline () {
	# Re-emit the input with lines joined by '\n' but with no newline
	# after the final line.
	awk 'NR == 1 { printf "%s", $0; next }
	     { printf "\n%s", $0 }' || return 0
}
|
haskell-infra/athena
|
bin/bashmenot/src/line.sh
|
Shell
|
apache-2.0
| 786 |
#!/bin/bash
# Launch the NettoSphere Jersey chat sample with logging, heap and
# remote-debug JVM settings.
SCRIPT_DIR=`dirname $0`
SCRIPT_NAME=`basename $0`
CURRENT_DIR=`pwd`
# NOTE(review): SCRIPT_DIR/SCRIPT_NAME/CURRENT_DIR and BUILD_COMMON are
# set but not referenced later in this script — possibly kept for
# sourcing/conventions; confirm before removing.
BUILD_COMMON=${BUILD_COMMON:=NOPE}
unset NETTOSPHERE_OPTS
export NETTOSPHERE_OPTS="-Dlogback.configurationFile=conf/logback.xml "
# Remote JVM debugging on port 9009, non-suspending.
JAVA_DEBUG_OPTIONS="-Xdebug -Xrunjdwp:transport=dt_socket,address=9009,server=y,suspend=n "
# Fixed 4 GB heap, CMS/ParNew collectors, 256 MB young and perm gens.
JAVA_CONFIG_OPTIONS="-Xms4096m -Xmx4096m -XX:NewSize=256m -XX:MaxNewSize=256m -XX:+UseConcMarkSweepGC -XX:+UseParNewGC -XX:PermSize=256m -XX:MaxPermSize=256m"
export JAVA_OPTS="-Duser.timezone=GMT ${JAVA_CONFIG_OPTIONS} ${JAVA_DEBUG_OPTIONS} "
echo "Starting NettoSphere:"
# Run the chat server main class with every jar under lib/ on the classpath.
PARAMETERS="-classpath lib/*: org.nettosphere.samples.chat.NettosphereJerseyChat"
COMMAND="java -server ${NETTOSPHERE_OPTS} ${JAVA_OPTS} ${DEV_OPTS} ${PARAMETERS}"
echo $COMMAND
$COMMAND
|
pjvds/atmosphere-samples
|
nettosphere-samples/jersey-chat/src/main/scripts/nettosphere.sh
|
Shell
|
apache-2.0
| 765 |
# Examples to generate some SQL to start defining tables
#
# The helper functions used below (default_table, start/end, string,
# bool, int, bigint, ref, index, map) are a small schema-definition DSL
# sourced from tables-common.sh — presumably each call emits the
# corresponding SQL DDL; confirm against tables-common.sh.
. tables-common.sh
# Simple tables with only the default columns.
default_table account
default_table agent_group
default_table zone
start agent
string uri
bool managed_config 1
ref agent_group
ref zone
end agent
start credential
string public_value 4096
string secret_value 4096
end credential
start host
string uri
bigint compute_free
bigint compute_total
ref agent
ref zone
# Indexed to support scheduling queries by available capacity.
index host compute_free
end host
start image
string url
bool is_public
bigint physical_size_mb
bigint virtual_size_mb
string checksum
string format
end image
start offering
bool is_public
end offering
start instance
string allocation_state
bigint compute
bigint memory_mb
ref image
ref offering
string hostname
ref zone
end instance
start storage_pool
bigint physical_total_size_mb
bigint virtual_total_size_mb
bool external
ref agent
ref zone
end storage_pool
start volume
bigint physical_size_mb
bigint virtual_size_mb
int device_number
string format
string allocation_state
string attached_state
ref instance
ref image
ref offering
ref zone
end volume
# Many-to-many join tables.
map instance host
map image storage_pool
map storage_pool host
map volume storage_pool
|
cjellick/cattle
|
scripts/mysql/tables.sh
|
Shell
|
apache-2.0
| 1,141 |
#!/bin/sh
# Tear down the BOSH director previously created with `bosh create-env`,
# replaying the same manifest, ops files and variable files so the CPI can
# locate and delete the director VM on GCP.
#
# Required environment:
#   BBL_STATE_DIR                    - bbl state directory root
#   BBL_GCP_SERVICE_ACCOUNT_KEY_PATH - GCP service-account key file
#   BBL_GCP_PROJECT_ID, BBL_GCP_ZONE - GCP project and zone of the director
bosh delete-env \
  ${BBL_STATE_DIR}/bosh-deployment/bosh.yml \
  --state  ${BBL_STATE_DIR}/vars/bosh-state.json \
  --vars-store  ${BBL_STATE_DIR}/vars/director-vars-store.yml \
  --vars-file  ${BBL_STATE_DIR}/vars/director-vars-file.yml \
  -o  ${BBL_STATE_DIR}/bosh-deployment/gcp/cpi.yml \
  -o  ${BBL_STATE_DIR}/bosh-deployment/jumpbox-user.yml \
  -o  ${BBL_STATE_DIR}/bosh-deployment/uaa.yml \
  -o  ${BBL_STATE_DIR}/bosh-deployment/credhub.yml \
  -o  ${BBL_STATE_DIR}/bbl-ops-files/gcp/bosh-director-ephemeral-ip-ops.yml \
  --var-file  gcp_credentials_json="${BBL_GCP_SERVICE_ACCOUNT_KEY_PATH}" \
  -v  project_id="${BBL_GCP_PROJECT_ID}" \
  -v  zone="${BBL_GCP_ZONE}"
|
pivotal-cf-experimental/bosh-bootloader
|
storage/fixtures/upped/delete-director.sh
|
Shell
|
apache-2.0
| 690 |
#!/usr/bin/env sh
# test_net_seg.bin test_proto pre_train_model label.txt outputfolder [CPU/GPU]
# Dump the parameters of a trained 3dnormal Caffe model using the
# showParameters tool; GLOG_logtostderr=1 sends the log to stderr.
# NOTE(review): paths are hard-coded to cluster NFS mounts.
ROOTFILE=/nfs/hn46/xiaolonw/cnncode/caffe-3dnormal_r_n
GLOG_logtostderr=1 $ROOTFILE/build/tools/showParameters.bin /nfs/hn46/xiaolonw/cnncode/caffe-3dnormal_r_n/prototxt/3dnormal_win_cls_denoise_fc2/seg_test_2fc_3dnormal.prototxt /nfs/ladoga_no_backups/users/xiaolonw/seg_cls/sliding_window/models/3dnormal_win_cls_denoise_fc2/3dnormal__iter_280000
|
xiaolonw/caffe-3dnormal_joint_past
|
scripts/3dnormal_r_denoise/showPara.sh
|
Shell
|
bsd-2-clause
| 553 |
#!/bin/sh
# Regenerate the Oniguruma Unicode case-folding tables
# (unicode_fold_data.c, unicode_unfold_key.c, unicode_fold{1,2,3}_key.c)
# from the gperf key files emitted by make_unicode_fold_data.py.
# Requires python, gperf and perl on PATH.
#
# Fix: previously the script unconditionally ended with `exit 0`, so a
# failing gperf/converter step still reported success; `set -e` now
# propagates the first failure.
set -e
GPERF=gperf
GPERF_OPT='-n -C -T -c -t -j1 -L ANSI-C '
# Emits unicode_fold_data.c and the *.gperf key files consumed below.
./make_unicode_fold_data.py > unicode_fold_data.c
# The unfold key uses a distinct -F default and its own converter.
TMP0=gperf0.tmp
${GPERF} ${GPERF_OPT} -F,-1,0 -N onigenc_unicode_unfold_key unicode_unfold_key.gperf > ${TMP0}
./gperf_unfold_key_conv.py < ${TMP0} > unicode_unfold_key.c
# fold1/2/3 share the same pipeline, parameterised by the fold length n.
for n in 1 2 3; do
	tmp=gperf${n}.tmp
	${GPERF} ${GPERF_OPT} -F,-1 -N onigenc_unicode_fold${n}_key unicode_fold${n}_key.gperf > ${tmp}
	./gperf_fold_key_conv.py ${n} < ${tmp} > unicode_fold${n}_key.c
	rm -f ${tmp}
done
# remove redundant EOLs before EOF
for generated in unicode_fold_data.c unicode_fold1_key.c unicode_fold2_key.c unicode_fold3_key.c unicode_unfold_key.c; do
	perl -i -pe 'BEGIN{undef $/}s/\n\n*\z/\n/;' ${generated}
done
# Clean up intermediate inputs.
rm -f ${TMP0}
rm -f unicode_unfold_key.gperf unicode_fold1_key.gperf unicode_fold2_key.gperf unicode_fold3_key.gperf
exit 0
|
ProsoftEngineering/core
|
prosoft/core/modules/regex/vendor/onig/onig/src/make_unicode_fold.sh
|
Shell
|
bsd-3-clause
| 1,298 |
#!/bin/bash
# Bioconda post-link hook: download the rRDPData Bioconductor data
# package from the first mirror that yields the expected md5, install it
# into the conda R library, and clean up.  (Auto-generated script.)
FN="rRDPData_1.6.0.tar.gz"
URLS=(
  "https://bioconductor.org/packages/3.10/data/experiment/src/contrib/rRDPData_1.6.0.tar.gz"
  "https://bioarchive.galaxyproject.org/rRDPData_1.6.0.tar.gz"
  "https://depot.galaxyproject.org/software/bioconductor-rrdpdata/bioconductor-rrdpdata_1.6.0_src_all.tar.gz"
)
MD5="3cc6fb73d1fe2cb6a1eee44269a87d57"
# Use a staging area in the conda dir rather than temp dirs, both to avoid
# permission issues as well as to have things downloaded in a predictable
# manner.
STAGING=$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM
mkdir -p $STAGING
TARBALL=$STAGING/$FN
SUCCESS=0
for URL in ${URLS[@]}; do
  curl $URL > $TARBALL
  [[ $? == 0 ]] || continue
  # Platform-specific md5sum checks.
  # NOTE(review): 'else if ... fi fi' is a nested if, not elif — it works
  # but elif would read more clearly.
  if [[ $(uname -s) == "Linux" ]]; then
    if md5sum -c <<<"$MD5  $TARBALL"; then
      SUCCESS=1
      break
    fi
  else if [[ $(uname -s) == "Darwin" ]]; then
    if [[ $(md5 $TARBALL | cut -f4 -d " ") == "$MD5" ]]; then
      SUCCESS=1
      break
    fi
  fi
  fi
done
if [[ $SUCCESS != 1 ]]; then
  echo "ERROR: post-link.sh was unable to download any of the following URLs with the md5sum $MD5:"
  printf '%s\n' "${URLS[@]}"
  exit 1
fi
# Install and clean up
R CMD INSTALL --library=$PREFIX/lib/R/library $TARBALL
rm $TARBALL
rmdir $STAGING
|
Luobiny/bioconda-recipes
|
recipes/bioconductor-rrdpdata/post-link.sh
|
Shell
|
mit
| 1,288 |
#!/bin/bash
# Bioconda post-link hook: download the lumiRatAll.db Bioconductor
# annotation package from the first mirror that yields the expected md5,
# install it into the conda R library, and clean up.  (Auto-generated.)
FN="lumiRatAll.db_1.22.0.tar.gz"
# NOTE(review): the last two mirror entries are duplicates — harmless,
# the second is only tried if the first download/checksum fails.
URLS=(
  "https://bioconductor.org/packages/3.10/data/annotation/src/contrib/lumiRatAll.db_1.22.0.tar.gz"
  "https://bioarchive.galaxyproject.org/lumiRatAll.db_1.22.0.tar.gz"
  "https://depot.galaxyproject.org/software/bioconductor-lumiratall.db/bioconductor-lumiratall.db_1.22.0_src_all.tar.gz"
  "https://depot.galaxyproject.org/software/bioconductor-lumiratall.db/bioconductor-lumiratall.db_1.22.0_src_all.tar.gz"
)
MD5="65027624574d5e33f18b0a54a54f4be9"
# Use a staging area in the conda dir rather than temp dirs, both to avoid
# permission issues as well as to have things downloaded in a predictable
# manner.
STAGING=$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM
mkdir -p $STAGING
TARBALL=$STAGING/$FN
SUCCESS=0
for URL in ${URLS[@]}; do
  curl $URL > $TARBALL
  [[ $? == 0 ]] || continue
  # Platform-specific md5sum checks.
  # NOTE(review): 'else if ... fi fi' is a nested if, not elif — it works
  # but elif would read more clearly.
  if [[ $(uname -s) == "Linux" ]]; then
    if md5sum -c <<<"$MD5  $TARBALL"; then
      SUCCESS=1
      break
    fi
  else if [[ $(uname -s) == "Darwin" ]]; then
    if [[ $(md5 $TARBALL | cut -f4 -d " ") == "$MD5" ]]; then
      SUCCESS=1
      break
    fi
  fi
  fi
done
if [[ $SUCCESS != 1 ]]; then
  echo "ERROR: post-link.sh was unable to download any of the following URLs with the md5sum $MD5:"
  printf '%s\n' "${URLS[@]}"
  exit 1
fi
# Install and clean up
R CMD INSTALL --library=$PREFIX/lib/R/library $TARBALL
rm $TARBALL
rmdir $STAGING
|
Luobiny/bioconda-recipes
|
recipes/bioconductor-lumiratall.db/post-link.sh
|
Shell
|
mit
| 1,438 |
#!/bin/sh
# CircleCI build step: compose the package version as MAJ.MIN.BUILD from
# deployment/VERSION_MAJ_MIN plus $CIRCLE_BUILD_NUM, record it in the
# ./VERSION file, and build a source distribution.
# Fail fast (missing version file, sdist error) instead of continuing
# with a bogus version.
set -e
VERSION="$(cat deployment/VERSION_MAJ_MIN).${CIRCLE_BUILD_NUM}"
export VERSION
echo "$VERSION" > VERSION
python setup.py sdist
|
dhenderson1/MyFlaskApp
|
deployment/circleci_build.sh
|
Shell
|
mit
| 124 |
#!/usr/bin/env bash
# Screenshot regression test for rofi: launch rofi in run mode, type
# "tru" via xdotool, trigger the screenshot binding (Alt+Shift+s), accept
# with Return, then check that rofi exited cleanly and out.png exists.
# The sleeps give rofi/X time to process each synthetic key event —
# timing-sensitive by nature.
export ROFI_PNG_OUTPUT=out.png
rofi -show run &
RPID=$!
# send enter.
sleep 5;
xdotool key 't'
sleep 0.4
xdotool key 'r'
sleep 0.4
xdotool key 'u'
sleep 0.4
xdotool key Alt+Shift+s
sleep 0.4
xdotool key Return
# Get result, kill xvfb
wait ${RPID}
RETV=$?
if [ ! -f out.png ]
then
  echo "Failed to create screenshot"
  exit 1
fi
# Propagate rofi's own exit status.
exit ${RETV}
|
jasperla/rofi
|
test/run_screenshot_test.sh
|
Shell
|
mit
| 374 |
#! /bin/sh
# $Id: genconfig.sh,v 1.39 2010/09/21 15:36:12 nanard Exp $
# miniupnp daemon
# http://miniupnp.free.fr or http://miniupnp.tuxfamily.org/
# (c) 2006-2010 Thomas Bernard
# This software is subject to the conditions detailed in the
# LICENCE file provided within the distribution
RM="rm -f"
CONFIGFILE="config.h"
CONFIGMACRO="__CONFIG_H__"
# version reported in XML descriptions
#UPNP_VERSION=20070827
UPNP_VERSION=`date +"%Y%m%d"`
# Facility to syslog
LOG_MINIUPNPD="LOG_DAEMON"
# detecting the OS name and version
OS_NAME=`uname -s`
OS_VERSION=`uname -r`
# pfSense special case
if [ -f /etc/platform ]; then
if [ `cat /etc/platform` = "pfSense" ]; then
OS_NAME=pfSense
OS_VERSION=`cat /etc/version`
fi
fi
${RM} ${CONFIGFILE}
echo "/* MiniUPnP Project" >> ${CONFIGFILE}
echo " * http://miniupnp.free.fr/ or http://miniupnp.tuxfamily.org/" >> ${CONFIGFILE}
echo " * (c) 2006-2010 Thomas Bernard" >> ${CONFIGFILE}
echo " * generated by $0 on `date` */" >> ${CONFIGFILE}
echo "#ifndef $CONFIGMACRO" >> ${CONFIGFILE}
echo "#define $CONFIGMACRO" >> ${CONFIGFILE}
echo "" >> ${CONFIGFILE}
echo "#include <inttypes.h>" >> ${CONFIGFILE}
echo "" >> ${CONFIGFILE}
echo "#define UPNP_VERSION \"$UPNP_VERSION\"" >> ${CONFIGFILE}
# OS Specific stuff
case $OS_NAME in
OpenBSD)
MAJORVER=`echo $OS_VERSION | cut -d. -f1`
MINORVER=`echo $OS_VERSION | cut -d. -f2`
#echo "OpenBSD majorversion=$MAJORVER minorversion=$MINORVER"
# rtableid was introduced in OpenBSD 4.0
if [ $MAJORVER -ge 4 ]; then
echo "#define PFRULE_HAS_RTABLEID" >> ${CONFIGFILE}
fi
# from the 3.8 version, packets and bytes counters are double : in/out
if [ \( $MAJORVER -ge 4 \) -o \( $MAJORVER -eq 3 -a $MINORVER -ge 8 \) ]; then
echo "#define PFRULE_INOUT_COUNTS" >> ${CONFIGFILE}
fi
# from the 4.7 version, new pf
if [ \( $MAJORVER -ge 5 \) -o \( $MAJORVER -eq 4 -a $MINORVER -ge 7 \) ]; then
echo "#define PF_NEWSTYLE" >> ${CONFIGFILE}
fi
echo "#define USE_PF 1" >> ${CONFIGFILE}
FW=pf
OS_URL=http://www.openbsd.org/
;;
FreeBSD)
VER=`grep '#define __FreeBSD_version' /usr/include/sys/param.h | awk '{print $3}'`
if [ $VER -ge 700049 ]; then
echo "#define PFRULE_INOUT_COUNTS" >> ${CONFIGFILE}
fi
# new way to see which one to use PF or IPF.
# see http://miniupnp.tuxfamily.org/forum/viewtopic.php?p=957
# source file with handy subroutines like checkyesno
. /etc/rc.subr
# source config file so we can probe vars
. /etc/rc.conf
if checkyesno ipfilter_enable; then
echo "Using ipf"
FW=ipf
echo "#define USE_IPF 1" >> ${CONFIGFILE}
elif checkyesno pf_enable; then
echo "Using pf"
FW=pf
echo "#define USE_PF 1" >> ${CONFIGFILE}
# TODO : Add support for IPFW
# echo "#define USE_IPFW 1" >> ${CONFIGFILE}
# FW=ipfw
else
echo "Could not detect usage of ipf or pf. Compiling for pf by default"
FW=pf
echo "#define USE_PF 1" >> ${CONFIGFILE}
fi
OS_URL=http://www.freebsd.org/
;;
pfSense)
# we need to detect if PFRULE_INOUT_COUNTS macro is needed
echo "#define USE_PF 1" >> ${CONFIGFILE}
FW=pf
OS_URL=http://www.pfsense.com/
;;
NetBSD)
# source file with handy subroutines like checkyesno
. /etc/rc.subr
# source config file so we can probe vars
. /etc/rc.conf
if checkyesno pf; then
echo "#define USE_PF 1" >> ${CONFIGFILE}
FW=pf
elif checkyesno ipfilter; then
echo "#define USE_IPF 1" >> ${CONFIGFILE}
FW=ipf
else
echo "Could not detect ipf nor pf, defaulting to pf."
echo "#define USE_PF 1" >> ${CONFIGFILE}
FW=pf
fi
OS_URL=http://www.netbsd.org/
;;
DragonFly)
# source file with handy subroutines like checkyesno
. /etc/rc.subr
# source config file so we can probe vars
. /etc/rc.conf
if checkyesno pf; then
echo "#define USE_PF 1" >> ${CONFIGFILE}
FW=pf
elif checkyesno ipfilter; then
echo "#define USE_IPF 1" >> ${CONFIGFILE}
FW=ipf
else
echo "Could not detect ipf nor pf, defaulting to pf."
echo "#define USE_PF 1" >> ${CONFIGFILE}
FW=pf
fi
echo "#define USE_PF 1" >> ${CONFIGFILE}
OS_URL=http://www.dragonflybsd.org/
;;
SunOS)
echo "#define USE_IPF 1" >> ${CONFIGFILE}
FW=ipf
echo "#define LOG_PERROR 0" >> ${CONFIGFILE}
echo "#define SOLARIS_KSTATS 1" >> ${CONFIGFILE}
OS_URL=http://www.sun.com/solaris/
;;
Linux)
	OS_URL=http://www.kernel.org/
	# split the kernel version string into its numeric components
	# ($() replaces the legacy backticks; $OS_VERSION is quoted so an
	# empty/odd value cannot word-split)
	KERNVERA=$(echo "$OS_VERSION" | awk -F. '{print $1}')
	KERNVERB=$(echo "$OS_VERSION" | awk -F. '{print $2}')
	KERNVERC=$(echo "$OS_VERSION" | awk -F. '{print $3}')
	KERNVERD=$(echo "$OS_VERSION" | awk -F. '{print $4}')
	#echo "$KERNVERA.$KERNVERB.$KERNVERC.$KERNVERD"
	# Debian GNU/Linux special case
	if [ -f /etc/debian_version ]; then
		OS_NAME=Debian
		OS_VERSION=$(cat /etc/debian_version)
		OS_URL=http://www.debian.org/
	fi
	# use lsb_release (Linux Standard Base) when available;
	# 'command -v' is the POSIX replacement for the non-standard 'which'
	# and never prints an error message into the variable
	LSB_RELEASE=$(command -v lsb_release)
	if [ -n "$LSB_RELEASE" ]; then
		OS_NAME=$("$LSB_RELEASE" -i -s)
		OS_VERSION=$("$LSB_RELEASE" -r -s)
		case "$OS_NAME" in
		Debian)
			OS_URL=http://www.debian.org/
			# prefer the release codename over the numeric version
			OS_VERSION=$("$LSB_RELEASE" -c -s)
			;;
		Ubuntu)
			OS_URL=http://www.ubuntu.com/
			OS_VERSION=$("$LSB_RELEASE" -c -s)
			;;
		esac
	fi
	echo "#define USE_NETFILTER 1" >> ${CONFIGFILE}
	FW=netfilter
	;;
Darwin)
	# Mac OS X: configure for the ipfw firewall
	printf '#define USE_IPFW 1\n' >> "${CONFIGFILE}"
	FW=ipfw
	OS_URL=http://developer.apple.com/macosx
	;;
*)
	# unsupported platform: report and bail out
	printf '%s\n' "Unknown OS : $OS_NAME"
	printf '%s\n' "Please contact the author at http://miniupnp.free.fr/ or http://miniupnp.tuxfamily.org/."
	exit 1
	;;
esac
# summary for the user, then append the OS identification macros and the
# user-tunable option block to config.h
echo "Configuring compilation for [$OS_NAME] [$OS_VERSION] with [$FW] firewall software."
echo "Please edit config.h for more compilation options."
echo "#define OS_NAME \"$OS_NAME\"" >> ${CONFIGFILE}
echo "#define OS_VERSION \"$OS_NAME/$OS_VERSION\"" >> ${CONFIGFILE}
echo "#define OS_URL \"${OS_URL}\"" >> ${CONFIGFILE}
echo "" >> ${CONFIGFILE}
echo "/* syslog facility to be used by miniupnpd */" >> ${CONFIGFILE}
echo "#define LOG_MINIUPNPD ${LOG_MINIUPNPD}" >> ${CONFIGFILE}
echo "" >> ${CONFIGFILE}
echo "/* Uncomment the following line to allow miniupnpd to be" >> ${CONFIGFILE}
echo " * controlled by miniupnpdctl */" >> ${CONFIGFILE}
echo "/*#define USE_MINIUPNPDCTL*/" >> ${CONFIGFILE}
echo "" >> ${CONFIGFILE}
echo "/* Comment the following line to disable NAT-PMP operations */" >> ${CONFIGFILE}
echo "#define ENABLE_NATPMP" >> ${CONFIGFILE}
echo "" >> ${CONFIGFILE}
echo "/* Uncomment the following line to enable generation of" >> ${CONFIGFILE}
echo " * filter rules with pf */" >> ${CONFIGFILE}
echo "/*#define PF_ENABLE_FILTER_RULES*/">> ${CONFIGFILE}
echo "" >> ${CONFIGFILE}
echo "/* Uncomment the following line to enable caching of results of" >> ${CONFIGFILE}
echo " * the getifstats() function */" >> ${CONFIGFILE}
echo "/*#define ENABLE_GETIFSTATS_CACHING*/" >> ${CONFIGFILE}
echo "/* The cache duration is indicated in seconds */" >> ${CONFIGFILE}
echo "#define GETIFSTATS_CACHING_DURATION 2" >> ${CONFIGFILE}
echo "" >> ${CONFIGFILE}
echo "/* Uncomment the following line to enable multiple external ip support */" >> ${CONFIGFILE}
echo "/* note : That is EXPERIMENTAL, do not use that unless you know perfectly what you are doing */" >> ${CONFIGFILE}
echo "/* Dynamic external ip adresses are not supported when this option is enabled." >> ${CONFIGFILE}
echo " * Also note that you would need to configure your .conf file accordingly. */" >> ${CONFIGFILE}
echo "/*#define MULTIPLE_EXTERNAL_IP*/" >> ${CONFIGFILE}
echo "" >> ${CONFIGFILE}
echo "/* Comment the following line to use home made daemonize() func instead" >> ${CONFIGFILE}
echo " * of BSD daemon() */" >> ${CONFIGFILE}
echo "#define USE_DAEMON" >> ${CONFIGFILE}
echo "" >> ${CONFIGFILE}
echo "/* Uncomment the following line to enable lease file support */" >> ${CONFIGFILE}
echo "/*#define ENABLE_LEASEFILE*/" >> ${CONFIGFILE}
echo "" >> ${CONFIGFILE}
echo "/* Define one or none of the two following macros in order to make some" >> ${CONFIGFILE}
echo " * clients happy. It will change the XML Root Description of the IGD." >> ${CONFIGFILE}
echo " * Enabling the Layer3Forwarding Service seems to be the more compatible" >> ${CONFIGFILE}
echo " * option. */" >> ${CONFIGFILE}
echo "/*#define HAS_DUMMY_SERVICE*/" >> ${CONFIGFILE}
echo "#define ENABLE_L3F_SERVICE" >> ${CONFIGFILE}
echo "" >> ${CONFIGFILE}
echo "/* Experimental UPnP Events support. */" >> ${CONFIGFILE}
echo "/*#define ENABLE_EVENTS*/" >> ${CONFIGFILE}
echo "" >> ${CONFIGFILE}
echo "/* include interface name in pf and ipf rules */" >> ${CONFIGFILE}
echo "#define USE_IFNAME_IN_RULES" >> ${CONFIGFILE}
echo "" >> ${CONFIGFILE}
echo "/* Experimental NFQUEUE support. */" >> ${CONFIGFILE}
echo "/*#define ENABLE_NFQUEUE*/" >> ${CONFIGFILE}
echo "" >> ${CONFIGFILE}
# closes the include guard presumably opened earlier in this script
# (outside the visible chunk) - confirm
echo "#endif" >> ${CONFIGFILE}
exit 0
|
smx-smx/dsl-n55u-bender
|
release/src/router/miniupnpd/genconfig.sh
|
Shell
|
gpl-2.0
| 8,797 |
#!/bin/sh
# dns based ad/abuse domain blocking
# Copyright (c) 2015-2020 Dirk Brenken ([email protected])
# This is free software, licensed under the GNU General Public License v3.
# set (s)hellcheck exceptions
# shellcheck disable=1091,2016,2039,2059,2086,2143,2181,2188
# set initial defaults
#
export LC_ALL=C
export PATH="/usr/sbin:/usr/bin:/sbin:/bin"
# NOTE(review): 'set -o pipefail' under /bin/sh relies on the shell
# (busybox ash on OpenWrt) supporting this option - confirm
set -o pipefail
adb_ver="4.0.6"
# feature switches and tunables; all of these may be overridden by the
# uci configuration loaded in f_conf
adb_enabled=0
adb_debug=0
adb_forcedns=0
adb_maxqueue=4
adb_dnsfilereset=0
adb_dnsflush=0
adb_dnstimeout=20
adb_safesearch=0
adb_safesearchlist=""
adb_safesearchmod=0
adb_report=0
adb_trigger=""
adb_triggerdelay=0
adb_backup=1
adb_mail=0
adb_mailcnt=0
adb_jail=0
adb_dns=""
# filesystem locations (blocklist prefix, temp/backup/report/jail dirs)
adb_dnsprefix="adb_list"
adb_locallist="blacklist whitelist"
adb_tmpbase="/tmp"
adb_backupdir="/tmp"
adb_reportdir="/tmp"
adb_jaildir="/tmp"
adb_pidfile="/var/run/adblock.pid"
adb_blacklist="/etc/adblock/adblock.blacklist"
adb_whitelist="/etc/adblock/adblock.whitelist"
adb_ubusservice="/etc/adblock/adblock.monitor"
adb_mailservice="/etc/adblock/adblock.mail"
adb_dnsfile="${adb_dnsprefix}.overall"
adb_dnsjail="${adb_dnsprefix}.jail"
adb_srcarc="/etc/adblock/adblock.sources.gz"
adb_srcfile="${adb_tmpbase}/adb_sources.json"
adb_rtfile="${adb_tmpbase}/adb_runtime.json"
# external command probes (empty when the tool is not installed)
adb_loggercmd="$(command -v logger)"
adb_dumpcmd="$(command -v tcpdump)"
adb_lookupcmd="$(command -v nslookup)"
adb_fetchutil=""
adb_portlist="53 853 5353"
adb_repiface=""
adb_replisten="53"
adb_repchunkcnt="5"
adb_repchunksize="1"
adb_lookupdomain="example.com"
# first positional parameter selects the action, defaulting to "start"
adb_action="${1:-"start"}"
adb_packages=""
adb_sources=""
adb_cnt=""
# load & check adblock environment
#
f_load()
{
# gather short system info (model + release) for the runtime json
adb_sysver="$(ubus -S call system board 2>/dev/null | jsonfilter -e '@.model' -e '@.release.description' | \
"${adb_awk}" 'BEGIN{ORS=", "}{print $0}' | "${adb_awk}" '{print substr($0,1,length($0)-2)}')"
# NOTE(review): adb_awk is not set in the visible defaults block -
# confirm it is initialized elsewhere before f_load runs
f_conf
# dns backend / fetch utility detection is not needed for pure reporting
if [ "${adb_action}" != "report" ]
then
f_dns
f_fetch
fi
# when the service is disabled: clean up dns artifacts, stop the
# background monitor, update the runtime state and exit early
if [ "${adb_enabled}" -eq 0 ]
then
f_extconf
f_temp
f_rmdns
f_bgserv "stop"
f_jsnup "disabled"
f_log "info" "adblock is currently disabled, please set the config option 'adb_enabled' to '1' to use this service"
exit 0
fi
}
# check & set environment
#
f_env()
{
# record the start time for runtime statistics in f_jsnup
adb_starttime="$(date "+%s")"
f_log "info" "adblock instance started ::: action: ${adb_action}, priority: ${adb_nice:-"0"}, pid: ${$}"
f_jsnup "running"
f_extconf
f_temp
# optionally reset the active blocklist to the bare header and reload
# the dns backend before processing
if [ "${adb_dnsflush}" -eq 1 ]
then
printf "${adb_dnsheader}" > "${adb_dnsdir}/${adb_dnsfile}"
f_dnsup
fi
# unpack the compressed source archive on first use
if [ ! -r "${adb_srcfile}" ]
then
if [ -r "${adb_srcarc}" ]
then
zcat "${adb_srcarc}" > "${adb_srcfile}"
else
f_log "err" "adblock source archive not found"
fi
fi
# load the json catalog that describes all blocklist sources
if [ -r "${adb_srcfile}" ]
then
json_load_file "${adb_srcfile}"
else
f_log "err" "adblock source file not found"
fi
}
# load adblock config
#
f_conf()
{
local cnt=0 cnt_max=10
# restore a valid default config when the current one is missing or
# still contains the old 'source' sections (pre-4.x format)
if [ ! -r "/etc/config/adblock" ] || [ -n "$(uci -q show adblock.@source[0])" ]
then
if { [ -r "/etc/config/adblock-opkg" ] && [ -z "$(uci -q show adblock-opkg.@source[0])" ]; } || \
{ [ -r "/rom/etc/config/adblock" ] && [ -z "$(uci -q show /rom/etc/config/adblock.@source[0])" ]; }
then
if [ -r "/etc/config/adblock" ]
then
cp -pf "/etc/config/adblock" "/etc/config/adblock-backup"
fi
if [ -r "/etc/config/adblock-opkg" ]
then
cp -pf "/etc/config/adblock-opkg" "/etc/config/adblock"
elif [ -r "/rom/etc/config/adblock" ]
then
cp -pf "/rom/etc/config/adblock" "/etc/config/adblock"
fi
f_log "info" "missing or old adblock config replaced with new valid default config"
else
f_log "err" "unrecoverable adblock config error, please re-install the package via opkg with the '--force-reinstall --force-maintainer' options"
fi
fi
# uci callbacks: map every option to a shell variable of the same name;
# list entries are appended for the source and safesearch lists
config_cb()
{
option_cb()
{
local option="${1}"
local value="${2}"
eval "${option}=\"${value}\""
}
list_cb()
{
local option="${1}"
local value="${2}"
if [ "${option}" = "adb_sources" ]
then
eval "${option}=\"$(printf "%s" "${adb_sources}") ${value}\""
elif [ "${option}" = "adb_safesearchlist" ]
then
eval "${option}=\"$(printf "%s" "${adb_safesearchlist}") ${value}\""
fi
}
}
config_load adblock
# the opkg status database is needed for fetch/dns auto-detection;
# wait up to cnt_max seconds for it to become available
if [ -z "${adb_fetchutil}" ] || [ -z "${adb_dns}" ]
then
while [ -z "${adb_packages}" ] && [ "${cnt}" -le "${cnt_max}" ]
do
adb_packages="$(opkg list-installed 2>/dev/null)"
cnt=$((cnt+1))
sleep 1
done
if [ -z "${adb_packages}" ]
then
f_log "err" "local opkg package repository is not available, please set 'adb_fetchutil' and 'adb_dns' manually"
fi
fi
}
# load dns backend config
#
f_dns()
{
local util utils dns_up cnt=0
if [ -z "${adb_dns}" ]
then
utils="knot-resolver named unbound dnsmasq raw"
for util in ${utils}
do
if [ "${util}" = "raw" ] || [ -n "$(printf "%s" "${adb_packages}" | grep "^${util}")" ]
then
if [ "${util}" = "knot-resolver" ]
then
util="kresd"
fi
if [ "${util}" = "raw" ] || [ -x "$(command -v "${util}")" ]
then
adb_dns="${util}"
uci_set adblock global adb_dns "${util}"
f_uci "adblock"
break
fi
fi
done
elif [ "${adb_dns}" != "raw" ] && [ ! -x "$(command -v "${adb_dns}")" ]
then
unset adb_dns
fi
if [ -n "${adb_dns}" ]
then
case "${adb_dns}" in
"dnsmasq")
adb_dnsinotify="${adb_dnsinotify:-"0"}"
adb_dnsinstance="${adb_dnsinstance:-"0"}"
adb_dnsuser="${adb_dnsuser:-"dnsmasq"}"
adb_dnsdir="${adb_dnsdir:-"/tmp/dnsmasq.d"}"
adb_dnsheader="${adb_dnsheader}"
adb_dnsdeny="${adb_dnsdeny:-"${adb_awk} '{print \"address=/\"\$0\"/\"}'"}"
adb_dnsallow="${adb_dnsallow:-"${adb_awk} '{print \"local=/\"\$0\"/#\"}'"}"
adb_dnssafesearch="${adb_dnssafesearch:-"${adb_awk} -v item=\"\$item\" '{print \"address=/\"\$0\"/\"item\"\"}'"}"
adb_dnsstop="${adb_dnsstop:-"address=/#/"}"
;;
"unbound")
adb_dnsinotify="${adb_dnsinotify:-"0"}"
adb_dnsinstance="${adb_dnsinstance:-"0"}"
adb_dnsuser="${adb_dnsuser:-"unbound"}"
adb_dnsdir="${adb_dnsdir:-"/var/lib/unbound"}"
adb_dnsheader="${adb_dnsheader}"
adb_dnsdeny="${adb_dnsdeny:-"${adb_awk} '{print \"local-zone: \\042\"\$0\"\\042 static\"}'"}"
adb_dnsallow="${adb_dnsallow:-"${adb_awk} '{print \"local-zone: \\042\"\$0\"\\042 transparent\"}'"}"
adb_dnssafesearch="${adb_dnssafesearch:-"${adb_awk} -v item=\"\$item\" '{type=\"AAAA\";if(match(item,/^([0-9]{1,3}\.){3}[0-9]{1,3}$/)){type=\"A\"}}{print \"local-data: \\042\"\$0\" \"type\" \"item\"\\042\"}'"}"
adb_dnsstop="${adb_dnsstop:-"local-zone: \".\" static"}"
;;
"named")
adb_dnsinotify="${adb_dnsinotify:-"0"}"
adb_dnsinstance="${adb_dnsinstance:-"0"}"
adb_dnsuser="${adb_dnsuser:-"bind"}"
adb_dnsdir="${adb_dnsdir:-"/var/lib/bind"}"
adb_dnsheader="${adb_dnsheader:-"\$TTL 2h\n@ IN SOA localhost. root.localhost. (1 6h 1h 1w 2h)\n IN NS localhost.\n"}"
adb_dnsdeny="${adb_dnsdeny:-"${adb_awk} '{print \"\"\$0\" CNAME .\\n*.\"\$0\" CNAME .\"}'"}"
adb_dnsallow="${adb_dnsallow:-"${adb_awk} '{print \"\"\$0\" CNAME rpz-passthru.\\n*.\"\$0\" CNAME rpz-passthru.\"}'"}"
adb_dnssafesearch="${adb_dnssafesearch:-"${adb_awk} -v item=\"\$item\" '{print \"\"\$0\" CNAME \"item\".\\n*.\"\$0\" CNAME \"item\".\"}'"}"
adb_dnsstop="${adb_dnsstop:-"* CNAME ."}"
;;
"kresd")
adb_dnsinotify="${adb_dnsinotify:-"0"}"
adb_dnsinstance="${adb_dnsinstance:-"0"}"
adb_dnsuser="${adb_dnsuser:-"root"}"
adb_dnsdir="${adb_dnsdir:-"/etc/kresd"}"
adb_dnsheader="${adb_dnsheader:-"\$TTL 2h\n@ IN SOA localhost. root.localhost. (1 6h 1h 1w 2h)\n"}"
adb_dnsdeny="${adb_dnsdeny:-"${adb_awk} '{print \"\"\$0\" CNAME .\\n*.\"\$0\" CNAME .\"}'"}"
adb_dnsallow="${adb_dnsallow:-"${adb_awk} '{print \"\"\$0\" CNAME rpz-passthru.\\n*.\"\$0\" CNAME rpz-passthru.\"}'"}"
adb_dnssafesearch="${adb_dnssafesearch:-"${adb_awk} -v item=\"\$item\" '{type=\"AAAA\";if(match(item,/^([0-9]{1,3}\.){3}[0-9]{1,3}$/)){type=\"A\"}}{print \"\"\$0\" \"type\" \"item\"\"}'"}"
adb_dnsstop="${adb_dnsstop:-"* CNAME ."}"
;;
"raw")
adb_dnsinotify="${adb_dnsinotify:-"0"}"
adb_dnsinstance="${adb_dnsinstance:-"0"}"
adb_dnsuser="${adb_dnsuser:-"root"}"
adb_dnsdir="${adb_dnsdir:-"/tmp"}"
adb_dnsheader="${adb_dnsheader}"
adb_dnsdeny="${adb_dnsdeny:-"0"}"
adb_dnsallow="${adb_dnsallow:-"1"}"
adb_dnssafesearch="${adb_dnssafesearch:-"0"}"
adb_dnsstop="${adb_dnsstop:-"0"}"
;;
esac
fi
if [ "${adb_dns}" != "raw" ] && { [ -z "${adb_dns}" ] || [ ! -x "$(command -v "${adb_dns}")" ]; }
then
f_log "err" "dns backend not found, please set 'adb_dns' manually"
fi
if [ "${adb_dns}" != "raw" ] && { [ "${adb_dnsdir}" = "${adb_tmpbase}" ] || [ "${adb_dnsdir}" = "${adb_backupdir}" ] || \
[ "${adb_dnsdir}" = "${adb_reportdir}" ] || [ "${adb_dnsdir}" = "${adb_jaildir}" ]; }
then
f_log "err" "dns directory '${adb_dnsdir}' has been misconfigured, it must not point to the 'adb_tmpbase', 'adb_backupdir', 'adb_reportdir' or 'adb_jaildir'"
fi
if [ "${adb_action}" = "start" ] && [ -z "${adb_trigger}" ]
then
sleep ${adb_triggerdelay}
fi
if [ "${adb_dns}" != "raw" ] && [ "${adb_action}" != "stop" ]
then
while [ "${cnt}" -le 30 ]
do
dns_up="$(ubus -S call service list "{\"name\":\"${adb_dns}\"}" 2>/dev/null | jsonfilter -l1 -e "@[\"${adb_dns}\"].instances.*.running" 2>/dev/null)"
if [ "${dns_up}" = "true" ]
then
break
fi
sleep 1
cnt=$((cnt+1))
done
if [ -n "${adb_dnsdir}" ] && [ ! -d "${adb_dnsdir}" ]
then
mkdir -p "${adb_dnsdir}"
if [ "${?}" -eq 0 ]
then
f_log "info" "dns backend directory '${adb_dnsdir}' created"
else
f_log "err" "dns backend directory '${adb_dnsdir}' could not be created"
fi
fi
if [ ! -f "${adb_dnsdir}/${adb_dnsfile}" ]
then
printf "${adb_dnsheader}" > "${adb_dnsdir}/${adb_dnsfile}"
fi
if [ "${dns_up}" != "true" ]
then
f_dnsup 4
if [ "${?}" -ne 0 ]
then
f_log "err" "dns backend '${adb_dns}' not running or executable"
fi
fi
if [ "${adb_backup}" -eq 1 ] && [ -n "${adb_backupdir}" ] && [ ! -d "${adb_backupdir}" ]
then
mkdir -p "${adb_backupdir}"
if [ "${?}" -eq 0 ]
then
f_log "info" "backup directory '${adb_backupdir}' created"
else
f_log "err" "backup backend directory '${adb_backupdir}' could not be created"
fi
fi
if [ -n "${adb_jaildir}" ] && [ ! -d "${adb_jaildir}" ]
then
mkdir -p "${adb_jaildir}"
if [ "${?}" -eq 0 ]
then
f_log "info" "jail directory '${adb_jaildir}' created"
else
f_log "err" "jail directory '${adb_jaildir}' could not be created"
fi
fi
fi
f_log "debug" "f_dns ::: dns: ${adb_dns}, dns_dir: ${adb_dnsdir}, dns_file: ${adb_dnsfile}, dns_user: ${adb_dnsuser}, dns_inotify: ${adb_dnsinotify}, dns_instance: ${adb_dnsinstance}, backup: ${adb_backup}, backup_dir: ${adb_backupdir}, jail_dir: ${adb_jaildir}"
}
# load fetch utility
#
f_fetch()
{
local util utils cnt=0
# auto-detect a download utility with ssl support; the detected choice
# is persisted back to the uci config
if [ -z "${adb_fetchutil}" ]
then
utils="aria2c curl wget uclient-fetch"
for util in ${utils}
do
if { [ "${util}" = "uclient-fetch" ] && [ -n "$(printf "%s" "${adb_packages}" | grep "^libustream-")" ]; } || \
{ [ "${util}" = "wget" ] && [ -n "$(printf "%s" "${adb_packages}" | grep "^wget -")" ]; } || \
[ "${util}" = "curl" ] || [ "${util}" = "aria2c" ]
then
if [ -x "$(command -v "${util}")" ]
then
adb_fetchutil="${util}"
uci_set adblock global adb_fetchutil "${util}"
f_uci "adblock"
break
fi
fi
done
elif [ ! -x "$(command -v "${adb_fetchutil}")" ]
then
unset adb_fetchutil
fi
# per-utility default download parameters (only applied when the user
# did not configure adb_fetchparm explicitly)
case "${adb_fetchutil}" in
"aria2c")
adb_fetchparm="${adb_fetchparm:-"--timeout=20 --allow-overwrite=true --auto-file-renaming=false --check-certificate=true --dir= -o"}"
;;
"curl")
adb_fetchparm="${adb_fetchparm:-"--connect-timeout 20 --silent --show-error --location -o"}"
;;
"uclient-fetch")
adb_fetchparm="${adb_fetchparm:-"--timeout=20 -O"}"
;;
"wget")
adb_fetchparm="${adb_fetchparm:-"--no-cache --no-cookies --max-redirect=0 --timeout=20 -O"}"
;;
esac
if [ -n "${adb_fetchutil}" ] && [ -n "${adb_fetchparm}" ]
then
adb_fetchutil="$(command -v "${adb_fetchutil}")"
else
f_log "err" "download utility with SSL support not found, please install 'uclient-fetch' with a 'libustream-*' variant or another download utility like 'wget', 'curl' or 'aria2'"
fi
f_log "debug" "f_fetch ::: fetch_util: ${adb_fetchutil:-"-"}, fetch_parm: ${adb_fetchparm:-"-"}"
}
# create temporary files, directories and set dependent options
#
f_temp()
{
	# Create the temporary working directory/file names and derive the
	# sort(1) options from the detected number of cpu cores.
	local cpu core cores
	cpu="$(grep -c '^processor' /proc/cpuinfo 2>/dev/null)"
	core="$(grep -cm1 '^core id' /proc/cpuinfo 2>/dev/null)"
	# BUGFIX: guard against an empty grep result (e.g. unreadable
	# /proc/cpuinfo) as well as a zero count - the former made the old
	# '[ "" -eq 0 ]' numeric test fail and left 'cores' uncomputable
	if [ "${cpu:-0}" -lt 1 ]
	then
		cpu=1
	fi
	if [ "${core:-0}" -lt 1 ]
	then
		core=1
	fi
	cores=$((cpu*core))
	if [ -d "${adb_tmpbase}" ]
	then
		adb_tmpdir="$(mktemp -p "${adb_tmpbase}" -d)"
		# '-u' only reserves unique names, the files are created on demand
		adb_tmpload="$(mktemp -p "${adb_tmpdir}" -tu)"
		adb_tmpfile="$(mktemp -p "${adb_tmpdir}" -tu)"
		adb_srtopts="--temporary-directory=${adb_tmpdir} --compress-program=gzip --batch-size=32 --parallel=${cores}"
	else
		f_log "err" "the temp base directory '${adb_tmpbase}' does not exist/is not mounted yet, please create the directory or raise the 'adb_triggerdelay' to defer the adblock start"
	fi
	# remember our pid once (file may already hold it from a prior call)
	if [ ! -s "${adb_pidfile}" ]
	then
		printf "%s" "${$}" > "${adb_pidfile}"
	fi
	f_log "debug" "f_temp ::: tmp_base: ${adb_tmpbase:-"-"}, tmp_dir: ${adb_tmpdir:-"-"}, cores: ${cores:-"-"}, sort_options: ${adb_srtopts}, pid_file: ${adb_pidfile:-"-"}"
}
# remove temporary files and directories
#
f_rmtemp()
{
	# Drop all transient state: the temporary working directory, the
	# unpacked source catalog and the pid file content.
	[ -d "${adb_tmpdir}" ] && rm -rf "${adb_tmpdir}"
	rm -f "${adb_srcfile}"
	: > "${adb_pidfile}"
	f_log "debug" "f_rmtemp ::: tmp_dir: ${adb_tmpdir:-"-"}, src_file: ${adb_srcfile:-"-"}, pid_file: ${adb_pidfile:-"-"}"
}
# remove dns related files
#
f_rmdns()
{
local status
# check whether the adblock service itself is (still) registered
status="$(ubus -S call service list '{"name":"adblock"}' 2>/dev/null | jsonfilter -l1 -e '@["adblock"].instances.*.running' 2>/dev/null)"
if [ "${adb_dns}" = "raw" ] || { [ -n "${adb_dns}" ] && [ -n "${status}" ]; }
then
> "${adb_rtfile}"
if [ "${adb_backup}" -eq 1 ]
then
rm -f "${adb_backupdir}/${adb_dnsprefix}".*.gz
fi
# reset the blocklist to the bare header and reload the dns backend
printf "${adb_dnsheader}" > "${adb_dnsdir}/${adb_dnsfile}"
f_dnsup 4
fi
f_rmtemp
f_log "debug" "f_rmdns ::: dns: ${adb_dns}, status: ${status:-"-"}, dns_dir: ${adb_dnsdir}, dns_file: ${adb_dnsfile}, rt_file: ${adb_rtfile}, backup_dir: ${adb_backupdir:-"-"}"
}
# commit uci changes
#
f_uci()
{
local change config="${1}"
if [ -n "${config}" ]
then
# only commit (and reload the affected service) when there are
# uncommitted changes for this config
change="$(uci -q changes "${config}" | "${adb_awk}" '{ORS=" "; print $0}')"
if [ -n "${change}" ]
then
uci_commit "${config}"
case "${config}" in
"firewall")
"/etc/init.d/firewall" reload >/dev/null 2>&1
;;
"resolver")
# resolver changes invalidate the current blocklist state:
# reset to the bare header, refresh counters and runtime json
printf "${adb_dnsheader}" > "${adb_dnsdir}/${adb_dnsfile}"
f_count
f_jsnup "running"
"/etc/init.d/${adb_dns}" reload >/dev/null 2>&1
;;
esac
fi
f_log "debug" "f_uci ::: config: ${config}, change: ${change}"
fi
}
# get list counter
#
f_count()
{
# Update the global adb_cnt with the line count of the file that
# belongs to the given mode (and, for 'final', a derived domain count).
local file mode="${1}" name="${2}"
adb_cnt=0
case "${mode}" in
"blacklist")
if [ -s "${adb_tmpfile}.${name}" ]
then
adb_cnt="$(wc -l 2>/dev/null < "${adb_tmpfile}.${name}")"
fi
;;
"whitelist")
if [ -s "${adb_tmpdir}/tmp.raw.${name}" ]
then
adb_cnt="$(wc -l 2>/dev/null < "${adb_tmpdir}/tmp.raw.${name}")"
rm -f "${adb_tmpdir}/tmp.raw.${name}"
fi
;;
"safesearch")
if [ -s "${adb_tmpdir}/tmp.safesearch.${name}" ]
then
adb_cnt="$(wc -l 2>/dev/null < "${adb_tmpdir}/tmp.safesearch.${name}")"
fi
;;
"merge")
if [ -s "${adb_tmpdir}/${adb_dnsfile}" ]
then
adb_cnt="$(wc -l 2>/dev/null < "${adb_tmpdir}/${adb_dnsfile}")"
fi
;;
"download"|"backup"|"restore")
if [ -s "${src_tmpfile}" ]
then
adb_cnt="$(wc -l 2>/dev/null < "${src_tmpfile}")"
fi
;;
"final")
# overall count: subtract whitelist additions, safesearch entries
# and header lines from the final file
if [ -s "${adb_dnsdir}/${adb_dnsfile}" ]
then
adb_cnt="$(wc -l 2>/dev/null < "${adb_dnsdir}/${adb_dnsfile}")"
if [ -s "${adb_tmpdir}/tmp.add.whitelist" ]
then
adb_cnt=$((adb_cnt-$(wc -l 2>/dev/null < "${adb_tmpdir}/tmp.add.whitelist")))
fi
for file in "${adb_tmpdir}/tmp.safesearch".*
do
if [ -r "${file}" ]
then
adb_cnt=$((adb_cnt-$(wc -l 2>/dev/null < "${file}")))
fi
done
# the '/2' presumably accounts for backends that emit two records
# per blocked domain when a header is present - confirm per backend
if [ -n "${adb_dnsheader}" ]
then
adb_cnt=$(((adb_cnt-$(printf "${adb_dnsheader}" | grep -c "^"))/2))
fi
fi
;;
esac
}
# set external config options
#
f_extconf()
{
# Adjust foreign uci configs (dhcp/resolver/firewall) so the dns
# backend picks up the blocklist and, optionally, dns is force-redirected.
local config config_dir config_file port fwcfg
case "${adb_dns}" in
"dnsmasq")
config="dhcp"
config_dir="$(uci_get dhcp "@dnsmasq[${adb_dnsinstance}]" confdir | grep -Fo "${adb_dnsdir}")"
if [ "${adb_enabled}" -eq 1 ] && [ -z "${config_dir}" ]
then
uci_set dhcp "@dnsmasq[${adb_dnsinstance}]" confdir "${adb_dnsdir}" 2>/dev/null
fi
;;
"kresd")
config="resolver"
config_file="$(uci_get resolver kresd rpz_file | grep -Fo "${adb_dnsdir}/${adb_dnsfile}")"
if [ "${adb_enabled}" -eq 1 ] && [ -z "${config_file}" ]
then
uci -q add_list resolver.kresd.rpz_file="${adb_dnsdir}/${adb_dnsfile}"
elif [ "${adb_enabled}" -eq 0 ] && [ -n "${config_file}" ]
then
uci -q del_list resolver.kresd.rpz_file="${adb_dnsdir}/${adb_dnsfile}"
fi
;;
esac
f_uci "${config}"
config="firewall"
fwcfg="$(uci -qNX show "${config}")"
# force-dns: create one DNAT redirect per configured port when enabled,
# remove the redirects again when disabled
if [ "${adb_enabled}" -eq 1 ] && [ "${adb_forcedns}" -eq 1 ] && \
[ "$(/etc/init.d/firewall enabled; printf "%u" ${?})" -eq 0 ]
then
for port in ${adb_portlist}
do
if [ -z "$(printf "%s" "${fwcfg}" | grep -Fo -m1 "adblock_dns_${port}")" ]
then
uci -q batch <<-EOC
set firewall."adblock_dns_${port}"="redirect"
set firewall."adblock_dns_${port}".name="Adblock DNS, port ${port}"
set firewall."adblock_dns_${port}".src="lan"
set firewall."adblock_dns_${port}".proto="tcp udp"
set firewall."adblock_dns_${port}".src_dport="${port}"
set firewall."adblock_dns_${port}".dest_port="${port}"
set firewall."adblock_dns_${port}".target="DNAT"
EOC
fi
done
elif [ "${adb_enabled}" -eq 0 ] || [ "${adb_forcedns}" -eq 0 ]
then
for port in ${adb_portlist}
do
if [ -n "$(printf "%s" "${fwcfg}" | grep -Fo -m1 "adblock_dns_${port}")" ]
then
uci_remove firewall "adblock_dns_${port}"
fi
done
fi
f_uci "${config}"
}
# restart dns backend
#
f_dnsup()
{
# Restart the dns backend and wait until it is up and has finished
# loading; returns 0 on success, the default 4 otherwise.
local dns_service dns_up dns_pid dns_procfile restart_rc cnt=0 out_rc=4 in_rc="${1:-0}"
if [ "${adb_dns}" = "raw" ] || { [ "${in_rc}" -eq 0 ] && [ "${adb_dnsinotify}" -eq 1 ]; }
then
out_rc=0
else
"/etc/init.d/${adb_dns}" restart >/dev/null 2>&1
restart_rc="${?}"
fi
if [ "${restart_rc}" = "0" ]
then
while [ "${cnt}" -le "${adb_dnstimeout}" ]
do
dns_service="$(ubus -S call service list "{\"name\":\"${adb_dns}\"}")"
dns_up="$(printf "%s" "${dns_service}" | jsonfilter -l1 -e "@[\"${adb_dns}\"].instances.*.running")"
dns_pid="$(printf "%s" "${dns_service}" | jsonfilter -l1 -e "@[\"${adb_dns}\"].instances.*.pid")"
# presumably waits until the backend no longer holds the blocklist
# file open, i.e. it finished loading - confirm
dns_procfile="$(ls -l "/proc/${dns_pid}/fd" 2>/dev/null | grep -Fo "${adb_dnsdir}/${adb_dnsfile}")"
if [ "${dns_up}" = "true" ] && [ -n "${dns_pid}" ] && [ -z "${dns_procfile}" ]
then
# verify resolution with a reference lookup when possible,
# otherwise just wait out the full timeout once
if [ -x "${adb_lookupcmd}" ] && [ "${adb_lookupdomain}" != "false" ]
then
"${adb_lookupcmd}" "${adb_lookupdomain}" >/dev/null 2>&1
if [ "${?}" -eq 0 ]
then
out_rc=0
break
fi
else
sleep ${adb_dnstimeout}
cnt=${adb_dnstimeout}
out_rc=0
break
fi
fi
cnt=$((cnt+1))
sleep 1
done
fi
f_log "debug" "f_dnsup ::: lookup_util: ${adb_lookupcmd:-"-"}, lookup_domain: ${adb_lookupdomain:-"-"}, restart_rc: ${restart_rc:-"-"}, dns_timeout: ${adb_dnstimeout}, dns_cnt: ${cnt}, in_rc: ${in_rc}, out_rc: ${out_rc}"
return "${out_rc}"
}
# backup/restore/remove blocklists
#
f_list()
{
local hold file rset item array safe_url safe_ips safe_cname safe_domains out_rc mode="${1}" src_name="${2:-"${src_name}"}" in_rc="${src_rc:-0}" cnt=1 ffiles="-maxdepth 1 -name ${adb_dnsprefix}.*.gz"
case "${mode}" in
"blacklist"|"whitelist")
src_name="${mode}"
if [ "${src_name}" = "blacklist" ] && [ -s "${adb_blacklist}" ]
then
rset="/^([[:alnum:]_-]{1,63}\\.)+[[:alpha:]]+([[:space:]]|$)/{print tolower(\$1)}"
"${adb_awk}" "${rset}" "${adb_blacklist}" | \
"${adb_awk}" 'BEGIN{FS="."}{for(f=NF;f>1;f--)printf "%s.",$f;print $1}' > "${adb_tmpdir}/tmp.raw.${src_name}"
sort ${adb_srtopts} -u "${adb_tmpdir}/tmp.raw.${src_name}" 2>/dev/null > "${adb_tmpfile}.${src_name}"
out_rc="${?}"
rm -f "${adb_tmpdir}/tmp.raw.${src_name}"
elif [ "${src_name}" = "whitelist" ] && [ -s "${adb_whitelist}" ]
then
rset="/^([[:alnum:]_-]{1,63}\\.)+[[:alpha:]]+([[:space:]]|$)/{print tolower(\$1)}"
"${adb_awk}" "${rset}" "${adb_whitelist}" > "${adb_tmpdir}/tmp.raw.${src_name}"
out_rc="${?}"
if [ "${out_rc}" -eq 0 ]
then
rset="/^([[:alnum:]_-]{1,63}\\.)+[[:alpha:]]+([[:space:]]|$)/{gsub(\"\\\\.\",\"\\\\.\",\$1);print tolower(\"^(|.*\\\\.)\"\$1\"$\")}"
"${adb_awk}" "${rset}" "${adb_tmpdir}/tmp.raw.${src_name}" > "${adb_tmpdir}/tmp.rem.${src_name}"
out_rc="${?}"
if [ "${out_rc}" -eq 0 ] && [ "${adb_dnsallow}" != "1" ]
then
eval "${adb_dnsallow}" "${adb_tmpdir}/tmp.raw.${src_name}" > "${adb_tmpdir}/tmp.add.${src_name}"
out_rc="${?}"
if [ "${out_rc}" -eq 0 ] && [ "${adb_jail}" = "1" ] && [ "${adb_dnsstop}" != "0" ]
then
> "${adb_jaildir}/${adb_dnsjail}"
if [ -n "${adb_dnsheader}" ]
then
printf "${adb_dnsheader}" >> "${adb_jaildir}/${adb_dnsjail}"
fi
cat "${adb_tmpdir}/tmp.add.${src_name}" >> "${adb_jaildir}/${adb_dnsjail}"
printf "%s\n" "${adb_dnsstop}" >> "${adb_jaildir}/${adb_dnsjail}"
fi
fi
fi
fi
;;
"safesearch")
case "${src_name}" in
"google")
rset="/^(\\.[[:alnum:]_-]{1,63}\\.)+[[:alpha:]]+([[:space:]]|$)/{printf \"%s\n%s\n\",tolower(\"www\"\$1),tolower(substr(\$1,2,length(\$1)))}"
safe_url="https://www.google.com/supported_domains"
safe_ips="216.239.38.120 2001:4860:4802:32::78"
safe_cname="forcesafesearch.google.com"
safe_domains="${adb_tmpdir}/tmp.load.safesearch.${src_name}"
if [ "${adb_backup}" -eq 1 ] && [ -s "${adb_backupdir}/safesearch.${src_name}.gz" ]
then
zcat "${adb_backupdir}/safesearch.${src_name}.gz" > "${safe_domains}"
out_rc="${?}"
else
"${adb_fetchutil}" ${adb_fetchparm} "${safe_domains}" "${safe_url}" 2>/dev/null
out_rc="${?}"
if [ "${adb_backup}" -eq 1 ] && [ "${out_rc}" -eq 0 ]
then
gzip -cf "${safe_domains}" > "${adb_backupdir}/safesearch.${src_name}.gz"
fi
fi
if [ "${out_rc}" -eq 0 ]
then
"${adb_awk}" "${rset}" "${safe_domains}" > "${adb_tmpdir}/tmp.raw.safesearch.${src_name}"
out_rc="${?}"
fi
;;
"bing")
safe_ips="204.79.197.220 ::FFFF:CC4F:C5DC"
safe_cname="strict.bing.com"
safe_domains="www.bing.com"
printf "%s\n" ${safe_domains} > "${adb_tmpdir}/tmp.raw.safesearch.${src_name}"
out_rc="${?}"
;;
"duckduckgo")
safe_ips="50.16.250.179 54.208.102.2 52.204.96.252"
safe_cname="safe.duckduckgo.com"
safe_domains="duckduckgo.com"
printf "%s\n" ${safe_domains} > "${adb_tmpdir}/tmp.raw.safesearch.${src_name}"
out_rc="${?}"
;;
"pixabay")
safe_ips="104.18.82.97 2606:4700::6812:8d57 2606:4700::6812:5261"
safe_cname="safesearch.pixabay.com"
safe_domains="pixabay.com"
printf "%s\n" ${safe_domains} > "${adb_tmpdir}/tmp.raw.safesearch.${src_name}"
out_rc="${?}"
;;
"yandex")
safe_ips="213.180.193.56"
safe_cname="familysearch.yandex.ru"
safe_domains="ya.ru yandex.ru yandex.com yandex.com.tr yandex.ua yandex.by yandex.ee yandex.lt yandex.lv yandex.md yandex.uz yandex.tm yandex.tj yandex.az"
printf "%s\n" ${safe_domains} > "${adb_tmpdir}/tmp.raw.safesearch.${src_name}"
out_rc="${?}"
;;
"youtube")
if [ "${adb_safesearchmod}" -eq 0 ]
then
safe_ips="216.239.38.120 2001:4860:4802:32::78"
safe_cname="restrict.youtube.com"
else
safe_ips="216.239.38.119 2001:4860:4802:32::77"
safe_cname="restrictmoderate.youtube.com"
fi
safe_domains="www.youtube.com m.youtube.com youtubei.googleapis.com youtube.googleapis.com www.youtube-nocookie.com"
printf "%s\n" ${safe_domains} > "${adb_tmpdir}/tmp.raw.safesearch.${src_name}"
out_rc="${?}"
;;
esac
if [ "${out_rc}" -eq 0 ]
then
> "${adb_tmpdir}/tmp.safesearch.${src_name}"
if [ "${adb_dns}" = "named" ]
then
array="${safe_cname}"
else
array="${safe_ips}"
fi
for item in ${array}
do
eval "${adb_dnssafesearch}" "${adb_tmpdir}/tmp.raw.safesearch.${src_name}" >> "${adb_tmpdir}/tmp.safesearch.${src_name}"
if [ "${?}" -ne 0 ]
then
rm -f "${adb_tmpdir}/tmp.safesearch.${src_name}"
break
fi
done
out_rc="${?}"
rm -f "${adb_tmpdir}/tmp.raw.safesearch.${src_name}"
fi
;;
"backup")
(
gzip -cf "${src_tmpfile}" > "${adb_backupdir}/${adb_dnsprefix}.${src_name}.gz"
out_rc="${?}"
)&
;;
"restore")
if [ -n "${src_name}" ] && [ -s "${adb_backupdir}/${adb_dnsprefix}.${src_name}.gz" ]
then
zcat "${adb_backupdir}/${adb_dnsprefix}.${src_name}.gz" > "${src_tmpfile}"
out_rc="${?}"
elif [ -z "${src_name}" ]
then
for file in "${adb_backupdir}/${adb_dnsprefix}".*.gz
do
if [ -r "${file}" ]
then
name="${file##*/}"
name="${name%.*}"
zcat "${file}" > "${adb_tmpfile}.${name}" &
hold=$((cnt%adb_maxqueue))
if [ "${hold}" -eq 0 ]
then
wait
fi
cnt=$((cnt+1))
fi
done
wait
out_rc="${?}"
else
out_rc=4
fi
if [ "${adb_action}" != "start" ] && [ "${adb_action}" != "resume" ] && [ -n "${src_name}" ] && [ "${out_rc}" -ne 0 ]
then
adb_sources="${adb_sources/${src_name}}"
fi
;;
"remove")
if [ "${adb_backup}" -eq 1 ]
then
rm "${adb_backupdir}/${adb_dnsprefix}.${src_name}.gz" 2>/dev/null
fi
out_rc="${?}"
adb_sources="${adb_sources/${src_name}}"
;;
"merge")
if [ "${adb_backup}" -eq 1 ]
then
for src_name in ${adb_sources}
do
ffiles="${ffiles} -a ! -name ${adb_dnsprefix}.${src_name}.gz"
done
if [ "${adb_safesearch}" -eq 1 ] && [ "${adb_dnssafesearch}" != "0" ]
then
ffiles="${ffiles} -a ! -name safesearch.google.gz"
fi
find "${adb_backupdir}" ${ffiles} -print0 2>/dev/null | xargs -0 rm 2>/dev/null
fi
unset src_name
sort ${adb_srtopts} -mu "${adb_tmpfile}".* 2>/dev/null > "${adb_tmpdir}/${adb_dnsfile}"
out_rc="${?}"
rm -f "${adb_tmpfile}".*
;;
"final")
unset src_name
if [ -n "${adb_dnsheader}" ]
then
printf "${adb_dnsheader}" > "${adb_dnsdir}/${adb_dnsfile}"
else
> "${adb_dnsdir}/${adb_dnsfile}"
fi
if [ -s "${adb_tmpdir}/tmp.add.whitelist" ]
then
cat "${adb_tmpdir}/tmp.add.whitelist" >> "${adb_dnsdir}/${adb_dnsfile}"
fi
for file in "${adb_tmpdir}/tmp.safesearch".*
do
if [ -r "${file}" ]
then
cat "${file}" >> "${adb_dnsdir}/${adb_dnsfile}"
fi
done
if [ "${adb_dnsdeny}" != "0" ]
then
eval "${adb_dnsdeny}" "${adb_tmpdir}/${adb_dnsfile}" >> "${adb_dnsdir}/${adb_dnsfile}"
else
mv "${adb_tmpdir}/${adb_dnsfile}" "${adb_dnsdir}/${adb_dnsfile}"
fi
out_rc="${?}"
;;
esac
f_count "${mode}" "${src_name}"
out_rc="${out_rc:-"${in_rc}"}"
f_log "debug" "f_list ::: name: ${src_name:-"-"}, mode: ${mode}, cnt: ${adb_cnt}, in_rc: ${in_rc}, out_rc: ${out_rc}"
return "${out_rc}"
}
# top level domain compression
#
f_tld()
{
	# Top level domain compression: the input file contains one domain per
	# line with reversed label order (e.g. 'com.example') and parents
	# sorted before their subdomains; the first awk drops every entry that
	# is covered by a preceding parent domain, the second awk restores the
	# normal label order. On success the input file is replaced in place.
	# (unused local 'cnt' from the original was removed)
	local cnt_tld source="${1}" temp_tld="${1}.tld"
	"${adb_awk}" '{if(NR==1){tld=$NF};while(getline){if(index($NF,tld".")==0){print tld;tld=$NF}}print tld}' "${source}" | \
	"${adb_awk}" 'BEGIN{FS="."}{for(f=NF;f>1;f--)printf "%s.",$f;print $1}' > "${temp_tld}"
	if [ "${?}" -eq 0 ]
	then
		mv -f "${temp_tld}" "${source}"
		cnt_tld="$(wc -l 2>/dev/null < "${source}")"
	else
		rm -f "${temp_tld}"
	fi
	f_log "debug" "f_tld ::: source: ${source}, cnt: ${adb_cnt:-"-"}, cnt_tld: ${cnt_tld:-"-"}"
}
# suspend/resume adblock processing
#
f_switch()
{
# Suspend or resume adblock processing based on the persisted runtime
# status: suspend empties the active blocklist, resume rebuilds it.
# NOTE(review): local 'entry' appears unused in this function
local status entry done="false" mode="${1}"
json_load_file "${adb_rtfile}" >/dev/null 2>&1
json_select "data" >/dev/null 2>&1
json_get_var status "adblock_status"
if [ "${mode}" = "suspend" ] && [ "${status}" = "enabled" ]
then
f_env
printf "${adb_dnsheader}" > "${adb_dnsdir}/${adb_dnsfile}"
f_count
done="true"
elif [ "${mode}" = "resume" ] && [ "${status}" = "paused" ]
then
f_env
f_main
done="true"
fi
if [ "${done}" = "true" ]
then
# stop/start the background ubus monitor to match the new state and
# reload the dns backend on suspend
if [ "${mode}" = "suspend" ]
then
f_bgserv "stop"
f_dnsup
fi
if [ "${mode}" = "resume" ]
then
f_bgserv "start"
fi
f_jsnup "${mode}"
f_log "info" "${mode} adblock processing"
fi
f_rmtemp
}
# query blocklist for certain (sub-)domains
#
f_query()
{
# Search the active blocklist, the gzipped backups and the local
# black-/whitelist for a given (sub-)domain and print the matches.
local search result prefix suffix field query_start query_end query_timeout=30 domain="${1}" tld="${1#*.}"
if [ -z "${domain}" ] || [ "${domain}" = "${tld}" ]
then
printf "%s\\n" "::: invalid input, please submit a single (sub-)domain :::"
else
# per-backend regex fragments and the awk field that holds the domain
case "${adb_dns}" in
"dnsmasq")
prefix=".*[\\/\\.]"
suffix="(\\/)"
field=2
;;
"unbound")
prefix=".*[\"\\.]"
suffix="(static)"
field=3
;;
"named")
prefix="[^\\*].*[\\.]"
suffix="( \\.)"
field=1
;;
"kresd")
prefix="[^\\*].*[\\.]"
suffix="( \\.)"
field=1
;;
"raw")
prefix=".*[\\.]"
suffix=""
field=1
;;
esac
query_start="$(date "+%s")"
if [ "${adb_dnsfilereset}" -eq 0 ]
then
# walk up the domain hierarchy (sub.example.com -> example.com ...)
# and report up to 9 matches per level from the active blocklist
while [ "${domain}" != "${tld}" ]
do
search="${domain//[+*~%\$&\"\']/}"
search="${search//./\\.}"
result="$("${adb_awk}" -F '/|\"|\t| ' "/^(${search}|${prefix}+${search}.*${suffix})$/{i++;if(i<=9){printf \" + %s\\n\",\$${field}}else if(i==10){printf \" + %s\\n\",\"[...]\";exit}}" "${adb_dnsdir}/${adb_dnsfile}")"
printf "%s\\n%s\\n%s\\n" ":::" "::: domain '${domain}' in active blocklist" ":::"
printf "%s\\n\\n" "${result:-" - no match"}"
domain="${tld}"
tld="${domain#*.}"
done
fi
if [ "${adb_backup}" -eq 1 ] && [ -d "${adb_backupdir}" ]
then
search="${1//[+*~%\$&\"\']/}"
search="${search//./\\.}"
printf "%s\\n%s\\n%s\\n" ":::" "::: domain '${1}' in backups and black-/whitelist" ":::"
# backups store reversed label order, hence the un-reversing awk;
# the overall scan is capped by query_timeout seconds
for file in "${adb_backupdir}/${adb_dnsprefix}".*.gz "${adb_blacklist}" "${adb_whitelist}"
do
suffix="${file##*.}"
if [ "${suffix}" = "gz" ]
then
zcat "${file}" 2>/dev/null | \
"${adb_awk}" 'BEGIN{FS="."}{for(f=NF;f>1;f--)printf "%s.",$f;print $1}' | "${adb_awk}" -v f="${file##*/}" "BEGIN{rc=1};/^($search|.*\\.${search})$/{i++;if(i<=3){printf \" + %-30s%s\\n\",f,\$1;rc=0}else if(i==4){printf \" + %-30s%s\\n\",f,\"[...]\"}};END{exit rc}"
else
"${adb_awk}" -v f="${file##*/}" "BEGIN{rc=1};/^($search|.*\\.${search})$/{i++;if(i<=3){printf \" + %-30s%s\\n\",f,\$1;rc=0}else if(i==4){printf \" + %-30s%s\\n\",f,\"[...]\"}};END{exit rc}" "${file}"
fi
if [ "${?}" -eq 0 ]
then
result="true"
query_end="$(date "+%s")"
if [ "$((query_end-query_start))" -gt "${query_timeout}" ]
then
printf "%s\\n\\n" " - [...]"
break
fi
fi
done
if [ "${result}" != "true" ]
then
printf "%s\\n\\n" " - no match"
fi
fi
fi
}
# update runtime information
#
f_jsnup()
{
	# Rebuild the adblock runtime information json file and optionally
	# kick off the mail notification service in the background.
	# $1: runtime status - "enabled" (default), "error", "suspend" or "resume"
	local runtime utils memory bg_pid status="${1:-"enabled"}"
	case "${status}" in
		"enabled"|"error")
			adb_endtime="$(date "+%s")"
			# total/free/available memory in MB, joined with "/"
			memory="$("${adb_awk}" '/^MemTotal|^MemFree|^MemAvailable/{ORS="/"; print int($2/1000)}' "/proc/meminfo" 2>/dev/null | "${adb_awk}" '{print substr($0,1,length($0)-1)}')"
			if [ "$(( (adb_endtime-adb_starttime)/60 ))" -lt 60 ]
			then
				runtime="${adb_action}, $(( (adb_endtime-adb_starttime)/60 ))m $(( (adb_endtime-adb_starttime)%60 ))s, ${memory:-0}, $(date "+%d.%m.%Y %H:%M:%S")"
			else
				# implausibly long runtime, don't report a duration
				runtime="${adb_action}, n/a, ${memory:-0}, $(date "+%d.%m.%Y %H:%M:%S")"
			fi
			if [ "${status}" = "error" ]
			then
				adb_cnt=0
			fi
		;;
		"suspend")
			status="paused"
		;;
		"resume")
			# empty status falls back to "enabled" when written below
			status=""
		;;
	esac
	json_load_file "${adb_rtfile}" >/dev/null 2>&1
	json_select "data" >/dev/null 2>&1
	if [ "${?}" -eq 0 ]
	then
		# carry over prior values for fields not regenerated in this run
		if [ -z "${adb_fetchutil}" ] || [ -z "${adb_awk}" ]
		then
			json_get_var utils "utilities"
		else
			utils="${adb_fetchutil}, ${adb_awk}"
		fi
		if [ -z "${adb_cnt}" ]
		then
			json_get_var adb_cnt "blocked_domains"
			adb_cnt="${adb_cnt%% *}"
		fi
		if [ -z "${runtime}" ]
		then
			json_get_var runtime "last_run"
		fi
	fi
	# truncate and rewrite the runtime file from scratch
	> "${adb_rtfile}"
	json_load_file "${adb_rtfile}" >/dev/null 2>&1
	json_init
	json_add_object "data"
	json_add_string "adblock_status" "${status:-"enabled"}"
	json_add_string "adblock_version" "${adb_ver}"
	json_add_string "blocked_domains" "${adb_cnt:-0}"
	json_add_array "active_sources"
	for entry in ${adb_sources}
	do
		json_add_object
		json_add_string "source" "${entry}"
		json_close_object
	done
	json_close_array
	json_add_string "dns_backend" "${adb_dns:-"-"}, ${adb_dnsdir:-"-"}"
	json_add_string "run_utils" "${utils:-"-"}"
	json_add_string "run_ifaces" "trigger: ${adb_trigger:-"-"}, report: ${adb_repiface:-"-"}"
	json_add_string "run_directories" "base: ${adb_tmpbase}, backup: ${adb_backupdir}, report: ${adb_reportdir}, jail: ${adb_jaildir}"
	json_add_string "run_flags" "backup: ${adb_backup}, reset: ${adb_dnsfilereset}, flush: ${adb_dnsflush}, force: ${adb_forcedns}, search: ${adb_safesearch}, report: ${adb_report}, mail: ${adb_mail}, jail: ${adb_jail}"
	json_add_string "last_run" "${runtime:-"-"}"
	json_add_string "system" "${adb_sysver}"
	json_close_object
	json_dump > "${adb_rtfile}"
	# send a status mail in the background on error, or when the final
	# domain count drops to/below the configured threshold
	if [ "${adb_mail}" -eq 1 ] && [ -x "${adb_mailservice}" ] && \
		{ [ "${status}" = "error" ] || { [ "${status}" = "enabled" ] && [ "${adb_cnt}" -le "${adb_mailcnt}" ]; } }
	then
		( "${adb_mailservice}" "${adb_ver}" >/dev/null 2>&1 )&
		bg_pid="${!}"
	fi
	f_log "debug" "f_jsnup ::: status: ${status:-"-"}, cnt: ${adb_cnt}, mail: ${adb_mail}, mail_service: ${adb_mailservice}, mail_cnt: ${adb_mailcnt}, mail_pid: ${bg_pid:-"-"}"
}
# write to syslog
#
f_log()
{
	# Write a message to the system logger (or to stdout as a fallback)
	# and abort the whole script on any error-class message.
	# $1: log priority class (e.g. "info", "err", "debug")
	# $2: message text
	local class="${1}" log_msg="${2}"

	# skip empty messages; debug messages are only emitted in debug mode
	if [ -z "${log_msg}" ]
	then
		return 0
	fi
	if [ "${class}" = "debug" ]
	then
		[ "${adb_debug}" -eq 1 ] || return 0
	fi
	if [ -x "${adb_loggercmd}" ]
	then
		"${adb_loggercmd}" -p "${class}" -t "adblock-${adb_ver}[${$}]" "${log_msg}"
	else
		printf "%s %s %s\\n" "${class}" "adblock-${adb_ver}[${$}]" "${log_msg}"
	fi
	# an error is fatal: tear down the dns modifications, stop the ubus
	# monitor, flag the error in the runtime file and quit
	if [ "${class}" = "err" ]
	then
		f_rmdns
		f_bgserv "stop"
		f_jsnup "error"
		exit 1
	fi
}
# start ubus monitor service to trace dns backend events
#
f_bgserv()
{
	# Start or stop the ubus monitor service which traces dns backend
	# events (used to re-trigger adblock after a backend restart when the
	# blocklist file gets reset).
	# $1: "start" or "stop"
	local bg_pid status="${1}"
	# look up an already running monitor instance by its known command lines
	bg_pid="$(pgrep -f "^/bin/sh ${adb_ubusservice}.*|^/bin/ubus -S -M r -m invoke monitor|^grep -qF \"method\":\"set\",\"data\":\\{\"name\":\"${adb_dns}\"" | "${adb_awk}" '{ORS=" "; print $1}')"
	if [ "${adb_dns}" != "raw" ] && [ -z "${bg_pid}" ] && [ "${status}" = "start" ] \
		&& [ -x "${adb_ubusservice}" ] && [ "${adb_dnsfilereset}" -eq 1 ]
	then
		# detach the monitor from the current shell
		( "${adb_ubusservice}" "${adb_ver}" & )
	elif [ -n "${bg_pid}" ] && [ "${status}" = "stop" ]
	then
		kill -HUP "${bg_pid}" 2>/dev/null
	fi
	f_log "debug" "f_bgserv ::: status: ${status:-"-"}, bg_pid: ${bg_pid:-"-"}, dns_filereset: ${adb_dnsfilereset:-"-"}, ubus_service: ${adb_ubusservice:-"-"}"
}
# main function for blocklist processing
#
f_main()
{
	# Main blocklist processing: prepare the local white-/blacklist and
	# safe-search lists, download or restore all enabled sources in
	# parallel (bounded by adb_maxqueue), merge the results, apply tld
	# compression and finally reload the dns backend.
	local src_tmpload src_tmpfile src_name src_rset src_url src_log src_arc src_cat src_item src_list src_entries src_suffix src_rc entry keylist memory cnt=1
	memory="$("${adb_awk}" '/^MemTotal|^MemFree|^MemAvailable/{ORS="/"; print int($2/1000)}' "/proc/meminfo" 2>/dev/null | "${adb_awk}" '{print substr($0,1,length($0)-1)}')"
	f_log "debug" "f_main ::: memory: ${memory:-0}, max_queue: ${adb_maxqueue}, safe_search: ${adb_safesearch}, force_dns: ${adb_forcedns}, awk: ${adb_awk}"
	# white- and blacklist preparation
	#
	for entry in ${adb_locallist}
	do
		( f_list "${entry}" "${entry}" )&
	done
	# safe search preparation
	#
	if [ "${adb_safesearch}" -eq 1 ] && [ "${adb_dnssafesearch}" != "0" ]
	then
		if [ -z "${adb_safesearchlist}" ]
		then
			adb_safesearchlist="google bing duckduckgo pixabay yandex youtube"
		fi
		for entry in ${adb_safesearchlist}
		do
			( f_list safesearch "${entry}" )&
		done
	fi
	wait
	# main loop
	#
	for src_name in ${adb_sources}
	do
		json_select "${src_name}" >/dev/null 2>&1
		if [ "${?}" -ne 0 ]
		then
			# unknown source - drop it from the active source list
			adb_sources="${adb_sources/${src_name}}"
			continue
		fi
		json_get_var src_url "url" >/dev/null 2>&1
		json_get_var src_rset "rule" >/dev/null 2>&1
		json_get_values src_cat "categories" >/dev/null 2>&1
		json_select ..
		src_tmpload="${adb_tmpload}.${src_name}.load"
		src_tmpsort="${adb_tmpload}.${src_name}.sort"
		src_tmpfile="${adb_tmpfile}.${src_name}"
		src_rc=4
		# basic pre-checks
		#
		if [ -z "${src_url}" ] || [ -z "${src_rset}" ]
		then
			f_list remove
			continue
		fi
		# backup mode
		#
		# on start/resume try the local backup first and skip the download
		if [ "${adb_backup}" -eq 1 ] && { [ "${adb_action}" = "start" ] || [ "${adb_action}" = "resume" ]; }
		then
			f_list restore
			if [ "${?}" -eq 0 ] && [ -s "${src_tmpfile}" ]
			then
				continue
			fi
		fi
		# download queue processing
		#
		if [ -n "${src_cat}" ]
		then
			# archive source: fetch a tarball and extract only the
			# configured per-category domain files
			(
				src_arc="${adb_tmpdir}/${src_url##*/}"
				src_log="$("${adb_fetchutil}" ${adb_fetchparm} "${src_arc}" "${src_url}" 2>&1)"
				src_rc="${?}"
				if [ "${src_rc}" -eq 0 ] && [ -s "${src_arc}" ]
				then
					unset src_entries
					# optional per-source filename suffix, defaults to "domains"
					src_suffix="$(eval printf "%s" \"\$\{adb_src_suffix_${src_name}:-\"domains\"\}\")"
					src_list="$(tar -tzf "${src_arc}" 2>/dev/null)"
					for src_item in ${src_cat}
					do
						src_entries="${src_entries} $(printf "%s" "${src_list}" | grep -E "${src_item}/${src_suffix}$")"
					done
					if [ -n "${src_entries}" ]
					then
						tar -xOzf "${src_arc}" ${src_entries} 2>/dev/null > "${src_tmpload}"
						src_rc="${?}"
					fi
					rm -f "${src_arc}"
				else
					src_log="$(printf "%s" "${src_log}" | "${adb_awk}" '{ORS=" ";print $0}')"
					f_log "info" "download of '${src_name}' failed, url: ${src_url}, rule: ${src_rset:-"-"}, categories: ${src_cat:-"-"}, rc: ${src_rc}, log: ${src_log:-"-"}"
				fi
				if [ "${src_rc}" -eq 0 ] && [ -s "${src_tmpload}" ]
				then
					# apply the source rule set, strip CRs, drop whitelisted
					# entries and reverse the domain labels for the tld sort
					if [ -s "${adb_tmpdir}/tmp.rem.whitelist" ]
					then
						"${adb_awk}" "${src_rset}" "${src_tmpload}" | sed "s/\r//g" | \
						grep -Evf "${adb_tmpdir}/tmp.rem.whitelist" | "${adb_awk}" 'BEGIN{FS="."}{for(f=NF;f>1;f--)printf "%s.",$f;print $1}' > "${src_tmpsort}"
					else
						"${adb_awk}" "${src_rset}" "${src_tmpload}" | sed "s/\r//g" | \
						"${adb_awk}" 'BEGIN{FS="."}{for(f=NF;f>1;f--)printf "%s.",$f;print $1}' > "${src_tmpsort}"
					fi
					rm -f "${src_tmpload}"
					sort ${adb_srtopts} -u "${src_tmpsort}" 2>/dev/null > "${src_tmpfile}"
					src_rc="${?}"
					rm -f "${src_tmpsort}"
					if [ "${src_rc}" -eq 0 ] && [ -s "${src_tmpfile}" ]
					then
						f_list download
						if [ "${adb_backup}" -eq 1 ]
						then
							f_list backup
						fi
					elif [ "${adb_backup}" -eq 1 ] && [ "${adb_action}" != "start" ]
					then
						f_log "info" "archive preparation of '${src_name}' failed, categories: ${src_cat:-"-"}, entries: ${src_entries}, rc: ${src_rc}"
						f_list restore
						rm -f "${src_tmpfile}"
					fi
				elif [ "${adb_backup}" -eq 1 ] && [ "${adb_action}" != "start" ]
				then
					f_log "info" "archive extraction of '${src_name}' failed, categories: ${src_cat:-"-"}, entries: ${src_entries}, rc: ${src_rc}"
					f_list restore
				fi
			)&
			continue
		else
			# plain source: fetch a single list file
			(
				src_log="$("${adb_fetchutil}" ${adb_fetchparm} "${src_tmpload}" "${src_url}" 2>&1)"
				src_rc="${?}"
				if [ "${src_rc}" -eq 0 ] && [ -s "${src_tmpload}" ]
				then
					if [ -s "${adb_tmpdir}/tmp.rem.whitelist" ]
					then
						"${adb_awk}" "${src_rset}" "${src_tmpload}" | sed "s/\r//g" | \
						grep -Evf "${adb_tmpdir}/tmp.rem.whitelist" | "${adb_awk}" 'BEGIN{FS="."}{for(f=NF;f>1;f--)printf "%s.",$f;print $1}' > "${src_tmpsort}"
					else
						"${adb_awk}" "${src_rset}" "${src_tmpload}" | sed "s/\r//g" | \
						"${adb_awk}" 'BEGIN{FS="."}{for(f=NF;f>1;f--)printf "%s.",$f;print $1}' > "${src_tmpsort}"
					fi
					rm -f "${src_tmpload}"
					sort ${adb_srtopts} -u "${src_tmpsort}" 2>/dev/null > "${src_tmpfile}"
					src_rc="${?}"
					rm -f "${src_tmpsort}"
					if [ "${src_rc}" -eq 0 ] && [ -s "${src_tmpfile}" ]
					then
						f_list download
						if [ "${adb_backup}" -eq 1 ]
						then
							f_list backup
						fi
					elif [ "${adb_backup}" -eq 1 ] && [ "${adb_action}" != "start" ]
					then
						f_log "info" "preparation of '${src_name}' failed, rc: ${src_rc}"
						f_list restore
						rm -f "${src_tmpfile}"
					fi
				else
					src_log="$(printf "%s" "${src_log}" | "${adb_awk}" '{ORS=" ";print $0}')"
					f_log "info" "download of '${src_name}' failed, url: ${src_url}, rule: ${src_rset:-"-"}, categories: ${src_cat:-"-"}, rc: ${src_rc}, log: ${src_log:-"-"}"
					if [ "${adb_backup}" -eq 1 ] && [ "${adb_action}" != "start" ]
					then
						f_list restore
					fi
				fi
			)&
		fi
		# throttle: wait for running jobs after every adb_maxqueue downloads
		hold=$((cnt%adb_maxqueue))
		if [ "${hold}" -eq 0 ]
		then
			wait
		fi
		cnt=$((cnt+1))
	done
	wait
	f_list merge
	# tld compression and dns restart
	#
	if [ "${?}" -eq 0 ] && [ -s "${adb_tmpdir}/${adb_dnsfile}" ]
	then
		f_tld "${adb_tmpdir}/${adb_dnsfile}"
		f_list final
	else
		# nothing merged - install an empty (header-only) blocklist
		printf "${adb_dnsheader}" > "${adb_dnsdir}/${adb_dnsfile}"
	fi
	chown "${adb_dnsuser}" "${adb_dnsdir}/${adb_dnsfile}" 2>/dev/null
	f_dnsup
	if [ "${?}" -eq 0 ]
	then
		if [ "${adb_action}" != "resume" ]
		then
			f_jsnup "enabled"
		fi
		if [ "${adb_dns}" != "raw" ] && [ "${adb_dnsfilereset}" -eq 1 ]
		then
			# file reset mode: clear the on-disk list after a successful
			# load and let the ubus monitor watch for backend restarts
			printf "${adb_dnsheader}" > "${adb_dnsdir}/${adb_dnsfile}"
			f_log "info" "blocklist with overall ${adb_cnt} blocked domains loaded successfully and reset afterwards (${adb_sysver})"
			f_bgserv "start"
		else
			f_log "info" "blocklist with overall ${adb_cnt} blocked domains loaded successfully (${adb_sysver})"
		fi
	else
		f_log "err" "dns backend restart with adblock blocklist failed"
	fi
	f_rmtemp
}
# trace dns queries via tcpdump and prepare a report
#
f_report()
{
	# Trace dns queries via tcpdump and prepare a report.
	# $1: search term for the "requests" section
	# $2: max. number of latest queries to include
	# $3: "true" to (re-)process the captured pcap files
	# $4: output format - "cli", "json" or "false" for none
	local iface bg_pid status total start end blocked percent top_list top array item index hold ports cnt=0 search="${1}" count="${2}" process="${3}" print="${4}"
	if [ "${adb_report}" -eq 1 ] && [ ! -x "${adb_dumpcmd}" ]
	then
		f_log "info" "Please install the package 'tcpdump' or 'tcpdump-mini' to use the reporting feature"
	elif [ "${adb_report}" -eq 0 ] && [ "${adb_action}" = "report" ]
	then
		f_log "info" "Please enable the 'DNS Report' option to use the reporting feature"
	fi
	if [ -x "${adb_dumpcmd}" ]
	then
		# stop a running capture on disable/stop/restart and wait until
		# the process is gone
		bg_pid="$(pgrep -f "^${adb_dumpcmd}.*adb_report\\.pcap$" | "${adb_awk}" '{ORS=" "; print $1}')"
		if [ "${adb_report}" -eq 0 ] || { [ -n "${bg_pid}" ] && { [ "${adb_action}" = "stop" ] || [ "${adb_action}" = "restart" ]; } }
		then
			if [ -n "${bg_pid}" ]
			then
				kill -HUP "${bg_pid}" 2>/dev/null
				# NOTE(review): the $(...) wrapper is redundant - the loop
				# condition ends up using kill's exit status either way
				while $(kill -0 "${bg_pid}" 2>/dev/null)
				do
					sleep 1
				done
				unset bg_pid
			fi
		fi
	fi
	if [ -x "${adb_dumpcmd}" ] && [ "${adb_report}" -eq 1 ]
	then
		if [ -z "${bg_pid}" ] && [ "${adb_action}" != "report" ] && [ "${adb_action}" != "stop" ]
		then
			# build the tcpdump port filter from the configured listen ports
			for port in ${adb_replisten}
			do
				if [ -z "${ports}" ]
				then
					ports="port ${port}"
				else
					ports="${ports} or port ${port}"
				fi
			done
			if [ -z "${adb_repiface}" ]
			then
				# auto-detect the lan device and persist it in the uci config
				network_get_device iface "lan"
				if [ -n "${iface}" ]
				then
					adb_repiface="${iface}"
				else
					network_get_physdev iface "lan"
					if [ -n "${iface}" ]
					then
						adb_repiface="${iface}"
					fi
				fi
				if [ -n "${adb_repiface}" ]
				then
					uci_set adblock global adb_repiface "${adb_repiface}"
					f_uci "adblock"
				fi
			fi
			if [ -n "${adb_reportdir}" ] && [ ! -d "${adb_reportdir}" ]
			then
				mkdir -p "${adb_reportdir}"
				f_log "info" "report directory '${adb_reportdir}' created"
			fi
			if [ -n "${adb_repiface}" ] && [ -d "${adb_reportdir}" ]
			then
				# start the detached, size-rotated background capture
				( "${adb_dumpcmd}" -nn -s0 -l -i ${adb_repiface} ${ports} -C${adb_repchunksize} -W${adb_repchunkcnt} -w "${adb_reportdir}/adb_report.pcap" >/dev/null 2>&1 & )
				bg_pid="$(pgrep -f "^${adb_dumpcmd}.*adb_report\\.pcap$" | "${adb_awk}" '{ORS=" "; print $1}')"
			else
				f_log "info" "Please set the name of the reporting network device 'adb_repiface' manually"
			fi
		fi
		if [ "${adb_action}" = "report" ] && [ "${process}" = "true" ]
		then
			# decode all pcap chunks in parallel into a raw tab-separated table
			> "${adb_reportdir}/adb_report.raw"
			for file in "${adb_reportdir}/adb_report.pcap"*
			do
				(
					"${adb_dumpcmd}" -tttt -r "${file}" 2>/dev/null | \
					"${adb_awk}" -v cnt="${cnt}" '!/\.lan\. |PTR\? | SOA\? /&&/ A[\? ]+|NXDomain|0\.0\.0\.0/{a=$1;b=substr($2,0,8);c=$4;sub(/\.[0-9]+$/,"",c);d=cnt $7;sub(/\*$/,"",d);
					e=$(NF-1);sub(/[0-9]\/[0-9]\/[0-9]|0\.0\.0\.0/,"NX",e);sub(/\.$/,"",e);sub(/([0-9]{1,3}\.){3}[0-9]{1,3}/,"OK",e);printf "%s\t%s\t%s\t%s\t%s\n",d,e,a,b,c}' >> "${adb_reportdir}/adb_report.raw"
				)&
				hold=$((cnt%adb_maxqueue))
				if [ "${hold}" -eq 0 ]
				then
					wait
				fi
				cnt=$((cnt+1))
			done
			wait
			if [ -s "${adb_reportdir}/adb_report.raw" ]
			then
				# pair each request with its answer and sort the result
				sort ${adb_srtopts} -k1 -k3 -k4 -k5 -k1 -ur "${adb_reportdir}/adb_report.raw" | \
				"${adb_awk}" '{currA=($1+0);currB=$1;currC=substr($1,length($1),1);if(reqA==currB){reqA=0;printf "%s\t%s\n",d,$2}else if(currC=="+"){reqA=currA;d=$3"\t"$4"\t"$5"\t"$2}}' | \
				sort ${adb_srtopts} -k1 -k2 -k3 -k4 -ur > "${adb_reportdir}/adb_report.srt"
				rm -f "${adb_reportdir}/adb_report.raw"
			fi
			if [ -s "${adb_reportdir}/adb_report.srt" ]
			then
				# overall statistics, then emit the json report piecewise
				start="$("${adb_awk}" 'END{printf "%s_%s",$1,$2}' "${adb_reportdir}/adb_report.srt")"
				end="$("${adb_awk}" 'NR==1{printf "%s_%s",$1,$2}' "${adb_reportdir}/adb_report.srt")"
				total="$(wc -l < "${adb_reportdir}/adb_report.srt")"
				blocked="$("${adb_awk}" '{if($5=="NX")cnt++}END{printf "%s",cnt}' "${adb_reportdir}/adb_report.srt")"
				percent="$("${adb_awk}" -v t="${total}" -v b="${blocked}" 'BEGIN{printf "%.2f%s",b/t*100,"%"}')"
				> "${adb_reportdir}/adb_report.json"
				printf "%s" "{ \"data\": { " >> "${adb_reportdir}/adb_report.json"
				printf "%s" "\"start_date\": \"${start%_*}\", " >> "${adb_reportdir}/adb_report.json"
				printf "%s" "\"start_time\": \"${start#*_}\", " >> "${adb_reportdir}/adb_report.json"
				printf "%s" "\"end_date\": \"${end%_*}\", " >> "${adb_reportdir}/adb_report.json"
				printf "%s" "\"end_time\": \"${end#*_}\", " >> "${adb_reportdir}/adb_report.json"
				printf "%s" "\"total\": \"${total}\", " >> "${adb_reportdir}/adb_report.json"
				printf "%s" "\"blocked\": \"${blocked}\", " >> "${adb_reportdir}/adb_report.json"
				printf "%s" "\"percent\": \"${percent}\", " >> "${adb_reportdir}/adb_report.json"
				top_list="top_clients top_domains top_blocked"
				for top in ${top_list}
				do
					printf "%s" " \"${top}\": [ " >> "${adb_reportdir}/adb_report.json"
					case "${top}" in
						"top_clients")
							"${adb_awk}" '{print $3}' "${adb_reportdir}/adb_report.srt" | sort ${adb_srtopts} | uniq -c | \
							sort ${adb_srtopts} -nr | "${adb_awk}" '{ORS=" ";if(NR==1)printf "{ \"count\": \"%s\", \"address\": \"%s\" }",$1,$2; else if(NR<10)printf ", { \"count\": \"%s\", \"address\": \"%s\" }",$1,$2}' >> "${adb_reportdir}/adb_report.json"
						;;
						"top_domains")
							"${adb_awk}" '{if($5!="NX")print $4}' "${adb_reportdir}/adb_report.srt" | sort ${adb_srtopts} | uniq -c | \
							sort ${adb_srtopts} -nr | "${adb_awk}" '{ORS=" ";if(NR==1)printf "{ \"count\": \"%s\", \"address\": \"%s\" }",$1,$2; else if(NR<10)printf ", { \"count\": \"%s\", \"address\": \"%s\" }",$1,$2}' >> "${adb_reportdir}/adb_report.json"
						;;
						"top_blocked")
							"${adb_awk}" '{if($5=="NX")print $4}' "${adb_reportdir}/adb_report.srt" | sort ${adb_srtopts} | uniq -c | \
							sort ${adb_srtopts} -nr | "${adb_awk}" '{ORS=" ";if(NR==1)printf "{ \"count\": \"%s\", \"address\": \"%s\" }",$1,$2; else if(NR<10)printf ", { \"count\": \"%s\", \"address\": \"%s\" }",$1,$2}' >> "${adb_reportdir}/adb_report.json"
						;;
					esac
					printf "%s" " ], " >> "${adb_reportdir}/adb_report.json"
				done
				# sanitize the search term before using it as an awk regex
				search="${search//./\\.}"
				search="${search//[+*~%\$&\"\' ]/}"
				"${adb_awk}" "BEGIN{i=0;printf \"\\\"requests\\\": [ \" }/(${search})/{i++;if(i==1)printf \"{ \\\"date\\\": \\\"%s\\\", \\\"time\\\": \\\"%s\\\", \\\"client\\\": \\\"%s\\\", \\\"domain\\\": \\\"%s\\\", \\\"rc\\\": \\\"%s\\\" }\",\$1,\$2,\$3,\$4,\$5;else if(i<=${count})printf \", { \\\"date\\\": \\\"%s\\\", \\\"time\\\": \\\"%s\\\", \\\"client\\\": \\\"%s\\\", \\\"domain\\\": \\\"%s\\\", \\\"rc\\\": \\\"%s\\\" }\",\$1,\$2,\$3,\$4,\$5}END{printf \" ] } }\n\"}" "${adb_reportdir}/adb_report.srt" >> "${adb_reportdir}/adb_report.json"
				rm -f "${adb_reportdir}/adb_report.srt"
			fi
		fi
		if [ -s "${adb_reportdir}/adb_report.json" ]
		then
			if [ "${print}" = "cli" ]
			then
				# human readable cli output
				printf "%s\\n%s\\n%s\\n" ":::" "::: Adblock DNS-Query Report" ":::"
				json_load_file "${adb_reportdir}/adb_report.json"
				json_select "data"
				json_get_keys keylist
				for key in ${keylist}
				do
					json_get_var value "${key}"
					eval "${key}=\"${value}\""
				done
				printf " + %s\\n + %s\\n" "Start ::: ${start_date}, ${start_time}" "End ::: ${end_date}, ${end_time}"
				printf " + %s\\n + %s %s\\n" "Total ::: ${total}" "Blocked ::: ${blocked}" "(${percent})"
				top_list="top_clients top_domains top_blocked requests"
				for top in ${top_list}
				do
					case "${top}" in
						"top_clients")
							item="::: Top 10 Clients"
						;;
						"top_domains")
							item="::: Top 10 Domains"
						;;
						"top_blocked")
							item="::: Top 10 Blocked Domains"
						;;
					esac
					if json_get_type status "${top}" && [ "${top}" != "requests" ] && [ "${status}" = "array" ]
					then
						printf "%s\\n%s\\n%s\\n" ":::" "${item}" ":::"
						json_select "${top}"
						index=1
						while json_get_type status "${index}" && [ "${status}" = "object" ]
						do
							json_get_values item "${index}"
							# ${item} stays unquoted on purpose: it splits
							# into the count and address columns
							printf " + %-9s::: %s\\n" ${item}
							index=$((index+1))
						done
					elif json_get_type status "${top}" && [ "${top}" = "requests" ] && [ "${status}" = "array" ]
					then
						printf "%s\\n%s\\n%s\\n" ":::" "::: Latest DNS Queries" ":::"
						printf "%-15s%-15s%-45s%-50s%s\\n" "Date" "Time" "Client" "Domain" "Answer"
						json_select "${top}"
						index=1
						while json_get_type status "${index}" && [ "${status}" = "object" ]
						do
							json_get_values item "${index}"
							printf "%-15s%-15s%-45s%-50s%s\\n" ${item}
							index=$((index+1))
						done
					fi
					json_select ".."
				done
			elif [ "${print}" = "json" ]
			then
				cat "${adb_reportdir}/adb_report.json"
			fi
		fi
	fi
	f_log "debug" "f_report ::: action: ${adb_action}, report: ${adb_report}, search: ${1}, count: ${2}, process: ${3}, print: ${4}, dump_util: ${adb_dumpcmd}, repdir: ${adb_reportdir}, repiface: ${adb_repiface:-"-"}, replisten: ${adb_replisten}, repchunksize: ${adb_repchunksize}, repchunkcnt: ${adb_repchunkcnt}, bg_pid: ${bg_pid}"
}
# awk selection
#
# prefer gawk if installed, otherwise fall back to the system awk
adb_awk="$(command -v gawk)"
if [ -z "${adb_awk}" ]
then
	adb_awk="$(command -v awk)"
fi
# source required system libraries
#
if [ -r "/lib/functions.sh" ] && [ -r "/lib/functions/network.sh" ] && [ -r "/usr/share/libubox/jshn.sh" ]
then
	. "/lib/functions.sh"
	. "/lib/functions/network.sh"
	. "/usr/share/libubox/jshn.sh"
else
	# fatal - f_log "err" exits the script
	f_log "err" "system libraries not found"
fi
# handle different adblock actions
#
f_load
case "${adb_action}" in
	"stop")
		f_bgserv "stop"
		f_report "+" "50" "false" "false"
		f_rmdns
	;;
	"restart")
		f_bgserv "stop"
		f_report "+" "50" "false" "false"
		f_rmdns
		f_env
		f_main
	;;
	"suspend")
		# suspend/resume are skipped for the file-only 'raw' backend
		if [ "${adb_dns}" != "raw" ]
		then
			f_switch suspend
		fi
	;;
	"resume")
		if [ "${adb_dns}" != "raw" ]
		then
			f_switch resume
		fi
	;;
	"report")
		f_report "${2}" "${3}" "${4}" "${5}"
	;;
	"query")
		f_query "${2}"
	;;
	"start"|"reload")
		f_bgserv "stop"
		f_report "+" "50" "false" "false"
		f_env
		f_main
	;;
esac
|
marcin1j/openwrt-packages
|
net/adblock/files/adblock.sh
|
Shell
|
gpl-2.0
| 52,482 |
#!/bin/sh
# Manual KLIPS IPsec setup (test fixture): installs a 3DES-MD5 ESP SA and
# tunnel SA between 192.1.2.23 and 192.1.2.45, an eroute for the test
# subnets, and brings up the ipsec0 virtual interface.
TZ=GMT export TZ
# start from a clean SA/eroute table
ipsec spi --clear
ipsec eroute --clear
# fixed test keys so the captured output is reproducible
enckey=0x4043434545464649494a4a4c4c4f4f515152525454575758
authkey=0x87658765876587658765876587658765
ipsec klipsdebug --set pfkey
#ipsec klipsdebug --set verbose
# install the ESP SA and the matching IPIP (tun) SA for SPI 0x12345678
ipsec spi --af inet --edst 192.1.2.45 --spi 0x12345678 --proto esp --src 192.1.2.23 --esp 3des-md5-96 --enckey $enckey --authkey $authkey
ipsec spi --af inet --edst 192.1.2.45 --spi 0x12345678 --proto tun --src 192.1.2.23 --dst 192.1.2.45 --ip4
# group tun+esp into one bundle, then route 192.0.2.0/24 -> 192.0.1.0/24 through it
ipsec spigrp inet 192.1.2.45 0x12345678 tun inet 192.1.2.45 0x12345678 esp
ipsec eroute --add --eraf inet --src 192.0.2.0/24 --dst 192.0.1.0/24 --said [email protected]
# attach the virtual ipsec0 device to eth1 and bring it up
ipsec tncfg --attach --virtual ipsec0 --physical eth1
ifconfig ipsec0 inet 192.1.2.23 netmask 0xffffff00 broadcast 192.1.2.255 up
# static arp entries for the peer and the gateway
arp -s 192.1.2.45 10:00:00:64:64:45
arp -s 192.1.2.254 10:00:00:64:64:45
ipsec look
# magic route command
route add -host 192.0.1.1 gw 192.1.2.45 dev ipsec0
|
kidmaple/CoolWall
|
openswan/testing/klips/east-espiv-02/spi1.sh
|
Shell
|
gpl-2.0
| 950 |
#!/bin/bash
# Clean up the Mess.
# Remove the build artifacts in one call; -f keeps the script quiet and
# exiting successfully even when a file is already gone, and -- guards
# against file names that look like options.
rm -f -- a b a.dump b.dump
|
legacy-vault/tests
|
go/go-asm/4/clean.sh
|
Shell
|
gpl-3.0
| 64 |
#!/bin/bash
# Regression test: decode input.bn with the Joshua decoder and compare the
# extracted model scores against the gold file.
# Requires $JOSHUA to point at a Joshua installation (set -u aborts if unset).
set -u

# Decode (1-gram config, 2 threads); stdout -> output, stderr -> log.
# $JOSHUA is quoted so paths with spaces work; input is fed via redirection
# instead of a useless 'cat |'.
"${JOSHUA}/bin/joshua-decoder" -m 1g -threads 2 -c joshua.config < input.bn > output 2> log

# Extract the translations and model scores (fields 4 and 10 of the
# pipe-delimited n-best output)
awk -F\| '{print $4 " ||| " $10}' output > output.scores

# Compare against the gold scores; clean up on success, keep the
# artifacts (incl. 'diff') around for inspection on failure
if diff -u output.scores output.scores.gold > diff; then
  rm -f diff output log output.scores
  exit 0
else
  exit 1
fi
|
lukeorland/joshua
|
test/bn-en/samt/test.sh
|
Shell
|
lgpl-2.1
| 369 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.