code stringlengths 2–1.05M | repo_name stringlengths 5–110 | path stringlengths 3–922 | language stringclasses 1 value | license stringclasses 15 values | size int64 2–1.05M
---|---|---|---|---|---
#!/bin/bash
set -e
tag=$1
cicd=${tag:0:8}
release=${tag:8}
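# Illustrative example (hypothetical tag): for tag="igluctl/0.7.0",
# ${tag:0:8} yields "igluctl/" and ${tag:8} yields "0.7.0".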
if [ "${cicd}" == "igluctl/" ]; then
if [ "${release}" == "" ]; then
echo "Warning! No release specified! Ignoring."
exit 2
fi
exit 0
else
exit 1
fi
|
snowplow/iglu
|
.travis/is_igluctl_release_tag.sh
|
Shell
|
apache-2.0
| 228 |
#!/bin/sh
#
# Copyright 1999-2011 ImageMagick Studio LLC, a non-profit organization
# dedicated to making software imaging solutions freely available.
#
# You may not use this file except in compliance with the License. You may
# obtain a copy of the License at
#
# http://www.imagemagick.org/script/license.php
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Test for 'validate' utility.
#
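# Each invocation below exercises a different combination of explicit
# (pnm:-, miff:-) and implicit (-) stdin/stdout specifications.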
set -e # Exit on any error
. ${srcdir}/tests/common.sh
${CONVERT} pnm:- 'null:' < ${REFERENCE_IMAGE}
${CONVERT} pnm:- miff:- < ${REFERENCE_IMAGE} | ${IDENTIFY} -
${CONVERT} pnm:- - < ${REFERENCE_IMAGE} | ${IDENTIFY} -
${CONVERT} - 'null:' < ${REFERENCE_IMAGE}
${CONVERT} - miff:- < ${REFERENCE_IMAGE} | ${IDENTIFY} -
${CONVERT} - - < ${REFERENCE_IMAGE} | ${IDENTIFY} -
${CONVERT} ${REFERENCE_IMAGE} - | ${IDENTIFY} -
${CONVERT} ${REFERENCE_IMAGE} miff:- | ${IDENTIFY} -
|
mikesplain/imagemagick
|
tests/validate-pipe.sh
|
Shell
|
apache-2.0
| 1,154 |
#!/bin/sh
echo "Running machine is `hostname`"
ls -l
echo "Dumping now input files"
echo "**********************"
cat *.txt
|
csuarez/jlite-cli-plus
|
test/collection/script_node2.sh
|
Shell
|
apache-2.0
| 125 |
#!/bin/bash
## \cond
#HEADSTART##############################################################
#
#PROJECT: UnifiedTraceAndLogManager
#AUTHOR: Arno-Can Uestuensoez - [email protected]
#MAINTAINER: Arno-Can Uestuensoez - [email protected]
#SHORT: utalm-bash
#LICENSE: Apache-2.0 + CCL-BY-SA-3.0
#
#
########################################################################
#
# Copyright [2007,2008,2010,2013] Arno-Can Uestuensoez
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
########################################################################
#
# refer to source-package for unstripped sources
#
#HEADEND################################################################
#
#$Header$
#
##
## \endcond
## @file
##
## \cond
##
#
shopt -s nullglob
#
#Execution anchor
MYCALLPATHNAME=$0
MYCALLNAME=$(basename "$MYCALLPATHNAME")
MYCALLNAME=${MYCALLNAME%.sh}
MYCALLPATH=$(dirname "$MYCALLPATHNAME")
MYBOOTSTRAPFILE=$(getPathToBootstrapDir.sh)/bootstrap-03_03_001.sh
. ${MYBOOTSTRAPFILE}
if [ $? -ne 0 ];then
echo "ERROR:Missing bootstrap file:configuration: ${MYBOOTSTRAPFILE}">&2
exit 1
fi
setUTALMbash 1 "$@"
#
###
#
. $(getPathToLib.sh Makefile.lib/Nodeaction-test.sh)
#
###
#
DBG=3
_S=2
export F_DEFAULT=$((F_CALLNAME|F_SEVERITY|F_CODE|F_NOCOLOR))
export _F=$F_DEFAULT
#
UNIT_COUNTERRORS_OPTS=${UNIT_COUNTERRORS_OPTS:-flat=1 filter=1 sums=1}
_UNIT_COUNTERRORS_OPTS=${UNIT_COUNTERRORS_OPTS}
#
printINFO $D_DATA $LINENO $BASH_SOURCE 0 "expect=5"
UNIT_COUNTERRORS_OPTS="${_UNIT_COUNTERRORS_OPTS} expect=5" CallCase CASE000
#
printINFO $D_DATA $LINENO $BASH_SOURCE 0 "expect=3"
UNIT_COUNTERRORS_OPTS="${_UNIT_COUNTERRORS_OPTS} expect=3" CallCase CASE001
#
printINFO $D_DATA $LINENO $BASH_SOURCE 0 "expect=3"
UNIT_COUNTERRORS_OPTS="${_UNIT_COUNTERRORS_OPTS} expect=3" CallCase SubSums
exit 0
## \endcond
|
ArnoCan/utalm
|
src/utalm-bash/tests/utalm-make/ALPHA/cli-unittests/utalm-cli-unitest-demo/BETA/provokeError/SubSums/CallCase.sh
|
Shell
|
apache-2.0
| 2,357 |
#!/bin/bash
#
# You can debug the app from the command line rather than from Xcode.
# Download http://github.com/phonegap/ios-deploy, run make, and copy ios-deploy to /usr/local/bin/
# After building with Qt's make, run ios-deploy with "-b" and "-d"
PROJECT_NAME=quickiosexample
make
RESULT=$?
if [ $RESULT -eq 0 ]; then
/usr/local/bin/ios-deploy -b Debug-iphoneos/${PROJECT_NAME}.app -d
fi
|
hilarycheng/quickios
|
tests/quickiosexample/build_ios.sh
|
Shell
|
apache-2.0
| 391 |
#!/bin/bash
##
## Work in progress! The dependency installations need to be done to the
## container so that we don't need to install them here.
##
TEST=${TEST:="BitbarSampleAppTest.py"} #Name of the test file
##### Cloud testrun dependencies start
echo "Extracting tests.zip..."
unzip tests.zip
pip --version
echo "Installing pip for python"
curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py
sudo python get-pip.py
echo "Installing Appium Python Client 0.24 and xmlrunner 1.7.7"
chmod 0755 requirements.txt
sudo pip install -r requirements.txt
echo "Starting Appium ..."
appium-1.6 --log-no-colors --log-timestamp
##### Cloud testrun dependencies end.
export APPIUM_APPFILE=$PWD/application.apk #App file is at current working folder
## Desired capabilities:
export APPIUM_URL="http://localhost:4723/wd/hub" # Local & Cloud
export APPIUM_DEVICE="Local Device"
export APPIUM_PLATFORM="android"
APILEVEL=$(adb shell getprop ro.build.version.sdk)
APILEVEL="${APILEVEL//[$'\t\r\n']}"
echo "API level is: ${APILEVEL}"
## APPIUM_AUTOMATION
if [ "$APILEVEL" -gt "16" ]; then
echo "Setting APPIUM_AUTOMATION=Appium"
export APPIUM_AUTOMATION="Appium"
else
echo "Setting APPIUM_AUTOMATION=selendroid"
export APPIUM_AUTOMATION="Selendroid"
fi
## Run the test:
echo "Running test ${TEST}"
rm -rf screenshots
mkdir -p screenshots
python ${TEST}
ln -s test-reports/*.xml TEST-all.xml
|
piotr-kostecki-bitbar/testdroid-samples
|
self-play-tests/appium/python/run-tests_android.sh
|
Shell
|
apache-2.0
| 1,400 |
#!/bin/bash
#sudo apt-get install -y npm
#npm install elasticdump
#sudo ln -s /usr/bin/nodejs /usr/bin/node
elasticdump \
--input=kibana-export.json \
--output=$1 \
--type=data
|
ohsu-computational-biology/dms-es
|
elasticdump/restore.sh
|
Shell
|
apache-2.0
| 178 |
#!/usr/bin/env bash
set -eu
apt-get update
apt-get install -y python-dev python-pip
easy_install pip
pip install --upgrade --force-reinstall pip
pip install python-barbicanclient
pip install python-keystoneclient
export PYTHONPATH=$PYTHONPATH:..
# don't forget to copy openrc to /opt
echo "source /etc/kolla/admin-openrc.sh" >> /root/.bashrc
# load the new profile in-place; 'exec bash' here would replace this shell
# and nothing below would ever run
. /root/.bashrc
# create a fernet key and store it in Barbican
function generate_fernet_key {
python << EOL
from cryptography.fernet import Fernet
key = Fernet.generate_key()
print(key)
EOL
}
if ! grep -q 'export key_ref' /root/.bashrc; then
project_key=`generate_fernet_key`
key_metadata=`barbican secret store --name 'project_key' --payload "$project_key" --secret-type symmetric --algorithm fernet -f shell`
b=${key_metadata#*href=\"};
key_ref=${b%%\"*};
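# Illustrative walk-through (hypothetical barbican output): given
#   key_metadata='... href="http://controller:9311/v1/secrets/abcd" ...'
# the first expansion removes everything through 'href="' and the second
# removes from the first remaining '"', leaving
#   key_ref=http://controller:9311/v1/secrets/abcd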
[[ -z "$key_ref" ]] && { echo "Warning: key reference is empty, please rexecute the script" ; exit 1; }
echo "export key_ref=$key_ref" | tee -a /root/.bashrc
exec bash
fi
|
ediardo/folsom
|
backend/prepare_environment.sh
|
Shell
|
apache-2.0
| 985 |
#!/bin/bash
ROUTE=${ROUTE:-localhost:8082}
curl ${ROUTE}/products -X POST -d '{"productId":"1","name":"Black Shirt"}' -H "Content-Type: application/json" -H "Authorization: Bearer eyJhbGciOiJSUzI1NiJ9.eyJleHAiOjE0NDYwMjA3NjgsInVzZXJfbmFtZSI6ImlkdWdhbGljIiwiYXV0aG9yaXRpZXMiOlsiUk9MRV9BRE1JTiJdLCJqdGkiOiI3NDVjYjAyZi1mNTJjLTRmODQtYmRkOC0wMWY3ODVjNzFmYWQiLCJjbGllbnRfaWQiOiJhY21lIiwic2NvcGUiOlsib3BlbmlkIiwicmVhZF9jYXRhbG9nIiwid3JpdGVfY2F0YWxvZyIsInJlYWRfb3JkZXJzIiwid3JpdGVfb3JkZXJzIiwicmVhZF9yZXZpZXdzIiwid3JpdGVfcmV2aWV3cyIsInJlYWRfcmVjb21tZW5kYXRpb25zIiwid3JpdGVfcmVjb21tZW5kYXRpb25zIl19.QikKEtfA7xB3mNyx0h8bSyHFl0zCGMS852cgz1VZbuFpB2nsxFDfgqqxHrCCOZ-k2onAse28Ls_eXJ_wZzaFzBheVxrCvQ1qeImgzQp6sxjo3zSyk2dFRs-nsz5rUqyQ6zht15VROlRXcu7arp-uCgkmXlnP-bZWW0IYedYvxNWG6kMTwvOHA2851TzEg4TWF_A_5Lt-6rSpuodo_C3JadEAG65femmEfqa4Gmjg_pR1mNKgcMmo3Zxkl1jTin2nZ5X7Nkch8jhEjIddFvywV5TLL1RJd8Tlmh7U57EaRq1thdDEWUt8F7xb3e8k2hKSmJNVwWR2jwaCL7pyuYB9Zg"
curl ${ROUTE}/products -X POST -d '{"productId":"2","name":"Red Shirt"}' -H "Content-Type: application/json" -H "Authorization: Bearer eyJhbGciOiJSUzI1NiJ9.eyJleHAiOjE0NDYwMjA3NjgsInVzZXJfbmFtZSI6ImlkdWdhbGljIiwiYXV0aG9yaXRpZXMiOlsiUk9MRV9BRE1JTiJdLCJqdGkiOiI3NDVjYjAyZi1mNTJjLTRmODQtYmRkOC0wMWY3ODVjNzFmYWQiLCJjbGllbnRfaWQiOiJhY21lIiwic2NvcGUiOlsib3BlbmlkIiwicmVhZF9jYXRhbG9nIiwid3JpdGVfY2F0YWxvZyIsInJlYWRfb3JkZXJzIiwid3JpdGVfb3JkZXJzIiwicmVhZF9yZXZpZXdzIiwid3JpdGVfcmV2aWV3cyIsInJlYWRfcmVjb21tZW5kYXRpb25zIiwid3JpdGVfcmVjb21tZW5kYXRpb25zIl19.QikKEtfA7xB3mNyx0h8bSyHFl0zCGMS852cgz1VZbuFpB2nsxFDfgqqxHrCCOZ-k2onAse28Ls_eXJ_wZzaFzBheVxrCvQ1qeImgzQp6sxjo3zSyk2dFRs-nsz5rUqyQ6zht15VROlRXcu7arp-uCgkmXlnP-bZWW0IYedYvxNWG6kMTwvOHA2851TzEg4TWF_A_5Lt-6rSpuodo_C3JadEAG65femmEfqa4Gmjg_pR1mNKgcMmo3Zxkl1jTin2nZ5X7Nkch8jhEjIddFvywV5TLL1RJd8Tlmh7U57EaRq1thdDEWUt8F7xb3e8k2hKSmJNVwWR2jwaCL7pyuYB9Zg"
curl ${ROUTE}/products -X POST -d '{"productId":"3","name":"Green Shirt"}' -H "Content-Type: application/json" -H "Content-Type: application/json" -H "Authorization: Bearer eyJhbGciOiJSUzI1NiJ9.eyJleHAiOjE0NDYwMjA3NjgsInVzZXJfbmFtZSI6ImlkdWdhbGljIiwiYXV0aG9yaXRpZXMiOlsiUk9MRV9BRE1JTiJdLCJqdGkiOiI3NDVjYjAyZi1mNTJjLTRmODQtYmRkOC0wMWY3ODVjNzFmYWQiLCJjbGllbnRfaWQiOiJhY21lIiwic2NvcGUiOlsib3BlbmlkIiwicmVhZF9jYXRhbG9nIiwid3JpdGVfY2F0YWxvZyIsInJlYWRfb3JkZXJzIiwid3JpdGVfb3JkZXJzIiwicmVhZF9yZXZpZXdzIiwid3JpdGVfcmV2aWV3cyIsInJlYWRfcmVjb21tZW5kYXRpb25zIiwid3JpdGVfcmVjb21tZW5kYXRpb25zIl19.QikKEtfA7xB3mNyx0h8bSyHFl0zCGMS852cgz1VZbuFpB2nsxFDfgqqxHrCCOZ-k2onAse28Ls_eXJ_wZzaFzBheVxrCvQ1qeImgzQp6sxjo3zSyk2dFRs-nsz5rUqyQ6zht15VROlRXcu7arp-uCgkmXlnP-bZWW0IYedYvxNWG6kMTwvOHA2851TzEg4TWF_A_5Lt-6rSpuodo_C3JadEAG65femmEfqa4Gmjg_pR1mNKgcMmo3Zxkl1jTin2nZ5X7Nkch8jhEjIddFvywV5TLL1RJd8Tlmh7U57EaRq1thdDEWUt8F7xb3e8k2hKSmJNVwWR2jwaCL7pyuYB9Zg"
curl ${ROUTE}/products -X POST -d '{"productId":"4","name":"Black Hat"}' -H "Content-Type: application/json" -H "Content-Type: application/json" -H "Authorization: Bearer eyJhbGciOiJSUzI1NiJ9.eyJleHAiOjE0NDYwMjA3NjgsInVzZXJfbmFtZSI6ImlkdWdhbGljIiwiYXV0aG9yaXRpZXMiOlsiUk9MRV9BRE1JTiJdLCJqdGkiOiI3NDVjYjAyZi1mNTJjLTRmODQtYmRkOC0wMWY3ODVjNzFmYWQiLCJjbGllbnRfaWQiOiJhY21lIiwic2NvcGUiOlsib3BlbmlkIiwicmVhZF9jYXRhbG9nIiwid3JpdGVfY2F0YWxvZyIsInJlYWRfb3JkZXJzIiwid3JpdGVfb3JkZXJzIiwicmVhZF9yZXZpZXdzIiwid3JpdGVfcmV2aWV3cyIsInJlYWRfcmVjb21tZW5kYXRpb25zIiwid3JpdGVfcmVjb21tZW5kYXRpb25zIl19.QikKEtfA7xB3mNyx0h8bSyHFl0zCGMS852cgz1VZbuFpB2nsxFDfgqqxHrCCOZ-k2onAse28Ls_eXJ_wZzaFzBheVxrCvQ1qeImgzQp6sxjo3zSyk2dFRs-nsz5rUqyQ6zht15VROlRXcu7arp-uCgkmXlnP-bZWW0IYedYvxNWG6kMTwvOHA2851TzEg4TWF_A_5Lt-6rSpuodo_C3JadEAG65femmEfqa4Gmjg_pR1mNKgcMmo3Zxkl1jTin2nZ5X7Nkch8jhEjIddFvywV5TLL1RJd8Tlmh7U57EaRq1thdDEWUt8F7xb3e8k2hKSmJNVwWR2jwaCL7pyuYB9Zg"
curl ${ROUTE}/products -X POST -d '{"productId":"5","name":"Brown Hat"}' -H "Content-Type: application/json" -H "Content-Type: application/json" -H "Authorization: Bearer eyJhbGciOiJSUzI1NiJ9.eyJleHAiOjE0NDYwMjA3NjgsInVzZXJfbmFtZSI6ImlkdWdhbGljIiwiYXV0aG9yaXRpZXMiOlsiUk9MRV9BRE1JTiJdLCJqdGkiOiI3NDVjYjAyZi1mNTJjLTRmODQtYmRkOC0wMWY3ODVjNzFmYWQiLCJjbGllbnRfaWQiOiJhY21lIiwic2NvcGUiOlsib3BlbmlkIiwicmVhZF9jYXRhbG9nIiwid3JpdGVfY2F0YWxvZyIsInJlYWRfb3JkZXJzIiwid3JpdGVfb3JkZXJzIiwicmVhZF9yZXZpZXdzIiwid3JpdGVfcmV2aWV3cyIsInJlYWRfcmVjb21tZW5kYXRpb25zIiwid3JpdGVfcmVjb21tZW5kYXRpb25zIl19.QikKEtfA7xB3mNyx0h8bSyHFl0zCGMS852cgz1VZbuFpB2nsxFDfgqqxHrCCOZ-k2onAse28Ls_eXJ_wZzaFzBheVxrCvQ1qeImgzQp6sxjo3zSyk2dFRs-nsz5rUqyQ6zht15VROlRXcu7arp-uCgkmXlnP-bZWW0IYedYvxNWG6kMTwvOHA2851TzEg4TWF_A_5Lt-6rSpuodo_C3JadEAG65femmEfqa4Gmjg_pR1mNKgcMmo3Zxkl1jTin2nZ5X7Nkch8jhEjIddFvywV5TLL1RJd8Tlmh7U57EaRq1thdDEWUt8F7xb3e8k2hKSmJNVwWR2jwaCL7pyuYB9Zg"
|
idugalic/micro-ecommerce
|
microservices-recommendations/scripts/loadProducts.sh
|
Shell
|
apache-2.0
| 4,599 |
docker service create --name rabbitmq -p 5672:5672 -p 15672:15672 -p 9090:9090 --network=custom_overlay cproinger/rabbitmq-management-monitoring
|
SINCConcept/sanalytics
|
sampleapps/service-rabbitmq-with-monitoring.sh
|
Shell
|
apache-2.0
| 144 |
#!/bin/sh
#
# Copyright (c) 2017 Cloudera, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# CentOS and RHEL 7 use NetworkManager. Add a script to be automatically invoked when interface comes up.
#
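# Note: the heredoc delimiter below is quoted ("EOF"), so $1, $2 and
# $DHCP4_IP_ADDRESS are written out literally and are expanded only when
# NetworkManager later runs the dispatcher script.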
cat > /etc/NetworkManager/dispatcher.d/12-register-dns <<"EOF"
#!/bin/bash
# NetworkManager Dispatch script
# Deployed by Cloudera Altus Director Bootstrap
#
# Expected arguments:
# $1 - interface
# $2 - action
#
# See for info: http://linux.die.net/man/8/networkmanager
# Register A and PTR records when interface comes up
# only execute on the primary nic
if [ "$1" != "eth0" ] || [ "$2" != "up" ]
then
exit 0;
fi
# when we have a new IP, perform nsupdate
new_ip_address="$DHCP4_IP_ADDRESS"
host=$(hostname -s)
domain=$(nslookup $(grep -i nameserver /etc/resolv.conf | head -n 1 | cut -d ' ' -f 2) | grep -i name | cut -d ' ' -f 3 | cut -d '.' -f 2- | rev | cut -c 2- | rev)
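# The pipeline above reverse-resolves the first nameserver from resolv.conf,
# then strips the host label and the trailing dot to recover the DNS domain.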
IFS='.' read -ra ipparts <<< "$new_ip_address"
ptrrec="$(printf %s "$new_ip_address." | tac -s.)in-addr.arpa"
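# e.g. new_ip_address=10.1.2.3 yields ptrrec=3.2.1.10.in-addr.arpa
# ('tac -s.' reverses the dot-separated octets).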
nsupdatecmds=$(mktemp -t nsupdate.XXXXXXXXXX)
resolvconfupdate=$(mktemp -t resolvconfupdate.XXXXXXXXXX)
echo updating resolv.conf
grep -iv "search" /etc/resolv.conf > "$resolvconfupdate"
echo "search $domain" >> "$resolvconfupdate"
cat "$resolvconfupdate" > /etc/resolv.conf
echo "Attempting to register $host.$domain and $ptrrec"
{
echo "update delete $host.$domain a"
echo "update add $host.$domain 600 a $new_ip_address"
echo "send"
echo "update delete $ptrrec ptr"
echo "update add $ptrrec 600 ptr $host.$domain"
echo "send"
} > "$nsupdatecmds"
nsupdate "$nsupdatecmds"
exit 0;
EOF
chmod 755 /etc/NetworkManager/dispatcher.d/12-register-dns
service network restart
# Confirm DNS record has been updated, retry if update did not work
i=0
until [ $i -ge 5 ]
do
sleep 5
i=$((i+1))
hostname | nslookup && break
service network restart
done
if [ $i -ge 5 ]; then
echo "DNS update failed"
exit 1
fi
exit 0
|
cloudera/director-scripts
|
azure-dns-scripts/bootstrap_dns_nm.sh
|
Shell
|
apache-2.0
| 2,458 |
#!/usr/bin/env sh
# test_net_seg.bin test_proto pre_train_model label.txt outputfolder [CPU/GPU]
ROOTFILE=/nfs/hn46/xiaolonw/pose_cnncode/caffe-3dnormal_joint_pose
GLOG_logtostderr=1 /nfs/hn46/xiaolonw/pose_cnncode/caffe-3dnormal_joint_pose/build_compute-0-5/tools/test_net_pose.bin /nfs/hn46/xiaolonw/pose_cnncode/caffe-3dnormal_joint_pose/posescript/pose_flic_lecun_8x_col/pose_test.prototxt /nfs/hn38/users/xiaolonw/pose_models/flic_coarse_lecun_8x_col/pose__iter_20000 /nfs/hn38/users/xiaolonw/FLIC/data/testlist_256.txt /nfs/hn38/users/xiaolonw/FLIC/data/results_256
|
xiaolonw/caffe-3dnormal_joint_pose
|
posescript/pose_flic_lecun_8x_col/test_pose.sh
|
Shell
|
bsd-2-clause
| 676 |
#!/bin/sh
#
OURDIR="/path/to/exabgp-logger"
cd $OURDIR/bin
env exabgp.daemon.daemonize=true \
exabgp.daemon.pid=$OURDIR/exabgp/exabgp6.pid \
exabgp.daemon.user=theo \
exabgp.tcp.bind="" \
exabgp.tcp.port="179" \
exabgp.log.enable=true \
exabgp.log.all=false \
exabgp.log.destination=$OURDIR/exabgp/exabgp6.log \
exabgp.cache.attributes=false \
exabgp.cache.nexthops=false \
exabgp $OURDIR/exabgp/exabgp6.conf
|
tbaschak/exabgp-logger
|
exabgp/exabgp6.sh
|
Shell
|
bsd-2-clause
| 422 |
#!/usr/bin/env bash
export DEBIAN_FRONTEND=noninteractive
MYSQL_ROOT_PASSWORD='whyattend'
apt-get update
debconf-set-selections <<< "mysql-server-5.7 mysql-server/root_password password $MYSQL_ROOT_PASSWORD"
debconf-set-selections <<< "mysql-server-5.7 mysql-server/root_password_again password $MYSQL_ROOT_PASSWORD"
apt-get -y install mysql-server
sed -i 's/bind-address.*/bind-address = 0.0.0.0/' /etc/mysql/mysql.conf.d/mysqld.cnf
service mysql restart
mysql -uroot -pwhyattend -e "grant all privileges on *.* to 'whyattend'@'%' identified by 'whyattend'; flush privileges;"
|
ceari/whyattend
|
vagrant/bootstrap.sh
|
Shell
|
bsd-2-clause
| 586 |
attack_slope = 10000 # 10 slow, 1000 fast
decay_slope = 10000 # 10 slow, 1000 fast
sustain_vol = 100 # 0 silent, 100 full
sustain_time = 0 # in samples
release_slope = 40 # 10 slow, 1000 fast
# Main signal generator unit
osc1_type = square # triangle | saw | square | noise
osc1_freq = 500 # hz
osc1_freq_vel = -770 # hz / 200
# Secondary signal generator unit (modulates the signal from the first)
# osc2_type = triangle # triangle | saw | square | noise
# osc2_freq = 45 # hz
# osc2_freq_vel = -5 # hz
#
# modulation = add # multiply | add | fm
# lowpass_freq = 5500 # 0 to 20000 hz
# lowpass_freq_vel = -50
# lowpass_res = 0 # 100 high resonance, 0 low resonance
|
abainbridge/trex-warrior
|
assets/synth_sounds/bloop.sh
|
Shell
|
bsd-2-clause
| 739 |
#!/bin/bash
source debian/vars.sh
# We only support the DPkg:: version of these hook points, if that ever changes
# this will need updated here and in apt-universal-hooks.pl
mkdir -p $DEB_INSTALL_ROOT/etc/apt/apt.conf.d
echo 'DPkg::Tools::Options::/etc/apt/universal-hooks/apt-universal-hooks.pl::Version "2";
DPkg::Pre-Invoke {"/etc/apt/universal-hooks/apt-universal-hooks.pl Pre-Invoke || true";}
DPkg::Pre-Install-Pkgs {"/etc/apt/universal-hooks/apt-universal-hooks.pl Pre-Install-Pkgs || true";};
DPkg::Post-Invoke {"/etc/apt/universal-hooks/apt-universal-hooks.pl Post-Invoke || true";}' > apt-universal-hooks.conf
mkdir -p $DEB_INSTALL_ROOT/etc/apt/universal-hooks
chmod 755 apt-universal-hooks.pl
|
CpanelInc/yum-plugin-universal-hooks
|
debify/debian/override_dh_auto_install.sh
|
Shell
|
bsd-2-clause
| 709 |
#!/bin/bash
#
# Generates a basic system profile
#
# Stephen Lang
# Tue Mar 5 23:53:01 EST 2013
# Server Information
hostname=`hostname`
public_ip=`ifconfig eth0 |grep "inet addr:" | awk '{print $2}' | cut -d\: -f2`
private_ip=`ifconfig eth1 |grep "inet addr:" | awk '{print $2}' | cut -d\: -f2`
if [ -f /etc/redhat-release ]; then
os=`cat /etc/redhat-release`
elif [ -f /etc/lsb-release ]; then
os=`cat /etc/lsb-release |grep DESC |cut -d\= -f2 |sed -e 's/"//g'`
else
echo "Unsupported OS, exiting"
fi
arch=`uname -m`
kernel=`uname -r`
cpu_type=`cat /proc/cpuinfo |grep "model name" |cut -d\: -f2`
cpu_speed=`cat /proc/cpuinfo |grep "cpu MHz" |cut -d\: -f2`
mem_total=`free -m | grep "Mem:" | awk '{print $2}'`
swap_total=`free -m | grep "Swap:" | awk '{print $2}'`
# Partition Layout
df=`df -h`
# Memory Usage
mem_percent_used=`/usr/bin/free -m | grep Mem | awk '{print $3/$2 * 100.0}' | cut -d\. -f1`
swap_percent_used=`/usr/bin/free -m | grep Swap | awk '{print $3/$2 * 100.0}' | cut -d\. -f1`
apache_total_memory=`ps -ef |grep apache |grep -v ^root | awk '{print $2}' | xargs pmap -d |grep ^mapped: |awk '{sum += $4} END {print sum/1024}'`
mysql_total_memory=`ps -ef |grep mysql |grep -v ^root | awk '{print $2}' | xargs pmap -d |grep ^mapped: | awk '{sum += $4} END {print sum/1024}'`
# Networking Information
for i in `netstat -natp |grep LISTEN | awk '{print $4}' | sed -e 's/:://g' | cut -d\: -f2`; do echo -n $i,\ ; done > listening-ports.out
listening_ports=`cat listening-ports.out`
total_conn=`netstat -nat |wc -l`
# Apache Information
if [ -f /etc/redhat-release ]; then
apache_version=`httpd -v |grep version | cut -d\: -f2`
total_vhosts=`httpd -S | grep namevhost | wc -l`
apache_conn=`ps waux |grep apache | wc -l`
apache_max=`cat /etc/httpd/conf/httpd.conf |grep MaxClient | grep -v \# |head -1 |awk '{print $2}'`
elif [ -f /etc/lsb-release ]; then
apache_version=`apache2 -v |grep version | cut -d\: -f2`
total_vhosts=`apache2 -S | grep namevhost | wc -l`
apache_conn=`ps waux |grep apache | wc -l`
apache_max=`cat /etc/apache2/apache2.conf |grep MaxClient | grep -v \# |head -1 |awk '{print $2}'`
else
echo "Unsupported OS, exiting"
fi
# MySQL Information
mysql_version=`mysql -V | awk '{print $5}' |sed -e 's/,//g'`
total_databases=`echo "show databases;" | mysql | grep -v Database |grep -v information_schema |grep -v performance_schema | wc -l`
mysql_conn=`echo "show processlist;" | mysql | grep -v State | wc -l`
mysql_max=`echo "show variables;" | mysql | grep max_connections | awk '{print $2}'`
# Report
cat << EOF > /tmp/system-profiler.txt
---------------------------------------------------------------
Server Information
---------------------------------------------------------------
Hostname: $hostname
IP Address: $public_ip / $private_ip
Operating System: $os
Arch: $arch
Kernel: $kernel
CPU Type: $cpu_type $cpu_speed
Memory Installed: $mem_total MB
Swap Total: $swap_total MB
---------------------------------------------------------------
Partition Layout
---------------------------------------------------------------
$df
---------------------------------------------------------------
Memory Usage
---------------------------------------------------------------
Total Memory: $mem_total MB
Memory In Use: $mem_percent_used%
Total Swap: $swap_total MB
Swap In Use: $swap_percent_used%
Apache Total Memory: $apache_total_memory MB
MySQL Total Memory $mysql_total_memory MB
---------------------------------------------------------------
Network Information
---------------------------------------------------------------
Listening Ports: $listening_ports
Total Connections: $total_conn
---------------------------------------------------------------
Apache Statistics
---------------------------------------------------------------
Apache Version: $apache_version
Virtual Hosts: $total_vhosts
Connections Used: $apache_conn/$apache_max
---------------------------------------------------------------
MySQL Statistics
---------------------------------------------------------------
MySQL Version: $mysql_version
Total Databases: $total_databases
Connections Used: $mysql_conn/$mysql_max
---------------------------------------------------------------
EOF
cat /tmp/system-profiler.txt
rm /tmp/system-profiler.txt
# Clean up
rm listening-ports.out
echo "Press any key to continue..."
read -p "$*"
|
stephenlang/shelladmin
|
modules/system/system-profile.sh
|
Shell
|
bsd-2-clause
| 4,605 |
if [ "$1" = 0 ]; then
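# (In an RPM %preun scriptlet, $1 is the number of package instances that
#  remain after the transaction: 0 means full uninstall, >=1 means upgrade.)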
if [ -f /lib/systemd/system/circonus-agent.service ]; then
/bin/systemctl disable circonus-agent
/bin/systemctl stop circonus-agent >/dev/null 2>&1
elif [ -f /etc/init.d/circonus-agent ]; then
/sbin/chkconfig --del circonus-agent
/sbin/service circonus-agent stop >/dev/null 2>&1
fi
fi
exit 0
|
maier/circonus-agent
|
package/rpm/preremove.sh
|
Shell
|
bsd-3-clause
| 357 |
#!/bin/bash
echo
echo
echo
echo REMEMBER: Upgrading does nothing without a TypeScript compile...
echo
echo
echo
pushd ../../..
svn update
popd
echo Deleting old files
rm -rf KNappen/assets/world/KNappen/*
echo Copying new files.
cp -a ../../KNappen_src/KNappen/KNappen.MobileSPA/* KNappen/assets/world/KNappen/
echo Done, ready to compile.
|
knreise/KNappen
|
PhoneGap/ios/updateARWeb.sh
|
Shell
|
bsd-3-clause
| 350 |
#!/usr/bin/env bash
TESTFILE1=$(mktemp -p .)
if [ -x "$(command -v python3)" ]; then
PYTHON=$(command -v python3)
else
PYTHON=$(command -v python)
fi
${PYTHON} << END
import random as rnd
import time as time
rnd.seed(time.time())
randnum = rnd.sample(range(1,101), 18)
f1 = open("${TESTFILE1}", "w+")
for m in randnum:
for n in randnum:
line = str(m) + '_' + str(n) + '_' \
+ str(m) + '_' + str(n) + '\n'
f1.write(line)
f1.close()
END
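# Each line of ${TESTFILE1} now holds an M_N_LDI_LDO tuple such as
# "37_81_37_81"; the loop below splits the fields on '_' with awk.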
UNARY_OP=12
for i in `cat ${TESTFILE1}`
do
M=`echo ${i} | awk -F"_" '{print $1}'`
N=`echo ${i} | awk -F"_" '{print $2}'`
LDI=`echo ${i} | awk -F"_" '{print $3}'`
LDO=`echo ${i} | awk -F"_" '{print $4}'`
echo ${M} ${N} 100 100
for PREC_IN in 2 4
do
for PREC_COMP in 4
do
for PREC_OUT in 2 4
do
for BCAST_IN in 0 1 2 3
do
./eltwise_unary_simple ${UNARY_OP} ${BCAST_IN} ${PREC_IN} ${PREC_COMP} ${PREC_OUT} ${M} ${N} 100 100
done
done
done
done
done
rm ${TESTFILE1}
|
hfp/libxsmm
|
samples/eltwise/kernel_test/unary_gelu_inv_mixed_gtld.sh
|
Shell
|
bsd-3-clause
| 1,020 |
#!/bin/bash
### Use this script to do any pre-vendor-removal work, such as running specific tear-down procedures ###
### This script runs after DEPNotify is opened, but before prior management is removed ###
DEPNOTIFYLOG="/private/var/tmp/depnotify.log"
echo "Status: Running Pre-Migration script" >> $DEPNOTIFYLOG
exit 0
|
vmwaresamples/AirWatch-samples
|
macOS-Samples/Tools/Migration-Tool/payload/Library/Application Support/VMware/MigratorResources/premigration.sh
|
Shell
|
bsd-3-clause
| 325 |
#!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
source $DIR/util.sh
usage ()
{
cat << EOF
Usage:
$0 [OPTIONS]
WebRTC build script.
OPTIONS:
-h Show this message
-d Debug mode. Print all executed commands.
-o OUTDIR Output directory. Default is 'out'
-b BRANCH Latest revision on git branch. Overrides -r. Common branch names are 'branch-heads/nn', where 'nn' is the release number.
-r REVISION Git SHA revision. Default is latest revision.
-t TARGET OS The target os for cross-compilation. Default is the host OS such as 'linux', 'mac', 'win'. Other values can be 'android', 'ios'.
-c TARGET CPU The target cpu for cross-compilation. Default is 'x64'. Other values can be 'x86', 'arm64', 'arm'.
-n CONFIGS Build configurations, space-separated. Default is 'Debug Release'. Other values can be 'Debug', 'Release'.
-e Compile WebRTC with RTTI enabled. Default is with RTTI not enabled.
-g [Linux] Compile 'Debug' WebRTC with iterator debugging disabled. Default is enabled but it might add significant overhead.
-D [Linux] Generate a debian package
-F PATTERN Allow customize package filename through a pattern
-P PATTERN Allow customize package name through a pattern
-V PATTERN Allow customize package version through a pattern
The PATTERN is a string that can use the following tokens:
%p% The system platform.
%to% Target os.
%tc% Target cpu.
%b% The branch if it was specified.
%r% Revision.
%sr% Short revision.
%rn% The associated revision number.
%da% Debian architecture.
EOF
}
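# Illustrative invocation (argument values are examples only):
#   ./build.sh -b branch-heads/64 -t android -c arm64 -n Release \
#     -F 'webrtcbuilds-%rn%-%sr%-%to%-%tc%'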
while getopts :b:o:r:t:c:n:degDF:P:V: OPTION; do
case $OPTION in
o) OUTDIR=$OPTARG ;;
b) BRANCH=$OPTARG ;;
r) REVISION=$OPTARG ;;
t) TARGET_OS=$OPTARG ;;
c) TARGET_CPU=$OPTARG ;;
n) CONFIGS=$OPTARG ;;
d) DEBUG=1 ;;
e) ENABLE_RTTI=1 ;;
g) DISABLE_ITERATOR_DEBUG=1 ;;
D) PACKAGE_AS_DEBIAN=1 ;;
F) PACKAGE_FILENAME_PATTERN=$OPTARG ;;
P) PACKAGE_NAME_PATTERN=$OPTARG ;;
V) PACKAGE_VERSION_PATTERN=$OPTARG ;;
?) usage; exit 1 ;;
esac
done
OUTDIR=${OUTDIR:-out}
BRANCH=${BRANCH:-}
DEBUG=${DEBUG:-0}
ENABLE_RTTI=${ENABLE_RTTI:-0}
DISABLE_ITERATOR_DEBUG=${DISABLE_ITERATOR_DEBUG:-0}
PACKAGE_AS_DEBIAN=${PACKAGE_AS_DEBIAN:-0}
PACKAGE_FILENAME_PATTERN=${PACKAGE_FILENAME_PATTERN:-"webrtcbuilds-%rn%-%sr%-%to%-%tc%"}
PACKAGE_NAME_PATTERN=${PACKAGE_NAME_PATTERN:-"webrtcbuilds"}
PACKAGE_VERSION_PATTERN=${PACKAGE_VERSION_PATTERN:-"%rn%"}
CONFIGS=${CONFIGS:-Debug Release}
REPO_URL="https://webrtc.googlesource.com/src"
DEPOT_TOOLS_URL="https://chromium.googlesource.com/chromium/tools/depot_tools.git"
DEPOT_TOOLS_DIR=$DIR/depot_tools
DEPOT_TOOLS_WIN_TOOLCHAIN=0
PATH=$DEPOT_TOOLS_DIR:$DEPOT_TOOLS_DIR/python276_bin:$PATH
[ "$DEBUG" = 1 ] && set -x
mkdir -p $OUTDIR
OUTDIR=$(cd $OUTDIR && pwd -P)
detect-platform
TARGET_OS=${TARGET_OS:-$PLATFORM}
TARGET_CPU=${TARGET_CPU:-x64}
echo "Host OS: $PLATFORM"
echo "Target OS: $TARGET_OS"
echo "Target CPU: $TARGET_CPU"
check::platform $PLATFORM $TARGET_OS
echo Checking webrtcbuilds dependencies
check::webrtcbuilds::deps $PLATFORM
echo Checking depot-tools
check::depot-tools $PLATFORM $DEPOT_TOOLS_URL $DEPOT_TOOLS_DIR
if [ ! -z $BRANCH ]; then
REVISION=$(git ls-remote $REPO_URL --heads $BRANCH | head -n1 | cut -f1) || \
{ echo "Cound not get branch revision for $BRANCH" && exit 1; }
[ -z $REVISION ] && echo "Cound not get branch revision for $BRANCH" && exit 1
echo "Building branch: $BRANCH"
else
REVISION=${REVISION:-$(latest-rev $REPO_URL)} || \
{ echo "Could not get latest revision" && exit 1; }
fi
echo "Building revision: $REVISION"
REVISION_NUMBER=$(revision-number $REPO_URL $REVISION) || \
{ echo "Could not get revision number" && exit 1; }
echo "Associated revision number: $REVISION_NUMBER"
echo "Checking out WebRTC revision (this will take awhile): $REVISION"
checkout "$TARGET_OS" $OUTDIR $REVISION
echo Checking WebRTC dependencies
check::webrtc::deps $PLATFORM $OUTDIR "$TARGET_OS"
echo Patching WebRTC source
patch $PLATFORM $OUTDIR $ENABLE_RTTI
echo Compiling WebRTC
compile $PLATFORM $OUTDIR "$TARGET_OS" "$TARGET_CPU" "$CONFIGS" "$DISABLE_ITERATOR_DEBUG"
echo Packaging WebRTC
PACKAGE_FILENAME=$(interpret-pattern "$PACKAGE_FILENAME_PATTERN" "$PLATFORM" "$OUTDIR" "$TARGET_OS" "$TARGET_CPU" "$BRANCH" "$REVISION" "$REVISION_NUMBER")
PACKAGE_NAME=$(interpret-pattern "$PACKAGE_NAME_PATTERN" "$PLATFORM" "$OUTDIR" "$TARGET_OS" "$TARGET_CPU" "$BRANCH" "$REVISION" "$REVISION_NUMBER")
PACKAGE_VERSION=$(interpret-pattern "$PACKAGE_VERSION_PATTERN" "$PLATFORM" "$OUTDIR" "$TARGET_OS" "$TARGET_CPU" "$BRANCH" "$REVISION" "$REVISION_NUMBER")
package::prepare $PLATFORM $OUTDIR $PACKAGE_FILENAME $DIR/resource "$CONFIGS" $REVISION_NUMBER
if [ "$PACKAGE_AS_DEBIAN" = 1 ]; then
package::debian $OUTDIR $PACKAGE_FILENAME $PACKAGE_NAME $PACKAGE_VERSION "$(debian-arch $TARGET_CPU)"
else
package::zip $PLATFORM $OUTDIR $PACKAGE_FILENAME
fi
echo Build successful
|
vsimon/webrtcbuilds
|
build.sh
|
Shell
|
bsd-3-clause
| 5,182 |
#!/bin/sh
self=$0
usage() {
cat <<EOF >&2
Usage: $self [options] FILE
Reads the Run Time CPU Detections definitions from FILE and generates a
C header file on stdout.
Options:
--arch=ARCH Architecture to generate defs for (required)
--disable-EXT Disable support for EXT extensions
--require-EXT Require support for EXT extensions
--sym=SYMBOL Unique symbol to use for RTCD initialization function
--config=FILE File with CONFIG_FOO=yes lines to parse
EOF
exit 1
}
die() {
echo "$@" >&2
exit 1
}
die_argument_required() {
die "Option $opt requires argument"
}
for opt; do
optval="${opt#*=}"
case "$opt" in
--arch) die_argument_required;;
--arch=*) arch=${optval};;
--disable-*) eval "disable_${opt#--disable-}=true";;
--require-*) REQUIRES="${REQUIRES}${opt#--require-} ";;
--sym) die_argument_required;;
--sym=*) symbol=${optval};;
--config=*) config_file=${optval};;
-h|--help)
usage
;;
-*)
die "Unrecognized option: ${opt%%=*}"
;;
*)
defs_file="$defs_file $opt"
;;
esac
shift
done
for f in $defs_file; do [ -f "$f" ] || usage; done
[ -n "$arch" ] || usage
# Import the configuration
[ -f "$config_file" ] && eval $(grep CONFIG_ "$config_file")
#
# Routines for the RTCD DSL to call
#
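# A defs file is plain shell that calls into this DSL; a minimal
# hypothetical example:
#   prototype void vp8_foo "int x, int y"
#   specialize vp8_foo mmx sse2
# declares the default vp8_foo_c plus optional mmx/sse2 variants.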
prototype() {
local rtyp
case "$1" in
unsigned) rtyp="$1 "; shift;;
esac
rtyp="${rtyp}$1"
local fn="$2"
local args="$3"
eval "${2}_rtyp='$rtyp'"
eval "${2}_args='$3'"
ALL_FUNCS="$ALL_FUNCS $fn"
specialize $fn c
}
specialize() {
local fn="$1"
shift
for opt in "$@"; do
eval "${fn}_${opt}=${fn}_${opt}"
done
}
require() {
for fn in $ALL_FUNCS; do
for opt in "$@"; do
local ofn=$(eval "echo \$${fn}_${opt}")
[ -z "$ofn" ] && continue
# if we already have a default, then we can disable it, as we know
# we can do better.
local best=$(eval "echo \$${fn}_default")
local best_ofn=$(eval "echo \$${best}")
[ -n "$best" ] && [ "$best_ofn" != "$ofn" ] && eval "${best}_link=false"
eval "${fn}_default=${fn}_${opt}"
eval "${fn}_${opt}_link=true"
done
done
}
forward_decls() {
ALL_FORWARD_DECLS="$ALL_FORWARD_DECLS $1"
}
#
# Include the user's directives
#
for f in $defs_file; do
. $f
done
#
# Process the directives according to the command line
#
process_forward_decls() {
for fn in $ALL_FORWARD_DECLS; do
eval $fn
done
}
determine_indirection() {
[ "$CONFIG_RUNTIME_CPU_DETECT" = "yes" ] || require $ALL_ARCHS
for fn in $ALL_FUNCS; do
local n=""
local rtyp="$(eval "echo \$${fn}_rtyp")"
local args="$(eval "echo \"\$${fn}_args\"")"
local dfn="$(eval "echo \$${fn}_default")"
dfn=$(eval "echo \$${dfn}")
for opt in "$@"; do
local ofn=$(eval "echo \$${fn}_${opt}")
[ -z "$ofn" ] && continue
local link=$(eval "echo \$${fn}_${opt}_link")
[ "$link" = "false" ] && continue
n="${n}x"
done
if [ "$n" = "x" ]; then
eval "${fn}_indirect=false"
else
eval "${fn}_indirect=true"
fi
done
}
declare_function_pointers() {
for fn in $ALL_FUNCS; do
local rtyp="$(eval "echo \$${fn}_rtyp")"
local args="$(eval "echo \"\$${fn}_args\"")"
local dfn="$(eval "echo \$${fn}_default")"
dfn=$(eval "echo \$${dfn}")
for opt in "$@"; do
local ofn=$(eval "echo \$${fn}_${opt}")
[ -z "$ofn" ] && continue
echo "$rtyp ${ofn}($args);"
done
if [ "$(eval "echo \$${fn}_indirect")" = "false" ]; then
echo "#define ${fn} ${dfn}"
else
echo "RTCD_EXTERN $rtyp (*${fn})($args);"
fi
echo
done
}
set_function_pointers() {
for fn in $ALL_FUNCS; do
local n=""
local rtyp="$(eval "echo \$${fn}_rtyp")"
local args="$(eval "echo \"\$${fn}_args\"")"
local dfn="$(eval "echo \$${fn}_default")"
dfn=$(eval "echo \$${dfn}")
if $(eval "echo \$${fn}_indirect"); then
echo " $fn = $dfn;"
for opt in "$@"; do
local ofn=$(eval "echo \$${fn}_${opt}")
[ -z "$ofn" ] && continue
[ "$ofn" = "$dfn" ] && continue;
local link=$(eval "echo \$${fn}_${opt}_link")
[ "$link" = "false" ] && continue
local cond="$(eval "echo \$have_${opt}")"
echo " if (${cond}) $fn = $ofn;"
done
fi
echo
done
}
filter() {
local filtered
for opt in "$@"; do
[ -z $(eval "echo \$disable_${opt}") ] && filtered="$filtered $opt"
done
echo $filtered
}
#
# Helper functions for generating the arch specific RTCD files
#
common_top() {
local outfile_basename=$(basename ${outfile:-rtcd.h})
local include_guard=$(echo -n $outfile_basename | tr '[a-z]' '[A-Z]' | tr -c '[A-Z]' _)
cat <<EOF
#ifndef ${include_guard}
#define ${include_guard}
#ifdef RTCD_C
#define RTCD_EXTERN
#else
#define RTCD_EXTERN extern
#endif
$(process_forward_decls)
$(declare_function_pointers c $ALL_ARCHS)
EOF
}
common_bottom() {
cat <<EOF
#endif
EOF
}
x86() {
determine_indirection c $ALL_ARCHS
# Assign the helper variable for each enabled extension
for opt in $ALL_ARCHS; do
local uc=$(echo -n $opt | tr '[a-z]' '[A-Z]')
eval "have_${opt}=\"flags & HAS_${uc}\""
done
cat <<EOF
$(common_top)
void ${symbol:-rtcd}(void);
#ifdef RTCD_C
#include "vpx_ports/x86.h"
void ${symbol:-rtcd}(void)
{
int flags = x86_simd_caps();
(void)flags;
$(set_function_pointers c $ALL_ARCHS)
}
#endif
$(common_bottom)
EOF
}
arm() {
determine_indirection c $ALL_ARCHS
# Assign the helper variable for each enabled extension
for opt in $ALL_ARCHS; do
local uc=$(echo -n $opt | tr '[a-z]' '[A-Z]')
eval "have_${opt}=\"flags & HAS_${uc}\""
done
cat <<EOF
$(common_top)
#include "vpx_config.h"
void ${symbol:-rtcd}(void);
#ifdef RTCD_C
#include "vpx_ports/arm.h"
void ${symbol:-rtcd}(void)
{
int flags = arm_cpu_caps();
(void)flags;
$(set_function_pointers c $ALL_ARCHS)
}
#endif
$(common_bottom)
EOF
}
unoptimized() {
determine_indirection c
cat <<EOF
$(common_top)
#include "vpx_config.h"
void ${symbol:-rtcd}(void);
#ifdef RTCD_C
void ${symbol:-rtcd}(void)
{
$(set_function_pointers c)
}
#endif
$(common_bottom)
EOF
}
#
# Main Driver
#
require c
case $arch in
x86)
ALL_ARCHS=$(filter mmx sse sse2 sse3 ssse3 sse4_1)
x86
;;
x86_64)
ALL_ARCHS=$(filter mmx sse sse2 sse3 ssse3 sse4_1)
REQUIRES=${REQUIRES:-mmx sse sse2}
require $(filter $REQUIRES)
x86
;;
armv5te)
ALL_ARCHS=$(filter edsp)
arm
;;
armv6)
ALL_ARCHS=$(filter edsp media)
arm
;;
armv7)
ALL_ARCHS=$(filter edsp media neon)
arm
;;
*)
unoptimized
;;
esac
|
awatry/libvpx.opencl
|
build/make/rtcd.sh
|
Shell
|
bsd-3-clause
| 6,660 |
#!/bin/bash
RETCODE=$(fw_exists /usr/local/openresty/nginx/sbin/nginx)
[ ! "$RETCODE" == 0 ] || { return 0; }
fw_depends nginx
fw_get http://openresty.org/download/ngx_openresty-1.5.8.1.tar.gz
fw_untar ngx_openresty-1.5.8.1.tar.gz
cd ngx_openresty-1.5.8.1
./configure --with-luajit --with-http_postgres_module
make
sudo make install
|
seem-sky/FrameworkBenchmarks
|
toolset/setup/linux/webservers/openresty.sh
|
Shell
|
bsd-3-clause
| 334 |
#!/bin/sh
# (c) Copyright 2009 - 2010 Xilinx, Inc. All rights reserved.
#
# This file contains confidential and proprietary information
# of Xilinx, Inc. and is protected under U.S. and
# international copyright and other intellectual property
# laws.
#
# DISCLAIMER
# This disclaimer is not a license and does not grant any
# rights to the materials distributed herewith. Except as
# otherwise provided in a valid license issued to you by
# Xilinx, and to the maximum extent permitted by applicable
# law: (1) THESE MATERIALS ARE MADE AVAILABLE "AS IS" AND
# WITH ALL FAULTS, AND XILINX HEREBY DISCLAIMS ALL WARRANTIES
# AND CONDITIONS, EXPRESS, IMPLIED, OR STATUTORY, INCLUDING
# BUT NOT LIMITED TO WARRANTIES OF MERCHANTABILITY, NON-
# INFRINGEMENT, OR FITNESS FOR ANY PARTICULAR PURPOSE; and
# (2) Xilinx shall not be liable (whether in contract or tort,
# including negligence, or under any other theory of
# liability) for any loss or damage of any kind or nature
# related to, arising under or in connection with these
# materials, including for any direct, or any indirect,
# special, incidental, or consequential loss or damage
# (including loss of data, profits, goodwill, or any type of
# loss or damage suffered as a result of any action brought
# by a third party) even if such damage or loss was
# reasonably foreseeable or Xilinx had been advised of the
# possibility of the same.
#
# CRITICAL APPLICATIONS
# Xilinx products are not designed or intended to be fail-
# safe, or for use in any application requiring fail-safe
# performance, such as life-support or safety devices or
# systems, Class III medical devices, nuclear facilities,
# applications related to the deployment of airbags, or any
# other applications that could lead to death, personal
# injury, or severe property or environmental damage
# (individually and collectively, "Critical
# Applications"). Customer assumes the sole risk and
# liability of any use of Xilinx products in Critical
# Applications, subject only to applicable laws and
# regulations governing limitations on product liability.
#
# THIS COPYRIGHT NOTICE AND DISCLAIMER MUST BE RETAINED AS
# PART OF THIS FILE AT ALL TIMES.
work=work
#--------------------------------------------------------------------------------
mkdir work
ncvlog -work work ../../implement/results/routed.v
echo "Compiling Test Bench Files"
ncvhdl -v93 -work work ../bmg_tb_pkg.vhd
ncvhdl -v93 -work work ../random.vhd
ncvhdl -v93 -work work ../data_gen.vhd
ncvhdl -v93 -work work ../addr_gen.vhd
ncvhdl -v93 -work work ../checker.vhd
ncvhdl -v93 -work work ../bmg_stim_gen.vhd
ncvhdl -v93 -work work ../ram_16x8k_dp_synth.vhd
ncvhdl -v93 -work work ../ram_16x8k_dp_tb.vhd
echo "Compiling SDF file"
ncsdfc ../../implement/results/routed.sdf -output ./routed.sdf.X
echo "Generating SDF command file"
echo 'COMPILED_SDF_FILE = "routed.sdf.X",' > sdf.cmd
echo 'SCOPE = :ram_16x8k_dp_synth_inst:BMG_PORT,' >> sdf.cmd
echo 'MTM_CONTROL = "MAXIMUM";' >> sdf.cmd
echo "Elaborating Design"
ncelab -access +rwc glbl -sdf_cmd_file sdf.cmd $work.ram_16x8k_dp_tb
echo "Simulating Design"
ncsim -gui -input @"simvision -input wave_ncsim.sv" $work.ram_16x8k_dp_tb
|
olgirard/openmsp430
|
fpga/xilinx_avnet_lx9microbard/rtl/verilog/coregen/ram_16x8k_dp/simulation/timing/simulate_ncsim.sh
|
Shell
|
bsd-3-clause
| 3,218 |
#!/bin/sh
# Package
PACKAGE="headphones-custom"
DNAME="Headphones Custom"
# Others
INSTALL_DIR="/usr/local/${PACKAGE}"
SSS="/var/packages/${PACKAGE}/scripts/start-stop-status"
PYTHON_DIR="/usr/local/python"
GIT_DIR="/usr/local/git"
PATH="${INSTALL_DIR}/bin:${INSTALL_DIR}/env/bin:${PYTHON_DIR}/bin:${GIT_DIR}/bin:${PATH}"
USER="headphones-custom"
GROUP="users"
GIT="${GIT_DIR}/bin/git"
VIRTUALENV="${PYTHON_DIR}/bin/virtualenv"
TMP_DIR="${SYNOPKG_PKGDEST}/../../@tmp"
preinst ()
{
# Check fork
if [ "${SYNOPKG_PKG_STATUS}" == "INSTALL" ] && ! ${GIT} ls-remote --heads --exit-code ${wizard_fork_url:=git://github.com/rembo10/headphones.git} ${wizard_fork_branch:=master} > /dev/null 2>&1; then
echo "Incorrect fork"
exit 1
fi
exit 0
}
postinst ()
{
# Link
ln -s ${SYNOPKG_PKGDEST} ${INSTALL_DIR}
# Create a Python virtualenv
${VIRTUALENV} --system-site-packages ${INSTALL_DIR}/env > /dev/null
# Clone the repository
${GIT} clone -q -b ${wizard_fork_branch:=master} ${wizard_fork_url:=git://github.com/rembo10/headphones.git} ${INSTALL_DIR}/var/Headphones
# Create user
adduser -h ${INSTALL_DIR}/var -g "${DNAME} User" -G ${GROUP} -s /bin/sh -S -D ${USER}
# Correct the files ownership
chown -R ${USER}:root ${SYNOPKG_PKGDEST}
exit 0
}
preuninst ()
{
# Stop the package
${SSS} stop > /dev/null
# Remove the user if uninstalling
if [ "${SYNOPKG_PKG_STATUS}" == "UNINSTALL" ]; then
delgroup ${USER} ${GROUP}
deluser ${USER}
fi
exit 0
}
postuninst ()
{
# Remove link
rm -f ${INSTALL_DIR}
exit 0
}
preupgrade ()
{
# Stop the package
${SSS} stop > /dev/null
# Save some stuff
rm -fr ${TMP_DIR}/${PACKAGE}
mkdir -p ${TMP_DIR}/${PACKAGE}
mv ${INSTALL_DIR}/var ${TMP_DIR}/${PACKAGE}/
exit 0
}
postupgrade ()
{
# Restore some stuff
rm -fr ${INSTALL_DIR}/var
mv ${TMP_DIR}/${PACKAGE}/var ${INSTALL_DIR}/
rm -fr ${TMP_DIR}/${PACKAGE}
exit 0
}
|
hadess/spksrc
|
spk/headphones-custom/src/installer.sh
|
Shell
|
bsd-3-clause
| 2,027 |
#!/bin/bash
die_if_error()
{
local err=$?
if [ "$err" != "0" ]; then
echo "$*"
exit $err
fi
}
createconfigdbfile()
{
echo ""; echo $(date "+%Y-%m-%d %T") "- Creating the database config file"
(cat <<EOF_DBAAS
# mongodb.conf 4.2
########################################
## Storage configuration
########################################
storage:
# Location of the database files
dbPath: /data/data/
# Alternative directory structure, in which files for each database are kept in a unique directory
directoryPerDB: true
# Storage Engine
engine: wiredTiger
wiredTiger:
engineConfig:
cacheSizeGB: {{ configuration.wiredTiger_engineConfig_cacheSizeGB.value }}
# enable the journal
journal:
enabled: true
########################################
## Process Management configuration
########################################
processManagement:
# Fork the server process and run in background
fork: true
########################################
## Log Options
########################################
systemLog:
destination: file
path: /data/logs/mongodb.log
quiet: {{ configuration.quiet.value }}
verbosity: {{ configuration.logLevel.value }}
########################################
## Net Options
########################################
net:
bindIp: {{HOSTADDRESS}}
{% if PORT %}
port: {{ PORT }}
{% endif %}
{% if SSL_CONFIGURED %}
tls:
#mode: allowTLS #step 1
#mode: preferTLS #step 2
#mode: requireTLS #step 3
mode: {% if SSL_MODE_ALLOW %}allowTLS{% endif %}{% if SSL_MODE_PREFER %}preferTLS{% endif %}{% if SSL_MODE_REQUIRE %}requireTLS{% endif %}
certificateKeyFile: {{ INFRA_SSL_CERT }}
CAFile: {{ MASTER_SSL_CA }}
allowConnectionsWithoutCertificates: true
{% endif %}
########################################
## Security
########################################
security:
{% if 'mongodb_replica_set' in DRIVER_NAME %}
# File used to authenticate in replica set environment
keyFile: /data/mongodb.key
{% if SSL_CONFIGURED %}
#clusterAuthMode: sendKeyFile #step 1
#clusterAuthMode: sendX509 #step 2
#clusterAuthMode: x509 #step 3
clusterAuthMode: {% if SSL_MODE_ALLOW %}sendKeyFile{% endif %}{% if SSL_MODE_PREFER %}sendX509{% endif %}{% if SSL_MODE_REQUIRE %}x509{% endif %}
{% endif %}
{% else %}
authorization: enabled
{% endif %}
{% if 'mongodb_replica_set' in DRIVER_NAME %}
########################################
## Replica Set
########################################
replication:
# Use replica sets with the specified logical set name
replSetName: {{ REPLICASETNAME }}
# Custom size for replication operation log in MB.
oplogSizeMB: {{ configuration.oplogSize.value }}
{% endif %}
EOF_DBAAS
) > {{ CONFIG_FILE_PATH|default:"/data/mongodb.conf" }}
die_if_error "Error setting mongodb.conf"
chown mongodb:mongodb {{ CONFIG_FILE_PATH|default:"/data/mongodb.conf" }}
die_if_error "Error changing mongodb conf file owner"
}
createconfigdbrsyslogfile()
{
echo ""; echo $(date "+%Y-%m-%d %T") "- Creating the rsyslog config file"
(cat <<EOF_DBAAS
#mongodb.conf 4.0 rsyslog.d configuration
\$ModLoad imfile
\$InputFileName /data/logs/mongodb.log
\$InputFileTag mongod.27017:
\$InputFileStateFile mongodb-log-dbaas
\$InputFileSeverity info
\$InputFileFacility local0
\$InputRunFileMonitor
EOF_DBAAS
) > /etc/rsyslog.d/mongodb.conf
die_if_error "Error setting mongodb.conf"
}
createmongodbkeyfile()
{
echo ""; echo $(date "+%Y-%m-%d %T") "- Creating the mongodb key file"
(cat <<EOF_DBAAS
{{MONGODBKEY}}
EOF_DBAAS
) > /data/mongodb.key
die_if_error "Error setting mongodb key file"
chown mongodb:mongodb /data/mongodb.key
die_if_error "Error changing mongodb key file owner"
chmod 600 /data/mongodb.key
die_if_error "Error changing mongodb key file permission"
}
{% if CONFIGFILE_ONLY %}
createconfigdbfile
{% else %}
createconfigdbfile
createconfigdbrsyslogfile
createmongodbkeyfile
{% endif %}
exit 0
|
globocom/database-as-a-service
|
dbaas/physical/scripts/mongodb_42_configuration.sh
|
Shell
|
bsd-3-clause
| 4,130 |
#!/bin/bash
set -e
set -v
##############################################################################################
# Add any additional firewall ports below this line in this format:
# sudo firewall-cmd --zone=public --add-port=####/tcp --permanent
# sudo firewall-cmd --zone=public --add-port=####/udp --permanent
##############################################################################################
# Firewall ports for mariadb and postgresql
sudo firewall-cmd --zone=public --add-port=3306/tcp --permanent
sudo firewall-cmd --zone=public --add-port=5432/tcp --permanent
sudo firewall-cmd --reload
|
jhajek/packer-vagrant-build-scripts
|
packer/scripts/proxmox/focal-database/post_install_prxmx_ubuntu_firewall-additions.sh
|
Shell
|
bsd-3-clause
| 619 |
#!/bin/bash
# Copyright 2020 Google LLC.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Usage: ./nucleus/pip_package/build_pip_package.sh [optional_dir]
#
# If [optional_dir] is supplied, the created wheel file is placed there.
#
# Important: You must run
# source install.sh
# before running this script. In addition, if you make any changes to the
# source code, you should rebuild:
# bazel build -c opt $BAZEL_FLAGS nucleus/pip_package:build_pip_package
set -e
set -x
# When changing NUCLEUS_VERSION, be sure to also change it in
# egg_files/PKG-INFO.
NUCLEUS_VERSION="0.6.0"
PACKAGE_NAME="google_nucleus-${NUCLEUS_VERSION}"
PYTHON_VERSION="3.8"
TMPDIR=$(mktemp -d -t tmp.XXXXXXXXXXX)
TOPDIR="${TMPDIR}/${PACKAGE_NAME}"
mkdir -p "${TOPDIR}"
echo $(date) : "=== Copying files to ${TOPDIR}"
RUNFILES=bazel-bin/nucleus/pip_package/build_pip_package.runfiles/nucleus
# $RUNFILES has three subdirectories, each of which gets treated a bit
# differently.
# Subdirectory #1: Copy /nucleus to top level.
cp -L -R "${RUNFILES}/nucleus" "${TOPDIR}"
# Subdirectory #2: Copy /third_party to /nucleus/third_party.
mkdir -p "${TOPDIR}/nucleus/third_party"
cp -L -R "${RUNFILES}"/third_party/* "${TOPDIR}/nucleus/third_party"
# Subdirectory #3: /external. The only thing we need from it is our
# version of protobuf, which we need to import as google.protobuf.
# See also the top level __init__.py that sets the sys.path to make this
# version of protobuf have precedence over any other installed versions while
# running Nucleus code.
cp -L -R "${RUNFILES}/external/com_google_protobuf/python/google" "${TOPDIR}/google"
# Copy top level files to /nucleus.
cp LICENSE "${TOPDIR}/nucleus"
# Copy setup.py to top level.
cp "${RUNFILES}/nucleus/pip_package/setup.py" "${TOPDIR}"
cp "${RUNFILES}/nucleus/pip_package/setup.cfg" "${TOPDIR}"
# Create egg-info directory.
EGG_DIR="${TOPDIR}/${PACKAGE_NAME}-py${PYTHON_VERSION}.egg-info"
mkdir -p "${EGG_DIR}"
cp "${RUNFILES}/nucleus/pip_package/egg_files"/* "${EGG_DIR}"
pushd "${TOPDIR}"
find . -type f -print > "${EGG_DIR}/SOURCES.txt"
popd
# Fix symbolic links -- any .so file in Nucleus should point to
# google/protobuf/pyext/_message.so with a relative link.
pushd "${TOPDIR}"
find "nucleus" -name '*.so' -exec ln -f -s -r "google/protobuf/pyext/_message.so" {} \;
popd
# Some versions of protobuf have _message.so files named
# _message.cpython-34m.so or _message.cpython-35m-x86_64-linux-gnu.so
# so we create a symbolic link at those filenames so that we overwrite them.
pushd "${TOPDIR}/google/protobuf/pyext"
ln -f -s "_message.so" "_message.cpython-34m.so"
ln -f -s "_message.so" "_message.cpython-35m-x86_64-linux-gnu.so"
popd
# Create tar file
TAR_NAME="${PACKAGE_NAME}.tar.gz"
pushd "${TMPDIR}"
echo $(date) : "=== Building tar file ${TAR_NAME}"
tar cvzf "${TAR_NAME}" "${PACKAGE_NAME}"
# ls the tarfile to see how large it is.
# redacted
# allowed to be over 100M, and it's usually a sign of other trouble when
# it is over that size.
ls -lh "${TAR_NAME}"
if [ $# -gt 0 ]; then
DEST=$1
mkdir -p "${DEST}"
cp "${TAR_NAME}" "${DEST}"
else
DEST="${TAR_NAME}"
fi
popd
echo "Output tar file is in ${DEST}"
|
google/deepvariant
|
third_party/nucleus/pip_package/build_pip_package.sh
|
Shell
|
bsd-3-clause
| 4,634 |
#!/bin/sh
# $Id$
if [ "$1" = "--force" ];
then
FORCE=--force
NOFORCE=
FORCE_MISSING=--force-missing
else
FORCE=
NOFORCE=--no-force
FORCE_MISSING=
fi
if which glibtoolize > /dev/null 2>&1; then
glibtoolize --automake --copy $FORCE 2>&1 | sed '/^You should/d' || {
echo "libtoolize failed!"
exit 1
}
else
libtoolize --automake --copy $FORCE 2>&1 | sed '/^You should/d' || {
echo "libtoolize failed!"
exit 1
}
fi
aclocal $FORCE || {
echo "aclocal failed!"
exit 1
}
autoheader $FORCE || {
echo "autoheader failed!"
exit 1
}
automake -a -c $NOFORCE || {
echo "automake failed!"
exit 1
}
autoconf $FORCE || {
echo "autoconf failed!"
exit 1
}
|
lucastheis/cisa
|
code/liblbfgs/autogen.sh
|
Shell
|
mit
| 706 |
#!/bin/bash
for i in $( ls ); do
find "$i" -name "*.java" -or -name "*.aj" | xargs cat | grep -E includePhoto > "$i.txt"
done
|
leotizzei/MobileMedia-Cosmos-v7
|
src/br/unicamp/ic/sed/mobilemedia/script-video.sh
|
Shell
|
mit
| 146 |
jest $(
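# The subshell prints "--coverage" unless JEST_NO_COVERAGE is 'true', and
# prints nothing at all when JEST_NO_DEFAULT_OPTIONS is 'true' (the 'exit 0'
# short-circuits before any option is emitted).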
[[ $JEST_NO_DEFAULT_OPTIONS == 'true' ]] && exit 0
[[ $JEST_NO_COVERAGE == 'true' ]] || echo --coverage
) $JEST_ARGV $@
|
cuong8321/react-hello-world
|
sh/jest.sh
|
Shell
|
mit
| 132 |
socat TCP4-LISTEN:3306,fork,reuseaddr TCP4:mysql:3306 &
sudo mkdir /var/run/mysqld
sudo chown travis /var/run/mysqld
socat UNIX-LISTEN:/var/run/mysqld/mysqld.sock,fork,reuseaddr TCP4:mysql:3306 &
mysql -uroot -e "GRANT ALL PRIVILEGES ON *.* TO travis@$(hostname --ip-address) IDENTIFIED BY ''"
|
Codegyre/RoboCI
|
recipes/mysql/link.sh
|
Shell
|
mit
| 293 |
/*
* Copyright 2011-2014 Branimir Karadzic. All rights reserved.
* License: http://www.opensource.org/licenses/BSD-2-Clause
*/
#include <bgfx_shader.sh>
#include "shaderlib.sh"
|
tangrams/bgfxVectorTile
|
include/common/common.sh
|
Shell
|
mit
| 181 |
#!/bin/bash
rm -rf node_modules/restful-redux/lib/*
cd ../..
npm run build
cp -rf lib examples/02-github-project-search/node_modules/restful-redux
|
jhudson8/react-redux-model
|
examples/02-github-project-search/local-copy.sh
|
Shell
|
mit
| 148 |
#!/bin/bash
# Start GPIO inputs
sudo /home/pi/installed/Adafruit-Retrogame/retrogame &
# Go to project
cd /home/pi/installed/determined-dill/;
# Check if internet is connected
wget -q --tries=10 --timeout=20 --spider http://google.com > /dev/null
if [[ $? -eq 0 ]]; then
git pull origin master;
npm install;
else
echo "Offline";
fi
# Start HTTP server
forever start ./node_modules/http-server/bin/http-server $(pwd) -p 8080;
sleep 5;
# Some serious hackery to get rid of the "Restore Session" warning
# if things are exited gracefully
# https://groups.google.com/a/chromium.org/forum/#!topic/chromium-reviews/HdvP8PttOLM
sed -i 's/"exited_cleanly": false/"exited_cleanly": true/' \
~/.config/chromium/Default/Preferences;
# Start chromium
chromium --kiosk --allow-file-access-from-files --disable-java --disable-restore-session-state --disable-sync --disable-translate --ignore-certificate-errors http://127.0.0.1:8080/;
|
zzolo/determined-dill
|
raspberry-pi/startup.sh
|
Shell
|
mit
| 935 |
set -e
if [ $TRAVIS_PULL_REQUEST == 'false' ]
then
echo "Build $TRAVIS_JOB_NUMBER"
echo "Git: $TRAVIS_COMMIT [$TRAVIS_BRANCH]"
echo "Root dir: $TRAVIS_BUILD_DIR"
cd $TRAVIS_BUILD_DIR
echo "Build promoted."
|
ObeoNetwork/Capella-Extensions
|
travis-promote.sh
|
Shell
|
epl-1.0
| 206 |
#!/bin/bash
echo "[`date '+%Y/%m/%d %H:%M:%S'`] Wait for the completion of the job."
# echo "check reboot flag file: '$1' ..."
while [ -e "$1" ]; do
sleep 1
done
echo "[`date '+%Y/%m/%d %H:%M:%S'`] Start the reboot."
sync
sync
sync
/sbin/shutdown -r now
|
mastering-jaz/jobarranger
|
jaconf/extendedjob/jareboot.sh
|
Shell
|
gpl-2.0
| 260 |
#!/bin/sh
# Copyright (C) 2013,2014,2015 Curt Brune <[email protected]>
# Copyright (C) 2014,2015,2016 david_yang <[email protected]>
# Copyright (C) 2014 Mandeep Sandhu <[email protected]>
#
# SPDX-License-Identifier: GPL-2.0
#
# Script to create an ONIE binary installer, suitable for downloading
# to a running ONIE system during "update" mode.
#
arch=$1
machine_dir=$2
machine_conf=$3
installer_dir=$4
output_file=$5
shift 5
[ -d "$machine_dir" ] || {
echo "ERROR: machine directory '$machine_dir' does not exist."
exit 1
}
if [ "$arch" = "x86_64" ] ; then
# installer_conf is required for x86_64
installer_conf="${machine_dir}/installer.conf"
[ -r "$installer_conf" ] || {
echo "ERROR: unable to read machine installer file: $installer_conf"
exit 1
}
fi
[ -r "$machine_conf" ] || {
echo "ERROR: unable to read machine configuration file: $machine_conf"
exit 1
}
[ -d "$installer_dir" ] || {
echo "ERROR: installer directory does not exist: $installer_dir"
exit 1
}
if [ "$arch" = "powerpc-softfloat" -o "$arch" = "armv7a" ] ; then
# Both of these architectures share common installer code as they
# are both based on u-boot.
arch_dir="u-boot-arch"
else
arch_dir="$arch"
fi
[ -d "$installer_dir/$arch_dir" ] || {
echo "ERROR: arch specific installer directory does not exist: $installer_dir/$arch"
exit 1
}
touch $output_file || {
echo "ERROR: unable to create output file: $output_file"
exit 1
}
rm -f $output_file
[ $# -gt 0 ] || {
echo "Error: No ONIE update image files found"
exit 1
}
tmp_dir=
clean_up()
{
rm -rf $tmp_dir
}
trap clean_up EXIT
# make the data archive
# contents:
# - OS image files
# - $machine_conf
echo -n "Building self-extracting ONIE installer image ."
tmp_dir=$(mktemp --directory)
tmp_installdir="$tmp_dir/installer"
mkdir $tmp_installdir || exit 1
tmp_tardir="$tmp_dir/tar"
mkdir $tmp_tardir || exit 1
for f in $* ; do
cp -rL "$f" $tmp_tardir || exit 1
echo -n "."
done
# Bundle data into a tar file
tar -C $tmp_tardir -cJf $tmp_installdir/onie-update.tar.xz $(ls $tmp_tardir) || exit 1
echo -n "."
cp $installer_dir/install.sh $tmp_installdir || exit 1
echo -n "."
cp -r $installer_dir/$arch_dir/* $tmp_installdir
[ -r $machine_dir/installer/install-platform ] && {
cp $machine_dir/installer/install-platform $tmp_installdir
}
# Massage install-arch
if [ "$arch_dir" = "u-boot-arch" ] ; then
sed -e "s/%%UPDATER_UBOOT_NAME%%/$UPDATER_UBOOT_NAME/" \
-i $tmp_installdir/install-arch
fi
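# Example of the substitution above: with UPDATER_UBOOT_NAME=u-boot.bin in
# the build environment (illustrative value), every %%UPDATER_UBOOT_NAME%%
# token in install-arch becomes u-boot.bin.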
echo -n "."
# Add optional installer configuration files
if [ "$arch" = "x86_64" ] ; then
cp "$installer_conf" $tmp_installdir || exit 1
echo -n "."
if [ "$SERIAL_CONSOLE_ENABLE" = "yes" ] ; then
DEFAULT_GRUB_SERIAL_COMMAND="serial --port=$CONSOLE_PORT --speed=$CONSOLE_SPEED --word=8 --parity=no --stop=1"
DEFAULT_GRUB_CMDLINE_LINUX="console=tty0 console=ttyS${CONSOLE_DEV},${CONSOLE_SPEED}n8"
DEFAULT_GRUB_TERMINAL_INPUT="serial"
DEFAULT_GRUB_TERMINAL_OUTPUT="serial"
else
DEFAULT_GRUB_SERIAL_COMMAND=""
DEFAULT_GRUB_CMDLINE_LINUX=""
DEFAULT_GRUB_TERMINAL_INPUT="console"
DEFAULT_GRUB_TERMINAL_OUTPUT="console"
fi
GRUB_DEFAULT_CONF="$tmp_installdir/grub/grub-variables"
cat <<EOF >> $GRUB_DEFAULT_CONF
## Begin grub-variables
# default variables
DEFAULT_GRUB_SERIAL_COMMAND="$DEFAULT_GRUB_SERIAL_COMMAND"
DEFAULT_GRUB_CMDLINE_LINUX="$DEFAULT_GRUB_CMDLINE_LINUX"
DEFAULT_GRUB_TERMINAL_INPUT="$DEFAULT_GRUB_TERMINAL_INPUT"
DEFAULT_GRUB_TERMINAL_OUTPUT="$DEFAULT_GRUB_TERMINAL_OUTPUT"
# overridden if they have been defined in the environment
GRUB_SERIAL_COMMAND=\${GRUB_SERIAL_COMMAND:-"\$DEFAULT_GRUB_SERIAL_COMMAND"}
GRUB_TERMINAL_INPUT=\${GRUB_TERMINAL_INPUT:-"\$DEFAULT_GRUB_TERMINAL_INPUT"}
GRUB_TERMINAL_OUTPUT=\${GRUB_TERMINAL_OUTPUT:-"\$DEFAULT_GRUB_TERMINAL_OUTPUT"}
GRUB_CMDLINE_LINUX=\${GRUB_CMDLINE_LINUX:-"\$DEFAULT_GRUB_CMDLINE_LINUX"}
export GRUB_SERIAL_COMMAND
export GRUB_TERMINAL_INPUT
export GRUB_TERMINAL_OUTPUT
export GRUB_CMDLINE_LINUX
# variables for ONIE itself
GRUB_ONIE_SERIAL_COMMAND=\$GRUB_SERIAL_COMMAND
export GRUB_ONIE_SERIAL_COMMAND
## End grub-variables
EOF
echo -n "."
GRUB_MACHINE_CONF="$tmp_installdir/grub/grub-machine.cfg"
echo "## Begin grub-machine.cfg" > $GRUB_MACHINE_CONF
# make sure each var is 'exported' for GRUB shell
sed -e 's/\(.*\)=\(.*$\)/\1=\2\nexport \1/' $machine_conf >> $GRUB_MACHINE_CONF
echo "## End grub-machine.cfg" >> $GRUB_MACHINE_CONF
echo -n "."
GRUB_EXTRA_CMDLINE_CONF="$tmp_installdir/grub/grub-extra.cfg"
echo "## Begin grub-extra.cfg" > $GRUB_EXTRA_CMDLINE_CONF
echo "ONIE_EXTRA_CMDLINE_LINUX=\"$EXTRA_CMDLINE_LINUX\"" >> $GRUB_EXTRA_CMDLINE_CONF
echo "export ONIE_EXTRA_CMDLINE_LINUX" >> $GRUB_EXTRA_CMDLINE_CONF
echo "## End grub-extra.cfg" >> $GRUB_EXTRA_CMDLINE_CONF
echo -n "."
fi
sed -e 's/onie_/image_/' $machine_conf > $tmp_installdir/machine.conf || exit 1
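# e.g. an illustrative "onie_platform=x86_64-kvm" becomes "image_platform=x86_64-kvm".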
echo -n "."
sharch="$tmp_dir/sharch.tar"
tar -C $tmp_dir -cf $sharch installer || {
echo "Error: Problems creating $sharch archive"
exit 1
}
echo -n "."
[ -f "$sharch" ] || {
echo "Error: $sharch not found"
exit 1
}
sha1=$(cat $sharch | sha1sum | awk '{print $1}')
echo -n "."
cp $installer_dir/sharch_body.sh $output_file || {
echo "Error: Problems copying sharch_body.sh"
exit 1
}
# Replace variables in the sharch template
sed -i -e "s/%%IMAGE_SHA1%%/$sha1/" $output_file
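# sharch_body.sh is expected to recompute the sha1 of the appended payload at
# extraction time and compare it against this embedded value (assumption based
# on the template variable; see sharch_body.sh for the authoritative check).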
echo -n "."
cat $sharch >> $output_file
rm -rf $tmp_dir
echo " Done."
echo "Success: ONIE install image is ready in ${output_file}:"
ls -l ${output_file}
|
shengzhou/onie
|
build-config/scripts/onie-mk-installer.sh
|
Shell
|
gpl-2.0
| 5,730 |
PACKAGE_NAME="gtkglext"
PACKAGE_VERNAME="gtkglext-1.2.0"
PACKAGE_ARCHIVE="${PACKAGE_VERNAME}.tar.bz2"
PACKAGE_DIR="${PACKAGE_VERNAME}"
sgn_carefully sgn_untar_bz2
sgn_carefully sgn_builddir sgn_byuser ./configure --prefix="$SGN_PREFIX"
sgn_carefully sgn_builddir sgn_byuser make $SGN_MAKEFLAGS
sgn_carefully sgn_builddir sgn_make_install
sgn_carefully sgn_cleanup
sgn_finstat
|
erdizz/sovereign
|
depot/gtkglext/gtkglext-1.2.0.sh
|
Shell
|
gpl-2.0
| 381 |
#!/bin/bash
set -e
set -o errexit
for i in *.minion; do
valgrind --error-exitcode=1 --leak-check=full $* -notimers $i -nodelimit 10
done
|
LeslieW/minion
|
test_instances/do_valgrind_tests.sh
|
Shell
|
gpl-2.0
| 139 |
#! /usr/bin/env bash
$EXTRACTRC *.ui >> rc.cpp
$XGETTEXT *.cpp -o $podir/plasma_applet_gasCalculator.pot
|
Anumittal/Kalzium
|
plasmoid/applet/gasPlasmoid/Messages.sh
|
Shell
|
gpl-2.0
| 105 |
#!/bin/sh
VERSION_FILE=$1
if [ $# -ne 2 ]; then
echo "Usage: $0 <filename> <version>"
exit 1
fi
CURR_VERSION=$2
FILE_VERSION=
GIT_VERSION=
if test -f ${VERSION_FILE}; then
FILE_VERSION=$(cat ${VERSION_FILE} 2>/dev/null | cut -d'"' -f2)
fi
if test -d .git -a -n "`git --version 2>/dev/null`"; then
# update current version using git tags
GIT_VERSION=`git describe --tags --abbrev=4 --match="v[0-9].[0-9]*" 2>/dev/null`
CURR_VERSION=${GIT_VERSION}
fi
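# A successful describe yields something like "v0.9.3-14-g1a2b" (illustrative):
# the latest v-prefixed tag, the commit count since it, and an abbreviated hash.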
if test -z "${GIT_VERSION}" -a -n "${FILE_VERSION}"; then
    # do not update file version if git version is not available
exit 0
fi
if test -z "${FILE_VERSION}" -o "${CURR_VERSION}" != "${FILE_VERSION}"; then
# update file version only if it's different
echo "#define UFTRACE_VERSION \"${CURR_VERSION}\"" > ${VERSION_FILE}
echo " GEN " ${VERSION_FILE#${objdir}/}
exit 0
fi
|
JIMyungSik/uftrace
|
misc/version.sh
|
Shell
|
gpl-2.0
| 879 |
#!/bin/sh
. /etc/profile
trap "" HUP
clear
exec /usr/bin/gmenu2x.bin
|
anarsoul/gmenu2x-zipit
|
data/platform/zipitz2/gmenu2x.sh
|
Shell
|
gpl-2.0
| 75 |
#!/bin/bash
#
# Run libardour test suite.
#
if [ ! -f './tempo.cc' ]; then
echo "This script must be run from within the libs/ardour directory";
exit 1;
fi
cd ../..
top=`pwd`
cd build
libs='libs'
export LD_LIBRARY_PATH=$libs/audiographer:$libs/vamp-sdk:$libs/surfaces:$libs/surfaces/control_protocol:$libs/ardour:$libs/midi++2:$libs/pbd:$libs/rubberband:$libs/soundtouch:$libs/gtkmm2ext:$libs/appleutility:$libs/taglib:$libs/evoral:$libs/evoral/src/libsmf:$libs/timecode:/usr/local/lib:/usr/local/lib64:$LD_LIBRARY_PATH
export ARDOUR_CONFIG_PATH=$top:$top/gtk2_ardour:$libs/..:$libs/../gtk2_ardour
export ARDOUR_PANNER_PATH=$libs/panners/2in2out:$libs/panners/1in2out:$libs/panners/vbap
export ARDOUR_SURFACES_PATH=$libs/surfaces/osc:$libs/surfaces/generic_midi:$libs/surfaces/tranzport:$libs/surfaces/powermate:$libs/surfaces/mackie
export ARDOUR_MCP_PATH="../mcp"
export ARDOUR_DLL_PATH=$libs
export ARDOUR_DATA_PATH=$top/gtk2_ardour:$top/build/gtk2_ardour:.
if [ "$1" == "--debug" ]; then
gdb ./libs/ardour/run-tests
elif [ "$1" == "--valgrind" ]; then
valgrind ./libs/ardour/run-tests
else
  ./libs/ardour/run-tests "$@"
fi
|
cth103/ardour-cth103
|
libs/ardour/run-tests.sh
|
Shell
|
gpl-2.0
| 1,162 |
#! /bin/sh -x
aclocal -I m4
autoheader
autoconf
libtoolize --automake
automake -a
|
premutos/gteditor
|
autogen.sh
|
Shell
|
gpl-3.0
| 83 |
#!/bin/bash
cd `dirname $0`
z=$(basename $(pwd))
while true
do
z2="$z.glade"
if [ -f "$z2" ]
then
break
fi
z2="$z.ui"
if [ -f "$z2" ]
then
break
fi
z2="ui.glade"
if [ -f "$z2" ]
then
break
fi
exit
done
../../new_o/gtkmmsh $z2
|
zzzzzzzzzzz0/zhscript
|
app/huitu/z.sh
|
Shell
|
gpl-3.0
| 245 |
#!/bin/sh
# Verify that an erroneous use of sed -i no longer leaves behind
# a temporary file.
# Copyright (C) 2015-2022 Free Software Foundation, Inc.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
. "${srcdir=.}/testsuite/init.sh"; path_prepend_ ./sed
print_ver_ sed
# The input file must have at least one line.
echo > in || framework_failure_
printf 'sed: -e expression #1, char 0: no previous regular expression\n' \
> exp || framework_failure_
# Before sed-4.3, this would create a file named sed??????
returns_ 1 sed -i s//b/ in > out 2> err || fail=1
compare /dev/null out || fail=1
compare exp err || fail=1
# Ensure that no other file has been created in this directory.
files=$(echo *)
test "$files" = "err exp in out" || fail=1
Exit $fail
|
agordon/sed
|
testsuite/temp-file-cleanup.sh
|
Shell
|
gpl-3.0
| 1,346 |
#!/bin/sh
################
# example that runs psisearch2_msa.pl through 5 iterations using psiblast instead of ssearch
# Equivalent to:
# psisearch2_msa.pl --pgm psiblast --query query.aa --num_iter 5 --db /slib2/bl_dbs/qfo78
#
PS_BIN=~/Devel/fa36_v3.8/psisearch2
q_file=$1
db=/slib2/bl_dbs/pir1
pssm_eval=1e-10
m_format='m8CB'
SRC_QDIR=../hum_1dom200_queries
iters='2 3 4 5'
# iters=''
for q_file_p in "$@"; do
q_file=${q_file_p##*/}
echo $q_file
# iteration 1:
# echo "$PS_BIN/psisearch2_msa.pl --pgm psiblast --query $SRC_QDIR/$q_file --num_iter 1 --db $db --int_mask query --end_mask query --out_suffix q_pblt --m_format $m_format --save_list asnbin"
$PS_BIN/psisearch2_msa.pl --pgm psiblast --query $q_file --num_iter 1 --pssm_eval $pssm_eval --db $db --int_mask query --end_mask query --out_suffix q_pblt --m_format $m_format --save_list asntxt
# iteration 2 - 5
for it in $iters; do
prev=$(($it-1))
$PS_BIN/psisearch2_msa.pl --pgm psiblast --query $q_file --num_iter 1 --pssm_eval $pssm_eval --db $db --int_mask query --end_mask query --out_suffix q_pblt --this_iter $it --prev_m89res $q_file.it${prev}.q_pblt --m_format $m_format --save_list asntxt
done
done
|
uwbmrb/BMRB-API
|
server/wsgi/bmrbapi/submodules/fasta36/psisearch2/psisearch2_msa_iter_bl.sh
|
Shell
|
gpl-3.0
| 1,220 |
#!/bin/sh
# Check that the two places with family enums are in good condition.
# The next-best thing to the compiler enforcing it.
fam=`cut -d, -f3 include/r600_pci_ids.h | cut -d\) -f1 | sed -e 's@^ *@@' -e '/^$/d' | uniq`
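# e.g. a hypothetical header line "CHIPSET(0x9400, R600_9400, R600)" yields
# "R600": field 3 split on commas is " R600)", then ")" and leading blanks go.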
for i in $fam; do
grep -q $i family_str.c || echo $i missing from family_str.c
grep -q $i include/radeontop.h || echo $i missing from include/radeontop.h
done
|
clbr/radeontop
|
familycheck.sh
|
Shell
|
gpl-3.0
| 387 |
#! /bin/sh
# Copyright (C) 2011 Red Hat, Inc.
# This file is part of elfutils.
#
# This file is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# elfutils is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
. $srcdir/test-subr.sh
# testfile52.c:
# #include <stdlib.h>
# int foo() { exit(0); }
#
# gcc -m32 -g -shared testfile52-32.c -o testfile52-32.so
# eu-strip -f testfile52-32.so.debug testfile52-32.so
# cp testfile52-32.so testfile52-32.prelink.so
# prelink -N testfile52-32.prelink.so
# cp testfile52-32.so testfile52-32.noshdrs.so
# prelink -r 0x42000000 testfile52-32.noshdrs.so
# eu-strip --remove-comment --strip-sections testfile52-32.noshdrs.so
testfiles testfile52-32.so testfile52-32.so.debug
testfiles testfile52-32.prelink.so testfile52-32.noshdrs.so
tempfiles testmaps52-32 testfile52-32.noshdrs.so.debug
ln -s testfile52-32.so.debug testfile52-32.noshdrs.so.debug
cat > testmaps52-32 <<EOF
00111000-00112000 r-xp 00000000 fd:01 1 `pwd`/testfile52-32.so
00112000-00113000 rw-p 00000000 fd:01 1 `pwd`/testfile52-32.so
41000000-41001000 r-xp 00000000 fd:01 2 `pwd`/testfile52-32.prelink.so
41001000-41002000 rw-p 00000000 fd:01 2 `pwd`/testfile52-32.prelink.so
42000000-42001000 r-xp 00000000 fd:01 3 `pwd`/testfile52-32.noshdrs.so
42001000-42002000 rw-p 00000000 fd:01 3 `pwd`/testfile52-32.noshdrs.so
EOF
# Prior to commit 1743d7f, libdwfl would fail on the second address,
# because it didn't notice that prelink added a 0x20-byte offset from
# what the .debug file reports.
testrun_compare ../src/addr2line -S -M testmaps52-32 \
0x11140c 0x4100042d 0x4200040e <<\EOF
foo
/home/jistone/src/elfutils/tests/testfile52-32.c:2
foo+0x1
/home/jistone/src/elfutils/tests/testfile52-32.c:2
foo+0x2
/home/jistone/src/elfutils/tests/testfile52-32.c:2
EOF
# Repeat testfile52 for -m64. The particular REL>RELA issue doesn't exist, but
# we'll make sure the rest works anyway.
testfiles testfile52-64.so testfile52-64.so.debug
testfiles testfile52-64.prelink.so testfile52-64.noshdrs.so
tempfiles testmaps52-64 testfile52-64.noshdrs.so.debug
ln -s testfile52-64.so.debug testfile52-64.noshdrs.so.debug
cat > testmaps52-64 <<EOF
1000000000-1000001000 r-xp 00000000 fd:11 1 `pwd`/testfile52-64.so
1000001000-1000200000 ---p 00001000 fd:11 1 `pwd`/testfile52-64.so
1000200000-1000201000 rw-p 00000000 fd:11 1 `pwd`/testfile52-64.so
3000000000-3000001000 r-xp 00000000 fd:11 2 `pwd`/testfile52-64.prelink.so
3000001000-3000200000 ---p 00001000 fd:11 2 `pwd`/testfile52-64.prelink.so
3000200000-3000201000 rw-p 00000000 fd:11 2 `pwd`/testfile52-64.prelink.so
3800000000-3800001000 r-xp 00000000 fd:11 3 `pwd`/testfile52-64.noshdrs.so
3800001000-3800200000 ---p 00001000 fd:11 3 `pwd`/testfile52-64.noshdrs.so
3800200000-3800201000 rw-p 00000000 fd:11 3 `pwd`/testfile52-64.noshdrs.so
EOF
testrun_compare ../src/addr2line -S -M testmaps52-64 \
0x100000056c 0x300000056d 0x380000056e <<\EOF
foo
/home/jistone/src/elfutils/tests/testfile52-64.c:2
foo+0x1
/home/jistone/src/elfutils/tests/testfile52-64.c:2
foo+0x2
/home/jistone/src/elfutils/tests/testfile52-64.c:2
EOF
# testfile53.c:
# char foo[0x1000];
# int main() { return 0; }
#
# gcc -m32 -g testfile53-32.c -o testfile53-32
# eu-strip -f testfile53-32.debug testfile53-32
# cp testfile53-32 testfile53-32.prelink
# prelink -N testfile53-32.prelink
testfiles testfile53-32 testfile53-32.debug testfile53-32.prelink
testrun_compare ../src/addr2line -S -e testfile53-32 0x8048394 0x8048395 <<\EOF
main
/home/jistone/src/elfutils/tests/testfile53-32.c:2
main+0x1
/home/jistone/src/elfutils/tests/testfile53-32.c:2
EOF
# prelink shuffled some of the sections, but .text is in the same place.
testrun_compare ../src/addr2line -S -e testfile53-32.prelink 0x8048396 0x8048397 <<\EOF
main+0x2
/home/jistone/src/elfutils/tests/testfile53-32.c:2
main+0x3
/home/jistone/src/elfutils/tests/testfile53-32.c:2
EOF
# Repeat testfile53 in 64-bit, except use foo[0x800] to achieve the same
# prelink section shuffling.
testfiles testfile53-64 testfile53-64.debug testfile53-64.prelink
testrun_compare ../src/addr2line -S -e testfile53-64 0x400474 0x400475 <<\EOF
main
/home/jistone/src/elfutils/tests/testfile53-64.c:2
main+0x1
/home/jistone/src/elfutils/tests/testfile53-64.c:2
EOF
testrun_compare ../src/addr2line -S -e testfile53-64.prelink 0x400476 0x400477 <<\EOF
main+0x2
/home/jistone/src/elfutils/tests/testfile53-64.c:2
main+0x3
/home/jistone/src/elfutils/tests/testfile53-64.c:2
EOF
# testfile54.c:
# extern void * stdin;
# static void * pstdin = &stdin;
# void * const foo = &pstdin;
#
# gcc -m32 -g -shared -nostartfiles testfile54-32.c -o testfile54-32.so
# eu-strip -f testfile54-32.so.debug testfile54-32.so
# cp testfile54-32.so testfile54-32.prelink.so
# prelink -N testfile54-32.prelink.so
# cp testfile54-32.so testfile54-32.noshdrs.so
# prelink -r 0x42000000 testfile54-32.noshdrs.so
# eu-strip --remove-comment --strip-sections testfile54-32.noshdrs.so
testfiles testfile54-32.so testfile54-32.so.debug
testfiles testfile54-32.prelink.so testfile54-32.noshdrs.so
tempfiles testmaps54-32
# Note we have no testfile54-32.noshdrs.so.debug link here, so
# this is testing finding the symbols in .dynsym via PT_DYNAMIC.
cat > testmaps54-32 <<EOF
00111000-00112000 r--p 00000000 fd:01 1 `pwd`/testfile54-32.so
00112000-00113000 rw-p 00000000 fd:01 1 `pwd`/testfile54-32.so
41000000-41001000 r--p 00000000 fd:01 2 `pwd`/testfile54-32.prelink.so
41001000-41002000 rw-p 00000000 fd:01 2 `pwd`/testfile54-32.prelink.so
42000000-42001000 r--p 00000000 fd:01 3 `pwd`/testfile54-32.noshdrs.so
42001000-42002000 rw-p 00000000 fd:01 3 `pwd`/testfile54-32.noshdrs.so
EOF
testrun_compare ../src/addr2line -S -M testmaps54-32 \
0x1111fc 0x1122a4 0x410001fd 0x410012a5 0x420001fe <<\EOF
foo
??:0
pstdin
??:0
foo+0x1
??:0
pstdin+0x1
??:0
foo+0x2
??:0
EOF
# Repeat testfile54 in 64-bit
testfiles testfile54-64.so testfile54-64.so.debug
testfiles testfile54-64.prelink.so testfile54-64.noshdrs.so
tempfiles testmaps54-64
# Note we have no testfile54-64.noshdrs.so.debug link here, so
# this is testing finding the symbols in .dynsym via PT_DYNAMIC.
cat > testmaps54-64 <<EOF
1000000000-1000001000 r--p 00000000 fd:11 1 `pwd`/testfile54-64.so
1000001000-1000200000 ---p 00001000 fd:11 1 `pwd`/testfile54-64.so
1000200000-1000201000 rw-p 00000000 fd:11 1 `pwd`/testfile54-64.so
3000000000-3000001000 r--p 00000000 fd:11 2 `pwd`/testfile54-64.prelink.so
3000001000-3000200000 ---p 00001000 fd:11 2 `pwd`/testfile54-64.prelink.so
3000200000-3000201000 rw-p 00000000 fd:11 2 `pwd`/testfile54-64.prelink.so
3800000000-3800001000 r--p 00000000 fd:11 3 `pwd`/testfile54-64.noshdrs.so
3800001000-3800200000 ---p 00001000 fd:11 3 `pwd`/testfile54-64.noshdrs.so
3800200000-3800201000 rw-p 00000000 fd:11 3 `pwd`/testfile54-64.noshdrs.so
EOF
testrun_compare ../src/addr2line -S -M testmaps54-64 \
0x10000002f8 0x1000200448 0x30000002f9 0x3000200449 0x38000002fa <<\EOF
foo
??:0
pstdin
??:0
foo+0x1
??:0
pstdin+0x1
??:0
foo+0x2
??:0
EOF
# testfile55.c:
# extern void *stdin;
# int main() { return !stdin; }
#
# gcc -m32 -g testfile55-32.c -o testfile55-32
# eu-strip -f testfile55-32.debug testfile55-32
# cp testfile55-32 testfile55-32.prelink
# prelink -N testfile55-32.prelink
testfiles testfile55-32 testfile55-32.debug testfile55-32.prelink
testrun_compare ../src/addr2line -S -e testfile55-32 0x80483b4 0x80483b5 <<\EOF
main
/home/jistone/src/elfutils/tests/testfile55-32.c:2
main+0x1
/home/jistone/src/elfutils/tests/testfile55-32.c:2
EOF
# prelink splits .bss into .dynbss+.bss, so the start of .bss changes, but the
# total size remains the same, and .text doesn't move at all.
testrun_compare ../src/addr2line -S -e testfile55-32.prelink 0x80483b6 0x80483b7 <<\EOF
main+0x2
/home/jistone/src/elfutils/tests/testfile55-32.c:2
main+0x3
/home/jistone/src/elfutils/tests/testfile55-32.c:2
EOF
# Repeat testfile55 in 64-bit
testfiles testfile55-64 testfile55-64.debug testfile55-64.prelink
testrun_compare ../src/addr2line -S -e testfile55-64 0x4004b4 0x4004b5 <<\EOF
main
/home/jistone/src/elfutils/tests/testfile55-64.c:2
main+0x1
/home/jistone/src/elfutils/tests/testfile55-64.c:2
EOF
testrun_compare ../src/addr2line -S -e testfile55-64.prelink 0x4004b6 0x4004b7 <<\EOF
main+0x2
/home/jistone/src/elfutils/tests/testfile55-64.c:2
main+0x3
/home/jistone/src/elfutils/tests/testfile55-64.c:2
EOF
|
groleo/elfutils
|
tests/run-prelink-addr-test.sh
|
Shell
|
gpl-3.0
| 8,895 |
#!/bin/bash
rev1=$1;
rev2=$2;
get_revision(){
if [ ! -d "bem-xjst-$1" ]; then
curl https://codeload.github.com/bem/bem-xjst/zip/$1 > $1.zip && unzip $1.zip && rm $1.zip
cd bem-xjst-$1 && npm i && npm run make && cd ../
fi
}
get_revision "$rev1"
get_revision "$rev2"
|
awinogradov/bem-xjst
|
bench/prepare.sh
|
Shell
|
mpl-2.0
| 293 |
#!/bin/bash
cp -v ${GROUP}/060-duplicate.in . || exit 1
exit 0
|
swift-lang/swift-k
|
tests/language-behaviour/IO/060-duplicate.setup.sh
|
Shell
|
apache-2.0
| 65 |
#!/bin/sh
set -e
mkdir -p "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
RESOURCES_TO_COPY=${PODS_ROOT}/resources-to-copy-${TARGETNAME}.txt
> "$RESOURCES_TO_COPY"
XCASSET_FILES=""
install_resource()
{
case $1 in
*.storyboard)
echo "ibtool --reference-external-strings-file --errors --warnings --notices --output-format human-readable-text --compile ${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$1\" .storyboard`.storyboardc ${PODS_ROOT}/$1 --sdk ${SDKROOT}"
ibtool --reference-external-strings-file --errors --warnings --notices --output-format human-readable-text --compile "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$1\" .storyboard`.storyboardc" "${PODS_ROOT}/$1" --sdk "${SDKROOT}"
;;
*.xib)
echo "ibtool --reference-external-strings-file --errors --warnings --notices --output-format human-readable-text --compile ${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$1\" .xib`.nib ${PODS_ROOT}/$1 --sdk ${SDKROOT}"
ibtool --reference-external-strings-file --errors --warnings --notices --output-format human-readable-text --compile "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$1\" .xib`.nib" "${PODS_ROOT}/$1" --sdk "${SDKROOT}"
;;
*.framework)
echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
echo "rsync -av ${PODS_ROOT}/$1 ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
rsync -av "${PODS_ROOT}/$1" "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
;;
*.xcdatamodel)
echo "xcrun momc \"${PODS_ROOT}/$1\" \"${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$1"`.mom\""
xcrun momc "${PODS_ROOT}/$1" "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$1" .xcdatamodel`.mom"
;;
*.xcdatamodeld)
echo "xcrun momc \"${PODS_ROOT}/$1\" \"${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$1" .xcdatamodeld`.momd\""
xcrun momc "${PODS_ROOT}/$1" "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$1" .xcdatamodeld`.momd"
;;
*.xcmappingmodel)
echo "xcrun mapc \"${PODS_ROOT}/$1\" \"${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$1" .xcmappingmodel`.cdm\""
xcrun mapc "${PODS_ROOT}/$1" "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$1" .xcmappingmodel`.cdm"
;;
*.xcassets)
XCASSET_FILES="$XCASSET_FILES '$1'"
;;
/*)
echo "$1"
echo "$1" >> "$RESOURCES_TO_COPY"
;;
*)
echo "${PODS_ROOT}/$1"
echo "${PODS_ROOT}/$1" >> "$RESOURCES_TO_COPY"
;;
esac
}
if [[ "$CONFIGURATION" == "Debug" ]]; then
install_resource "FontAwesome+iOS/Resources/FontAwesome.ttf"
install_resource "FontAwesomeKit/FontAwesomeKit/FontAwesome.otf"
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
install_resource "FontAwesome+iOS/Resources/FontAwesome.ttf"
install_resource "FontAwesomeKit/FontAwesomeKit/FontAwesome.otf"
fi
rsync -avr --copy-links --no-relative --exclude '*/.svn/*' --files-from="$RESOURCES_TO_COPY" / "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
if [[ "${ACTION}" == "install" ]]; then
rsync -avr --copy-links --no-relative --exclude '*/.svn/*' --files-from="$RESOURCES_TO_COPY" / "${INSTALL_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
fi
rm -f "$RESOURCES_TO_COPY"
if [[ -n "${WRAPPER_EXTENSION}" ]] && [ "`xcrun --find actool`" ] && [ -n $XCASSET_FILES ]
then
case "${TARGETED_DEVICE_FAMILY}" in
1,2)
TARGET_DEVICE_ARGS="--target-device ipad --target-device iphone"
;;
1)
TARGET_DEVICE_ARGS="--target-device iphone"
;;
2)
TARGET_DEVICE_ARGS="--target-device ipad"
;;
*)
TARGET_DEVICE_ARGS="--target-device mac"
;;
esac
echo $XCASSET_FILES | xargs actool --output-format human-readable-text --notices --warnings --platform "${PLATFORM_NAME}" --minimum-deployment-target "${IPHONEOS_DEPLOYMENT_TARGET}" ${TARGET_DEVICE_ARGS} --compress-pngs --compile "${BUILT_PRODUCTS_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
fi
|
GoldyMark/iOS-app-demo
|
appDemo/Pods/Target Support Files/Pods/Pods-resources.sh
|
Shell
|
apache-2.0
| 4,308 |
#!/bin/bash -e
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o pipefail
BASE_DIR="$TEST_SRCDIR/kythe/go/serving/tools/testdata"
OUT_DIR="$TEST_TMPDIR"
TEST_ENTRIES="$TEST_SRCDIR/kythe/javatests/com/google/devtools/kythe/analyzers/java/testdata/pkg/generics_tests.entries"
source "kythe/cxx/common/testdata/start_http_service.sh"
jq () { "third_party/jq/jq" -e "$@" <<<"$JSON"; }
kwazthis() { "kythe/go/serving/tools/kwazthis" --ignore_local_repo --api "http://$LISTEN_AT" "$@"; }
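# jq -e exits non-zero when the filter result is false or null, so every
# assertion below aborts the test on failure (the shebang passes -e to bash).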
# Use a dedicated variable so we don't clobber $PATH for command lookup.
TEST_FILE_PATH=kythe/javatests/com/google/devtools/kythe/analyzers/java/testdata/pkg/Generics.java
JSON=$(kwazthis --path "$TEST_FILE_PATH" --offset 783)
jq --slurp 'length == 2'
jq --slurp '.[0].span.text == "Generics<String>"'
jq --slurp '.[1].span.text == "String"'
jq --slurp '.[].kind == "ref"'
jq --slurp '.[].node.ticket
and .[].node.ticket != ""'
jq --slurp '.[].node.kind
and .[].node.kind != ""'
JSON=$(kwazthis --path "$TEST_FILE_PATH" --offset 558)
jq --slurp 'length == 1'
jq '.kind == "defines"'
jq '.span.text == "g"'
jq '.span.start == 558'
jq '.span.end == 559'
jq '.node.ticket'
jq '.node.ticket != ""'
jq '.node.kind == "function"'
jq '(.node.names | length) == 1'
|
Acidburn0zzz/kythe
|
kythe/go/serving/tools/testdata/kwazthis_test.sh
|
Shell
|
apache-2.0
| 1,705 |
#!/bin/bash
# Copyright 2014 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Generates pod and secret to deploy origin against configured Kubernetes provider
set -o errexit
set -o nounset
set -o pipefail
ORIGIN=$(dirname "${BASH_SOURCE}")
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${KUBE_ROOT}/cluster/kubectl.sh" > /dev/null 2>&1
# Check all prerequisites are on the path
HAVE_JQ=$(which jq || true)  # || true so errexit doesn't kill us before the message below
if [[ -z ${HAVE_JQ} ]]; then
echo "Please install jq"
exit 1
fi
HAVE_BASE64=$(which base64 || true)
if [[ -z ${HAVE_BASE64} ]]; then
echo "Please install base64"
exit 1
fi
# Capture information about your kubernetes cluster
TEMPLATE="--template=\"{{ index . \"current-context\" }}\""
CURRENT_CONTEXT=$( "${kubectl}" "${config[@]:+${config[@]}}" config view -o template "${TEMPLATE}" )
TEMPLATE="--template=\"{{range .contexts}}{{ if eq .name ${CURRENT_CONTEXT} }}{{ .context.cluster }}{{end}}{{end}}\""
CURRENT_CLUSTER=$( "${kubectl}" "${config[@]:+${config[@]}}" config view -o template "${TEMPLATE}" )
TEMPLATE="--template=\"{{range .contexts}}{{ if eq .name ${CURRENT_CONTEXT} }}{{ .context.user }}{{end}}{{end}}\""
CURRENT_USER=$( "${kubectl}" "${config[@]:+${config[@]}}" config view -o template "${TEMPLATE}" )
TEMPLATE="--template=\"{{range .clusters}}{{ if eq .name ${CURRENT_CLUSTER} }}{{ index . \"cluster\" \"certificate-authority\" }}{{end}}{{end}}\""
CERTIFICATE_AUTHORITY=$( "${kubectl}" "${config[@]:+${config[@]}}" config view -o template "${TEMPLATE}" )
TEMPLATE="--template=\"{{range .clusters}}{{ if eq .name ${CURRENT_CLUSTER} }}{{ .cluster.server }}{{end}}{{end}}\""
KUBE_MASTER=$( "${kubectl}" "${config[@]:+${config[@]}}" config view -o template "${TEMPLATE}" )
TEMPLATE="--template=\"{{range .users}}{{ if eq .name ${CURRENT_USER} }}{{ index . \"user\" \"auth-path\" }}{{end}}{{end}}\""
AUTH_PATH=$( "${kubectl}" "${config[@]:+${config[@]}}" config view -o template "${TEMPLATE}" )
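# At this point the context, cluster, user, apiserver URL and auth-path file
# have been pulled out of the active kubeconfig; with the vagrant provider,
# KUBE_MASTER would look something like "https://10.245.1.2:443" (illustrative).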
# Build an auth_path file to embed as a secret
AUTH_PATH_DATA=$(cat ${AUTH_PATH} )
KUBE_USER=$( echo ${AUTH_PATH_DATA} | jq '.User' )
KUBE_PASSWORD=$( echo ${AUTH_PATH_DATA} | jq '.Password' )
KUBE_CERT_FILE=$( echo ${AUTH_PATH_DATA} | jq '.CertFile' )
KUBE_KEY_FILE=$( echo ${AUTH_PATH_DATA} | jq '.KeyFile' )
cat <<EOF >"${ORIGIN}/origin-auth-path"
{
"User": ${KUBE_USER},
"Password": ${KUBE_PASSWORD},
"CAFile": "/etc/secret-volume/kube-ca",
"CertFile": "/etc/secret-volume/kube-cert",
"KeyFile": "/etc/secret-volume/kube-key"
}
EOF
# Collect all the secrets and encode as base64
ORIGIN_KUBECONFIG_DATA=$( cat ${ORIGIN}/origin-kubeconfig.yaml | base64 --wrap=0)
ORIGIN_CERTIFICATE_AUTHORITY_DATA=$(cat ${CERTIFICATE_AUTHORITY} | base64 --wrap=0)
ORIGIN_AUTH_PATH_DATA=$(cat ${ORIGIN}/origin-auth-path | base64 --wrap=0)
ORIGIN_CERT_FILE=$( cat ${KUBE_CERT_FILE//\"/} | base64 --wrap=0)
ORIGIN_KEY_FILE=$( cat ${KUBE_KEY_FILE//\"/} | base64 --wrap=0)
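# To spot-check any of these encoded blobs by hand (sketch):
#   echo "${ORIGIN_AUTH_PATH_DATA}" | base64 --decode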
cat <<EOF >"${ORIGIN}/secret.json"
{
"apiVersion": "v1beta2",
"kind": "Secret",
"id": "kubernetes-secret",
"data": {
"kubeconfig": "${ORIGIN_KUBECONFIG_DATA}",
"kube-ca": "${ORIGIN_CERTIFICATE_AUTHORITY_DATA}",
"kube-auth-path": "${ORIGIN_AUTH_PATH_DATA}",
"kube-cert": "${ORIGIN_CERT_FILE}",
"kube-key": "${ORIGIN_KEY_FILE}"
}
}
EOF
echo "Generated Kubernetes Secret file: ${ORIGIN}/secret.json"
# Generate an OpenShift Origin pod
# TODO: In future, move this to a replication controller when we are not running etcd in container
cat <<EOF >"${ORIGIN}/pod.json"
{
"apiVersion": "v1beta1",
"id": "openshift",
"kind": "Pod",
"labels": {"name": "origin"},
"desiredState": {
"manifest": {
"containers": [
{
"command": [
"start",
"master",
"--kubernetes=${KUBE_MASTER}",
"--kubeconfig=/etc/secret-volume/kubeconfig",
"--public-kubernetes=https://10.245.1.3:8443",
"--public-master=https://10.245.1.3:8443",
],
"image": "openshift/origin:latest",
"imagePullPolicy": "PullIfNotPresent",
"name": "origin",
"ports": [
{
"name": "https-api",
"containerPort": 8443,
"hostPort": 8443,
},
{
"name": "https-ui",
"containerPort": 8444,
"hostPort": 8444,
}
],
"volumeMounts": [
{
"mountPath": "/etc/secret-volume",
"name": "secret-volume",
"readOnly": true
}
]
}
],
"restartPolicy": {
"never": {}
},
"version": "v1beta2",
"volumes": [
{
"name": "secret-volume",
"source": {
"secret": {
"target": {
"kind": "Secret",
"name": "kubernetes-secret",
"namespace": "default"
}
}
}
}
]
}
}
}
EOF
echo "Generated Kubernetes Pod file: ${ORIGIN}/pod.json"
cat <<EOF >"${ORIGIN}/api-service.json"
{
"apiVersion": "v1beta1",
"kind": "Service",
"id": "origin-api",
"port": 8443,
"containerPort": 8443,
"selector": { "name": "origin" },
}
EOF
echo "Generated Kubernetes Service file: ${ORIGIN}/api-service.json"
cat <<EOF >"${ORIGIN}/ui-service.json"
{
"apiVersion": "v1beta1",
"kind": "Service",
"id": "origin-ui",
"port": 8444,
"containerPort": 8444,
"selector": { "name": "origin" },
}
EOF
echo "Generated Kubernetes Service file: ${ORIGIN}/ui-service.json"
|
shakamunyi/kubernetes
|
examples/openshift-origin/resource-generator.sh
|
Shell
|
apache-2.0
| 6,045 |
#!/bin/bash
if [ ! -f "$1" ] || [ "" == "$2" ]; then
echo "Usage: $0 <cer file> <alias>"
exit 1
fi
mkdir -p target
keytool -importcert -noprompt -storepass jetty6 -file "${1}" -keystore target/jetty-ssl.keystore -alias "${2}"
|
FITeagle/ft1
|
delivery/interfaces/src/main/bin/ssl_add_user_cert.sh
|
Shell
|
apache-2.0
| 234 |
step="calico-service"
printf "Starting to run ${step}\n"
set -e
set +x
. /etc/sysconfig/heat-params
set -x
if [ "$NETWORK_DRIVER" = "calico" ]; then
_prefix=${CONTAINER_INFRA_PREFIX:-quay.io/calico/}
CALICO_DEPLOY=/srv/magnum/kubernetes/manifests/calico-deploy.yaml
[ -f ${CALICO_DEPLOY} ] || {
echo "Writing File: $CALICO_DEPLOY"
mkdir -p $(dirname ${CALICO_DEPLOY})
set +x
cat << EOF > ${CALICO_DEPLOY}
---
# Source: calico/templates/calico-config.yaml
# This ConfigMap is used to configure a self-hosted Calico installation.
kind: ConfigMap
apiVersion: v1
metadata:
name: calico-config
namespace: kube-system
data:
# Typha is disabled.
typha_service_name: "none"
# Configure the backend to use.
calico_backend: "bird"
# Configure the MTU to use for workload interfaces and tunnels.
# By default, MTU is auto-detected, and explicitly setting this field should not be required.
# You can override auto-detection by providing a non-zero value.
veth_mtu: "0"
# The CNI network configuration to install on each node. The special
# values in this config will be automatically populated.
cni_network_config: |-
{
"name": "k8s-pod-network",
"cniVersion": "0.3.1",
"plugins": [
{
"type": "calico",
"log_level": "info",
"log_file_path": "/var/log/calico/cni/cni.log",
"datastore_type": "kubernetes",
"nodename": "__KUBERNETES_NODE_NAME__",
"mtu": __CNI_MTU__,
"ipam": {
"type": "calico-ipam"
},
"policy": {
"type": "k8s"
},
"kubernetes": {
"kubeconfig": "__KUBECONFIG_FILEPATH__"
}
},
{
"type": "portmap",
"snat": true,
"capabilities": {"portMappings": true}
},
{
"type": "bandwidth",
"capabilities": {"bandwidth": true}
}
]
}
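# NOTE: the __KUBERNETES_NODE_NAME__, __CNI_MTU__ and __KUBECONFIG_FILEPATH__
# tokens above are placeholders; they are substituted on each node when the
# CNI config is installed there, not by this script.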
---
# Source: calico/templates/kdd-crds.yaml
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: bgpconfigurations.crd.projectcalico.org
spec:
group: crd.projectcalico.org
names:
kind: BGPConfiguration
listKind: BGPConfigurationList
plural: bgpconfigurations
singular: bgpconfiguration
scope: Cluster
versions:
- name: v1
schema:
openAPIV3Schema:
description: BGPConfiguration contains the configuration for any BGP routing.
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: BGPConfigurationSpec contains the values of the BGP configuration.
properties:
asNumber:
description: 'ASNumber is the default AS number used by a node. [Default:
64512]'
format: int32
type: integer
communities:
description: Communities is a list of BGP community values and their
arbitrary names for tagging routes.
items:
description: Community contains standard or large community value
and its name.
properties:
name:
description: Name given to community value.
type: string
value:
description: Value must be of format `aa:nn` or `aa:nn:mm`.
For standard community use `aa:nn` format, where `aa` and
`nn` are 16 bit number. For large community use `aa:nn:mm`
format, where `aa`, `nn` and `mm` are 32 bit number. Where,
`aa` is an AS Number, `nn` and `mm` are per-AS identifier.
pattern: ^(\d+):(\d+)$|^(\d+):(\d+):(\d+)$
type: string
type: object
type: array
listenPort:
description: ListenPort is the port where BGP protocol should listen.
Defaults to 179
maximum: 65535
minimum: 1
type: integer
logSeverityScreen:
description: 'LogSeverityScreen is the log severity above which logs
are sent to the stdout. [Default: INFO]'
type: string
nodeToNodeMeshEnabled:
description: 'NodeToNodeMeshEnabled sets whether full node to node
BGP mesh is enabled. [Default: true]'
type: boolean
prefixAdvertisements:
description: PrefixAdvertisements contains per-prefix advertisement
configuration.
items:
description: PrefixAdvertisement configures advertisement properties
for the specified CIDR.
properties:
cidr:
description: CIDR for which properties should be advertised.
type: string
communities:
description: Communities can be list of either community names
already defined in `Specs.Communities` or community value
of format `aa:nn` or `aa:nn:mm`. For standard community use
`aa:nn` format, where `aa` and `nn` are 16 bit number. For
large community use `aa:nn:mm` format, where `aa`, `nn` and
`mm` are 32 bit number. Where,`aa` is an AS Number, `nn` and
`mm` are per-AS identifier.
items:
type: string
type: array
type: object
type: array
serviceClusterIPs:
description: ServiceClusterIPs are the CIDR blocks from which service
cluster IPs are allocated. If specified, Calico will advertise these
blocks, as well as any cluster IPs within them.
items:
description: ServiceClusterIPBlock represents a single allowed ClusterIP
CIDR block.
properties:
cidr:
type: string
type: object
type: array
serviceExternalIPs:
description: ServiceExternalIPs are the CIDR blocks for Kubernetes
Service External IPs. Kubernetes Service ExternalIPs will only be
advertised if they are within one of these blocks.
items:
description: ServiceExternalIPBlock represents a single allowed
External IP CIDR block.
properties:
cidr:
type: string
type: object
type: array
serviceLoadBalancerIPs:
description: ServiceLoadBalancerIPs are the CIDR blocks for Kubernetes
Service LoadBalancer IPs. Kubernetes Service status.LoadBalancer.Ingress
IPs will only be advertised if they are within one of these blocks.
items:
description: ServiceLoadBalancerIPBlock represents a single allowed
LoadBalancer IP CIDR block.
properties:
cidr:
type: string
type: object
type: array
type: object
type: object
served: true
storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: bgppeers.crd.projectcalico.org
spec:
group: crd.projectcalico.org
names:
kind: BGPPeer
listKind: BGPPeerList
plural: bgppeers
singular: bgppeer
scope: Cluster
versions:
- name: v1
schema:
openAPIV3Schema:
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: BGPPeerSpec contains the specification for a BGPPeer resource.
properties:
asNumber:
description: The AS Number of the peer.
format: int32
type: integer
keepOriginalNextHop:
description: Option to keep the original nexthop field when routes
                are sent to a BGP Peer. Setting "true" configures the selected BGP
                Peer's node to use "next hop keep;" instead of the default "next
                hop self;" in the corresponding branch of the node's "bird.cfg".
type: boolean
maxRestartTime:
description: Time to allow for software restart. When specified,
this is configured as the graceful restart timeout. When not specified,
the BIRD default of 120s is used.
type: string
node:
description: The node name identifying the Calico node instance that
is targeted by this peer. If this is not set, and no nodeSelector
is specified, then this BGP peer selects all nodes in the cluster.
type: string
nodeSelector:
description: Selector for the nodes that should have this peering. When
this is set, the Node field must be empty.
type: string
password:
description: Optional BGP password for the peerings generated by this
BGPPeer resource.
properties:
secretKeyRef:
description: Selects a key of a secret in the node pod's namespace.
properties:
key:
description: The key of the secret to select from. Must be
a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must be
defined
type: boolean
required:
- key
type: object
type: object
peerIP:
description: The IP address of the peer followed by an optional port
number to peer with. If port number is given, format should be `[<IPv6>]:port`
or `<IPv4>:<port>` for IPv4. If optional port number is not set,
and this peer IP and ASNumber belongs to a calico/node with ListenPort
set in BGPConfiguration, then we use that port to peer.
type: string
peerSelector:
description: Selector for the remote nodes to peer with. When this
is set, the PeerIP and ASNumber fields must be empty. For each
peering between the local node and selected remote nodes, we configure
an IPv4 peering if both ends have NodeBGPSpec.IPv4Address specified,
and an IPv6 peering if both ends have NodeBGPSpec.IPv6Address specified. The
remote AS number comes from the remote node's NodeBGPSpec.ASNumber,
or the global default if that is not set.
type: string
sourceAddress:
description: Specifies whether and how to configure a source address
for the peerings generated by this BGPPeer resource. Default value
"UseNodeIP" means to configure the node IP as the source address. "None"
means not to configure a source address.
type: string
type: object
type: object
served: true
storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: blockaffinities.crd.projectcalico.org
spec:
group: crd.projectcalico.org
names:
kind: BlockAffinity
listKind: BlockAffinityList
plural: blockaffinities
singular: blockaffinity
scope: Cluster
versions:
- name: v1
schema:
openAPIV3Schema:
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: BlockAffinitySpec contains the specification for a BlockAffinity
resource.
properties:
cidr:
type: string
deleted:
description: Deleted indicates that this block affinity is being deleted.
This field is a string for compatibility with older releases that
mistakenly treat this field as a string.
type: string
node:
type: string
state:
type: string
required:
- cidr
- deleted
- node
- state
type: object
type: object
served: true
storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: (devel)
creationTimestamp: null
name: caliconodestatuses.crd.projectcalico.org
spec:
group: crd.projectcalico.org
names:
kind: CalicoNodeStatus
listKind: CalicoNodeStatusList
plural: caliconodestatuses
singular: caliconodestatus
scope: Cluster
versions:
- name: v1
schema:
openAPIV3Schema:
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: CalicoNodeStatusSpec contains the specification for a CalicoNodeStatus
resource.
properties:
classes:
description: Classes declares the types of information to monitor
for this calico/node, and allows for selective status reporting
about certain subsets of information.
items:
type: string
type: array
node:
description: The node name identifies the Calico node instance for
node status.
type: string
updatePeriodSeconds:
description: UpdatePeriodSeconds is the period at which CalicoNodeStatus
should be updated. Set to 0 to disable CalicoNodeStatus refresh.
Maximum update period is one day.
format: int32
type: integer
type: object
status:
description: CalicoNodeStatusStatus defines the observed state of CalicoNodeStatus.
No validation needed for status since it is updated by Calico.
properties:
agent:
description: Agent holds agent status on the node.
properties:
birdV4:
description: BIRDV4 represents the latest observed status of bird4.
properties:
lastBootTime:
description: LastBootTime holds the value of lastBootTime
from bird.ctl output.
type: string
lastReconfigurationTime:
description: LastReconfigurationTime holds the value of lastReconfigTime
from bird.ctl output.
type: string
routerID:
description: Router ID used by bird.
type: string
state:
description: The state of the BGP Daemon.
type: string
version:
description: Version of the BGP daemon
type: string
type: object
birdV6:
description: BIRDV6 represents the latest observed status of bird6.
properties:
lastBootTime:
description: LastBootTime holds the value of lastBootTime
from bird.ctl output.
type: string
lastReconfigurationTime:
description: LastReconfigurationTime holds the value of lastReconfigTime
from bird.ctl output.
type: string
routerID:
description: Router ID used by bird.
type: string
state:
description: The state of the BGP Daemon.
type: string
version:
description: Version of the BGP daemon
type: string
type: object
type: object
bgp:
description: BGP holds node BGP status.
properties:
numberEstablishedV4:
description: The total number of IPv4 established bgp sessions.
type: integer
numberEstablishedV6:
description: The total number of IPv6 established bgp sessions.
type: integer
numberNotEstablishedV4:
description: The total number of IPv4 non-established bgp sessions.
type: integer
numberNotEstablishedV6:
description: The total number of IPv6 non-established bgp sessions.
type: integer
peersV4:
description: PeersV4 represents IPv4 BGP peers status on the node.
items:
description: CalicoNodePeer contains the status of BGP peers
on the node.
properties:
peerIP:
description: IP address of the peer whose condition we are
reporting.
type: string
since:
description: Since the state or reason last changed.
type: string
state:
description: State is the BGP session state.
type: string
type:
description: Type indicates whether this peer is configured
                        via the node-to-node mesh, or via an explicit global or
per-node BGPPeer object.
type: string
type: object
type: array
peersV6:
description: PeersV6 represents IPv6 BGP peers status on the node.
items:
description: CalicoNodePeer contains the status of BGP peers
on the node.
properties:
peerIP:
description: IP address of the peer whose condition we are
reporting.
type: string
since:
description: Since the state or reason last changed.
type: string
state:
description: State is the BGP session state.
type: string
type:
description: Type indicates whether this peer is configured
                        via the node-to-node mesh, or via an explicit global or
per-node BGPPeer object.
type: string
type: object
type: array
required:
- numberEstablishedV4
- numberEstablishedV6
- numberNotEstablishedV4
- numberNotEstablishedV6
type: object
lastUpdated:
description: LastUpdated is a timestamp representing the server time
when CalicoNodeStatus object last updated. It is represented in
RFC3339 form and is in UTC.
format: date-time
nullable: true
type: string
routes:
description: Routes reports routes known to the Calico BGP daemon
on the node.
properties:
routesV4:
description: RoutesV4 represents IPv4 routes on the node.
items:
description: CalicoNodeRoute contains the status of BGP routes
on the node.
properties:
destination:
description: Destination of the route.
type: string
gateway:
description: Gateway for the destination.
type: string
interface:
description: Interface for the destination
type: string
learnedFrom:
description: LearnedFrom contains information regarding
where this route originated.
properties:
peerIP:
description: If sourceType is NodeMesh or BGPPeer, IP
address of the router that sent us this route.
type: string
sourceType:
description: Type of the source where a route is learned
from.
type: string
type: object
type:
description: Type indicates if the route is being used for
forwarding or not.
type: string
type: object
type: array
routesV6:
description: RoutesV6 represents IPv6 routes on the node.
items:
description: CalicoNodeRoute contains the status of BGP routes
on the node.
properties:
destination:
description: Destination of the route.
type: string
gateway:
description: Gateway for the destination.
type: string
interface:
description: Interface for the destination
type: string
learnedFrom:
description: LearnedFrom contains information regarding
where this route originated.
properties:
peerIP:
description: If sourceType is NodeMesh or BGPPeer, IP
address of the router that sent us this route.
type: string
sourceType:
description: Type of the source where a route is learned
from.
type: string
type: object
type:
description: Type indicates if the route is being used for
forwarding or not.
type: string
type: object
type: array
type: object
type: object
type: object
served: true
storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: clusterinformations.crd.projectcalico.org
spec:
group: crd.projectcalico.org
names:
kind: ClusterInformation
listKind: ClusterInformationList
plural: clusterinformations
singular: clusterinformation
scope: Cluster
versions:
- name: v1
schema:
openAPIV3Schema:
description: ClusterInformation contains the cluster specific information.
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: ClusterInformationSpec contains the values of describing
the cluster.
properties:
calicoVersion:
description: CalicoVersion is the version of Calico that the cluster
is running
type: string
clusterGUID:
description: ClusterGUID is the GUID of the cluster
type: string
clusterType:
description: ClusterType describes the type of the cluster
type: string
datastoreReady:
description: DatastoreReady is used during significant datastore migrations
to signal to components such as Felix that it should wait before
accessing the datastore.
type: boolean
variant:
description: Variant declares which variant of Calico should be active.
type: string
type: object
type: object
served: true
storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: felixconfigurations.crd.projectcalico.org
spec:
group: crd.projectcalico.org
names:
kind: FelixConfiguration
listKind: FelixConfigurationList
plural: felixconfigurations
singular: felixconfiguration
scope: Cluster
versions:
- name: v1
schema:
openAPIV3Schema:
description: Felix Configuration contains the configuration for Felix.
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: FelixConfigurationSpec contains the values of the Felix configuration.
properties:
allowIPIPPacketsFromWorkloads:
description: 'AllowIPIPPacketsFromWorkloads controls whether Felix
will add a rule to drop IPIP encapsulated traffic from workloads
[Default: false]'
type: boolean
allowVXLANPacketsFromWorkloads:
description: 'AllowVXLANPacketsFromWorkloads controls whether Felix
will add a rule to drop VXLAN encapsulated traffic from workloads
[Default: false]'
type: boolean
awsSrcDstCheck:
description: 'Set source-destination-check on AWS EC2 instances. Accepted
value must be one of "DoNothing", "Enable" or "Disable". [Default:
DoNothing]'
enum:
- DoNothing
- Enable
- Disable
type: string
bpfConnectTimeLoadBalancingEnabled:
description: 'BPFConnectTimeLoadBalancingEnabled when in BPF mode,
controls whether Felix installs the connection-time load balancer. The
connect-time load balancer is required for the host to be able to
reach Kubernetes services and it improves the performance of pod-to-service
connections. The only reason to disable it is for debugging purposes. [Default:
true]'
type: boolean
bpfDataIfacePattern:
description: BPFDataIfacePattern is a regular expression that controls
which interfaces Felix should attach BPF programs to in order to
catch traffic to/from the network. This needs to match the interfaces
that Calico workload traffic flows over as well as any interfaces
that handle incoming traffic to nodeports and services from outside
the cluster. It should not match the workload interfaces (usually
named cali...).
type: string
bpfDisableUnprivileged:
description: 'BPFDisableUnprivileged, if enabled, Felix sets the kernel.unprivileged_bpf_disabled
sysctl to disable unprivileged use of BPF. This ensures that unprivileged
users cannot access Calico''s BPF maps and cannot insert their own
BPF programs to interfere with Calico''s. [Default: true]'
type: boolean
bpfEnabled:
description: 'BPFEnabled, if enabled Felix will use the BPF dataplane.
[Default: false]'
type: boolean
bpfExtToServiceConnmark:
              description: 'BPFExtToServiceConnmark in BPF mode, controls a 32bit
mark that is set on connections from an external client to a local
service. This mark allows us to control how packets of that connection
                are routed within the host and how routing is interpreted by the RPF
check. [Default: 0]'
type: integer
bpfExternalServiceMode:
description: 'BPFExternalServiceMode in BPF mode, controls how connections
from outside the cluster to services (node ports and cluster IPs)
are forwarded to remote workloads. If set to "Tunnel" then both
request and response traffic is tunneled to the remote node. If
set to "DSR", the request traffic is tunneled but the response traffic
is sent directly from the remote node. In "DSR" mode, the remote
node appears to use the IP of the ingress node; this requires a
permissive L2 network. [Default: Tunnel]'
type: string
bpfKubeProxyEndpointSlicesEnabled:
description: BPFKubeProxyEndpointSlicesEnabled in BPF mode, controls
whether Felix's embedded kube-proxy accepts EndpointSlices or not.
type: boolean
bpfKubeProxyIptablesCleanupEnabled:
description: 'BPFKubeProxyIptablesCleanupEnabled, if enabled in BPF
mode, Felix will proactively clean up the upstream Kubernetes kube-proxy''s
iptables chains. Should only be enabled if kube-proxy is not running. [Default:
true]'
type: boolean
bpfKubeProxyMinSyncPeriod:
description: 'BPFKubeProxyMinSyncPeriod, in BPF mode, controls the
minimum time between updates to the dataplane for Felix''s embedded
kube-proxy. Lower values give reduced set-up latency. Higher values
reduce Felix CPU usage by batching up more work. [Default: 1s]'
type: string
bpfLogLevel:
description: 'BPFLogLevel controls the log level of the BPF programs
when in BPF dataplane mode. One of "Off", "Info", or "Debug". The
logs are emitted to the BPF trace pipe, accessible with the command
`tc exec bpf debug`. [Default: Off].'
type: string
chainInsertMode:
description: 'ChainInsertMode controls whether Felix hooks the kernel''s
top-level iptables chains by inserting a rule at the top of the
chain or by appending a rule at the bottom. insert is the safe default
since it prevents Calico''s rules from being bypassed. If you switch
to append mode, be sure that the other rules in the chains signal
acceptance by falling through to the Calico rules, otherwise the
Calico policy will be bypassed. [Default: insert]'
type: string
dataplaneDriver:
type: string
debugDisableLogDropping:
type: boolean
debugMemoryProfilePath:
type: string
debugSimulateCalcGraphHangAfter:
type: string
debugSimulateDataplaneHangAfter:
type: string
defaultEndpointToHostAction:
description: 'DefaultEndpointToHostAction controls what happens to
traffic that goes from a workload endpoint to the host itself (after
the traffic hits the endpoint egress policy). By default Calico
blocks traffic from workload endpoints to the host itself with an
iptables "DROP" action. If you want to allow some or all traffic
from endpoint to host, set this parameter to RETURN or ACCEPT. Use
RETURN if you have your own rules in the iptables "INPUT" chain;
Calico will insert its rules at the top of that chain, then "RETURN"
packets to the "INPUT" chain once it has completed processing workload
endpoint egress policy. Use ACCEPT to unconditionally accept packets
from workloads after processing workload endpoint egress policy.
[Default: Drop]'
type: string
deviceRouteProtocol:
              description: This defines the route protocol added to programmed device
                routes; when left blank, this defaults to RTPROT_BOOT.
type: integer
deviceRouteSourceAddress:
description: This is the source address to use on programmed device
routes. By default the source address is left blank, leaving the
kernel to choose the source address used.
type: string
disableConntrackInvalidCheck:
type: boolean
endpointReportingDelay:
type: string
endpointReportingEnabled:
type: boolean
externalNodesList:
              description: ExternalNodesCIDRList is a list of CIDRs of external, non-Calico
                nodes which may source tunnel traffic and have the tunneled traffic
                accepted at Calico nodes.
items:
type: string
type: array
failsafeInboundHostPorts:
description: 'FailsafeInboundHostPorts is a list of UDP/TCP ports
and CIDRs that Felix will allow incoming traffic to host endpoints
on irrespective of the security policy. This is useful to avoid
accidentally cutting off a host with incorrect configuration. For
back-compatibility, if the protocol is not specified, it defaults
to "tcp". If a CIDR is not specified, it will allow traffic from
all addresses. To disable all inbound host ports, use the value
none. The default value allows ssh access and DHCP. [Default: tcp:22,
udp:68, tcp:179, tcp:2379, tcp:2380, tcp:6443, tcp:6666, tcp:6667]'
items:
description: ProtoPort is combination of protocol, port, and CIDR.
Protocol and port must be specified.
properties:
net:
type: string
port:
type: integer
protocol:
type: string
required:
- port
- protocol
type: object
type: array
failsafeOutboundHostPorts:
description: 'FailsafeOutboundHostPorts is a list of UDP/TCP ports
and CIDRs that Felix will allow outgoing traffic from host endpoints
to irrespective of the security policy. This is useful to avoid
accidentally cutting off a host with incorrect configuration. For
back-compatibility, if the protocol is not specified, it defaults
to "tcp". If a CIDR is not specified, it will allow traffic from
all addresses. To disable all outbound host ports, use the value
none. The default value opens etcd''s standard ports to ensure that
Felix does not get cut off from etcd as well as allowing DHCP and
DNS. [Default: tcp:179, tcp:2379, tcp:2380, tcp:6443, tcp:6666,
tcp:6667, udp:53, udp:67]'
items:
description: ProtoPort is combination of protocol, port, and CIDR.
Protocol and port must be specified.
properties:
net:
type: string
port:
type: integer
protocol:
type: string
required:
- port
- protocol
type: object
type: array
featureDetectOverride:
              description: FeatureDetectOverride is used to override feature detection.
                Values are specified in a comma-separated list with no spaces, for
                example "SNATFullyRandom=true,MASQFullyRandom=false,RestoreSupportsLock=".
                "true" or "false" forces the feature on or off, while empty or omitted
                values are auto-detected.
type: string
genericXDPEnabled:
description: 'GenericXDPEnabled enables Generic XDP so network cards
that don''t support XDP offload or driver modes can use XDP. This
is not recommended since it doesn''t provide better performance
than iptables. [Default: false]'
type: boolean
healthEnabled:
type: boolean
healthHost:
type: string
healthPort:
type: integer
interfaceExclude:
description: 'InterfaceExclude is a comma-separated list of interfaces
that Felix should exclude when monitoring for host endpoints. The
default value ensures that Felix ignores Kubernetes'' IPVS dummy
interface, which is used internally by kube-proxy. If you want to
exclude multiple interface names using a single value, the list
supports regular expressions. For regular expressions you must wrap
the value with ''/''. For example having values ''/^kube/,veth1''
will exclude all interfaces that begin with ''kube'' and also the
interface ''veth1''. [Default: kube-ipvs0]'
type: string
interfacePrefix:
description: 'InterfacePrefix is the interface name prefix that identifies
workload endpoints and so distinguishes them from host endpoint
interfaces. Note: in environments other than bare metal, the orchestrators
configure this appropriately. For example our Kubernetes and Docker
integrations set the ''cali'' value, and our OpenStack integration
sets the ''tap'' value. [Default: cali]'
type: string
interfaceRefreshInterval:
description: InterfaceRefreshInterval is the period at which Felix
rescans local interfaces to verify their state. The rescan can be
disabled by setting the interval to 0.
type: string
ipipEnabled:
type: boolean
ipipMTU:
description: 'IPIPMTU is the MTU to set on the tunnel device. See
Configuring MTU [Default: 1440]'
type: integer
ipsetsRefreshInterval:
description: 'IpsetsRefreshInterval is the period at which Felix re-checks
all iptables state to ensure that no other process has accidentally
broken Calico''s rules. Set to 0 to disable iptables refresh. [Default:
90s]'
type: string
iptablesBackend:
description: IptablesBackend specifies which backend of iptables will
be used. The default is legacy.
type: string
iptablesFilterAllowAction:
type: string
iptablesLockFilePath:
description: 'IptablesLockFilePath is the location of the iptables
lock file. You may need to change this if the lock file is not in
its standard location (for example if you have mapped it into Felix''s
container at a different path). [Default: /run/xtables.lock]'
type: string
iptablesLockProbeInterval:
description: 'IptablesLockProbeInterval is the time that Felix will
wait between attempts to acquire the iptables lock if it is not
available. Lower values make Felix more responsive when the lock
is contended, but use more CPU. [Default: 50ms]'
type: string
iptablesLockTimeout:
description: 'IptablesLockTimeout is the time that Felix will wait
                for the iptables lock, or 0 to disable. To use this feature, Felix
must share the iptables lock file with all other processes that
also take the lock. When running Felix inside a container, this
requires the /run directory of the host to be mounted into the calico/node
or calico/felix container. [Default: 0s disabled]'
type: string
iptablesMangleAllowAction:
type: string
iptablesMarkMask:
description: 'IptablesMarkMask is the mask that Felix selects its
IPTables Mark bits from. Should be a 32 bit hexadecimal number with
at least 8 bits set, none of which clash with any other mark bits
in use on the system. [Default: 0xff000000]'
format: int32
type: integer
iptablesNATOutgoingInterfaceFilter:
type: string
iptablesPostWriteCheckInterval:
description: 'IptablesPostWriteCheckInterval is the period after Felix
has done a write to the dataplane that it schedules an extra read
back in order to check the write was not clobbered by another process.
This should only occur if another application on the system doesn''t
respect the iptables lock. [Default: 1s]'
type: string
iptablesRefreshInterval:
description: 'IptablesRefreshInterval is the period at which Felix
re-checks the IP sets in the dataplane to ensure that no other process
has accidentally broken Calico''s rules. Set to 0 to disable IP
sets refresh. Note: the default for this value is lower than the
other refresh intervals as a workaround for a Linux kernel bug that
was fixed in kernel version 4.11. If you are using v4.11 or greater
                you may want to set this to a higher value to reduce Felix CPU
usage. [Default: 10s]'
type: string
ipv6Support:
type: boolean
kubeNodePortRanges:
description: 'KubeNodePortRanges holds list of port ranges used for
service node ports. Only used if felix detects kube-proxy running
in ipvs mode. Felix uses these ranges to separate host and workload
traffic. [Default: 30000:32767].'
items:
anyOf:
- type: integer
- type: string
pattern: ^.*
x-kubernetes-int-or-string: true
type: array
logFilePath:
description: 'LogFilePath is the full path to the Felix log. Set to
none to disable file logging. [Default: /var/log/calico/felix.log]'
type: string
logPrefix:
description: 'LogPrefix is the log prefix that Felix uses when rendering
LOG rules. [Default: calico-packet]'
type: string
logSeverityFile:
description: 'LogSeverityFile is the log severity above which logs
are sent to the log file. [Default: Info]'
type: string
logSeverityScreen:
description: 'LogSeverityScreen is the log severity above which logs
are sent to the stdout. [Default: Info]'
type: string
logSeveritySys:
description: 'LogSeveritySys is the log severity above which logs
are sent to the syslog. Set to None for no logging to syslog. [Default:
Info]'
type: string
maxIpsetSize:
type: integer
metadataAddr:
description: 'MetadataAddr is the IP address or domain name of the
server that can answer VM queries for cloud-init metadata. In OpenStack,
this corresponds to the machine running nova-api (or in Ubuntu,
nova-api-metadata). A value of none (case insensitive) means that
Felix should not set up any NAT rule for the metadata path. [Default:
127.0.0.1]'
type: string
metadataPort:
description: 'MetadataPort is the port of the metadata server. This,
combined with global.MetadataAddr (if not ''None''), is used to
set up a NAT rule, from 169.254.169.254:80 to MetadataAddr:MetadataPort.
In most cases this should not need to be changed [Default: 8775].'
type: integer
mtuIfacePattern:
description: MTUIfacePattern is a regular expression that controls
which interfaces Felix should scan in order to calculate the host's
MTU. This should not match workload interfaces (usually named cali...).
type: string
natOutgoingAddress:
description: NATOutgoingAddress specifies an address to use when performing
source NAT for traffic in a natOutgoing pool that is leaving the
network. By default the address used is an address on the interface
                the traffic is leaving on (i.e. it uses the iptables MASQUERADE target).
type: string
natPortRange:
anyOf:
- type: integer
- type: string
description: NATPortRange specifies the range of ports that is used
for port mapping when doing outgoing NAT. When unset the default
behavior of the network stack is used.
pattern: ^.*
x-kubernetes-int-or-string: true
netlinkTimeout:
type: string
openstackRegion:
description: 'OpenstackRegion is the name of the region that a particular
Felix belongs to. In a multi-region Calico/OpenStack deployment,
this must be configured somehow for each Felix (here in the datamodel,
or in felix.cfg or the environment on each compute node), and must
match the [calico] openstack_region value configured in neutron.conf
on each node. [Default: Empty]'
type: string
policySyncPathPrefix:
              description: 'PolicySyncPathPrefix is used by Felix to communicate
policy changes to external services, like Application layer policy.
[Default: Empty]'
type: string
prometheusGoMetricsEnabled:
description: 'PrometheusGoMetricsEnabled disables Go runtime metrics
collection, which the Prometheus client does by default, when set
to false. This reduces the number of metrics reported, reducing
Prometheus load. [Default: true]'
type: boolean
prometheusMetricsEnabled:
description: 'PrometheusMetricsEnabled enables the Prometheus metrics
server in Felix if set to true. [Default: false]'
type: boolean
prometheusMetricsHost:
description: 'PrometheusMetricsHost is the host that the Prometheus
metrics server should bind to. [Default: empty]'
type: string
prometheusMetricsPort:
description: 'PrometheusMetricsPort is the TCP port that the Prometheus
metrics server should bind to. [Default: 9091]'
type: integer
prometheusProcessMetricsEnabled:
description: 'PrometheusProcessMetricsEnabled disables process metrics
collection, which the Prometheus client does by default, when set
to false. This reduces the number of metrics reported, reducing
Prometheus load. [Default: true]'
type: boolean
prometheusWireGuardMetricsEnabled:
description: 'PrometheusWireGuardMetricsEnabled disables wireguard
metrics collection, which the Prometheus client does by default,
when set to false. This reduces the number of metrics reported,
reducing Prometheus load. [Default: true]'
type: boolean
removeExternalRoutes:
              description: Whether or not to remove device routes that have not
                been programmed by Felix. Disabling this will allow external applications
                to also add device routes. This is enabled by default, which means
                externally added routes will be removed.
type: boolean
reportingInterval:
description: 'ReportingInterval is the interval at which Felix reports
its status into the datastore or 0 to disable. Must be non-zero
in OpenStack deployments. [Default: 30s]'
type: string
reportingTTL:
description: 'ReportingTTL is the time-to-live setting for process-wide
status reports. [Default: 90s]'
type: string
routeRefreshInterval:
description: 'RouteRefreshInterval is the period at which Felix re-checks
the routes in the dataplane to ensure that no other process has
accidentally broken Calico''s rules. Set to 0 to disable route refresh.
[Default: 90s]'
type: string
routeSource:
description: 'RouteSource configures where Felix gets its routing
information. - WorkloadIPs: use workload endpoints to construct
routes. - CalicoIPAM: the default - use IPAM data to construct routes.'
type: string
routeTableRange:
description: Calico programs additional Linux route tables for various
purposes. RouteTableRange specifies the indices of the route tables
that Calico should use.
properties:
max:
type: integer
min:
type: integer
required:
- max
- min
type: object
serviceLoopPrevention:
              description: 'When service IP advertisement is enabled, prevent routing
                loops to service IPs that are not in use by dropping or rejecting
                packets that do not get DNAT''d by kube-proxy, unless this is set
                to "Disabled", in which case such routing loops continue to be allowed.
                [Default: Drop]'
type: string
sidecarAccelerationEnabled:
description: 'SidecarAccelerationEnabled enables experimental sidecar
acceleration [Default: false]'
type: boolean
usageReportingEnabled:
              description: 'UsageReportingEnabled reports anonymous Calico version
                number and cluster size to projectcalico.org. Warnings returned by
                the usage server are logged, for example if a significant security
                vulnerability has been discovered in the version of Calico being
                used. [Default: true]'
type: boolean
usageReportingInitialDelay:
description: 'UsageReportingInitialDelay controls the minimum delay
before Felix makes a report. [Default: 300s]'
type: string
usageReportingInterval:
description: 'UsageReportingInterval controls the interval at which
Felix makes reports. [Default: 86400s]'
type: string
useInternalDataplaneDriver:
type: boolean
vxlanEnabled:
type: boolean
vxlanMTU:
description: 'VXLANMTU is the MTU to set on the tunnel device. See
Configuring MTU [Default: 1440]'
type: integer
vxlanPort:
type: integer
vxlanVNI:
type: integer
wireguardEnabled:
description: 'WireguardEnabled controls whether Wireguard is enabled.
[Default: false]'
type: boolean
wireguardHostEncryptionEnabled:
description: 'WireguardHostEncryptionEnabled controls whether Wireguard
host-to-host encryption is enabled. [Default: false]'
type: boolean
wireguardInterfaceName:
description: 'WireguardInterfaceName specifies the name to use for
the Wireguard interface. [Default: wg.calico]'
type: string
wireguardListeningPort:
description: 'WireguardListeningPort controls the listening port used
by Wireguard. [Default: 51820]'
type: integer
wireguardMTU:
description: 'WireguardMTU controls the MTU on the Wireguard interface.
See Configuring MTU [Default: 1420]'
type: integer
wireguardRoutingRulePriority:
description: 'WireguardRoutingRulePriority controls the priority value
to use for the Wireguard routing rule. [Default: 99]'
type: integer
xdpEnabled:
description: 'XDPEnabled enables XDP acceleration for suitable untracked
incoming deny rules. [Default: true]'
type: boolean
xdpRefreshInterval:
description: 'XDPRefreshInterval is the period at which Felix re-checks
all XDP state to ensure that no other process has accidentally broken
Calico''s BPF maps or attached programs. Set to 0 to disable XDP
refresh. [Default: 90s]'
type: string
type: object
type: object
served: true
storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
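# For orientation after the long schema above, here is a commented-out sketch
# of a FelixConfiguration resource showing how a few of its fields fit
# together. It is illustrative only: the cluster-wide singleton is
# conventionally named "default", and the specific values chosen here are
# assumptions for the example, not recommendations.
#
# apiVersion: crd.projectcalico.org/v1
# kind: FelixConfiguration
# metadata:
#   name: default
# spec:
#   logSeverityScreen: Info          # logs at Info and above go to stdout
#   reportingInterval: 30s           # status reports into the datastore
#   bpfEnabled: false                # keep the iptables dataplane
#   failsafeInboundHostPorts:        # keep SSH reachable even with bad policy
#     - protocol: tcp
#       port: 22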
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: globalnetworkpolicies.crd.projectcalico.org
spec:
group: crd.projectcalico.org
names:
kind: GlobalNetworkPolicy
listKind: GlobalNetworkPolicyList
plural: globalnetworkpolicies
singular: globalnetworkpolicy
scope: Cluster
versions:
- name: v1
schema:
openAPIV3Schema:
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
properties:
applyOnForward:
description: ApplyOnForward indicates to apply the rules in this policy
on forward traffic.
type: boolean
doNotTrack:
description: DoNotTrack indicates whether packets matched by the rules
in this policy should go through the data plane's connection tracking,
such as Linux conntrack. If True, the rules in this policy are
applied before any data plane connection tracking, and packets allowed
by this policy are marked as not to be tracked.
type: boolean
egress:
description: The ordered set of egress rules. Each rule contains
a set of packet match criteria and a corresponding action to apply.
items:
description: "A Rule encapsulates a set of match criteria and an
action. Both selector-based security Policy and security Profiles
reference rules - separated out as a list of rules for both ingress
and egress packet matching. \n Each positive match criteria has
a negated version, prefixed with \"Not\". All the match criteria
within a rule must be satisfied for a packet to match. A single
rule can contain the positive and negative version of a match
and both must be satisfied for the rule to match."
properties:
action:
type: string
destination:
description: Destination contains the match criteria that apply
to destination entity.
properties:
namespaceSelector:
description: "NamespaceSelector is an optional field that
contains a selector expression. Only traffic that originates
from (or terminates at) endpoints within the selected
namespaces will be matched. When both NamespaceSelector
and another selector are defined on the same rule, then
only workload endpoints that are matched by both selectors
will be selected by the rule. \n For NetworkPolicy, an
empty NamespaceSelector implies that the Selector is limited
to selecting only workload endpoints in the same namespace
as the NetworkPolicy. \n For NetworkPolicy, `global()`
NamespaceSelector implies that the Selector is limited
to selecting only GlobalNetworkSet or HostEndpoint. \n
For GlobalNetworkPolicy, an empty NamespaceSelector implies
the Selector applies to workload endpoints across all
namespaces."
type: string
nets:
description: Nets is an optional field that restricts the
rule to only apply to traffic that originates from (or
terminates at) IP addresses in any of the given subnets.
items:
type: string
type: array
notNets:
description: NotNets is the negated version of the Nets
field.
items:
type: string
type: array
notPorts:
description: NotPorts is the negated version of the Ports
field. Since only some protocols have ports, if any ports
are specified it requires the Protocol match in the Rule
to be set to "TCP" or "UDP".
items:
anyOf:
- type: integer
- type: string
pattern: ^.*
x-kubernetes-int-or-string: true
type: array
notSelector:
description: NotSelector is the negated version of the Selector
field. See Selector field for subtleties with negated
selectors.
type: string
ports:
description: "Ports is an optional field that restricts
the rule to only apply to traffic that has a source (destination)
port that matches one of these ranges/values. This value
is a list of integers or strings that represent ranges
of ports. \n Since only some protocols have ports, if
any ports are specified it requires the Protocol match
in the Rule to be set to \"TCP\" or \"UDP\"."
items:
anyOf:
- type: integer
- type: string
pattern: ^.*
x-kubernetes-int-or-string: true
type: array
selector:
description: "Selector is an optional field that contains
a selector expression (see Policy for sample syntax).
\ Only traffic that originates from (terminates at) endpoints
matching the selector will be matched. \n Note that: in
addition to the negated version of the Selector (see NotSelector
below), the selector expression syntax itself supports
negation. The two types of negation are subtly different.
One negates the set of matched endpoints, the other negates
the whole match: \n \tSelector = \"!has(my_label)\" matches
packets that are from other Calico-controlled \tendpoints
that do not have the label \"my_label\". \n \tNotSelector
= \"has(my_label)\" matches packets that are not from
Calico-controlled \tendpoints that do have the label \"my_label\".
\n The effect is that the latter will accept packets from
non-Calico sources whereas the former is limited to packets
from Calico-controlled endpoints."
type: string
serviceAccounts:
description: ServiceAccounts is an optional field that restricts
the rule to only apply to traffic that originates from
(or terminates at) a pod running as a matching service
account.
properties:
names:
description: Names is an optional field that restricts
the rule to only apply to traffic that originates
from (or terminates at) a pod running as a service
account whose name is in the list.
items:
type: string
type: array
selector:
description: Selector is an optional field that restricts
the rule to only apply to traffic that originates
from (or terminates at) a pod running as a service
account that matches the given label selector. If
both Names and Selector are specified then they are
AND'ed.
type: string
type: object
services:
description: "Services is an optional field that contains
options for matching Kubernetes Services. If specified,
only traffic that originates from or terminates at endpoints
within the selected service(s) will be matched, and only
to/from each endpoint's port. \n Services cannot be specified
on the same rule as Selector, NotSelector, NamespaceSelector,
Nets, NotNets or ServiceAccounts. \n Ports and NotPorts
can only be specified with Services on ingress rules."
properties:
name:
description: Name specifies the name of a Kubernetes
Service to match.
type: string
namespace:
description: Namespace specifies the namespace of the
given Service. If left empty, the rule will match
within this policy's namespace.
type: string
type: object
type: object
http:
description: HTTP contains match criteria that apply to HTTP
requests.
properties:
methods:
description: Methods is an optional field that restricts
the rule to apply only to HTTP requests that use one of
the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple
methods are OR'd together.
items:
type: string
type: array
paths:
description: 'Paths is an optional field that restricts
the rule to apply to HTTP requests that use one of the
listed HTTP Paths. Multiple paths are OR''d together.
e.g: - exact: /foo - prefix: /bar NOTE: Each entry may
ONLY specify either a `exact` or a `prefix` match. The
validator will check for it.'
items:
description: 'HTTPPath specifies an HTTP path to match.
It may be either of the form: exact: <path>: which matches
the path exactly or prefix: <path-prefix>: which matches
the path prefix'
properties:
exact:
type: string
prefix:
type: string
type: object
type: array
type: object
icmp:
description: ICMP is an optional field that restricts the rule
to apply to a specific type and code of ICMP traffic. This
should only be specified if the Protocol field is set to "ICMP"
or "ICMPv6".
properties:
code:
description: Match on a specific ICMP code. If specified,
the Type value must also be specified. This is a technical
limitation imposed by the kernel's iptables firewall,
which Calico uses to enforce the rule.
type: integer
type:
description: Match on a specific ICMP type. For example
a value of 8 refers to ICMP Echo Request (i.e. pings).
type: integer
type: object
ipVersion:
description: IPVersion is an optional field that restricts the
rule to only match a specific IP version.
type: integer
metadata:
description: Metadata contains additional information for this
rule
properties:
annotations:
additionalProperties:
type: string
description: Annotations is a set of key value pairs that
give extra information about the rule
type: object
type: object
notICMP:
description: NotICMP is the negated version of the ICMP field.
properties:
code:
description: Match on a specific ICMP code. If specified,
the Type value must also be specified. This is a technical
limitation imposed by the kernel's iptables firewall,
which Calico uses to enforce the rule.
type: integer
type:
description: Match on a specific ICMP type. For example
a value of 8 refers to ICMP Echo Request (i.e. pings).
type: integer
type: object
notProtocol:
anyOf:
- type: integer
- type: string
description: NotProtocol is the negated version of the Protocol
field.
pattern: ^.*
x-kubernetes-int-or-string: true
protocol:
anyOf:
- type: integer
- type: string
description: "Protocol is an optional field that restricts the
rule to only apply to traffic of a specific IP protocol. Required
if any of the EntityRules contain Ports (because ports only
apply to certain protocols). \n Must be one of these string
values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\",
\"UDPLite\" or an integer in the range 1-255."
pattern: ^.*
x-kubernetes-int-or-string: true
source:
description: Source contains the match criteria that apply to
source entity.
properties:
namespaceSelector:
description: "NamespaceSelector is an optional field that
contains a selector expression. Only traffic that originates
from (or terminates at) endpoints within the selected
namespaces will be matched. When both NamespaceSelector
and another selector are defined on the same rule, then
only workload endpoints that are matched by both selectors
will be selected by the rule. \n For NetworkPolicy, an
empty NamespaceSelector implies that the Selector is limited
to selecting only workload endpoints in the same namespace
as the NetworkPolicy. \n For NetworkPolicy, `global()`
NamespaceSelector implies that the Selector is limited
to selecting only GlobalNetworkSet or HostEndpoint. \n
For GlobalNetworkPolicy, an empty NamespaceSelector implies
the Selector applies to workload endpoints across all
namespaces."
type: string
nets:
description: Nets is an optional field that restricts the
rule to only apply to traffic that originates from (or
terminates at) IP addresses in any of the given subnets.
items:
type: string
type: array
notNets:
description: NotNets is the negated version of the Nets
field.
items:
type: string
type: array
notPorts:
description: NotPorts is the negated version of the Ports
field. Since only some protocols have ports, if any ports
are specified it requires the Protocol match in the Rule
to be set to "TCP" or "UDP".
items:
anyOf:
- type: integer
- type: string
pattern: ^.*
x-kubernetes-int-or-string: true
type: array
notSelector:
description: NotSelector is the negated version of the Selector
field. See Selector field for subtleties with negated
selectors.
type: string
ports:
description: "Ports is an optional field that restricts
the rule to only apply to traffic that has a source (destination)
port that matches one of these ranges/values. This value
is a list of integers or strings that represent ranges
of ports. \n Since only some protocols have ports, if
any ports are specified it requires the Protocol match
in the Rule to be set to \"TCP\" or \"UDP\"."
items:
anyOf:
- type: integer
- type: string
pattern: ^.*
x-kubernetes-int-or-string: true
type: array
selector:
description: "Selector is an optional field that contains
a selector expression (see Policy for sample syntax).
\ Only traffic that originates from (terminates at) endpoints
matching the selector will be matched. \n Note that: in
addition to the negated version of the Selector (see NotSelector
below), the selector expression syntax itself supports
negation. The two types of negation are subtly different.
One negates the set of matched endpoints, the other negates
the whole match: \n \tSelector = \"!has(my_label)\" matches
packets that are from other Calico-controlled \tendpoints
that do not have the label \"my_label\". \n \tNotSelector
= \"has(my_label)\" matches packets that are not from
Calico-controlled \tendpoints that do have the label \"my_label\".
\n The effect is that the latter will accept packets from
non-Calico sources whereas the former is limited to packets
from Calico-controlled endpoints."
type: string
serviceAccounts:
description: ServiceAccounts is an optional field that restricts
the rule to only apply to traffic that originates from
(or terminates at) a pod running as a matching service
account.
properties:
names:
description: Names is an optional field that restricts
the rule to only apply to traffic that originates
from (or terminates at) a pod running as a service
account whose name is in the list.
items:
type: string
type: array
selector:
description: Selector is an optional field that restricts
the rule to only apply to traffic that originates
from (or terminates at) a pod running as a service
account that matches the given label selector. If
both Names and Selector are specified then they are
AND'ed.
type: string
type: object
services:
description: "Services is an optional field that contains
options for matching Kubernetes Services. If specified,
only traffic that originates from or terminates at endpoints
within the selected service(s) will be matched, and only
to/from each endpoint's port. \n Services cannot be specified
on the same rule as Selector, NotSelector, NamespaceSelector,
Nets, NotNets or ServiceAccounts. \n Ports and NotPorts
can only be specified with Services on ingress rules."
properties:
name:
description: Name specifies the name of a Kubernetes
Service to match.
type: string
namespace:
description: Namespace specifies the namespace of the
given Service. If left empty, the rule will match
within this policy's namespace.
type: string
type: object
type: object
required:
- action
type: object
type: array
ingress:
description: The ordered set of ingress rules. Each rule contains
a set of packet match criteria and a corresponding action to apply.
items:
description: "A Rule encapsulates a set of match criteria and an
action. Both selector-based security Policy and security Profiles
reference rules - separated out as a list of rules for both ingress
and egress packet matching. \n Each positive match criteria has
a negated version, prefixed with \"Not\". All the match criteria
within a rule must be satisfied for a packet to match. A single
rule can contain the positive and negative version of a match
and both must be satisfied for the rule to match."
properties:
action:
type: string
destination:
description: Destination contains the match criteria that apply
to destination entity.
properties:
namespaceSelector:
description: "NamespaceSelector is an optional field that
contains a selector expression. Only traffic that originates
from (or terminates at) endpoints within the selected
namespaces will be matched. When both NamespaceSelector
and another selector are defined on the same rule, then
only workload endpoints that are matched by both selectors
will be selected by the rule. \n For NetworkPolicy, an
empty NamespaceSelector implies that the Selector is limited
to selecting only workload endpoints in the same namespace
as the NetworkPolicy. \n For NetworkPolicy, `global()`
NamespaceSelector implies that the Selector is limited
to selecting only GlobalNetworkSet or HostEndpoint. \n
For GlobalNetworkPolicy, an empty NamespaceSelector implies
the Selector applies to workload endpoints across all
namespaces."
type: string
nets:
description: Nets is an optional field that restricts the
rule to only apply to traffic that originates from (or
terminates at) IP addresses in any of the given subnets.
items:
type: string
type: array
notNets:
description: NotNets is the negated version of the Nets
field.
items:
type: string
type: array
notPorts:
description: NotPorts is the negated version of the Ports
field. Since only some protocols have ports, if any ports
are specified it requires the Protocol match in the Rule
to be set to "TCP" or "UDP".
items:
anyOf:
- type: integer
- type: string
pattern: ^.*
x-kubernetes-int-or-string: true
type: array
notSelector:
description: NotSelector is the negated version of the Selector
field. See Selector field for subtleties with negated
selectors.
type: string
ports:
description: "Ports is an optional field that restricts
the rule to only apply to traffic that has a source (destination)
port that matches one of these ranges/values. This value
is a list of integers or strings that represent ranges
of ports. \n Since only some protocols have ports, if
any ports are specified it requires the Protocol match
in the Rule to be set to \"TCP\" or \"UDP\"."
items:
anyOf:
- type: integer
- type: string
pattern: ^.*
x-kubernetes-int-or-string: true
type: array
selector:
description: "Selector is an optional field that contains
a selector expression (see Policy for sample syntax).
\ Only traffic that originates from (terminates at) endpoints
matching the selector will be matched. \n Note that: in
addition to the negated version of the Selector (see NotSelector
below), the selector expression syntax itself supports
negation. The two types of negation are subtly different.
One negates the set of matched endpoints, the other negates
the whole match: \n \tSelector = \"!has(my_label)\" matches
packets that are from other Calico-controlled \tendpoints
that do not have the label \"my_label\". \n \tNotSelector
= \"has(my_label)\" matches packets that are not from
Calico-controlled \tendpoints that do have the label \"my_label\".
\n The effect is that the latter will accept packets from
non-Calico sources whereas the former is limited to packets
from Calico-controlled endpoints."
type: string
serviceAccounts:
description: ServiceAccounts is an optional field that restricts
the rule to only apply to traffic that originates from
(or terminates at) a pod running as a matching service
account.
properties:
names:
description: Names is an optional field that restricts
the rule to only apply to traffic that originates
from (or terminates at) a pod running as a service
account whose name is in the list.
items:
type: string
type: array
selector:
description: Selector is an optional field that restricts
the rule to only apply to traffic that originates
from (or terminates at) a pod running as a service
account that matches the given label selector. If
both Names and Selector are specified then they are
AND'ed.
type: string
type: object
services:
description: "Services is an optional field that contains
options for matching Kubernetes Services. If specified,
only traffic that originates from or terminates at endpoints
within the selected service(s) will be matched, and only
to/from each endpoint's port. \n Services cannot be specified
on the same rule as Selector, NotSelector, NamespaceSelector,
Nets, NotNets or ServiceAccounts. \n Ports and NotPorts
can only be specified with Services on ingress rules."
properties:
name:
description: Name specifies the name of a Kubernetes
Service to match.
type: string
namespace:
description: Namespace specifies the namespace of the
given Service. If left empty, the rule will match
within this policy's namespace.
type: string
type: object
type: object
http:
description: HTTP contains match criteria that apply to HTTP
requests.
properties:
methods:
description: Methods is an optional field that restricts
the rule to apply only to HTTP requests that use one of
the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple
methods are OR'd together.
items:
type: string
type: array
paths:
description: 'Paths is an optional field that restricts
the rule to apply to HTTP requests that use one of the
listed HTTP Paths. Multiple paths are OR''d together.
e.g: - exact: /foo - prefix: /bar NOTE: Each entry may
ONLY specify either a `exact` or a `prefix` match. The
validator will check for it.'
items:
description: 'HTTPPath specifies an HTTP path to match.
It may be either of the form: exact: <path>: which matches
the path exactly or prefix: <path-prefix>: which matches
the path prefix'
properties:
exact:
type: string
prefix:
type: string
type: object
type: array
type: object
icmp:
description: ICMP is an optional field that restricts the rule
to apply to a specific type and code of ICMP traffic. This
should only be specified if the Protocol field is set to "ICMP"
or "ICMPv6".
properties:
code:
description: Match on a specific ICMP code. If specified,
the Type value must also be specified. This is a technical
limitation imposed by the kernel's iptables firewall,
which Calico uses to enforce the rule.
type: integer
type:
description: Match on a specific ICMP type. For example
a value of 8 refers to ICMP Echo Request (i.e. pings).
type: integer
type: object
ipVersion:
description: IPVersion is an optional field that restricts the
rule to only match a specific IP version.
type: integer
metadata:
description: Metadata contains additional information for this
rule
properties:
annotations:
additionalProperties:
type: string
description: Annotations is a set of key value pairs that
give extra information about the rule
type: object
type: object
notICMP:
description: NotICMP is the negated version of the ICMP field.
properties:
code:
description: Match on a specific ICMP code. If specified,
the Type value must also be specified. This is a technical
limitation imposed by the kernel's iptables firewall,
which Calico uses to enforce the rule.
type: integer
type:
description: Match on a specific ICMP type. For example
a value of 8 refers to ICMP Echo Request (i.e. pings).
type: integer
type: object
notProtocol:
anyOf:
- type: integer
- type: string
description: NotProtocol is the negated version of the Protocol
field.
pattern: ^.*
x-kubernetes-int-or-string: true
protocol:
anyOf:
- type: integer
- type: string
description: "Protocol is an optional field that restricts the
rule to only apply to traffic of a specific IP protocol. Required
if any of the EntityRules contain Ports (because ports only
apply to certain protocols). \n Must be one of these string
values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\",
\"UDPLite\" or an integer in the range 1-255."
pattern: ^.*
x-kubernetes-int-or-string: true
source:
description: Source contains the match criteria that apply to
source entity.
properties:
namespaceSelector:
description: "NamespaceSelector is an optional field that
contains a selector expression. Only traffic that originates
from (or terminates at) endpoints within the selected
namespaces will be matched. When both NamespaceSelector
and another selector are defined on the same rule, then
only workload endpoints that are matched by both selectors
will be selected by the rule. \n For NetworkPolicy, an
empty NamespaceSelector implies that the Selector is limited
to selecting only workload endpoints in the same namespace
as the NetworkPolicy. \n For NetworkPolicy, `global()`
NamespaceSelector implies that the Selector is limited
to selecting only GlobalNetworkSet or HostEndpoint. \n
For GlobalNetworkPolicy, an empty NamespaceSelector implies
the Selector applies to workload endpoints across all
namespaces."
type: string
nets:
description: Nets is an optional field that restricts the
rule to only apply to traffic that originates from (or
terminates at) IP addresses in any of the given subnets.
items:
type: string
type: array
notNets:
description: NotNets is the negated version of the Nets
field.
items:
type: string
type: array
notPorts:
description: NotPorts is the negated version of the Ports
field. Since only some protocols have ports, if any ports
are specified it requires the Protocol match in the Rule
to be set to "TCP" or "UDP".
items:
anyOf:
- type: integer
- type: string
pattern: ^.*
x-kubernetes-int-or-string: true
type: array
notSelector:
description: NotSelector is the negated version of the Selector
field. See Selector field for subtleties with negated
selectors.
type: string
ports:
description: "Ports is an optional field that restricts
the rule to only apply to traffic that has a source (destination)
port that matches one of these ranges/values. This value
is a list of integers or strings that represent ranges
of ports. \n Since only some protocols have ports, if
any ports are specified it requires the Protocol match
in the Rule to be set to \"TCP\" or \"UDP\"."
items:
anyOf:
- type: integer
- type: string
pattern: ^.*
x-kubernetes-int-or-string: true
type: array
selector:
description: "Selector is an optional field that contains
a selector expression (see Policy for sample syntax).
\ Only traffic that originates from (terminates at) endpoints
matching the selector will be matched. \n Note that: in
addition to the negated version of the Selector (see NotSelector
below), the selector expression syntax itself supports
negation. The two types of negation are subtly different.
One negates the set of matched endpoints, the other negates
the whole match: \n \tSelector = \"!has(my_label)\" matches
packets that are from other Calico-controlled \tendpoints
that do not have the label \"my_label\". \n \tNotSelector
= \"has(my_label)\" matches packets that are not from
Calico-controlled \tendpoints that do have the label \"my_label\".
\n The effect is that the latter will accept packets from
non-Calico sources whereas the former is limited to packets
from Calico-controlled endpoints."
type: string
serviceAccounts:
description: ServiceAccounts is an optional field that restricts
the rule to only apply to traffic that originates from
(or terminates at) a pod running as a matching service
account.
properties:
names:
description: Names is an optional field that restricts
the rule to only apply to traffic that originates
from (or terminates at) a pod running as a service
account whose name is in the list.
items:
type: string
type: array
selector:
description: Selector is an optional field that restricts
the rule to only apply to traffic that originates
from (or terminates at) a pod running as a service
account that matches the given label selector. If
both Names and Selector are specified then they are
AND'ed.
type: string
type: object
services:
description: "Services is an optional field that contains
options for matching Kubernetes Services. If specified,
only traffic that originates from or terminates at endpoints
within the selected service(s) will be matched, and only
to/from each endpoint's port. \n Services cannot be specified
on the same rule as Selector, NotSelector, NamespaceSelector,
Nets, NotNets or ServiceAccounts. \n Ports and NotPorts
can only be specified with Services on ingress rules."
properties:
name:
description: Name specifies the name of a Kubernetes
Service to match.
type: string
namespace:
description: Namespace specifies the namespace of the
given Service. If left empty, the rule will match
within this policy's namespace.
type: string
type: object
type: object
required:
- action
type: object
type: array
namespaceSelector:
description: NamespaceSelector is an optional field for an expression
used to select a pod based on namespaces.
type: string
order:
description: Order is an optional field that specifies the order in
which the policy is applied. Policies with higher "order" are applied
after those with lower order. If the order is omitted, it may be
considered to be "infinite" - i.e. the policy will be applied last. Policies
with identical order will be applied in alphanumerical order based
on the Policy "Name".
type: number
preDNAT:
description: PreDNAT indicates to apply the rules in this policy before
any DNAT.
type: boolean
selector:
description: "The selector is an expression used to pick pick out
the endpoints that the policy should be applied to. \n Selector
expressions follow this syntax: \n \tlabel == \"string_literal\"
\ -> comparison, e.g. my_label == \"foo bar\" \tlabel != \"string_literal\"
\ -> not equal; also matches if label is not present \tlabel in
{ \"a\", \"b\", \"c\", ... } -> true if the value of label X is
one of \"a\", \"b\", \"c\" \tlabel not in { \"a\", \"b\", \"c\",
... } -> true if the value of label X is not one of \"a\", \"b\",
\"c\" \thas(label_name) -> True if that label is present \t! expr
-> negation of expr \texpr && expr -> Short-circuit and \texpr
|| expr -> Short-circuit or \t( expr ) -> parens for grouping \tall()
or the empty selector -> matches all endpoints. \n Label names are
allowed to contain alphanumerics, -, _ and /. String literals are
more permissive but they do not support escape characters. \n Examples
(with made-up labels): \n \ttype == \"webserver\" && deployment
== \"prod\" \ttype in {\"frontend\", \"backend\"} \tdeployment !=
\"dev\" \t! has(label_name)"
type: string
serviceAccountSelector:
description: ServiceAccountSelector is an optional field for an expression
used to select a pod based on service accounts.
type: string
types:
description: "Types indicates whether this policy applies to ingress,
or to egress, or to both. When not explicitly specified (and so
the value on creation is empty or nil), Calico defaults Types according
to what Ingress and Egress rules are present in the policy. The
default is: \n - [ PolicyTypeIngress ], if there are no Egress rules
(including the case where there are also no Ingress rules) \n
- [ PolicyTypeEgress ], if there are Egress rules but no Ingress
rules \n - [ PolicyTypeIngress, PolicyTypeEgress ], if there are
both Ingress and Egress rules. \n When the policy is read back again,
Types will always be one of these values, never empty or nil."
items:
description: PolicyType enumerates the possible values of the PolicySpec
Types field.
type: string
type: array
type: object
type: object
served: true
storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
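# A commented-out sketch of a GlobalNetworkPolicy exercising the schema above:
# a cluster-wide policy that selects endpoints labelled role == 'db' and
# allows ingress TCP traffic to port 5432 from endpoints labelled
# role == 'frontend'. The labels, policy name and port number are assumptions
# made up for the example.
#
# apiVersion: crd.projectcalico.org/v1
# kind: GlobalNetworkPolicy
# metadata:
#   name: allow-frontend-to-db
# spec:
#   selector: role == 'db'          # endpoints this policy applies to
#   types:
#     - Ingress
#   ingress:
#     - action: Allow               # "action" is the only required rule field
#       protocol: TCP
#       source:
#         selector: role == 'frontend'
#       destination:
#         ports:
#           - 5432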
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: globalnetworksets.crd.projectcalico.org
spec:
group: crd.projectcalico.org
names:
kind: GlobalNetworkSet
listKind: GlobalNetworkSetList
plural: globalnetworksets
singular: globalnetworkset
scope: Cluster
versions:
- name: v1
schema:
openAPIV3Schema:
description: GlobalNetworkSet contains a set of arbitrary IP sub-networks/CIDRs
that share labels to allow rules to refer to them via selectors. The labels
of GlobalNetworkSet are not namespaced.
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: GlobalNetworkSetSpec contains the specification for a NetworkSet
resource.
properties:
nets:
description: The list of IP networks that belong to this set.
items:
type: string
type: array
type: object
type: object
served: true
storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
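# A commented-out sketch of a GlobalNetworkSet matching the schema above: a
# cluster-wide, labelled set of CIDRs that policy rules can then match via a
# selector such as role == 'office'. The name, label and CIDRs are assumptions
# made up for the example.
#
# apiVersion: crd.projectcalico.org/v1
# kind: GlobalNetworkSet
# metadata:
#   name: office-networks
#   labels:
#     role: office
# spec:
#   nets:
#     - 198.51.100.0/24
#     - 203.0.113.0/24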
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: hostendpoints.crd.projectcalico.org
spec:
group: crd.projectcalico.org
names:
kind: HostEndpoint
listKind: HostEndpointList
plural: hostendpoints
singular: hostendpoint
scope: Cluster
versions:
- name: v1
schema:
openAPIV3Schema:
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: HostEndpointSpec contains the specification for a HostEndpoint
resource.
properties:
expectedIPs:
description: "The expected IP addresses (IPv4 and IPv6) of the endpoint.
If \"InterfaceName\" is not present, Calico will look for an interface
matching any of the IPs in the list and apply policy to that. Note:
\tWhen using the selector match criteria in an ingress or egress
security Policy \tor Profile, Calico converts the selector into
a set of IP addresses. For host \tendpoints, the ExpectedIPs field
is used for that purpose. (If only the interface \tname is specified,
Calico does not learn the IPs of the interface for use in match
\tcriteria.)"
items:
type: string
type: array
interfaceName:
description: "Either \"*\", or the name of a specific Linux interface
to apply policy to; or empty. \"*\" indicates that this HostEndpoint
governs all traffic to, from or through the default network namespace
of the host named by the \"Node\" field; entering and leaving that
namespace via any interface, including those from/to non-host-networked
local workloads. \n If InterfaceName is not \"*\", this HostEndpoint
only governs traffic that enters or leaves the host through the
specific interface named by InterfaceName, or - when InterfaceName
is empty - through the specific interface that has one of the IPs
in ExpectedIPs. Therefore, when InterfaceName is empty, at least
one expected IP must be specified. Only external interfaces (such
as \"eth0\") are supported here; it isn't possible for a HostEndpoint
to protect traffic through a specific local workload interface.
\n Note: Only some kinds of policy are implemented for \"*\" HostEndpoints;
initially just pre-DNAT policy. Please check Calico documentation
for the latest position."
type: string
node:
description: The node name identifying the Calico node instance.
type: string
ports:
description: Ports contains the endpoint's named ports, which may
be referenced in security policy rules.
items:
properties:
name:
type: string
port:
type: integer
protocol:
anyOf:
- type: integer
- type: string
pattern: ^.*
x-kubernetes-int-or-string: true
required:
- name
- port
- protocol
type: object
type: array
profiles:
description: A list of identifiers of security Profile objects that
apply to this endpoint. Each profile is applied in the order that
they appear in this list. Profile rules are applied after the selector-based
security policy.
items:
type: string
type: array
type: object
type: object
served: true
storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
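# A commented-out sketch of a HostEndpoint matching the schema above, applying
# policy to one named interface of one node. The node name, interface name and
# expected IP are assumptions made up for the example; as the interfaceName
# description notes, expectedIPs is what selector-based policy matches against.
#
# apiVersion: crd.projectcalico.org/v1
# kind: HostEndpoint
# metadata:
#   name: node-1-eth0
#   labels:
#     environment: production
# spec:
#   node: node-1
#   interfaceName: eth0
#   expectedIPs:
#     - 192.0.2.10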
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: ipamblocks.crd.projectcalico.org
spec:
group: crd.projectcalico.org
names:
kind: IPAMBlock
listKind: IPAMBlockList
plural: ipamblocks
singular: ipamblock
scope: Cluster
versions:
- name: v1
schema:
openAPIV3Schema:
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: IPAMBlockSpec contains the specification for an IPAMBlock
resource.
properties:
affinity:
type: string
allocations:
items:
type: integer
# TODO: This nullable is manually added in. We should update controller-gen
# to handle []*int properly itself.
nullable: true
type: array
attributes:
items:
properties:
handle_id:
type: string
secondary:
additionalProperties:
type: string
type: object
type: object
type: array
cidr:
type: string
deleted:
type: boolean
strictAffinity:
type: boolean
unallocated:
items:
type: integer
type: array
required:
- allocations
- attributes
- cidr
- strictAffinity
- unallocated
type: object
type: object
served: true
storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: ipamconfigs.crd.projectcalico.org
spec:
group: crd.projectcalico.org
names:
kind: IPAMConfig
listKind: IPAMConfigList
plural: ipamconfigs
singular: ipamconfig
scope: Cluster
versions:
- name: v1
schema:
openAPIV3Schema:
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: IPAMConfigSpec contains the specification for an IPAMConfig
resource.
properties:
autoAllocateBlocks:
type: boolean
maxBlocksPerHost:
description: MaxBlocksPerHost, if non-zero, is the max number of blocks
that can be affine to each host.
type: integer
strictAffinity:
type: boolean
required:
- autoAllocateBlocks
- strictAffinity
type: object
type: object
served: true
storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
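# A commented-out sketch of an IPAMConfig matching the schema above. Calico
# treats this as a singleton, conventionally named "default"; the two required
# booleans are shown with their usual values and maxBlocksPerHost is an
# assumption chosen only to illustrate the optional per-host cap.
#
# apiVersion: crd.projectcalico.org/v1
# kind: IPAMConfig
# metadata:
#   name: default
# spec:
#   autoAllocateBlocks: true        # required: allow new blocks to be claimed
#   strictAffinity: false           # required: allow borrowing from other hosts' blocks
#   maxBlocksPerHost: 4             # optional: cap blocks affine to one host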
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: ipamhandles.crd.projectcalico.org
spec:
group: crd.projectcalico.org
names:
kind: IPAMHandle
listKind: IPAMHandleList
plural: ipamhandles
singular: ipamhandle
scope: Cluster
versions:
- name: v1
schema:
openAPIV3Schema:
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: IPAMHandleSpec contains the specification for an IPAMHandle
resource.
properties:
block:
additionalProperties:
type: integer
type: object
deleted:
type: boolean
handleID:
type: string
required:
- block
- handleID
type: object
type: object
served: true
storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: ippools.crd.projectcalico.org
spec:
group: crd.projectcalico.org
names:
kind: IPPool
listKind: IPPoolList
plural: ippools
singular: ippool
scope: Cluster
versions:
- name: v1
schema:
openAPIV3Schema:
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: IPPoolSpec contains the specification for an IPPool resource.
properties:
allowedUses:
                description: AllowedUses controls what the IP pool will be used for.
                  If not specified or empty, defaults to ["Tunnel", "Workload"] for
                  back-compatibility.
items:
type: string
type: array
blockSize:
description: The block size to use for IP address assignments from
this pool. Defaults to 26 for IPv4 and 112 for IPv6.
type: integer
cidr:
description: The pool CIDR.
type: string
disabled:
description: When disabled is true, Calico IPAM will not assign addresses
from this pool.
type: boolean
disableBGPExport:
                description: 'Disable exporting routes from this IP Pool''s CIDR over
BGP. [Default: false]'
type: boolean
ipip:
description: 'Deprecated: this field is only used for APIv1 backwards
compatibility. Setting this field is not allowed, this field is
for internal use only.'
properties:
enabled:
description: When enabled is true, ipip tunneling will be used
to deliver packets to destinations within this pool.
type: boolean
mode:
description: The IPIP mode. This can be one of "always" or "cross-subnet". A
mode of "always" will also use IPIP tunneling for routing to
destination IP addresses within this pool. A mode of "cross-subnet"
will only use IPIP tunneling when the destination node is on
a different subnet to the originating node. The default value
(if not specified) is "always".
type: string
type: object
ipipMode:
description: Contains configuration for IPIP tunneling for this pool.
If not specified, then this is defaulted to "Never" (i.e. IPIP tunneling
is disabled).
type: string
nat-outgoing:
description: 'Deprecated: this field is only used for APIv1 backwards
compatibility. Setting this field is not allowed, this field is
for internal use only.'
type: boolean
natOutgoing:
description: When nat-outgoing is true, packets sent from Calico networked
containers in this pool to destinations outside of this pool will
be masqueraded.
type: boolean
nodeSelector:
description: Allows IPPool to allocate for a specific node by label
selector.
type: string
vxlanMode:
description: Contains configuration for VXLAN tunneling for this pool.
If not specified, then this is defaulted to "Never" (i.e. VXLAN
tunneling is disabled).
type: string
required:
- cidr
type: object
type: object
served: true
storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
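# Illustrative IPPool resource for the CRD above, kept commented out so it is
# not applied here; the real default pool is created by calico-node from the
# CALICO_IPV4POOL_* variables set later in this template. The CIDR and settings
# below are example values only.
#
# apiVersion: crd.projectcalico.org/v1
# kind: IPPool
# metadata:
#   name: example-ipv4-ippool
# spec:
#   cidr: 192.168.0.0/16
#   blockSize: 26
#   ipipMode: Always
#   vxlanMode: Never
#   natOutgoing: true
#   nodeSelector: all()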
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: ipreservations.crd.projectcalico.org
spec:
group: crd.projectcalico.org
names:
kind: IPReservation
listKind: IPReservationList
plural: ipreservations
singular: ipreservation
scope: Cluster
versions:
- name: v1
schema:
openAPIV3Schema:
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: IPReservationSpec contains the specification for an IPReservation
resource.
properties:
reservedCIDRs:
description: ReservedCIDRs is a list of CIDRs and/or IP addresses
that Calico IPAM will exclude from new allocations.
items:
type: string
type: array
type: object
type: object
served: true
storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
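# Illustrative IPReservation resource for the CRD above, commented out so it is
# not applied; the name and CIDRs are example values only.
#
# apiVersion: crd.projectcalico.org/v1
# kind: IPReservation
# metadata:
#   name: example-reservation
# spec:
#   reservedCIDRs:
#     - 192.168.2.3/32
#     - 10.0.2.0/24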
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: kubecontrollersconfigurations.crd.projectcalico.org
spec:
group: crd.projectcalico.org
names:
kind: KubeControllersConfiguration
listKind: KubeControllersConfigurationList
plural: kubecontrollersconfigurations
singular: kubecontrollersconfiguration
scope: Cluster
versions:
- name: v1
schema:
openAPIV3Schema:
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: KubeControllersConfigurationSpec contains the values of the
Kubernetes controllers configuration.
properties:
controllers:
description: Controllers enables and configures individual Kubernetes
controllers
properties:
namespace:
description: Namespace enables and configures the namespace controller.
Enabled by default, set to nil to disable.
properties:
reconcilerPeriod:
description: 'ReconcilerPeriod is the period to perform reconciliation
with the Calico datastore. [Default: 5m]'
type: string
type: object
node:
description: Node enables and configures the node controller.
Enabled by default, set to nil to disable.
properties:
hostEndpoint:
description: HostEndpoint controls syncing nodes to host endpoints.
Disabled by default, set to nil to disable.
properties:
autoCreate:
description: 'AutoCreate enables automatic creation of
host endpoints for every node. [Default: Disabled]'
type: string
type: object
leakGracePeriod:
description: 'LeakGracePeriod is the period used by the controller
to determine if an IP address has been leaked. Set to 0
to disable IP garbage collection. [Default: 15m]'
type: string
reconcilerPeriod:
description: 'ReconcilerPeriod is the period to perform reconciliation
with the Calico datastore. [Default: 5m]'
type: string
syncLabels:
description: 'SyncLabels controls whether to copy Kubernetes
node labels to Calico nodes. [Default: Enabled]'
type: string
type: object
policy:
description: Policy enables and configures the policy controller.
Enabled by default, set to nil to disable.
properties:
reconcilerPeriod:
description: 'ReconcilerPeriod is the period to perform reconciliation
with the Calico datastore. [Default: 5m]'
type: string
type: object
serviceAccount:
description: ServiceAccount enables and configures the service
account controller. Enabled by default, set to nil to disable.
properties:
reconcilerPeriod:
description: 'ReconcilerPeriod is the period to perform reconciliation
with the Calico datastore. [Default: 5m]'
type: string
type: object
workloadEndpoint:
description: WorkloadEndpoint enables and configures the workload
endpoint controller. Enabled by default, set to nil to disable.
properties:
reconcilerPeriod:
description: 'ReconcilerPeriod is the period to perform reconciliation
with the Calico datastore. [Default: 5m]'
type: string
type: object
type: object
etcdV3CompactionPeriod:
description: 'EtcdV3CompactionPeriod is the period between etcdv3
compaction requests. Set to 0 to disable. [Default: 10m]'
type: string
healthChecks:
description: 'HealthChecks enables or disables support for health
checks [Default: Enabled]'
type: string
logSeverityScreen:
                description: 'LogSeverityScreen is the log severity above which logs
                  are sent to stdout. [Default: Info]'
type: string
prometheusMetricsPort:
description: 'PrometheusMetricsPort is the TCP port that the Prometheus
metrics server should bind to. Set to 0 to disable. [Default: 9094]'
type: integer
required:
- controllers
type: object
status:
description: KubeControllersConfigurationStatus represents the status
of the configuration. It's useful for admins to be able to see the actual
config that was applied, which can be modified by environment variables
on the kube-controllers process.
properties:
environmentVars:
additionalProperties:
type: string
description: EnvironmentVars contains the environment variables on
the kube-controllers that influenced the RunningConfig.
type: object
runningConfig:
description: RunningConfig contains the effective config that is running
in the kube-controllers pod, after merging the API resource with
any environment variables.
properties:
controllers:
description: Controllers enables and configures individual Kubernetes
controllers
properties:
namespace:
description: Namespace enables and configures the namespace
controller. Enabled by default, set to nil to disable.
properties:
reconcilerPeriod:
description: 'ReconcilerPeriod is the period to perform
reconciliation with the Calico datastore. [Default:
5m]'
type: string
type: object
node:
description: Node enables and configures the node controller.
Enabled by default, set to nil to disable.
properties:
hostEndpoint:
description: HostEndpoint controls syncing nodes to host
endpoints. Disabled by default, set to nil to disable.
properties:
autoCreate:
description: 'AutoCreate enables automatic creation
of host endpoints for every node. [Default: Disabled]'
type: string
type: object
leakGracePeriod:
description: 'LeakGracePeriod is the period used by the
controller to determine if an IP address has been leaked.
Set to 0 to disable IP garbage collection. [Default:
15m]'
type: string
reconcilerPeriod:
description: 'ReconcilerPeriod is the period to perform
reconciliation with the Calico datastore. [Default:
5m]'
type: string
syncLabels:
description: 'SyncLabels controls whether to copy Kubernetes
node labels to Calico nodes. [Default: Enabled]'
type: string
type: object
policy:
description: Policy enables and configures the policy controller.
Enabled by default, set to nil to disable.
properties:
reconcilerPeriod:
description: 'ReconcilerPeriod is the period to perform
reconciliation with the Calico datastore. [Default:
5m]'
type: string
type: object
serviceAccount:
description: ServiceAccount enables and configures the service
account controller. Enabled by default, set to nil to disable.
properties:
reconcilerPeriod:
description: 'ReconcilerPeriod is the period to perform
reconciliation with the Calico datastore. [Default:
5m]'
type: string
type: object
workloadEndpoint:
description: WorkloadEndpoint enables and configures the workload
endpoint controller. Enabled by default, set to nil to disable.
properties:
reconcilerPeriod:
description: 'ReconcilerPeriod is the period to perform
reconciliation with the Calico datastore. [Default:
5m]'
type: string
type: object
type: object
etcdV3CompactionPeriod:
description: 'EtcdV3CompactionPeriod is the period between etcdv3
compaction requests. Set to 0 to disable. [Default: 10m]'
type: string
healthChecks:
description: 'HealthChecks enables or disables support for health
checks [Default: Enabled]'
type: string
logSeverityScreen:
                    description: 'LogSeverityScreen is the log severity above which
                      logs are sent to stdout. [Default: Info]'
type: string
prometheusMetricsPort:
description: 'PrometheusMetricsPort is the TCP port that the Prometheus
metrics server should bind to. Set to 0 to disable. [Default:
9094]'
type: integer
required:
- controllers
type: object
type: object
type: object
served: true
storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
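# Illustrative KubeControllersConfiguration for the CRD above, commented out so
# it is not applied; kube-controllers normally creates the "default" resource
# itself. The values shown mirror the documented defaults and are assumptions.
#
# apiVersion: crd.projectcalico.org/v1
# kind: KubeControllersConfiguration
# metadata:
#   name: default
# spec:
#   logSeverityScreen: Info
#   healthChecks: Enabled
#   etcdV3CompactionPeriod: 10m
#   controllers:
#     node:
#       reconcilerPeriod: 5m
#       syncLabels: Enabled
#       leakGracePeriod: 15m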
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: networkpolicies.crd.projectcalico.org
spec:
group: crd.projectcalico.org
names:
kind: NetworkPolicy
listKind: NetworkPolicyList
plural: networkpolicies
singular: networkpolicy
scope: Namespaced
versions:
- name: v1
schema:
openAPIV3Schema:
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
properties:
egress:
description: The ordered set of egress rules. Each rule contains
a set of packet match criteria and a corresponding action to apply.
items:
description: "A Rule encapsulates a set of match criteria and an
action. Both selector-based security Policy and security Profiles
reference rules - separated out as a list of rules for both ingress
and egress packet matching. \n Each positive match criteria has
a negated version, prefixed with \"Not\". All the match criteria
within a rule must be satisfied for a packet to match. A single
rule can contain the positive and negative version of a match
and both must be satisfied for the rule to match."
properties:
action:
type: string
destination:
description: Destination contains the match criteria that apply
to destination entity.
properties:
namespaceSelector:
description: "NamespaceSelector is an optional field that
contains a selector expression. Only traffic that originates
from (or terminates at) endpoints within the selected
namespaces will be matched. When both NamespaceSelector
and another selector are defined on the same rule, then
only workload endpoints that are matched by both selectors
will be selected by the rule. \n For NetworkPolicy, an
empty NamespaceSelector implies that the Selector is limited
to selecting only workload endpoints in the same namespace
as the NetworkPolicy. \n For NetworkPolicy, `global()`
NamespaceSelector implies that the Selector is limited
to selecting only GlobalNetworkSet or HostEndpoint. \n
For GlobalNetworkPolicy, an empty NamespaceSelector implies
the Selector applies to workload endpoints across all
namespaces."
type: string
nets:
description: Nets is an optional field that restricts the
rule to only apply to traffic that originates from (or
terminates at) IP addresses in any of the given subnets.
items:
type: string
type: array
notNets:
description: NotNets is the negated version of the Nets
field.
items:
type: string
type: array
notPorts:
description: NotPorts is the negated version of the Ports
field. Since only some protocols have ports, if any ports
are specified it requires the Protocol match in the Rule
to be set to "TCP" or "UDP".
items:
anyOf:
- type: integer
- type: string
pattern: ^.*
x-kubernetes-int-or-string: true
type: array
notSelector:
description: NotSelector is the negated version of the Selector
field. See Selector field for subtleties with negated
selectors.
type: string
ports:
description: "Ports is an optional field that restricts
the rule to only apply to traffic that has a source (destination)
port that matches one of these ranges/values. This value
is a list of integers or strings that represent ranges
of ports. \n Since only some protocols have ports, if
any ports are specified it requires the Protocol match
in the Rule to be set to \"TCP\" or \"UDP\"."
items:
anyOf:
- type: integer
- type: string
pattern: ^.*
x-kubernetes-int-or-string: true
type: array
selector:
description: "Selector is an optional field that contains
a selector expression (see Policy for sample syntax).
\ Only traffic that originates from (terminates at) endpoints
matching the selector will be matched. \n Note that: in
addition to the negated version of the Selector (see NotSelector
below), the selector expression syntax itself supports
negation. The two types of negation are subtly different.
One negates the set of matched endpoints, the other negates
the whole match: \n \tSelector = \"!has(my_label)\" matches
packets that are from other Calico-controlled \tendpoints
that do not have the label \"my_label\". \n \tNotSelector
= \"has(my_label)\" matches packets that are not from
Calico-controlled \tendpoints that do have the label \"my_label\".
\n The effect is that the latter will accept packets from
non-Calico sources whereas the former is limited to packets
from Calico-controlled endpoints."
type: string
serviceAccounts:
description: ServiceAccounts is an optional field that restricts
the rule to only apply to traffic that originates from
(or terminates at) a pod running as a matching service
account.
properties:
names:
description: Names is an optional field that restricts
the rule to only apply to traffic that originates
from (or terminates at) a pod running as a service
account whose name is in the list.
items:
type: string
type: array
selector:
description: Selector is an optional field that restricts
the rule to only apply to traffic that originates
from (or terminates at) a pod running as a service
account that matches the given label selector. If
both Names and Selector are specified then they are
AND'ed.
type: string
type: object
services:
description: "Services is an optional field that contains
options for matching Kubernetes Services. If specified,
only traffic that originates from or terminates at endpoints
within the selected service(s) will be matched, and only
to/from each endpoint's port. \n Services cannot be specified
on the same rule as Selector, NotSelector, NamespaceSelector,
Nets, NotNets or ServiceAccounts. \n Ports and NotPorts
can only be specified with Services on ingress rules."
properties:
name:
description: Name specifies the name of a Kubernetes
Service to match.
type: string
namespace:
description: Namespace specifies the namespace of the
given Service. If left empty, the rule will match
within this policy's namespace.
type: string
type: object
type: object
http:
description: HTTP contains match criteria that apply to HTTP
requests.
properties:
methods:
description: Methods is an optional field that restricts
the rule to apply only to HTTP requests that use one of
the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple
methods are OR'd together.
items:
type: string
type: array
paths:
description: 'Paths is an optional field that restricts
the rule to apply to HTTP requests that use one of the
listed HTTP Paths. Multiple paths are OR''d together.
e.g: - exact: /foo - prefix: /bar NOTE: Each entry may
ONLY specify either a `exact` or a `prefix` match. The
validator will check for it.'
items:
description: 'HTTPPath specifies an HTTP path to match.
It may be either of the form: exact: <path>: which matches
the path exactly or prefix: <path-prefix>: which matches
the path prefix'
properties:
exact:
type: string
prefix:
type: string
type: object
type: array
type: object
icmp:
description: ICMP is an optional field that restricts the rule
to apply to a specific type and code of ICMP traffic. This
should only be specified if the Protocol field is set to "ICMP"
or "ICMPv6".
properties:
code:
description: Match on a specific ICMP code. If specified,
the Type value must also be specified. This is a technical
limitation imposed by the kernel's iptables firewall,
which Calico uses to enforce the rule.
type: integer
type:
description: Match on a specific ICMP type. For example
a value of 8 refers to ICMP Echo Request (i.e. pings).
type: integer
type: object
ipVersion:
description: IPVersion is an optional field that restricts the
rule to only match a specific IP version.
type: integer
metadata:
description: Metadata contains additional information for this
rule
properties:
annotations:
additionalProperties:
type: string
description: Annotations is a set of key value pairs that
give extra information about the rule
type: object
type: object
notICMP:
description: NotICMP is the negated version of the ICMP field.
properties:
code:
description: Match on a specific ICMP code. If specified,
the Type value must also be specified. This is a technical
limitation imposed by the kernel's iptables firewall,
which Calico uses to enforce the rule.
type: integer
type:
description: Match on a specific ICMP type. For example
a value of 8 refers to ICMP Echo Request (i.e. pings).
type: integer
type: object
notProtocol:
anyOf:
- type: integer
- type: string
description: NotProtocol is the negated version of the Protocol
field.
pattern: ^.*
x-kubernetes-int-or-string: true
protocol:
anyOf:
- type: integer
- type: string
description: "Protocol is an optional field that restricts the
rule to only apply to traffic of a specific IP protocol. Required
if any of the EntityRules contain Ports (because ports only
apply to certain protocols). \n Must be one of these string
values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\",
\"UDPLite\" or an integer in the range 1-255."
pattern: ^.*
x-kubernetes-int-or-string: true
source:
description: Source contains the match criteria that apply to
source entity.
properties:
namespaceSelector:
description: "NamespaceSelector is an optional field that
contains a selector expression. Only traffic that originates
from (or terminates at) endpoints within the selected
namespaces will be matched. When both NamespaceSelector
and another selector are defined on the same rule, then
only workload endpoints that are matched by both selectors
will be selected by the rule. \n For NetworkPolicy, an
empty NamespaceSelector implies that the Selector is limited
to selecting only workload endpoints in the same namespace
as the NetworkPolicy. \n For NetworkPolicy, `global()`
NamespaceSelector implies that the Selector is limited
to selecting only GlobalNetworkSet or HostEndpoint. \n
For GlobalNetworkPolicy, an empty NamespaceSelector implies
the Selector applies to workload endpoints across all
namespaces."
type: string
nets:
description: Nets is an optional field that restricts the
rule to only apply to traffic that originates from (or
terminates at) IP addresses in any of the given subnets.
items:
type: string
type: array
notNets:
description: NotNets is the negated version of the Nets
field.
items:
type: string
type: array
notPorts:
description: NotPorts is the negated version of the Ports
field. Since only some protocols have ports, if any ports
are specified it requires the Protocol match in the Rule
to be set to "TCP" or "UDP".
items:
anyOf:
- type: integer
- type: string
pattern: ^.*
x-kubernetes-int-or-string: true
type: array
notSelector:
description: NotSelector is the negated version of the Selector
field. See Selector field for subtleties with negated
selectors.
type: string
ports:
description: "Ports is an optional field that restricts
the rule to only apply to traffic that has a source (destination)
port that matches one of these ranges/values. This value
is a list of integers or strings that represent ranges
of ports. \n Since only some protocols have ports, if
any ports are specified it requires the Protocol match
in the Rule to be set to \"TCP\" or \"UDP\"."
items:
anyOf:
- type: integer
- type: string
pattern: ^.*
x-kubernetes-int-or-string: true
type: array
selector:
description: "Selector is an optional field that contains
a selector expression (see Policy for sample syntax).
\ Only traffic that originates from (terminates at) endpoints
matching the selector will be matched. \n Note that: in
addition to the negated version of the Selector (see NotSelector
below), the selector expression syntax itself supports
negation. The two types of negation are subtly different.
One negates the set of matched endpoints, the other negates
the whole match: \n \tSelector = \"!has(my_label)\" matches
packets that are from other Calico-controlled \tendpoints
that do not have the label \"my_label\". \n \tNotSelector
= \"has(my_label)\" matches packets that are not from
Calico-controlled \tendpoints that do have the label \"my_label\".
\n The effect is that the latter will accept packets from
non-Calico sources whereas the former is limited to packets
from Calico-controlled endpoints."
type: string
serviceAccounts:
description: ServiceAccounts is an optional field that restricts
the rule to only apply to traffic that originates from
(or terminates at) a pod running as a matching service
account.
properties:
names:
description: Names is an optional field that restricts
the rule to only apply to traffic that originates
from (or terminates at) a pod running as a service
account whose name is in the list.
items:
type: string
type: array
selector:
description: Selector is an optional field that restricts
the rule to only apply to traffic that originates
from (or terminates at) a pod running as a service
account that matches the given label selector. If
both Names and Selector are specified then they are
AND'ed.
type: string
type: object
services:
description: "Services is an optional field that contains
options for matching Kubernetes Services. If specified,
only traffic that originates from or terminates at endpoints
within the selected service(s) will be matched, and only
to/from each endpoint's port. \n Services cannot be specified
on the same rule as Selector, NotSelector, NamespaceSelector,
Nets, NotNets or ServiceAccounts. \n Ports and NotPorts
can only be specified with Services on ingress rules."
properties:
name:
description: Name specifies the name of a Kubernetes
Service to match.
type: string
namespace:
description: Namespace specifies the namespace of the
given Service. If left empty, the rule will match
within this policy's namespace.
type: string
type: object
type: object
required:
- action
type: object
type: array
ingress:
description: The ordered set of ingress rules. Each rule contains
a set of packet match criteria and a corresponding action to apply.
items:
description: "A Rule encapsulates a set of match criteria and an
action. Both selector-based security Policy and security Profiles
reference rules - separated out as a list of rules for both ingress
and egress packet matching. \n Each positive match criteria has
a negated version, prefixed with \"Not\". All the match criteria
within a rule must be satisfied for a packet to match. A single
rule can contain the positive and negative version of a match
and both must be satisfied for the rule to match."
properties:
action:
type: string
destination:
description: Destination contains the match criteria that apply
to destination entity.
properties:
namespaceSelector:
description: "NamespaceSelector is an optional field that
contains a selector expression. Only traffic that originates
from (or terminates at) endpoints within the selected
namespaces will be matched. When both NamespaceSelector
and another selector are defined on the same rule, then
only workload endpoints that are matched by both selectors
will be selected by the rule. \n For NetworkPolicy, an
empty NamespaceSelector implies that the Selector is limited
to selecting only workload endpoints in the same namespace
as the NetworkPolicy. \n For NetworkPolicy, `global()`
NamespaceSelector implies that the Selector is limited
to selecting only GlobalNetworkSet or HostEndpoint. \n
For GlobalNetworkPolicy, an empty NamespaceSelector implies
the Selector applies to workload endpoints across all
namespaces."
type: string
nets:
description: Nets is an optional field that restricts the
rule to only apply to traffic that originates from (or
terminates at) IP addresses in any of the given subnets.
items:
type: string
type: array
notNets:
description: NotNets is the negated version of the Nets
field.
items:
type: string
type: array
notPorts:
description: NotPorts is the negated version of the Ports
field. Since only some protocols have ports, if any ports
are specified it requires the Protocol match in the Rule
to be set to "TCP" or "UDP".
items:
anyOf:
- type: integer
- type: string
pattern: ^.*
x-kubernetes-int-or-string: true
type: array
notSelector:
description: NotSelector is the negated version of the Selector
field. See Selector field for subtleties with negated
selectors.
type: string
ports:
description: "Ports is an optional field that restricts
the rule to only apply to traffic that has a source (destination)
port that matches one of these ranges/values. This value
is a list of integers or strings that represent ranges
of ports. \n Since only some protocols have ports, if
any ports are specified it requires the Protocol match
in the Rule to be set to \"TCP\" or \"UDP\"."
items:
anyOf:
- type: integer
- type: string
pattern: ^.*
x-kubernetes-int-or-string: true
type: array
selector:
description: "Selector is an optional field that contains
a selector expression (see Policy for sample syntax).
\ Only traffic that originates from (terminates at) endpoints
matching the selector will be matched. \n Note that: in
addition to the negated version of the Selector (see NotSelector
below), the selector expression syntax itself supports
negation. The two types of negation are subtly different.
One negates the set of matched endpoints, the other negates
the whole match: \n \tSelector = \"!has(my_label)\" matches
packets that are from other Calico-controlled \tendpoints
that do not have the label \"my_label\". \n \tNotSelector
= \"has(my_label)\" matches packets that are not from
Calico-controlled \tendpoints that do have the label \"my_label\".
\n The effect is that the latter will accept packets from
non-Calico sources whereas the former is limited to packets
from Calico-controlled endpoints."
type: string
serviceAccounts:
description: ServiceAccounts is an optional field that restricts
the rule to only apply to traffic that originates from
(or terminates at) a pod running as a matching service
account.
properties:
names:
description: Names is an optional field that restricts
the rule to only apply to traffic that originates
from (or terminates at) a pod running as a service
account whose name is in the list.
items:
type: string
type: array
selector:
description: Selector is an optional field that restricts
the rule to only apply to traffic that originates
from (or terminates at) a pod running as a service
account that matches the given label selector. If
both Names and Selector are specified then they are
AND'ed.
type: string
type: object
services:
description: "Services is an optional field that contains
options for matching Kubernetes Services. If specified,
only traffic that originates from or terminates at endpoints
within the selected service(s) will be matched, and only
to/from each endpoint's port. \n Services cannot be specified
on the same rule as Selector, NotSelector, NamespaceSelector,
Nets, NotNets or ServiceAccounts. \n Ports and NotPorts
can only be specified with Services on ingress rules."
properties:
name:
description: Name specifies the name of a Kubernetes
Service to match.
type: string
namespace:
description: Namespace specifies the namespace of the
given Service. If left empty, the rule will match
within this policy's namespace.
type: string
type: object
type: object
http:
description: HTTP contains match criteria that apply to HTTP
requests.
properties:
methods:
description: Methods is an optional field that restricts
the rule to apply only to HTTP requests that use one of
the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple
methods are OR'd together.
items:
type: string
type: array
paths:
description: 'Paths is an optional field that restricts
the rule to apply to HTTP requests that use one of the
listed HTTP Paths. Multiple paths are OR''d together.
e.g: - exact: /foo - prefix: /bar NOTE: Each entry may
ONLY specify either a `exact` or a `prefix` match. The
validator will check for it.'
items:
description: 'HTTPPath specifies an HTTP path to match.
It may be either of the form: exact: <path>: which matches
the path exactly or prefix: <path-prefix>: which matches
the path prefix'
properties:
exact:
type: string
prefix:
type: string
type: object
type: array
type: object
icmp:
description: ICMP is an optional field that restricts the rule
to apply to a specific type and code of ICMP traffic. This
should only be specified if the Protocol field is set to "ICMP"
or "ICMPv6".
properties:
code:
description: Match on a specific ICMP code. If specified,
the Type value must also be specified. This is a technical
limitation imposed by the kernel's iptables firewall,
which Calico uses to enforce the rule.
type: integer
type:
description: Match on a specific ICMP type. For example
a value of 8 refers to ICMP Echo Request (i.e. pings).
type: integer
type: object
ipVersion:
description: IPVersion is an optional field that restricts the
rule to only match a specific IP version.
type: integer
metadata:
description: Metadata contains additional information for this
rule
properties:
annotations:
additionalProperties:
type: string
description: Annotations is a set of key value pairs that
give extra information about the rule
type: object
type: object
notICMP:
description: NotICMP is the negated version of the ICMP field.
properties:
code:
description: Match on a specific ICMP code. If specified,
the Type value must also be specified. This is a technical
limitation imposed by the kernel's iptables firewall,
which Calico uses to enforce the rule.
type: integer
type:
description: Match on a specific ICMP type. For example
a value of 8 refers to ICMP Echo Request (i.e. pings).
type: integer
type: object
notProtocol:
anyOf:
- type: integer
- type: string
description: NotProtocol is the negated version of the Protocol
field.
pattern: ^.*
x-kubernetes-int-or-string: true
protocol:
anyOf:
- type: integer
- type: string
description: "Protocol is an optional field that restricts the
rule to only apply to traffic of a specific IP protocol. Required
if any of the EntityRules contain Ports (because ports only
apply to certain protocols). \n Must be one of these string
values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\",
\"UDPLite\" or an integer in the range 1-255."
pattern: ^.*
x-kubernetes-int-or-string: true
source:
description: Source contains the match criteria that apply to
source entity.
properties:
namespaceSelector:
description: "NamespaceSelector is an optional field that
contains a selector expression. Only traffic that originates
from (or terminates at) endpoints within the selected
namespaces will be matched. When both NamespaceSelector
and another selector are defined on the same rule, then
only workload endpoints that are matched by both selectors
will be selected by the rule. \n For NetworkPolicy, an
empty NamespaceSelector implies that the Selector is limited
to selecting only workload endpoints in the same namespace
as the NetworkPolicy. \n For NetworkPolicy, `global()`
NamespaceSelector implies that the Selector is limited
to selecting only GlobalNetworkSet or HostEndpoint. \n
For GlobalNetworkPolicy, an empty NamespaceSelector implies
the Selector applies to workload endpoints across all
namespaces."
type: string
nets:
description: Nets is an optional field that restricts the
rule to only apply to traffic that originates from (or
terminates at) IP addresses in any of the given subnets.
items:
type: string
type: array
notNets:
description: NotNets is the negated version of the Nets
field.
items:
type: string
type: array
notPorts:
description: NotPorts is the negated version of the Ports
field. Since only some protocols have ports, if any ports
are specified it requires the Protocol match in the Rule
to be set to "TCP" or "UDP".
items:
anyOf:
- type: integer
- type: string
pattern: ^.*
x-kubernetes-int-or-string: true
type: array
notSelector:
description: NotSelector is the negated version of the Selector
field. See Selector field for subtleties with negated
selectors.
type: string
ports:
description: "Ports is an optional field that restricts
the rule to only apply to traffic that has a source (destination)
port that matches one of these ranges/values. This value
is a list of integers or strings that represent ranges
of ports. \n Since only some protocols have ports, if
any ports are specified it requires the Protocol match
in the Rule to be set to \"TCP\" or \"UDP\"."
items:
anyOf:
- type: integer
- type: string
pattern: ^.*
x-kubernetes-int-or-string: true
type: array
selector:
description: "Selector is an optional field that contains
a selector expression (see Policy for sample syntax).
\ Only traffic that originates from (terminates at) endpoints
matching the selector will be matched. \n Note that: in
addition to the negated version of the Selector (see NotSelector
below), the selector expression syntax itself supports
negation. The two types of negation are subtly different.
One negates the set of matched endpoints, the other negates
the whole match: \n \tSelector = \"!has(my_label)\" matches
packets that are from other Calico-controlled \tendpoints
that do not have the label \"my_label\". \n \tNotSelector
= \"has(my_label)\" matches packets that are not from
Calico-controlled \tendpoints that do have the label \"my_label\".
\n The effect is that the latter will accept packets from
non-Calico sources whereas the former is limited to packets
from Calico-controlled endpoints."
type: string
serviceAccounts:
description: ServiceAccounts is an optional field that restricts
the rule to only apply to traffic that originates from
(or terminates at) a pod running as a matching service
account.
properties:
names:
description: Names is an optional field that restricts
the rule to only apply to traffic that originates
from (or terminates at) a pod running as a service
account whose name is in the list.
items:
type: string
type: array
selector:
description: Selector is an optional field that restricts
the rule to only apply to traffic that originates
from (or terminates at) a pod running as a service
account that matches the given label selector. If
both Names and Selector are specified then they are
AND'ed.
type: string
type: object
services:
description: "Services is an optional field that contains
options for matching Kubernetes Services. If specified,
only traffic that originates from or terminates at endpoints
within the selected service(s) will be matched, and only
to/from each endpoint's port. \n Services cannot be specified
on the same rule as Selector, NotSelector, NamespaceSelector,
Nets, NotNets or ServiceAccounts. \n Ports and NotPorts
can only be specified with Services on ingress rules."
properties:
name:
description: Name specifies the name of a Kubernetes
Service to match.
type: string
namespace:
description: Namespace specifies the namespace of the
given Service. If left empty, the rule will match
within this policy's namespace.
type: string
type: object
type: object
required:
- action
type: object
type: array
order:
description: Order is an optional field that specifies the order in
which the policy is applied. Policies with higher "order" are applied
after those with lower order. If the order is omitted, it may be
considered to be "infinite" - i.e. the policy will be applied last. Policies
with identical order will be applied in alphanumerical order based
on the Policy "Name".
type: number
selector:
                description: "The selector is an expression used to pick out
the endpoints that the policy should be applied to. \n Selector
expressions follow this syntax: \n \tlabel == \"string_literal\"
\ -> comparison, e.g. my_label == \"foo bar\" \tlabel != \"string_literal\"
\ -> not equal; also matches if label is not present \tlabel in
{ \"a\", \"b\", \"c\", ... } -> true if the value of label X is
one of \"a\", \"b\", \"c\" \tlabel not in { \"a\", \"b\", \"c\",
... } -> true if the value of label X is not one of \"a\", \"b\",
\"c\" \thas(label_name) -> True if that label is present \t! expr
-> negation of expr \texpr && expr -> Short-circuit and \texpr
|| expr -> Short-circuit or \t( expr ) -> parens for grouping \tall()
or the empty selector -> matches all endpoints. \n Label names are
allowed to contain alphanumerics, -, _ and /. String literals are
more permissive but they do not support escape characters. \n Examples
(with made-up labels): \n \ttype == \"webserver\" && deployment
== \"prod\" \ttype in {\"frontend\", \"backend\"} \tdeployment !=
\"dev\" \t! has(label_name)"
type: string
serviceAccountSelector:
description: ServiceAccountSelector is an optional field for an expression
used to select a pod based on service accounts.
type: string
types:
description: "Types indicates whether this policy applies to ingress,
or to egress, or to both. When not explicitly specified (and so
                  the value on creation is empty or nil), Calico defaults Types according
                  to what Ingress and Egress rules are present in the policy. The default
is: \n - [ PolicyTypeIngress ], if there are no Egress rules (including
the case where there are also no Ingress rules) \n - [ PolicyTypeEgress
], if there are Egress rules but no Ingress rules \n - [ PolicyTypeIngress,
PolicyTypeEgress ], if there are both Ingress and Egress rules.
\n When the policy is read back again, Types will always be one
of these values, never empty or nil."
items:
description: PolicyType enumerates the possible values of the PolicySpec
Types field.
type: string
type: array
type: object
type: object
served: true
storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
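# Illustrative Calico NetworkPolicy for the CRD above, commented out so it is
# not applied. It sketches how the schema fits together: an ordered policy
# selecting role == 'database' endpoints that allows TCP ingress to port 6379
# from role == 'frontend'. All names, labels, and ports are examples.
#
# apiVersion: crd.projectcalico.org/v1
# kind: NetworkPolicy
# metadata:
#   name: allow-tcp-6379
#   namespace: default
# spec:
#   order: 100
#   selector: role == 'database'
#   types:
#     - Ingress
#   ingress:
#     - action: Allow
#       protocol: TCP
#       source:
#         selector: role == 'frontend'
#       destination:
#         ports:
#           - 6379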
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: networksets.crd.projectcalico.org
spec:
group: crd.projectcalico.org
names:
kind: NetworkSet
listKind: NetworkSetList
plural: networksets
singular: networkset
scope: Namespaced
versions:
- name: v1
schema:
openAPIV3Schema:
description: NetworkSet is the Namespaced-equivalent of the GlobalNetworkSet.
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: NetworkSetSpec contains the specification for a NetworkSet
resource.
properties:
nets:
description: The list of IP networks that belong to this set.
items:
type: string
type: array
type: object
type: object
served: true
storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
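# Illustrative NetworkSet for the CRD above, commented out so it is not
# applied; it groups external CIDRs under a label so policy rules can match
# them with a selector. All values are examples.
#
# apiVersion: crd.projectcalico.org/v1
# kind: NetworkSet
# metadata:
#   name: external-nets
#   namespace: default
#   labels:
#     role: external
# spec:
#   nets:
#     - 198.51.100.0/28
#     - 203.0.113.0/24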
---
---
# Source: calico/templates/calico-kube-controllers-rbac.yaml
# Include a clusterrole for the kube-controllers component,
# and bind it to the calico-kube-controllers serviceaccount.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: calico-kube-controllers
rules:
# Nodes are watched to monitor for deletions.
- apiGroups: [""]
resources:
- nodes
verbs:
- watch
- list
- get
# Pods are watched to check for existence as part of IPAM controller.
- apiGroups: [""]
resources:
- pods
verbs:
- get
- list
- watch
# IPAM resources are manipulated when nodes are deleted.
- apiGroups: ["crd.projectcalico.org"]
resources:
- ippools
- ipreservations
verbs:
- list
- apiGroups: ["crd.projectcalico.org"]
resources:
- blockaffinities
- ipamblocks
- ipamhandles
verbs:
- get
- list
- create
- update
- delete
- watch
# kube-controllers manages hostendpoints.
- apiGroups: ["crd.projectcalico.org"]
resources:
- hostendpoints
verbs:
- get
- list
- create
- update
- delete
# Needs access to update clusterinformations.
- apiGroups: ["crd.projectcalico.org"]
resources:
- clusterinformations
verbs:
- get
- create
- update
# KubeControllersConfiguration is where it gets its config
- apiGroups: ["crd.projectcalico.org"]
resources:
- kubecontrollersconfigurations
verbs:
# read its own config
- get
# create a default if none exists
- create
# update status
- update
# watch for changes
- watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: calico-kube-controllers
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico-kube-controllers
subjects:
- kind: ServiceAccount
name: calico-kube-controllers
namespace: kube-system
---
---
# Source: calico/templates/calico-node-rbac.yaml
# Include a clusterrole for the calico-node DaemonSet,
# and bind it to the calico-node serviceaccount.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: calico-node
rules:
# The CNI plugin needs to get pods, nodes, and namespaces.
- apiGroups: [""]
resources:
- pods
- nodes
- namespaces
verbs:
- get
# EndpointSlices are used for Service-based network policy rule
# enforcement.
- apiGroups: ["discovery.k8s.io"]
resources:
- endpointslices
verbs:
- watch
- list
- apiGroups: [""]
resources:
- endpoints
- services
verbs:
# Used to discover service IPs for advertisement.
- watch
- list
# Used to discover Typhas.
- get
# Pod CIDR auto-detection on kubeadm needs access to config maps.
- apiGroups: [""]
resources:
- configmaps
verbs:
- get
- apiGroups: [""]
resources:
- nodes/status
verbs:
# Needed for clearing NodeNetworkUnavailable flag.
- patch
# Calico stores some configuration information in node annotations.
- update
# Watch for changes to Kubernetes NetworkPolicies.
- apiGroups: ["networking.k8s.io"]
resources:
- networkpolicies
verbs:
- watch
- list
# Used by Calico for policy information.
- apiGroups: [""]
resources:
- pods
- namespaces
- serviceaccounts
verbs:
- list
- watch
# The CNI plugin patches pods/status.
- apiGroups: [""]
resources:
- pods/status
verbs:
- patch
# Calico monitors various CRDs for config.
- apiGroups: ["crd.projectcalico.org"]
resources:
- globalfelixconfigs
- felixconfigurations
- bgppeers
- globalbgpconfigs
- bgpconfigurations
- ippools
- ipreservations
- ipamblocks
- globalnetworkpolicies
- globalnetworksets
- networkpolicies
- networksets
- clusterinformations
- hostendpoints
- blockaffinities
- caliconodestatuses
verbs:
- get
- list
- watch
# Calico must create and update some CRDs on startup.
- apiGroups: ["crd.projectcalico.org"]
resources:
- ippools
- felixconfigurations
- clusterinformations
verbs:
- create
- update
# Calico must update some CRDs.
- apiGroups: [ "crd.projectcalico.org" ]
resources:
- caliconodestatuses
verbs:
- update
# Calico stores some configuration information on the node.
- apiGroups: [""]
resources:
- nodes
verbs:
- get
- list
- watch
# These permissions are only required for upgrade from v2.6, and can
# be removed after upgrade or on fresh installations.
- apiGroups: ["crd.projectcalico.org"]
resources:
- bgpconfigurations
- bgppeers
verbs:
- create
- update
# These permissions are required for Calico CNI to perform IPAM allocations.
- apiGroups: ["crd.projectcalico.org"]
resources:
- blockaffinities
- ipamblocks
- ipamhandles
verbs:
- get
- list
- create
- update
- delete
- apiGroups: ["crd.projectcalico.org"]
resources:
- ipamconfigs
verbs:
- get
# Block affinities must also be watchable by confd for route aggregation.
- apiGroups: ["crd.projectcalico.org"]
resources:
- blockaffinities
verbs:
- watch
# The Calico IPAM migration needs to get daemonsets. These permissions can be
# removed if not upgrading from an installation using host-local IPAM.
- apiGroups: ["apps"]
resources:
- daemonsets
verbs:
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: calico-node
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico-node
subjects:
- kind: ServiceAccount
name: calico-node
namespace: kube-system
---
# Source: calico/templates/calico-node.yaml
# This manifest installs the calico-node container, as well
# as the CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: calico-node
namespace: kube-system
labels:
k8s-app: calico-node
spec:
selector:
matchLabels:
k8s-app: calico-node
updateStrategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
template:
metadata:
labels:
k8s-app: calico-node
spec:
nodeSelector:
kubernetes.io/os: linux
hostNetwork: true
tolerations:
# Make sure calico-node gets scheduled on all nodes.
- effect: NoSchedule
operator: Exists
# Mark the pod as a critical add-on for rescheduling.
- key: CriticalAddonsOnly
operator: Exists
- effect: NoExecute
operator: Exists
serviceAccountName: calico-node
# Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
# deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
terminationGracePeriodSeconds: 0
priorityClassName: system-node-critical
initContainers:
# This container performs upgrade from host-local IPAM to calico-ipam.
# It can be deleted if this is a fresh installation, or if you have already
# upgraded to use calico-ipam.
- name: upgrade-ipam
image: "${_prefix}cni:${CALICO_TAG}"
command: ["/opt/cni/bin/calico-ipam", "-upgrade"]
envFrom:
- configMapRef:
# Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode.
name: kubernetes-services-endpoint
optional: true
env:
- name: KUBERNETES_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: CALICO_NETWORKING_BACKEND
valueFrom:
configMapKeyRef:
name: calico-config
key: calico_backend
volumeMounts:
- mountPath: /var/lib/cni/networks
name: host-local-net-dir
- mountPath: /host/opt/cni/bin
name: cni-bin-dir
securityContext:
privileged: true
# This container installs the CNI binaries
# and CNI network config file on each node.
- name: install-cni
image: "${_prefix}cni:${CALICO_TAG}"
command: ["/opt/cni/bin/install"]
envFrom:
- configMapRef:
# Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode.
name: kubernetes-services-endpoint
optional: true
env:
# Name of the CNI config file to create.
- name: CNI_CONF_NAME
value: "10-calico.conflist"
# The CNI network config to install on each node.
- name: CNI_NETWORK_CONFIG
valueFrom:
configMapKeyRef:
name: calico-config
key: cni_network_config
# Set the hostname based on the k8s node name.
- name: KUBERNETES_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
# CNI MTU Config variable
- name: CNI_MTU
valueFrom:
configMapKeyRef:
name: calico-config
key: veth_mtu
# Prevents the container from sleeping forever.
- name: SLEEP
value: "false"
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-bin-dir
- mountPath: /host/etc/cni/net.d
name: cni-net-dir
securityContext:
privileged: true
# Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes
# to communicate with Felix over the Policy Sync API.
- name: flexvol-driver
image: "${_prefix}pod2daemon-flexvol:${CALICO_TAG}"
volumeMounts:
- name: flexvol-driver-host
mountPath: /host/driver
securityContext:
privileged: true
containers:
# Runs calico-node container on each Kubernetes node. This
# container programs network policy and routes on each
# host.
- name: calico-node
image: "${_prefix}node:${CALICO_TAG}"
envFrom:
- configMapRef:
# Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode.
name: kubernetes-services-endpoint
optional: true
env:
# Use Kubernetes API as the backing datastore.
- name: DATASTORE_TYPE
value: "kubernetes"
# Wait for the datastore.
- name: WAIT_FOR_DATASTORE
value: "true"
# Set based on the k8s node name.
- name: NODENAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
# Choose the backend to use.
- name: CALICO_NETWORKING_BACKEND
valueFrom:
configMapKeyRef:
name: calico-config
key: calico_backend
# Cluster type to identify the deployment type
- name: CLUSTER_TYPE
value: "k8s,bgp"
# Auto-detect the BGP IP address.
- name: IP
value: "autodetect"
# Use fixed subnet CIDR to autodetect IP (supported since Calico v3.16.x)
- name: IP_AUTODETECTION_METHOD
value: "cidr=${CLUSTER_SUBNET_CIDR}"
# Enable IPIP
- name: CALICO_IPV4POOL_IPIP
value: "${CALICO_IPV4POOL_IPIP}"
# The default IPv4 pool to create on startup if none exists. Pod IPs will be
# chosen from this range. Changing this value after installation will have
# no effect. This should fall within '--cluster-cidr'.
- name: CALICO_IPV4POOL_CIDR
value: ${CALICO_IPV4POOL}
# Enable or Disable VXLAN on the default IP pool.
- name: CALICO_IPV4POOL_VXLAN
value: "Never"
# Set MTU for tunnel device used if ipip is enabled
- name: FELIX_IPINIPMTU
valueFrom:
configMapKeyRef:
name: calico-config
key: veth_mtu
# Set MTU for the VXLAN tunnel device.
- name: FELIX_VXLANMTU
valueFrom:
configMapKeyRef:
name: calico-config
key: veth_mtu
# Set MTU for the Wireguard tunnel device.
- name: FELIX_WIREGUARDMTU
valueFrom:
configMapKeyRef:
name: calico-config
key: veth_mtu
# Disable file logging so `kubectl logs` works.
- name: CALICO_DISABLE_FILE_LOGGING
value: "true"
# Set Felix endpoint to host default action to ACCEPT.
- name: FELIX_DEFAULTENDPOINTTOHOSTACTION
value: "ACCEPT"
# Disable IPv6 on Kubernetes.
- name: FELIX_IPV6SUPPORT
value: "false"
- name: FELIX_HEALTHENABLED
value: "true"
securityContext:
privileged: true
resources:
requests:
cpu: 250m
lifecycle:
preStop:
exec:
command:
- /bin/calico-node
- -shutdown
livenessProbe:
exec:
command:
- /bin/calico-node
- -felix-live
- -bird-live
periodSeconds: 10
initialDelaySeconds: 10
failureThreshold: 6
timeoutSeconds: 10
readinessProbe:
exec:
command:
- /bin/calico-node
- -felix-ready
- -bird-ready
periodSeconds: 10
timeoutSeconds: 10
volumeMounts:
# For maintaining CNI plugin API credentials.
- mountPath: /host/etc/cni/net.d
name: cni-net-dir
readOnly: false
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- mountPath: /run/xtables.lock
name: xtables-lock
readOnly: false
- mountPath: /var/run/calico
name: var-run-calico
readOnly: false
- mountPath: /var/lib/calico
name: var-lib-calico
readOnly: false
- name: policysync
mountPath: /var/run/nodeagent
# For eBPF mode, we need to be able to mount the BPF filesystem at /sys/fs/bpf so we mount in the
# parent directory.
- name: sysfs
mountPath: /sys/fs/
# Bidirectional means that, if we mount the BPF filesystem at /sys/fs/bpf it will propagate to the host.
# If the host is known to mount that filesystem already then Bidirectional can be omitted.
mountPropagation: Bidirectional
- name: cni-log-dir
mountPath: /var/log/calico/cni
readOnly: true
volumes:
# Used by calico-node.
- name: lib-modules
hostPath:
path: /lib/modules
- name: var-run-calico
hostPath:
path: /var/run/calico
- name: var-lib-calico
hostPath:
path: /var/lib/calico
- name: xtables-lock
hostPath:
path: /run/xtables.lock
type: FileOrCreate
- name: sysfs
hostPath:
path: /sys/fs/
type: DirectoryOrCreate
# Used to install CNI.
- name: cni-bin-dir
hostPath:
path: /opt/cni/bin
- name: cni-net-dir
hostPath:
path: /etc/cni/net.d
# Used to access CNI logs.
- name: cni-log-dir
hostPath:
path: /var/log/calico/cni
# Mount in the directory for host-local IPAM allocations. This is
# used when upgrading from host-local to calico-ipam, and can be removed
# if not using the upgrade-ipam init container.
- name: host-local-net-dir
hostPath:
path: /var/lib/cni/networks
# Used to create per-pod Unix Domain Sockets
- name: policysync
hostPath:
type: DirectoryOrCreate
path: /var/run/nodeagent
# Used to install Flex Volume Driver
- name: flexvol-driver-host
hostPath:
type: DirectoryOrCreate
path: /opt/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: calico-node
namespace: kube-system
---
# Source: calico/templates/calico-kube-controllers.yaml
# See https://github.com/projectcalico/kube-controllers
apiVersion: apps/v1
kind: Deployment
metadata:
name: calico-kube-controllers
namespace: kube-system
labels:
k8s-app: calico-kube-controllers
spec:
# The controllers can only have a single active instance.
replicas: 1
selector:
matchLabels:
k8s-app: calico-kube-controllers
strategy:
type: Recreate
template:
metadata:
name: calico-kube-controllers
namespace: kube-system
labels:
k8s-app: calico-kube-controllers
spec:
nodeSelector:
kubernetes.io/os: linux
tolerations:
# Mark the pod as a critical add-on for rescheduling.
- key: CriticalAddonsOnly
operator: Exists
- key: node-role.kubernetes.io/master
effect: NoSchedule
serviceAccountName: calico-kube-controllers
priorityClassName: system-cluster-critical
containers:
- name: calico-kube-controllers
image: "${_prefix}kube-controllers:${CALICO_TAG}"
env:
# Choose which controllers to run.
- name: ENABLED_CONTROLLERS
value: node
- name: DATASTORE_TYPE
value: kubernetes
livenessProbe:
exec:
command:
- /usr/bin/check-status
- -l
periodSeconds: 10
initialDelaySeconds: 10
failureThreshold: 6
timeoutSeconds: 10
readinessProbe:
exec:
command:
- /usr/bin/check-status
- -r
periodSeconds: 10
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: calico-kube-controllers
namespace: kube-system
---
# This manifest creates a Pod Disruption Budget for Controller to allow K8s Cluster Autoscaler to evict
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
name: calico-kube-controllers
namespace: kube-system
labels:
k8s-app: calico-kube-controllers
spec:
maxUnavailable: 1
selector:
matchLabels:
k8s-app: calico-kube-controllers
---
# Source: calico/templates/calico-etcd-secrets.yaml
---
# Source: calico/templates/calico-typha.yaml
---
# Source: calico/templates/configure-canal.yaml
EOF
}
set -x
until [ "ok" = "$(kubectl get --raw='/healthz')" ]
do
echo "Waiting for Kubernetes API..."
sleep 5
done
/usr/bin/kubectl apply -f ${CALICO_DEPLOY} --namespace=kube-system
fi
printf "Finished running ${step}\n"
|
ArchiFleKs/magnum
|
magnum/drivers/common/templates/kubernetes/fragments/calico-service.sh
|
Shell
|
apache-2.0
| 218,743 |
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
if [ `uname` == 'Linux' ];
then
base_dir=$(readlink -f $(dirname $0))
else
base_dir=$(realpath $(dirname $0))
fi
if [ "x$LOG4J_OPTS" = "x" ]; then
export LOG4J_OPTS="-Dlog4j.configuration=file://$base_dir/../config/samza-sql-console-log4j.xml"
fi
if [ "x$HEAP_OPTS" = "x" ]; then
export HEAP_OPTS="-Xmx1G -Xms1G"
fi
exec $base_dir/run-class.sh org.apache.samza.tools.SamzaSqlConsole "$@"
|
fredji97/samza
|
samza-tools/scripts/samza-sql-console.sh
|
Shell
|
apache-2.0
| 1,201 |
#!/bin/bash
# halt.command
# send halt to VM
~/bin/corectl halt k8solo-01
|
TheNewNormal/kube-solo-osx
|
src/halt.command
|
Shell
|
apache-2.0
| 77 |
#!/usr/bin/env bash
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
pkg=pb
protoc --go_out=import_path=${pkg}:. hello.proto
|
merlimat/pulsar
|
pulsar-client-go/pulsar/pb/build.sh
|
Shell
|
apache-2.0
| 867 |
#
# Crochet defines a handful of standard shell functions
# to support end-user customization. These are never
# defined or overridden by board or option definitions.
#
#customize_boot_partition ( ) { }
#customize_freebsd_partition ( ) { }
#customize_post_unmount ( ) { }
# If any of the above are actually defined, add them to the
# strategy. We deliberately add them with a late priority.
#
install_customize_hooks ( ) {
# If customize_boot_partition was defined, add it.
if command -v customize_boot_partition >/dev/null 2>&1; then
PRIORITY=200 strategy_add $PHASE_BOOT_INSTALL customize_boot_partition
fi
if command -v customize_freebsd_partition >/dev/null 2>&1; then
PRIORITY=200 strategy_add $PHASE_FREEBSD_USER_CUSTOMIZATION customize_freebsd_partition
fi
if command -v customize_post_unmount >/dev/null 2>&1; then
PRIORITY=200 strategy_add $PHASE_POST_UNMOUNT customize_post_unmount
fi
}
strategy_add $PHASE_POST_CONFIG install_customize_hooks
# Copy overlay files early in the user customization phase.
# Typically, people want to copy static files and then
# tweak them afterwards.
customize_overlay_files ( ) {
if [ -d ${WORKDIR}/overlay ]; then
echo "Overlaying files from ${WORKDIR}/overlay"
(cd ${WORKDIR}/overlay; find . | cpio -pmud ${BOARD_FREEBSD_MOUNTPOINT})
fi
}
PRIORITY=50 strategy_add $PHASE_FREEBSD_USER_CUSTOMIZATION customize_overlay_files
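#
# A minimal example hook (hypothetical, user-supplied in a configuration
# file before this library runs) illustrating the mechanism above:
#
# customize_freebsd_partition ( ) {
#     # Runs while the FreeBSD partition is mounted at ${BOARD_FREEBSD_MOUNTPOINT}.
#     touch ${BOARD_FREEBSD_MOUNTPOINT}/firstboot
# }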
|
rm4rty/crochet-freebsd
|
lib/customize.sh
|
Shell
|
bsd-2-clause
| 1,448 |
#!/bin/bash
echo backend : Agg > matplotlibrc
|
leouieda/tesseroids-original
|
.travis.sh
|
Shell
|
bsd-3-clause
| 46 |
BASEDIR=`dirname $0`
echo $BASEDIR
java -cp $BASEDIR/cobertura-2.1.1.jar:$BASEDIR/lib/asm-5.0.1.jar:$BASEDIR/lib/asm-analysis-5.0.1.jar:$BASEDIR/lib/asm-tree-5.0.1.jar:$BASEDIR/lib/asm-commons-5.0.1.jar:$BASEDIR/lib/asm-util-5.0.1.jar:$BASEDIR/lib/slf4j-api-1.7.5.jar:$BASEDIR/lib/logback-core-1.0.13.jar:$BASEDIR/lib/logback-classic-1.0.13.jar:$BASEDIR/lib/oro-2.0.8.jar net.sourceforge.cobertura.instrument.InstrumentMain $*
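# Example invocation (hypothetical paths; all arguments are passed straight
# through to InstrumentMain):
#   ./cobertura-instrument.sh --destination ./instrumented build/classes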
|
jminusminus/jmm
|
vendor/cobertura/cobertura-instrument.sh
|
Shell
|
bsd-3-clause
| 430 |
#!/bin/sh
export TMPDIR='/tmp';
cd "$1";
shift;
eval "$@"
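# Example (hypothetical path): run "ls -la" from /var/tmp:
#   ./chdir.sh /var/tmp ls -la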
|
jason-hwang/secc
|
tool/chdir.sh
|
Shell
|
mit
| 58 |
#!/bin/bash
# Use this script to test if a given TCP host/port are available
cmdname=$(basename $0)
echoerr() { if [[ $QUIET -ne 1 ]]; then echo "$@" 1>&2; fi }
usage()
{
cat << USAGE >&2
Usage:
$cmdname host:port [-s] [-t timeout] [-- command args]
-h HOST | --host=HOST Host or IP under test
-p PORT | --port=PORT TCP port under test
    Alternatively, you can specify the host and port as host:port
# -s | --strict Only execute subcommand if the test succeeds
    -q | --quiet                        Don't output any status messages
-t TIMEOUT | --timeout=TIMEOUT
Timeout in seconds, zero for no timeout
-- COMMAND ARGS Execute command with args after the test finishes
USAGE
exit 1
}
# process arguments
while [[ $# -gt 0 ]]
do
case "$1" in
    *:* )
    HOST=$(printf "%s\n" "$1" | cut -d : -f 1)
    PORT=$(printf "%s\n" "$1" | cut -d : -f 2)
    shift 1
    ;;
    -h)
HOST="$2"
if [[ $HOST == "" ]]; then break; fi
shift 2
;;
--host=*)
HOST="${1#*=}"
shift 1
;;
-p)
PORT="$2"
if [[ $PORT == "" ]]; then break; fi
shift 2
;;
--port=*)
PORT="${1#*=}"
shift 1
;;
-t)
TIMEOUT="$2"
if [[ $TIMEOUT == "" ]]; then break; fi
shift 2
;;
--timeout=*)
TIMEOUT="${1#*=}"
shift 1
;;
-q | --quiet)
QUIET=1
shift 1
;;
--)
shift
CLI="$@"
break
;;
--help)
usage
;;
*)
echoerr "Unknown argument: $1"
usage
;;
esac
done
TIMEOUT=${TIMEOUT:-15}
QUIET=${QUIET:-0}
SHOULD_EXECUTE=0
if [[ "$HOST" == "" || "$PORT" == "" || $CLI == "" ]]; then
    echoerr "Error: you need to provide a host and port to test, and a command to run after the test is successful."
usage
exit 1
else
    echo "Command : $CLI"
start_ts=$(date +%s)
while :
do
# (echo > /dev/tcp/$HOST/$PORT) >/dev/null 2>&1
nc "$HOST" "$PORT" < /dev/null > /dev/null 2>&1
result=$?
if [[ $result -eq 0 ]]; then
end_ts=$(date +%s)
echoerr "$HOST:$PORT is available after $((end_ts - start_ts)) seconds"
SHOULD_EXECUTE=1
break
fi
end_ts=$(date +%s)
if [[ $((TIMEOUT - $((end_ts - start_ts)))) -lt 0 ]]; then
echoerr "timeout reached, exiting"
break
else
echoerr "$HOST:$PORT is not available - sleeping"
sleep 1
fi
done
echo "Should execute : $SHOULD_EXECUTE"
if [[ $SHOULD_EXECUTE -eq 1 ]]; then
echoerr "$HOST:$PORT is up - executing command"
exec $CLI
fi
fi
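# Example (hypothetical host/port): wait up to 30 seconds for a local
# service, then run a command once the port accepts connections:
#   ./wait-for-it.sh -h 127.0.0.1 -p 5432 -t 30 -- echo "service is up"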
|
anirbanroydas/ci-testing-python
|
scripts/wait-for-it.sh
|
Shell
|
mit
| 2,793 |
#!/bin/bash
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
echo "Creating Datastore/App Engine instance"
gcloud app create --region "us-central"
echo "Creating bucket: gs://$DEVSHELL_PROJECT_ID-media"
gsutil mb gs://$DEVSHELL_PROJECT_ID-media
echo "Exporting GCLOUD_PROJECT and GCLOUD_BUCKET"
export GCLOUD_PROJECT=$DEVSHELL_PROJECT_ID
export GCLOUD_BUCKET=$DEVSHELL_PROJECT_ID-media
echo "Creating virtual environment"
mkdir ~/venvs
virtualenv -p python3 ~/venvs/developingapps
source ~/venvs/developingapps/bin/activate
echo "Installing Python libraries"
pip install --upgrade pip
pip install -r requirements.txt
echo "Creating Datastore entities"
python add_entities.py
echo "Creating quiz-account Service Account"
gcloud iam service-accounts create quiz-account --display-name "Quiz Account"
gcloud iam service-accounts keys create key.json --iam-account=quiz-account@$DEVSHELL_PROJECT_ID.iam.gserviceaccount.com
export GOOGLE_APPLICATION_CREDENTIALS=key.json
echo "Setting quiz-account IAM Role"
gcloud projects add-iam-policy-binding $DEVSHELL_PROJECT_ID --member serviceAccount:quiz-account@$DEVSHELL_PROJECT_ID.iam.gserviceaccount.com --role roles/owner
echo "Creating Cloud Pub/Sub topics"
gcloud pubsub topics create feedback
gcloud pubsub topics create answers
gcloud pubsub subscriptions create worker-subscription --topic feedback
gcloud pubsub subscriptions create answer-subscription --topic answers
echo "Creating Cloud Spanner Instance, Database, and Tables"
gcloud spanner instances create quiz-instance --config=regional-us-central1 --description="Quiz instance" --nodes=1
gcloud spanner databases create quiz-database --instance quiz-instance --ddl "CREATE TABLE Feedback ( feedbackId STRING(100) NOT NULL, email STRING(100), quiz STRING(20), feedback STRING(MAX), rating INT64, score FLOAT64, timestamp INT64 ) PRIMARY KEY (feedbackId); CREATE TABLE Answers (answerId STRING(100) NOT NULL, id INT64, email STRING(60), quiz STRING(20), answer INT64, correct INT64, timestamp INT64) PRIMARY KEY (answerId DESC);"
echo "Creating Container Engine cluster"
gcloud container clusters create quiz-cluster --zone us-central1-a --scopes cloud-platform
gcloud container clusters get-credentials quiz-cluster --zone us-central1-a
echo "Building Containers"
gcloud container builds submit -t gcr.io/$DEVSHELL_PROJECT_ID/quiz-frontend ./frontend/
gcloud container builds submit -t gcr.io/$DEVSHELL_PROJECT_ID/quiz-backend ./backend/
gcloud container builds submit -t gcr.io/$DEVSHELL_PROJECT_ID/quiz-answer-backend ./answer_backend/
echo "Deploying to Container Engine"
sed -i -e "s/\[GCLOUD_PROJECT\]/$DEVSHELL_PROJECT_ID/g" ./frontend-deployment.yaml
sed -i -e "s/\[GCLOUD_PROJECT\]/$DEVSHELL_PROJECT_ID/g" ./backend-deployment.yaml
sed -i -e "s/\[GCLOUD_PROJECT\]/$DEVSHELL_PROJECT_ID/g" ./answer-backend-deployment.yaml
kubectl create -f ./frontend-deployment.yaml
kubectl create -f ./backend-deployment.yaml
kubectl create -f ./answer-backend-deployment.yaml
kubectl create -f ./frontend-service.yaml
echo "Project ID: $DEVSHELL_PROJECT_ID"
|
GoogleCloudPlatform/training-data-analyst
|
courses/developingapps/v1.3/python/kubernetesengine/bonus/prepare_environment.sh
|
Shell
|
apache-2.0
| 3,596 |
#!/bin/sh
#
# Copyright 2019 PingCAP, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Basic check for whether partitioned tables work.
set -eu
for BACKEND in tidb importer local; do
if [ "$BACKEND" = 'local' ]; then
check_cluster_version 4 0 0 'local backend' || continue
fi
run_sql 'DROP DATABASE IF EXISTS partitioned;'
run_lightning --backend $BACKEND
run_sql 'SELECT count(1), sum(a) FROM partitioned.a;'
check_contains 'count(1): 8'
check_contains 'sum(a): 277151781'
run_sql "SHOW TABLE STATUS FROM partitioned WHERE name = 'a';"
check_contains 'Create_options: partitioned'
done
|
c4pt0r/tidb
|
br/tests/lightning_partitioned-table/run.sh
|
Shell
|
apache-2.0
| 1,140 |
if [ "x$PYTHON" = "x" ]; then
echo "Please define PYTHON as an environment variable pointing to the python-2.7 binary"
exit 1
fi
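# Example (hypothetical path): point PYTHON at a python-2.7 binary before
# sourcing this script:
#   export PYTHON=/usr/bin/python2.7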
|
JuliBakagianni/META-SHARE
|
misc/tools/multitest/_python.sh
|
Shell
|
bsd-3-clause
| 136 |
#!/bin/bash
FN="mgu74b.db_3.2.3.tar.gz"
URLS=(
"https://bioconductor.org/packages/3.12/data/annotation/src/contrib/mgu74b.db_3.2.3.tar.gz"
"https://bioarchive.galaxyproject.org/mgu74b.db_3.2.3.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-mgu74b.db/bioconductor-mgu74b.db_3.2.3_src_all.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-mgu74b.db/bioconductor-mgu74b.db_3.2.3_src_all.tar.gz"
)
MD5="9d019b194a64ec8c915514019896708e"
# Use a staging area in the conda dir rather than temp dirs, both to avoid
# permission issues and to have things downloaded in a predictable manner.
STAGING=$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM
mkdir -p $STAGING
TARBALL=$STAGING/$FN
SUCCESS=0
for URL in ${URLS[@]}; do
curl $URL > $TARBALL
[[ $? == 0 ]] || continue
# Platform-specific md5sum checks.
if [[ $(uname -s) == "Linux" ]]; then
if md5sum -c <<<"$MD5 $TARBALL"; then
SUCCESS=1
break
fi
  elif [[ $(uname -s) == "Darwin" ]]; then
    if [[ $(md5 $TARBALL | cut -f4 -d " ") == "$MD5" ]]; then
      SUCCESS=1
      break
    fi
  fi
done
if [[ $SUCCESS != 1 ]]; then
echo "ERROR: post-link.sh was unable to download any of the following URLs with the md5sum $MD5:"
printf '%s\n' "${URLS[@]}"
exit 1
fi
# Install and clean up
R CMD INSTALL --library=$PREFIX/lib/R/library $TARBALL
rm $TARBALL
rmdir $STAGING
|
bebatut/bioconda-recipes
|
recipes/bioconductor-mgu74b.db/post-link.sh
|
Shell
|
mit
| 1,405 |
#!/bin/bash
FN="rat2302cdf_2.18.0.tar.gz"
URLS=(
"https://bioconductor.org/packages/3.10/data/annotation/src/contrib/rat2302cdf_2.18.0.tar.gz"
"https://bioarchive.galaxyproject.org/rat2302cdf_2.18.0.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-rat2302cdf/bioconductor-rat2302cdf_2.18.0_src_all.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-rat2302cdf/bioconductor-rat2302cdf_2.18.0_src_all.tar.gz"
)
MD5="a033730f79a11d45b4cd7e7c520f8052"
# Use a staging area in the conda dir rather than temp dirs, both to avoid
# permission issues and to have things downloaded in a predictable manner.
STAGING=$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM
mkdir -p $STAGING
TARBALL=$STAGING/$FN
SUCCESS=0
for URL in ${URLS[@]}; do
curl $URL > $TARBALL
[[ $? == 0 ]] || continue
# Platform-specific md5sum checks.
if [[ $(uname -s) == "Linux" ]]; then
if md5sum -c <<<"$MD5 $TARBALL"; then
SUCCESS=1
break
fi
  elif [[ $(uname -s) == "Darwin" ]]; then
    if [[ $(md5 $TARBALL | cut -f4 -d " ") == "$MD5" ]]; then
      SUCCESS=1
      break
    fi
  fi
done
if [[ $SUCCESS != 1 ]]; then
echo "ERROR: post-link.sh was unable to download any of the following URLs with the md5sum $MD5:"
printf '%s\n' "${URLS[@]}"
exit 1
fi
# Install and clean up
R CMD INSTALL --library=$PREFIX/lib/R/library $TARBALL
rm $TARBALL
rmdir $STAGING
|
Luobiny/bioconda-recipes
|
recipes/bioconductor-rat2302cdf/post-link.sh
|
Shell
|
mit
| 1,417 |
#!/usr/bin/env bash
###########################################################################
# Packaging script which creates debian and RPM packages. It optionally
# tags the repo with the given version.
#
# Requirements: GOPATH must be set. 'fpm' must be on the path, and the AWS
# CLI tools must also be installed.
#
# https://github.com/jordansissel/fpm
# http://aws.amazon.com/cli/
#
# Packaging process: to package a build, simply execute:
#
# package.sh
#
#    The script will automatically determine the version number from git using
# `git describe --always --tags`
#
# AWS upload: the script will also offer to upload the packages to S3. If
# this option is selected, the credentials should be present in the file
# ~/aws.conf. The contents should be of the form:
#
# [default]
# aws_access_key_id=<access ID>
# aws_secret_access_key=<secret key>
# region = us-east-1
#
# Trim the leading spaces when creating the file. The script will exit if
# S3 upload is requested, but this file does not exist.
AWS_FILE=~/aws.conf
INSTALL_ROOT_DIR=/opt/telegraf
TELEGRAF_LOG_DIR=/var/log/telegraf
CONFIG_ROOT_DIR=/etc/opt/telegraf
LOGROTATE_DIR=/etc/logrotate.d
SAMPLE_CONFIGURATION=etc/config.sample.toml
LOGROTATE_CONFIGURATION=etc/logrotate.d/telegraf
INITD_SCRIPT=scripts/init.sh
TMP_WORK_DIR=`mktemp -d`
POST_INSTALL_PATH=`mktemp`
ARCH=`uname -i`
LICENSE=MIT
URL=influxdb.com
[email protected]
VENDOR=InfluxDB
DESCRIPTION="InfluxDB Telegraf agent"
PKG_DEPS=(coreutils)
GO_VERSION="go1.4.2"
GOPATH_INSTALL=
BINS=(
telegraf
)
###########################################################################
# Helper functions.
# usage prints simple usage information.
usage() {
echo -e "$0\n"
cleanup_exit $1
}
# make_dir_tree creates the directory structure within the packages.
make_dir_tree() {
work_dir=$1
version=$2
mkdir -p $work_dir/$INSTALL_ROOT_DIR/versions/$version/scripts
if [ $? -ne 0 ]; then
echo "Failed to create installation directory -- aborting."
cleanup_exit 1
fi
mkdir -p $work_dir/$CONFIG_ROOT_DIR
if [ $? -ne 0 ]; then
echo "Failed to create configuration directory -- aborting."
cleanup_exit 1
fi
mkdir -p $work_dir/$LOGROTATE_DIR
if [ $? -ne 0 ]; then
        echo "Failed to create logrotate directory -- aborting."
cleanup_exit 1
fi
}
# cleanup_exit removes all resources created during the process and exits with
# the supplied returned code.
cleanup_exit() {
rm -r $TMP_WORK_DIR
rm $POST_INSTALL_PATH
exit $1
}
# check_gopath sanity checks the value of the GOPATH env variable, and determines
# the path where build artifacts are installed. GOPATH may be a colon-delimited
# list of directories.
check_gopath() {
[ -z "$GOPATH" ] && echo "GOPATH is not set." && cleanup_exit 1
GOPATH_INSTALL=`echo $GOPATH | cut -d ':' -f 1`
[ ! -d "$GOPATH_INSTALL" ] && echo "GOPATH_INSTALL is not a directory." && cleanup_exit 1
echo "GOPATH ($GOPATH) looks sane, using $GOPATH_INSTALL for installation."
}
check_gvm() {
source $HOME/.gvm/scripts/gvm
which gvm
if [ $? -ne 0 ]; then
echo "gvm not found -- aborting."
        cleanup_exit 1
fi
gvm use $GO_VERSION
if [ $? -ne 0 ]; then
echo "gvm cannot find Go version $GO_VERSION -- aborting."
        cleanup_exit 1
fi
}
# check_clean_tree ensures that no source file is locally modified.
check_clean_tree() {
modified=$(git ls-files --modified | wc -l)
if [ $modified -ne 0 ]; then
echo "The source tree is not clean -- aborting."
cleanup_exit 1
fi
echo "Git tree is clean."
}
# do_build builds the code. The version and commit must be passed in.
do_build() {
version=$1
commit=`git rev-parse HEAD`
if [ $? -ne 0 ]; then
echo "Unable to retrieve current commit -- aborting"
cleanup_exit 1
fi
for b in ${BINS[*]}; do
rm -f $GOPATH_INSTALL/bin/$b
done
godep go install -a -ldflags="-X main.Version $version" ./...
if [ $? -ne 0 ]; then
echo "Build failed, unable to create package -- aborting"
cleanup_exit 1
fi
echo "Build completed successfully."
}
# generate_postinstall_script creates the post-install script for the
# package. It must be passed the version.
generate_postinstall_script() {
version=$1
cat <<EOF >$POST_INSTALL_PATH
rm -f $INSTALL_ROOT_DIR/telegraf
rm -f $INSTALL_ROOT_DIR/init.sh
ln -s $INSTALL_ROOT_DIR/versions/$version/telegraf $INSTALL_ROOT_DIR/telegraf
ln -s $INSTALL_ROOT_DIR/versions/$version/scripts/init.sh $INSTALL_ROOT_DIR/init.sh
rm -f /etc/init.d/telegraf
ln -sfn $INSTALL_ROOT_DIR/init.sh /etc/init.d/telegraf
chmod +x /etc/init.d/telegraf
if which update-rc.d > /dev/null 2>&1 ; then
update-rc.d -f telegraf remove
update-rc.d telegraf defaults
else
chkconfig --add telegraf
fi
if ! id telegraf >/dev/null 2>&1; then
useradd --system -U -M telegraf
fi
chown -R -L telegraf:telegraf $INSTALL_ROOT_DIR
chmod -R a+rX $INSTALL_ROOT_DIR
mkdir -p $TELEGRAF_LOG_DIR
chown -R -L telegraf:telegraf $TELEGRAF_LOG_DIR
EOF
echo "Post-install script created successfully at $POST_INSTALL_PATH"
}
###########################################################################
# Start the packaging process.
if [ "$1" == "-h" ]; then
usage 0
fi
VERSION=`git describe --always --tags | tr -d v`
echo -e "\nStarting package process, version: $VERSION\n"
if [ "$CIRCLE_BRANCH" == "" ]; then
check_gvm
fi
check_gopath
do_build $VERSION
make_dir_tree $TMP_WORK_DIR $VERSION
###########################################################################
# Copy the assets to the installation directories.
for b in ${BINS[*]}; do
cp $GOPATH_INSTALL/bin/$b $TMP_WORK_DIR/$INSTALL_ROOT_DIR/versions/$VERSION
if [ $? -ne 0 ]; then
echo "Failed to copy binaries to packaging directory -- aborting."
cleanup_exit 1
fi
done
echo "${BINS[*]} copied to $TMP_WORK_DIR/$INSTALL_ROOT_DIR/versions/$VERSION"
cp $INITD_SCRIPT $TMP_WORK_DIR/$INSTALL_ROOT_DIR/versions/$VERSION/scripts
if [ $? -ne 0 ]; then
echo "Failed to copy init.d script to packaging directory -- aborting."
cleanup_exit 1
fi
echo "$INITD_SCRIPT copied to $TMP_WORK_DIR/$INSTALL_ROOT_DIR/versions/$VERSION/scripts"
cp $SAMPLE_CONFIGURATION $TMP_WORK_DIR/$CONFIG_ROOT_DIR/telegraf.conf
if [ $? -ne 0 ]; then
echo "Failed to copy $SAMPLE_CONFIGURATION to packaging directory -- aborting."
cleanup_exit 1
fi
cp $LOGROTATE_CONFIGURATION $TMP_WORK_DIR/$LOGROTATE_DIR/telegraf
if [ $? -ne 0 ]; then
echo "Failed to copy $LOGROTATE_CONFIGURATION to packaging directory -- aborting."
cleanup_exit 1
fi
generate_postinstall_script $VERSION
###########################################################################
# Create the actual packages.
if [ "$CIRCLE_BRANCH" == "" ]; then
echo -n "Commence creation of $ARCH packages, version $VERSION? [Y/n] "
read response
response=`echo $response | tr 'A-Z' 'a-z'`
if [ "x$response" == "xn" ]; then
echo "Packaging aborted."
cleanup_exit 1
fi
fi
if [ $ARCH == "i386" ]; then
rpm_package=telegraf-$VERSION-1.i686.rpm
debian_package=telegraf_${VERSION}_i686.deb
deb_args="-a i686"
rpm_args="setarch i686"
elif [ $ARCH == "arm" ]; then
rpm_package=telegraf-$VERSION-1.armel.rpm
debian_package=telegraf_${VERSION}_armel.deb
else
rpm_package=telegraf-$VERSION-1.x86_64.rpm
debian_package=telegraf_${VERSION}_amd64.deb
fi
COMMON_FPM_ARGS="-C $TMP_WORK_DIR --vendor $VENDOR --url $URL --license $LICENSE --maintainer $MAINTAINER --after-install $POST_INSTALL_PATH --name telegraf --version $VERSION --config-files $CONFIG_ROOT_DIR ."
$rpm_args fpm -s dir -t rpm --description "$DESCRIPTION" $COMMON_FPM_ARGS
if [ $? -ne 0 ]; then
echo "Failed to create RPM package -- aborting."
cleanup_exit 1
fi
echo "RPM package created successfully."
fpm -s dir -t deb $deb_args --description "$DESCRIPTION" $COMMON_FPM_ARGS
if [ $? -ne 0 ]; then
echo "Failed to create Debian package -- aborting."
cleanup_exit 1
fi
echo "Debian package created successfully."
###########################################################################
# Offer to publish the packages.
if [ "$CIRCLE_BRANCH" == "" ]; then
echo -n "Publish packages to S3? [y/N] "
read response
response=`echo $response | tr 'A-Z' 'a-z'`
if [ "x$response" == "xy" ]; then
echo "Publishing packages to S3."
if [ ! -e "$AWS_FILE" ]; then
echo "$AWS_FILE does not exist -- aborting."
cleanup_exit 1
fi
for filepath in `ls *.{deb,rpm}`; do
echo "Uploading $filepath to S3"
filename=`basename $filepath`
echo "Uploading $filename to s3://get.influxdb.org/telegraf/$filename"
AWS_CONFIG_FILE=$AWS_FILE aws s3 cp $filepath s3://get.influxdb.org/telegraf/$filename --acl public-read --region us-east-1
if [ $? -ne 0 ]; then
                echo "Upload failed -- aborting."
cleanup_exit 1
fi
done
else
echo "Not publishing packages to S3."
fi
fi
###########################################################################
# All done.
echo -e "\nPackaging process complete."
cleanup_exit 0
|
wodin/telegraf
|
package.sh
|
Shell
|
mit
| 9,421 |
#! /bin/sh
# Copyright (C) 2010-2013 Free Software Foundation, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Test error reporting for AC_CONFIG_LIBOBJ_DIR.
# See also sister tests 'libobj20a.sh' and 'libobj20c.sh'.
. test-init.sh
cat >> configure.ac << 'END'
AC_CONFIG_LIBOBJ_DIR([libobj-dir])
AC_PROG_CC
AM_PROG_AR
AC_PROG_RANLIB
AC_LIBSOURCE([foobar.c])
# NOTE: this call to AC_OUTPUT is really needed; see Automake bug #7635
# <http://debbugs.gnu.org/cgi/bugreport.cgi?bug=7635>
AC_OUTPUT
END
cat > Makefile.am << 'END'
AUTOMAKE_OPTIONS = subdir-objects
noinst_LIBRARIES = libtu.a
libtu_a_SOURCES =
libtu_a_LIBADD = $(LIBOBJS)
END
: > ar-lib
$ACLOCAL
AUTOMAKE_fails
grep 'configure\.ac:.*required directory.*libobj-dir' stderr
mkdir libobj-dir
: > foobar.c # Oops, it should be in libobj-dir!
AUTOMAKE_fails
grep 'configure\.ac:.*required file.*libobj-dir/foobar.c.*' stderr
rm -f foobar.c
: > libobj-dir/foobar.c
$AUTOMAKE # Now we should succeed.
:
|
DDTChen/CookieVLC
|
vlc/extras/tools/automake/t/libobj20b.sh
|
Shell
|
gpl-2.0
| 1,553 |
#!/bin/bash -eu
#
# Copyright IBM Corp. All Rights Reserved.
#
# SPDX-License-Identifier: Apache-2.0
# ----------------------------------------------------------------
# Generate pkcs11 token for fabric tests
# ----------------------------------------------------------------
softhsm2-util --init-token --slot 0 --label "ForFabric" --so-pin 1234 --pin 98765432
cat <<EOF >>/home/vagrant/.bashrc
export PKCS11_LIB="$(find /usr/lib -name libsofthsm2.so | head -1)"
export PKCS11_PIN=98765432
export PKCS11_LABEL="ForFabric"
EOF
cat <<EOF >>/home/vagrant/.bashrc
export GOPATH=\$HOME/go
export PATH=\$PATH:\$HOME/go/bin
cd \$HOME/fabric
EOF
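# Sanity check (optional): the token initialized above should appear in
# the slot listing:
#   softhsm2-util --show-slots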
|
stemlending/fabric
|
vagrant/user.sh
|
Shell
|
apache-2.0
| 643 |
#!/bin/sh
# This script is loosely based on a script with the same purpose provided
# by RIOT-OS (https://github.com/RIOT-OS/RIOT)
OPENOCD_CMD="${OPENOCD:-openocd} -s ${OPENOCD_DEFAULT_PATH}"
OPENOCD_CONFIG=${ZEPHYR_BASE}/boards/${BOARD_NAME}/support/openocd.cfg
BIN_NAME=${O}/${KERNEL_BIN_NAME}
ELF_NAME=${O}/${KERNEL_ELF_NAME}
test_config() {
if [ ! -f "${OPENOCD_CONFIG}" ]; then
echo "Error: Unable to locate OpenOCD configuration file: ${OPENOCD_CONFIG}"
exit 1
fi
if [ ! -f "${OPENOCD}" ]; then
echo "Error: Unable to locate OpenOCD executable: ${OPENOCD}"
exit 1
fi
}
test_bin() {
if [ ! -f "${BIN_NAME}" ]; then
echo "Error: Unable to locate image binary: ${BIN_NAME}"
exit 1
fi
}
do_flash() {
test_config
test_bin
# flash device with specified image
sh -c "${OPENOCD_CMD} -f '${OPENOCD_CONFIG}' \
-c 'init' \
-c 'targets' \
${OPENOCD_PRE_CMD} \
-c 'reset halt' \
-c ${OPENOCD_LOAD_CMD} \
-c 'reset halt' \
-c ${OPENOCD_VERIFY_CMD} \
${OPENOCD_POST_CMD} \
-c 'reset run' \
-c 'shutdown'"
echo 'Done flashing'
}
do_debug() {
test_config
test_bin
# setsid is needed so that Ctrl+C in GDB doesn't kill OpenOCD
[ -z "${SETSID}" ] && SETSID="$(which setsid)"
# temporary file that saves OpenOCD pid
OCD_PIDFILE=$(mktemp -t "openocd_pid.XXXXXXXXXX")
# cleanup after script terminates
trap "cleanup ${OCD_PIDFILE}" EXIT
# don't trap on Ctrl+C, because GDB keeps running
trap '' INT
# start OpenOCD as GDB server
${SETSID} sh -c "${OPENOCD_CMD} -f '${OPENOCD_CONFIG}' \
${OPENOCD_EXTRA_INIT} \
-c 'tcl_port ${TCL_PORT:-6333}' \
-c 'telnet_port ${TELNET_PORT:-4444}' \
-c 'gdb_port ${GDB_PORT:-3333}' \
-c 'init' \
-c 'targets' \
-c 'halt' \
& \
echo \$! > $OCD_PIDFILE" &
# connect to the GDB server
${GDB} ${TUI} -ex "target remote :${GDB_PORT:-3333}" ${ELF_NAME}
# will be called by trap
cleanup() {
OCD_PID="$(cat $OCD_PIDFILE)"
        kill ${OCD_PID} >/dev/null 2>&1
rm -f "$OCD_PIDFILE"
exit 0
}
}
do_debugserver() {
test_config
sh -c "${OPENOCD_CMD} -f '${OPENOCD_CONFIG}' \
-c 'init' \
-c 'targets' \
-c 'reset halt'"
}
CMD="$1"
shift
case "${CMD}" in
flash)
echo "Flashing Target Device"
do_flash "$@"
;;
debugserver)
do_debugserver "$@"
;;
debug)
do_debug "$@"
;;
esac
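# Example invocation (hypothetical values; the build system normally
# exports these variables, including the OPENOCD_*_CMD hooks, before
# calling this script):
#   BOARD_NAME=frdm_k64f O=outdir KERNEL_BIN_NAME=zephyr.bin \
#   KERNEL_ELF_NAME=zephyr.elf ZEPHYR_BASE=~/zephyr \
#   OPENOCD=/usr/bin/openocd ./openocd.sh flash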
|
coldnew/zephyr-project-fork
|
scripts/support/openocd.sh
|
Shell
|
apache-2.0
| 2,673 |
#!/bin/bash
#Aqueduct - Compliance Remediation Content
#Copyright (C) 2011,2012 Vincent C. Passaro ([email protected])
#
#This program is free software; you can redistribute it and/or
#modify it under the terms of the GNU General Public License
#as published by the Free Software Foundation; either version 2
#of the License, or (at your option) any later version.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 51 Franklin Street, Fifth Floor,
#Boston, MA 02110-1301, USA.
######################################################################
#By Tummy a.k.a Vincent C. Passaro #
#Vincent[.]Passaro[@]gmail[.]com #
#www.vincentpassaro.com #
######################################################################
#_____________________________________________________________________
#| Version | Change Information | Author | Date |
#|__________|_______________________|____________________|____________|
#| 1.0 | Initial Script | Vincent C. Passaro | 20-oct-2011|
#| | Creation | | |
#|__________|_______________________|____________________|____________|
#
#
# - updated by Shannon Mitchell([email protected])
# on 14-jan-2012 to allow for "less permissive" permissions
#######################DISA INFORMATION###############################
#Group ID (Vulid): V-788
#Group Title: Default/Skeleton Dot Files Permissions
#Rule ID: SV-788r7_rule
#Severity: CAT II
#Rule Version (STIG-ID): GEN001800
#Rule Title: All skeleton files (typically those in /etc/skel) must
#have mode 0644 or less permissive.
#
#Vulnerability Discussion: If the skeleton files are not protected,
#unauthorized personnel could change user startup parameters and
#possibly jeopardize user files.
#
#Responsibility: System Administrator
#IAControls: ECLP-1
#
#Check Content:
#Check skeleton files permissions.
# ls -alL /etc/skel
#If a skeleton file has a mode more permissive than 0644, this is a finding.
#
#Fix Text: Change the mode of skeleton files with incorrect mode:
#######################DISA INFORMATION###############################
#Global Variables#
PDI=GEN001800
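# find -perm /7133 matches any file with at least one of these bits set:
# setuid/setgid/sticky (7---), user execute (-1--), or group/other
# write/execute (--33), i.e. anything more permissive than 0644.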
BADSKELFILE=$( find /etc/skel -perm /7133 -type f )
#Start-Lockdown
for file in $BADSKELFILE
do
chmod u-xs,g-wxs,o-wxt $file
done
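# Verify the remediation (matches the check content above):
#   ls -alL /etc/skel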
|
quark-pat/CLIP
|
packages/aqueduct/aqueduct/compliance/Bash/STIG/rhel-5-beta/prod/GEN001800.sh
|
Shell
|
apache-2.0
| 2,694 |
#!/bin/bash
fw_depends mysql java maven
./mvnw clean package
nohup java -jar target/tech-empower-framework-benchmark-1.0-SNAPSHOT-netty-bundle.jar &
|
saturday06/FrameworkBenchmarks
|
frameworks/Kotlin/ktor/setup-netty.sh
|
Shell
|
bsd-3-clause
| 152 |
#!/usr/bin/env bash
if [[ ! -e $TMPDIR ]]; then
TMPDIR=/tmp
fi
PIDFILE=$TMPDIR/buttercoin.api-server.pid
if [[ "$1" == "stop" ]] || [[ -e $PIDFILE ]]; then
kill -s KILL `cat $PIDFILE`
rm $PIDFILE
fi
if [[ "$1" == "start" ]] || [[ -z "$1" ]]; then
if [[ -z "$BUTTERCOIN_CONFIG_FILE" ]]; then
if [[ -z "$2" ]]; then
BUTTERCOIN_CONFIG_FILE="`pwd`/config/api.json"
else
BUTTERCOIN_CONFIG_FILE="$2"
fi
fi
coffee bin/api --config "$BUTTERCOIN_CONFIG_FILE" &
pid=$!
echo $pid > $PIDFILE
fi
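# Examples (hypothetical config path):
#   ./api.sh start ./config/api.json   # start the API server, record its pid
#   ./api.sh stop                      # kill the recorded pid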
|
Sphere2013/Globe-Exchange-Buttercoin
|
api.sh
|
Shell
|
mit
| 549 |
#! /bin/sh
# Copyright (C) 2011-2015 Free Software Foundation, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Check that the global testsuite log file referenced in the testsuite
# summary and in the global testsuite log itself is correct.
. test-init.sh
mv configure.ac configure.stub
cat > fail << 'END'
#!/bin/sh
exit 1
END
chmod a+x fail
cat configure.stub - > configure.ac <<'END'
AC_OUTPUT
END
cat > Makefile.am << 'END'
TEST_SUITE_LOG = my_test_suite.log
TESTS = fail
END
$ACLOCAL
$AUTOCONF
$AUTOMAKE -a
mkdir build
cd build
../configure
run_make -O -e FAIL check
grep '^See \./my_test_suite\.log$' stdout
mkdir bar
run_make -O -e FAIL TEST_SUITE_LOG=bar/bar.log check
grep '^See \./bar/bar\.log$' stdout
cd ..
echo SUBDIRS = sub > Makefile.am
mkdir sub
echo TESTS = fail > sub/Makefile.am
mv fail sub
cat configure.stub - > configure.ac <<'END'
AC_CONFIG_FILES([sub/Makefile])
AC_OUTPUT
END
$ACLOCAL --force
$AUTOCONF --force
$AUTOMAKE
./configure
run_make -O -e FAIL check
grep '^See sub/test-suite\.log$' stdout
cd sub
run_make -O -e FAIL check
grep '^See sub/test-suite\.log$' stdout
cd ..
run_make -O -e FAIL TEST_SUITE_LOG=foo.log check
grep '^See sub/foo\.log$' stdout
:
|
darrengarvey/automake
|
t/testsuite-summary-reference-log.sh
|
Shell
|
gpl-2.0
| 1,784 |
#!/bin/sh
test_description='combined diff'
. ./test-lib.sh
. "$TEST_DIRECTORY"/diff-lib.sh
setup_helper () {
one=$1 branch=$2 side=$3 &&
git branch $side $branch &&
for l in $one two three fyra
do
echo $l
done >file &&
git add file &&
test_tick &&
git commit -m $branch &&
git checkout $side &&
for l in $one two three quatro
do
echo $l
done >file &&
git add file &&
test_tick &&
git commit -m $side &&
test_must_fail git merge $branch &&
for l in $one three four
do
echo $l
done >file &&
git add file &&
test_tick &&
git commit -m "merge $branch into $side"
}
verify_helper () {
it=$1 &&
# Ignore lines that were removed only from the other parent
sed -e '
1,/^@@@/d
/^ -/d
s/^\(.\)./\1/
' "$it" >"$it.actual.1" &&
sed -e '
1,/^@@@/d
/^- /d
s/^.\(.\)/\1/
' "$it" >"$it.actual.2" &&
git diff "$it^" "$it" -- | sed -e '1,/^@@/d' >"$it.expect.1" &&
test_cmp "$it.expect.1" "$it.actual.1" &&
git diff "$it^2" "$it" -- | sed -e '1,/^@@/d' >"$it.expect.2" &&
test_cmp "$it.expect.2" "$it.actual.2"
}
test_expect_success setup '
>file &&
git add file &&
test_tick &&
git commit -m initial &&
git branch withone &&
git branch sansone &&
git checkout withone &&
setup_helper one withone sidewithone &&
git checkout sansone &&
setup_helper "" sansone sidesansone
'
test_expect_success 'check combined output (1)' '
git show sidewithone -- >sidewithone &&
verify_helper sidewithone
'
test_expect_success 'check combined output (2)' '
git show sidesansone -- >sidesansone &&
verify_helper sidesansone
'
test_expect_success 'diagnose truncated file' '
>file &&
git add file &&
git commit --amend -C HEAD &&
git show >out &&
grep "diff --cc file" out
'
test_expect_success 'setup for --cc --raw' '
blob=$(echo file | git hash-object --stdin -w) &&
base_tree=$(echo "100644 blob $blob file" | git mktree) &&
trees= &&
for i in $(test_seq 1 40)
do
blob=$(echo file$i | git hash-object --stdin -w) &&
trees="$trees$(echo "100644 blob $blob file" | git mktree)$LF"
done
'
test_expect_success 'check --cc --raw with four trees' '
four_trees=$(echo "$trees" | sed -e 4q) &&
git diff --cc --raw $four_trees $base_tree >out &&
# Check for four leading colons in the output:
grep "^::::[^:]" out
'
test_expect_success 'check --cc --raw with forty trees' '
git diff --cc --raw $trees $base_tree >out &&
# Check for forty leading colons in the output:
grep "^::::::::::::::::::::::::::::::::::::::::[^:]" out
'
test_expect_success 'setup combined ignore spaces' '
git checkout master &&
>test &&
git add test &&
git commit -m initial &&
tr -d Q <<-\EOF >test &&
always coalesce
eol space coalesce Q
space change coalesce
all spa ces coalesce
eol spaces Q
space change
all spa ces
EOF
git commit -m "test space change" -a &&
git checkout -b side HEAD^ &&
tr -d Q <<-\EOF >test &&
always coalesce
eol space coalesce
space change coalesce
all spaces coalesce
eol spaces
space change
all spaces
EOF
git commit -m "test other space changes" -a &&
test_must_fail git merge master &&
tr -d Q <<-\EOF >test &&
eol spaces Q
space change
all spa ces
EOF
git commit -m merged -a
'
test_expect_success 'check combined output (no ignore space)' '
git show >actual.tmp &&
sed -e "1,/^@@@/d" < actual.tmp >actual &&
tr -d Q <<-\EOF >expected &&
--always coalesce
- eol space coalesce
- space change coalesce
- all spaces coalesce
- eol spaces
- space change
- all spaces
-eol space coalesce Q
-space change coalesce
-all spa ces coalesce
+ eol spaces Q
+ space change
+ all spa ces
EOF
compare_diff_patch expected actual
'
test_expect_success 'check combined output (ignore space at eol)' '
git show --ignore-space-at-eol >actual.tmp &&
sed -e "1,/^@@@/d" < actual.tmp >actual &&
tr -d Q <<-\EOF >expected &&
--always coalesce
--eol space coalesce
- space change coalesce
- all spaces coalesce
-space change coalesce
-all spa ces coalesce
eol spaces Q
- space change
- all spaces
+ space change
+ all spa ces
EOF
compare_diff_patch expected actual
'
test_expect_success 'check combined output (ignore space change)' '
git show -b >actual.tmp &&
sed -e "1,/^@@@/d" < actual.tmp >actual &&
tr -d Q <<-\EOF >expected &&
--always coalesce
--eol space coalesce
--space change coalesce
- all spaces coalesce
-all spa ces coalesce
eol spaces Q
space change
- all spaces
+ all spa ces
EOF
compare_diff_patch expected actual
'
test_expect_success 'check combined output (ignore all spaces)' '
git show -w >actual.tmp &&
sed -e "1,/^@@@/d" < actual.tmp >actual &&
tr -d Q <<-\EOF >expected &&
--always coalesce
--eol space coalesce
--space change coalesce
--all spaces coalesce
eol spaces Q
space change
all spa ces
EOF
compare_diff_patch expected actual
'
test_expect_success 'combine diff coalesce simple' '
>test &&
git add test &&
git commit -m initial &&
test_seq 4 >test &&
git commit -a -m empty1 &&
git branch side1 &&
git checkout HEAD^ &&
test_seq 5 >test &&
git commit -a -m empty2 &&
test_must_fail git merge side1 &&
>test &&
git commit -a -m merge &&
git show >actual.tmp &&
sed -e "1,/^@@@/d" < actual.tmp >actual &&
tr -d Q <<-\EOF >expected &&
--1
--2
--3
--4
- 5
EOF
compare_diff_patch expected actual
'
test_expect_success 'combine diff coalesce tricky' '
>test &&
git add test &&
git commit -m initial --allow-empty &&
cat <<-\EOF >test &&
3
1
2
3
4
EOF
git commit -a -m empty1 &&
git branch -f side1 &&
git checkout HEAD^ &&
cat <<-\EOF >test &&
1
3
5
4
EOF
git commit -a -m empty2 &&
git branch -f side2 &&
test_must_fail git merge side1 &&
>test &&
git commit -a -m merge &&
git show >actual.tmp &&
sed -e "1,/^@@@/d" < actual.tmp >actual &&
tr -d Q <<-\EOF >expected &&
-3
--1
-2
--3
- 5
--4
EOF
compare_diff_patch expected actual &&
git checkout -f side1 &&
test_must_fail git merge side2 &&
>test &&
git commit -a -m merge &&
git show >actual.tmp &&
sed -e "1,/^@@@/d" < actual.tmp >actual &&
tr -d Q <<-\EOF >expected &&
- 3
--1
- 2
--3
-5
--4
EOF
compare_diff_patch expected actual
'
test_expect_failure 'combine diff coalesce three parents' '
>test &&
git add test &&
git commit -m initial --allow-empty &&
cat <<-\EOF >test &&
3
1
2
3
4
EOF
git commit -a -m empty1 &&
git checkout -B side1 &&
git checkout HEAD^ &&
cat <<-\EOF >test &&
1
3
7
5
4
EOF
git commit -a -m empty2 &&
git branch -f side2 &&
git checkout HEAD^ &&
cat <<-\EOF >test &&
3
1
6
5
4
EOF
git commit -a -m empty3 &&
>test &&
git add test &&
TREE=$(git write-tree) &&
COMMIT=$(git commit-tree -p HEAD -p side1 -p side2 -m merge $TREE) &&
git show $COMMIT >actual.tmp &&
sed -e "1,/^@@@/d" < actual.tmp >actual &&
tr -d Q <<-\EOF >expected &&
-- 3
---1
- 6
- 2
--3
-7
- -5
---4
EOF
compare_diff_patch expected actual
'
# Test for a bug reported at
# https://lore.kernel.org/git/[email protected]/
# where deleted lines were missing from combined diff output when they
# occurred exactly before the context lines of a later change.
test_expect_success 'combine diff missing delete bug' '
git commit -m initial --allow-empty &&
cat <<-\EOF >test &&
1
2
3
4
EOF
git add test &&
git commit -a -m side1 &&
git checkout -B side1 &&
git checkout HEAD^ &&
cat <<-\EOF >test &&
0
1
2
3
4modified
EOF
git add test &&
git commit -m side2 &&
git branch -f side2 &&
test_must_fail git merge --no-commit side1 &&
cat <<-\EOF >test &&
1
2
3
4modified
EOF
git add test &&
git commit -a -m merge &&
git diff-tree -c -p HEAD >actual.tmp &&
sed -e "1,/^@@@/d" < actual.tmp >actual &&
tr -d Q <<-\EOF >expected &&
- 0
1
2
3
-4
+4modified
EOF
compare_diff_patch expected actual
'
test_expect_success 'combine diff gets tree sorting right' '
# create a directory and a file that sort differently in trees
# versus byte-wise (implied "/" sorts after ".")
git checkout -f master &&
mkdir foo &&
echo base >foo/one &&
echo base >foo/two &&
echo base >foo.ext &&
git add foo foo.ext &&
git commit -m base &&
# one side modifies a file in the directory, along with the root
# file...
echo master >foo/one &&
echo master >foo.ext &&
git commit -a -m master &&
# the other side modifies the other file in the directory
git checkout -b other HEAD^ &&
echo other >foo/two &&
git commit -a -m other &&
# And now we merge. The files in the subdirectory will resolve cleanly,
# meaning that a combined diff will not find them interesting. But it
# will find the tree itself interesting, because it had to be merged.
git checkout master &&
git merge other &&
printf "MM\tfoo\n" >expect &&
git diff-tree -c --name-status -t HEAD >actual.tmp &&
sed 1d <actual.tmp >actual &&
test_cmp expect actual
'
test_expect_success 'setup for --combined-all-paths' '
git branch side1c &&
git branch side2c &&
git checkout side1c &&
test_seq 1 10 >filename-side1c &&
side1cf=$(git hash-object filename-side1c) &&
git add filename-side1c &&
git commit -m with &&
git checkout side2c &&
test_seq 1 9 >filename-side2c &&
echo ten >>filename-side2c &&
side2cf=$(git hash-object filename-side2c) &&
git add filename-side2c &&
git commit -m iam &&
git checkout -b mergery side1c &&
git merge --no-commit side2c &&
git rm filename-side1c &&
echo eleven >>filename-side2c &&
git mv filename-side2c filename-merged &&
mergedf=$(git hash-object filename-merged) &&
git add filename-merged &&
git commit
'
test_expect_success '--combined-all-paths and --raw' '
cat <<-EOF >expect &&
::100644 100644 100644 $side1cf $side2cf $mergedf RR filename-side1c filename-side2c filename-merged
EOF
git diff-tree -c -M --raw --combined-all-paths HEAD >actual.tmp &&
sed 1d <actual.tmp >actual &&
test_cmp expect actual
'
test_expect_success '--combined-all-paths and --cc' '
cat <<-\EOF >expect &&
--- a/filename-side1c
--- a/filename-side2c
+++ b/filename-merged
EOF
git diff-tree --cc -M --combined-all-paths HEAD >actual.tmp &&
grep ^[-+][-+][-+] <actual.tmp >actual &&
test_cmp expect actual
'
test_expect_success FUNNYNAMES 'setup for --combined-all-paths with funny names' '
git branch side1d &&
git branch side2d &&
git checkout side1d &&
test_seq 1 10 >"$(printf "file\twith\ttabs")" &&
git add file* &&
side1df=$(git hash-object *tabs) &&
git commit -m with &&
git checkout side2d &&
test_seq 1 9 >"$(printf "i\tam\ttabbed")" &&
echo ten >>"$(printf "i\tam\ttabbed")" &&
git add *tabbed &&
side2df=$(git hash-object *tabbed) &&
git commit -m iam &&
git checkout -b funny-names-mergery side1d &&
git merge --no-commit side2d &&
git rm *tabs &&
echo eleven >>"$(printf "i\tam\ttabbed")" &&
git mv "$(printf "i\tam\ttabbed")" "$(printf "fickle\tnaming")" &&
git add fickle* &&
headf=$(git hash-object fickle*) &&
git commit &&
head=$(git rev-parse HEAD)
'
test_expect_success FUNNYNAMES '--combined-all-paths and --raw and funny names' '
cat <<-EOF >expect &&
::100644 100644 100644 $side1df $side2df $headf RR "file\twith\ttabs" "i\tam\ttabbed" "fickle\tnaming"
EOF
git diff-tree -c -M --raw --combined-all-paths HEAD >actual.tmp &&
sed 1d <actual.tmp >actual &&
test_cmp expect actual
'
test_expect_success FUNNYNAMES '--combined-all-paths and --raw -and -z and funny names' '
printf "$head\0::100644 100644 100644 $side1df $side2df $headf RR\0file\twith\ttabs\0i\tam\ttabbed\0fickle\tnaming\0" >expect &&
git diff-tree -c -M --raw --combined-all-paths -z HEAD >actual &&
test_cmp expect actual
'
test_expect_success FUNNYNAMES '--combined-all-paths and --cc and funny names' '
cat <<-\EOF >expect &&
--- "a/file\twith\ttabs"
--- "a/i\tam\ttabbed"
+++ "b/fickle\tnaming"
EOF
git diff-tree --cc -M --combined-all-paths HEAD >actual.tmp &&
grep ^[-+][-+][-+] <actual.tmp >actual &&
test_cmp expect actual
'
test_done
|
brunosantiagovazquez/git
|
t/t4038-diff-combined.sh
|
Shell
|
gpl-2.0
| 11,957 |
#!/bin/bash
# check performance test mode
mode="$1"
echo "testing Random Forest Classification workflow"
# server IP source
. ../../server.conf
# Die on any error:
set -e
DIR="${BASH_SOURCE%/*}"; if [ ! -d "$DIR" ]; then DIR="$PWD"; fi; . "$DIR/../../base.sh"
echo "#create a dataset"
path=$(pwd)
curl -X POST -b cookies https://$SEVER_IP:$SERVER_PORT/api/datasets -H "Authorization: Basic YWRtaW46YWRtaW4=" -H "Content-Type: multipart/form-data" -F datasetName='breastCancerWisconsin-random-forest-classification-dataset' -F version='2.0.0' -F description='Breast Cancer Wisconsin Dataset' -F sourceType='file' -F destination='file' -F dataFormat='CSV' -F containsHeader='true' -F file=@'/'$path'/breastCancerWisconsin.csv' -k
sleep 5
# creating a project
echo "#creating a project"
curl -X POST -d @'create-project' -H "Content-Type: application/json" -H "Authorization: Basic YWRtaW46YWRtaW4=" -v https://$SEVER_IP:$SERVER_PORT/api/projects -k
sleep 2
#getting the project
echo "#getting the project"
project=$(curl -H "Content-Type: application/json" -H "Authorization: Basic YWRtaW46YWRtaW4=" -v https://$SEVER_IP:$SERVER_PORT/api/projects/wso2-ml-random-forest-classification-tuned-sample-project -k)
sleep 2
#update the json file with retrieved values
projectId=$(echo "$project"|jq '.id')
datasetId=$(echo "$project"|jq '.datasetId')
${SED} -i 's/^\("projectId":"\)[^"]*/\1'$projectId/ create-analysis;
sleep 2
#creating an analysis
echo "creating an analysis"
curl -X POST -d @'create-analysis' -H "Content-Type: application/json" -H "Authorization: Basic YWRtaW46YWRtaW4=" -v https://$SEVER_IP:$SERVER_PORT/api/analyses -k
sleep 2
#getting analysis id
echo "getting analysis id"
analysis=$(curl -H "Content-Type: application/json" -H "Authorization: Basic YWRtaW46YWRtaW4=" -v https://$SEVER_IP:$SERVER_PORT/api/projects/${projectId}/analyses/wso2-ml-random-forest-classification-tuned-sample-analysis -k)
sleep 2
analysisId=$(echo "$analysis"|jq '.id')
#setting model configs
echo "#setting model configs"
curl -X POST -d @'create-model-config' -H "Content-Type: application/json" -H "Authorization: Basic YWRtaW46YWRtaW4=" -v https://$SEVER_IP:$SERVER_PORT/api/analyses/${analysisId}/configurations -k -v
sleep 2
echo "#adding default features with customized options"
curl -X POST -H "Content-Type: application/json" -H "Authorization: Basic YWRtaW46YWRtaW4=" -v https://$SEVER_IP:$SERVER_PORT/api/analyses/${analysisId}/features/defaults -k -v -d @'customized-features'
sleep 2
echo "#setting tuned hyper params"
curl -X POST -d @'hyper-parameters' -H "Content-Type: application/json" -H "Authorization: Basic YWRtaW46YWRtaW4=" -v https://$SEVER_IP:$SERVER_PORT/api/analyses/${analysisId}/hyperParams?algorithmName=RANDOM_FOREST_CLASSIFICATION -k -v
sleep 2
echo "#getting dataset version"
datasetVersions=$(curl -H "Content-Type: application/json" -H "Authorization: Basic YWRtaW46YWRtaW4=" -v https://$SEVER_IP:$SERVER_PORT/api/datasets/${datasetId}/versions -k)
sleep 2
#update the json file
datasetVersionId=$(echo "$datasetVersions"|jq '.[0] .id')
${SED} -i 's/^\("analysisId":"\)[^"]*/\1'$analysisId/ create-model;
sleep 2
${SED} -i 's/^\("versionSetId":"\)[^"]*/\1'$datasetVersionId/ create-model;
sleep 2
# build only one model for default case and warm-tests
# build three models for performance tests
modelCount=1
if [ "$mode" = "perf" ]; then
modelCount=3
fi
for i in `seq $modelCount`; do
echo "#create model"
model=$(curl -X POST -d @'create-model' -H "Content-Type: application/json" -H "Authorization: Basic YWRtaW46YWRtaW4=" -v https://$SEVER_IP:$SERVER_PORT/api/models -k)
sleep 2
echo "#getting model"
modelName=$(echo "$model"|jq -r '.name')
model=$(curl -H "Content-Type: application/json" -H "Authorization: Basic YWRtaW46YWRtaW4=" -v https://$SEVER_IP:$SERVER_PORT/api/models/${modelName} -k)
sleep 2
modelId=$(echo "$model"|jq '.id')
echo "#building the model"
curl -X POST -H "Content-Type: application/json" -H "Authorization: Basic YWRtaW46YWRtaW4=" -v https://$SEVER_IP:$SERVER_PORT/api/models/${modelId} -k -v
while [ 1 ]
do
model=$(curl -H "Content-Type: application/json" -H "Authorization: Basic YWRtaW46YWRtaW4=" -v https://$SEVER_IP:$SERVER_PORT/api/models/${modelName} -k)
sleep 2
model_status=$(echo "$model"|jq '.status')
if [[ $model_status == *"Complete"* ]]
then
echo "Model building has completed."
break
fi
sleep 10
done
echo "#predict using model"
curl -X POST -H "Content-Type: application/json" -H "Authorization: Basic YWRtaW46YWRtaW4=" -v https://$SEVER_IP:$SERVER_PORT/api/models/${modelId}/predict -k -v -d @'prediction-test'
done
# delete project and dataset when running warm-up tests
if [ "$mode" = "wmp" ]; then
curl -s -X DELETE -H "Authorization: Basic YWRtaW46YWRtaW4=" https://$SEVER_IP:$SERVER_PORT/api/projects/${projectId} -k
curl -s -X DELETE -H "Authorization: Basic YWRtaW46YWRtaW4=" https://$SEVER_IP:$SERVER_PORT/api/datasets/${datasetId} -k
fi
|
Amutheezan/product-das
|
modules/samples/ml/tuned/random-forest-classification/model-generation.sh
|
Shell
|
apache-2.0
| 5,058 |
#!/bin/bash
FN="ye6100subbcdf_2.18.0.tar.gz"
URLS=(
"https://bioconductor.org/packages/3.14/data/annotation/src/contrib/ye6100subbcdf_2.18.0.tar.gz"
"https://bioarchive.galaxyproject.org/ye6100subbcdf_2.18.0.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-ye6100subbcdf/bioconductor-ye6100subbcdf_2.18.0_src_all.tar.gz"
)
MD5="ee9ec4bd941940745bad538d79bfeab4"
# Use a staging area in the conda dir rather than temp dirs, both to avoid
# permission issues and to have things downloaded in a predictable manner.
STAGING=$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM
mkdir -p $STAGING
TARBALL=$STAGING/$FN
SUCCESS=0
for URL in ${URLS[@]}; do
curl $URL > $TARBALL
[[ $? == 0 ]] || continue
# Platform-specific md5sum checks.
if [[ $(uname -s) == "Linux" ]]; then
if md5sum -c <<<"$MD5 $TARBALL"; then
SUCCESS=1
break
fi
  elif [[ $(uname -s) == "Darwin" ]]; then
    if [[ $(md5 $TARBALL | cut -f4 -d " ") == "$MD5" ]]; then
      SUCCESS=1
      break
    fi
  fi
done
if [[ $SUCCESS != 1 ]]; then
echo "ERROR: post-link.sh was unable to download any of the following URLs with the md5sum $MD5:"
printf '%s\n' "${URLS[@]}"
exit 1
fi
# Install and clean up
R CMD INSTALL --library=$PREFIX/lib/R/library $TARBALL
rm $TARBALL
rmdir $STAGING
|
cokelaer/bioconda-recipes
|
recipes/bioconductor-ye6100subbcdf/post-link.sh
|
Shell
|
mit
| 1,317 |
_azk()
{
local cur
cur="${COMP_WORDS[COMP_CWORD]}"
if [ $COMP_CWORD -eq 1 ]; then
COMPREPLY=( $( compgen -W '-h --help -q --quiet -h --help --no-color -l= --log= -v --verbose --version -q --quiet -h --help --no-color -l= --log= -v --verbose info status scale logs deploy open doctor stop vm agent start init shell version docker config restart help' -- $cur) )
else
case ${COMP_WORDS[1]} in
info)
_azk_info
;;
status)
_azk_status
;;
scale)
_azk_scale
;;
logs)
_azk_logs
;;
deploy)
_azk_deploy
;;
open)
_azk_open
;;
doctor)
_azk_doctor
;;
stop)
_azk_stop
;;
vm)
_azk_vm
;;
agent)
_azk_agent
;;
start)
_azk_start
;;
init)
_azk_init
;;
shell)
_azk_shell
;;
version)
_azk_version
;;
docker)
_azk_docker
;;
config)
_azk_config
;;
restart)
_azk_restart
;;
help)
_azk_help
;;
esac
fi
}
_azk_info()
{
local cur
cur="${COMP_WORDS[COMP_CWORD]}"
if [ $COMP_CWORD -ge 2 ]; then
COMPREPLY=( $( compgen -W '--no-color -q --quiet -h --help --no-color -l= --log= -v --verbose ' -- $cur) )
fi
}
_azk_status()
{
local cur
cur="${COMP_WORDS[COMP_CWORD]}"
if [ $COMP_CWORD -ge 2 ]; then
COMPREPLY=( $( compgen -fW '--long --short --text -q --quiet -h --help --no-color -l= --log= -v --verbose ' -- $cur) )
fi
}
_azk_scale()
{
local cur
cur="${COMP_WORDS[COMP_CWORD]}"
if [ $COMP_CWORD -ge 2 ]; then
COMPREPLY=( $( compgen -fW '-r --no-remove -q --quiet -h --help --no-color -l= --log= -v --verbose ' -- $cur) )
fi
}
_azk_logs()
{
local cur
cur="${COMP_WORDS[COMP_CWORD]}"
if [ $COMP_CWORD -ge 2 ]; then
COMPREPLY=( $( compgen -fW '--no-timestamps -f --follow -n= --lines= -q --quiet -h --help --no-color -l= --log= -v --verbose ' -- $cur) )
fi
}
_azk_deploy()
{
local cur
cur="${COMP_WORDS[COMP_CWORD]}"
if [ $COMP_CWORD -eq 2 ]; then
COMPREPLY=( $( compgen -W '-q --quiet -h --help --no-color -l= --log= -v --verbose -q --quiet -h --help --no-color -l= --log= -v --verbose shell rollback clear-cache versions fast full ssh restart' -- $cur) )
else
case ${COMP_WORDS[2]} in
shell)
_azk_deploy_shell
;;
rollback)
_azk_deploy_rollback
;;
clear-cache)
_azk_deploy_clear-cache
;;
versions)
_azk_deploy_versions
;;
fast)
_azk_deploy_fast
;;
full)
_azk_deploy_full
;;
ssh)
_azk_deploy_ssh
;;
restart)
_azk_deploy_restart
;;
esac
fi
}
_azk_deploy_shell()
{
local cur
cur="${COMP_WORDS[COMP_CWORD]}"
if [ $COMP_CWORD -eq 3 ]; then
COMPREPLY=( $( compgen -fW '-c= --command= -q --quiet -h --help --no-color -l= --log= -v --verbose --' -- $cur) )
else
case ${COMP_WORDS[3]} in
--)
_azk_deploy_shell_--
;;
esac
fi
}
_azk_deploy_shell_--()
{
local cur
cur="${COMP_WORDS[COMP_CWORD]}"
if [ $COMP_CWORD -ge 4 ]; then
COMPREPLY=( $( compgen -W ' ' -- $cur) )
fi
}
_azk_deploy_rollback()
{
local cur
cur="${COMP_WORDS[COMP_CWORD]}"
if [ $COMP_CWORD -ge 3 ]; then
COMPREPLY=( $( compgen -fW '-q --quiet -h --help --no-color -l= --log= -v --verbose ' -- $cur) )
fi
}
_azk_deploy_clear-cache()
{
local cur
cur="${COMP_WORDS[COMP_CWORD]}"
if [ $COMP_CWORD -ge 3 ]; then
COMPREPLY=( $( compgen -W ' ' -- $cur) )
fi
}
_azk_deploy_versions()
{
local cur
cur="${COMP_WORDS[COMP_CWORD]}"
if [ $COMP_CWORD -ge 3 ]; then
COMPREPLY=( $( compgen -W ' ' -- $cur) )
fi
}
_azk_deploy_fast()
{
local cur
cur="${COMP_WORDS[COMP_CWORD]}"
if [ $COMP_CWORD -ge 3 ]; then
COMPREPLY=( $( compgen -W ' ' -- $cur) )
fi
}
_azk_deploy_full()
{
local cur
cur="${COMP_WORDS[COMP_CWORD]}"
if [ $COMP_CWORD -ge 3 ]; then
COMPREPLY=( $( compgen -W ' ' -- $cur) )
fi
}
_azk_deploy_ssh()
{
local cur
cur="${COMP_WORDS[COMP_CWORD]}"
if [ $COMP_CWORD -eq 3 ]; then
COMPREPLY=( $( compgen -fW '-q --quiet -h --help --no-color -l= --log= -v --verbose --' -- $cur) )
else
case ${COMP_WORDS[3]} in
--)
_azk_deploy_ssh_--
;;
esac
fi
}
_azk_deploy_ssh_--()
{
local cur
cur="${COMP_WORDS[COMP_CWORD]}"
if [ $COMP_CWORD -ge 4 ]; then
COMPREPLY=( $( compgen -W ' ' -- $cur) )
fi
}
_azk_deploy_restart()
{
local cur
cur="${COMP_WORDS[COMP_CWORD]}"
if [ $COMP_CWORD -ge 3 ]; then
COMPREPLY=( $( compgen -W ' ' -- $cur) )
fi
}
_azk_open()
{
local cur
cur="${COMP_WORDS[COMP_CWORD]}"
if [ $COMP_CWORD -ge 2 ]; then
COMPREPLY=( $( compgen -fW '-a= --open-with= -q --quiet -h --help --no-color -l= --log= -v --verbose ' -- $cur) )
fi
}
_azk_doctor()
{
local cur
cur="${COMP_WORDS[COMP_CWORD]}"
if [ $COMP_CWORD -ge 2 ]; then
COMPREPLY=( $( compgen -W '--logo -q --quiet -h --help --no-color -l= --log= -v --verbose ' -- $cur) )
fi
}
_azk_stop()
{
local cur
cur="${COMP_WORDS[COMP_CWORD]}"
if [ $COMP_CWORD -ge 2 ]; then
COMPREPLY=( $( compgen -fW '-r --no-remove -q --quiet -h --help --no-color -l= --log= -v --verbose ' -- $cur) )
fi
}
_azk_vm()
{
local cur
cur="${COMP_WORDS[COMP_CWORD]}"
if [ $COMP_CWORD -eq 2 ]; then
COMPREPLY=( $( compgen -fW '-F --force -q --quiet -h --help --no-color -l= --log= -v --verbose status -- stop remove installed start ssh' -- $cur) )
else
case ${COMP_WORDS[2]} in
status)
_azk_vm_status
;;
--)
_azk_vm_--
;;
stop)
_azk_vm_stop
;;
remove)
_azk_vm_remove
;;
installed)
_azk_vm_installed
;;
start)
_azk_vm_start
;;
ssh)
_azk_vm_ssh
;;
esac
fi
}
_azk_vm_status()
{
local cur
cur="${COMP_WORDS[COMP_CWORD]}"
if [ $COMP_CWORD -ge 3 ]; then
COMPREPLY=( $( compgen -W ' ' -- $cur) )
fi
}
_azk_vm_--()
{
local cur
cur="${COMP_WORDS[COMP_CWORD]}"
if [ $COMP_CWORD -ge 3 ]; then
COMPREPLY=( $( compgen -W ' ' -- $cur) )
fi
}
_azk_vm_stop()
{
local cur
cur="${COMP_WORDS[COMP_CWORD]}"
if [ $COMP_CWORD -ge 3 ]; then
COMPREPLY=( $( compgen -W ' ' -- $cur) )
fi
}
_azk_vm_remove()
{
local cur
cur="${COMP_WORDS[COMP_CWORD]}"
if [ $COMP_CWORD -ge 3 ]; then
COMPREPLY=( $( compgen -W ' ' -- $cur) )
fi
}
_azk_vm_installed()
{
local cur
cur="${COMP_WORDS[COMP_CWORD]}"
if [ $COMP_CWORD -ge 3 ]; then
COMPREPLY=( $( compgen -W ' ' -- $cur) )
fi
}
_azk_vm_start()
{
local cur
cur="${COMP_WORDS[COMP_CWORD]}"
if [ $COMP_CWORD -ge 3 ]; then
COMPREPLY=( $( compgen -W ' ' -- $cur) )
fi
}
_azk_vm_ssh()
{
local cur
cur="${COMP_WORDS[COMP_CWORD]}"
if [ $COMP_CWORD -ge 3 ]; then
COMPREPLY=( $( compgen -W ' ' -- $cur) )
fi
}
_azk_agent()
{
local cur
cur="${COMP_WORDS[COMP_CWORD]}"
if [ $COMP_CWORD -eq 2 ]; then
COMPREPLY=( $( compgen -W '--no-daemon --child --no-reload-vm --configure-file= -q --quiet -h --help --no-color -l= --log= -v --verbose status start stop' -- $cur) )
else
case ${COMP_WORDS[2]} in
status)
_azk_agent_status
;;
start)
_azk_agent_start
;;
stop)
_azk_agent_stop
;;
esac
fi
}
_azk_agent_status()
{
local cur
cur="${COMP_WORDS[COMP_CWORD]}"
if [ $COMP_CWORD -ge 3 ]; then
COMPREPLY=( $( compgen -W ' ' -- $cur) )
fi
}
_azk_agent_start()
{
local cur
cur="${COMP_WORDS[COMP_CWORD]}"
if [ $COMP_CWORD -ge 3 ]; then
COMPREPLY=( $( compgen -W ' ' -- $cur) )
fi
}
_azk_agent_stop()
{
local cur
cur="${COMP_WORDS[COMP_CWORD]}"
if [ $COMP_CWORD -ge 3 ]; then
COMPREPLY=( $( compgen -W ' ' -- $cur) )
fi
}
_azk_start()
{
local cur
cur="${COMP_WORDS[COMP_CWORD]}"
if [ $COMP_CWORD -ge 2 ]; then
COMPREPLY=( $( compgen -fW '-R --reprovision -B --rebuild -r --no-remove -o --open -a= --open-with= -q --quiet -h --help --no-color -l= --log= -v --verbose --git-ref= -R --reprovision -B --rebuild -r --no-remove -o --open -a= --open-with= -q --quiet -h --help --no-color -l= --log= -v --verbose ' -- $cur) )
fi
}
_azk_init()
{
local cur
cur="${COMP_WORDS[COMP_CWORD]}"
if [ $COMP_CWORD -ge 2 ]; then
COMPREPLY=( $( compgen -fW '--filename -F --force -q --quiet -h --help --no-color -l= --log= -v --verbose ' -- $cur) )
fi
}
_azk_shell()
{
local cur
cur="${COMP_WORDS[COMP_CWORD]}"
if [ $COMP_CWORD -eq 2 ]; then
COMPREPLY=( $( compgen -fW '-c= --command= -C= --cwd= -i= --image= --shell= -B --rebuild -r --no-remove --silent -t --tty -T --no-tty -m= --mount= -e= --env= -q --quiet -h --help --no-color -l= --log= -v --verbose --' -- $cur) )
else
case ${COMP_WORDS[2]} in
--)
_azk_shell_--
;;
esac
fi
}
_azk_shell_--()
{
local cur
cur="${COMP_WORDS[COMP_CWORD]}"
if [ $COMP_CWORD -ge 3 ]; then
COMPREPLY=( $( compgen -W ' ' -- $cur) )
fi
}
_azk_version()
{
local cur
cur="${COMP_WORDS[COMP_CWORD]}"
if [ $COMP_CWORD -ge 2 ]; then
COMPREPLY=( $( compgen -W ' ' -- $cur) )
fi
}
_azk_docker()
{
local cur
cur="${COMP_WORDS[COMP_CWORD]}"
if [ $COMP_CWORD -eq 2 ]; then
COMPREPLY=( $( compgen -fW '-q --quiet -h --help --no-color -l= --log= -v --verbose --' -- $cur) )
else
case ${COMP_WORDS[2]} in
--)
_azk_docker_--
;;
esac
fi
}
_azk_docker_--()
{
local cur
cur="${COMP_WORDS[COMP_CWORD]}"
if [ $COMP_CWORD -ge 3 ]; then
COMPREPLY=( $( compgen -W ' ' -- $cur) )
fi
}
_azk_config()
{
local cur
cur="${COMP_WORDS[COMP_CWORD]}"
if [ $COMP_CWORD -eq 2 ]; then
COMPREPLY=( $( compgen -fW '-q --quiet -h --help --no-color -l= --log= -v --verbose reset set list' -- $cur) )
else
case ${COMP_WORDS[2]} in
reset)
_azk_config_reset
;;
set)
_azk_config_set
;;
list)
_azk_config_list
;;
esac
fi
}
_azk_config_reset()
{
local cur
cur="${COMP_WORDS[COMP_CWORD]}"
if [ $COMP_CWORD -ge 3 ]; then
COMPREPLY=( $( compgen -W ' ' -- $cur) )
fi
}
_azk_config_set()
{
local cur
cur="${COMP_WORDS[COMP_CWORD]}"
if [ $COMP_CWORD -ge 3 ]; then
COMPREPLY=( $( compgen -W ' ' -- $cur) )
fi
}
_azk_config_list()
{
local cur
cur="${COMP_WORDS[COMP_CWORD]}"
if [ $COMP_CWORD -ge 3 ]; then
COMPREPLY=( $( compgen -W ' ' -- $cur) )
fi
}
_azk_restart()
{
local cur
cur="${COMP_WORDS[COMP_CWORD]}"
if [ $COMP_CWORD -ge 2 ]; then
COMPREPLY=( $( compgen -fW '-R --reprovision -B --rebuild -r --no-remove -o --open -a= --open-with= -q --quiet -h --help --no-color -l= --log= -v --verbose ' -- $cur) )
fi
}
_azk_help()
{
local cur
cur="${COMP_WORDS[COMP_CWORD]}"
if [ $COMP_CWORD -ge 2 ]; then
COMPREPLY=( $( compgen -fW '-q --quiet --no-color -l= --log= -v --verbose ' -- $cur) )
fi
}
complete -F _azk azk
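# Usage note (a sketch; install locations vary by distro): source this file
# from an interactive shell, or drop it into the system completion directory,
# so that "azk <TAB>" completes subcommands and flags:
#   source shared/completions/azk.sh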
|
gullitmiranda/azk
|
shared/completions/azk.sh
|
Shell
|
apache-2.0
| 12,308 |
#!/bin/bash
#
# Update Eclipse formatters/templates from central source. Makes certain assumptions
# about where things are, so this is not guaranteed to work for everyone. Only
# here for convenience of some maintainers.
#
GITROOT=git@github.com:adatao
SRC=projects/docs/code-convention
DST=BigR/eclipse
ROOT=../..
FILES=($ROOT/$SRC/*)
for file in ${FILES[*]} ; do
base=`basename $file`
echo $base
cp $file .
done
# Special handling for README.md
cat > README.md << END
### ATTENTION: files in this directory are automatically copied from ${GITROOT}/${SRC}. If you have modifications, make them there. Modifications made here will be overwritten.
Generated: `date` by `whoami`@`hostname`
END
cat $ROOT/$SRC/README.md >> README.md
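# Optional safety sketch (an assumption, not part of the original flow):
# bail out instead of silently clobbering uncommitted local edits here.
#   git diff --quiet -- . || { echo "local changes would be overwritten" >&2; exit 1; }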
|
ddf-project/DDF
|
style/update.sh
|
Shell
|
apache-2.0
| 743 |
#!/bin/sh
entry_num=0
if [ "$1" != "" ]; then
entry_num=$1
fi
key_base64=$(uci get fwknopd.@access[$entry_num].KEY_BASE64)
key=$(uci get fwknopd.@access[$entry_num].KEY)
hmac_key_base64=$(uci get fwknopd.@access[$entry_num].HMAC_KEY_BASE64)
hmac_key=$(uci get fwknopd.@access[$entry_num].HMAC_KEY)
if [ "$key_base64" != "" ]; then
	qr="KEY_BASE64:$key_base64"
fi
if [ "$key" != "" ]; then
	qr="$qr KEY:$key"
fi
if [ "$hmac_key_base64" != "" ]; then
	qr="$qr HMAC_KEY_BASE64:$hmac_key_base64"
fi
if [ "$hmac_key" != "" ]; then
	qr="$qr HMAC_KEY:$hmac_key"
fi
qrencode -o - "$qr"
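# Usage sketch (entry number and output path are examples): qrencode writes a
# PNG to stdout here, so the QR code for the first access stanza can be saved:
#   ./gen-qr.sh 0 > /tmp/fwknop-access0.png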
|
aa65535/luci
|
applications/luci-app-fwknopd/root/usr/sbin/gen-qr.sh
|
Shell
|
apache-2.0
| 570 |
#!/bin/sh
TEST_PURPOSE=goal
TEST_TYPE=umlXhost
TESTNAME=co-terminal-01
XHOST_LIST="NIC RW GWD MRCHARLIE"
REF_CONSOLE_FIXUPS="kern-list-fixups.sed nocr.sed"
REF_CONSOLE_FIXUPS="$REF_CONSOLE_FIXUPS ipsec-look-sanitize.sed"
REF_CONSOLE_FIXUPS="$REF_CONSOLE_FIXUPS east-prompt-splitline.pl"
REF_CONSOLE_FIXUPS="$REF_CONSOLE_FIXUPS script-only.sed"
REF_CONSOLE_FIXUPS="$REF_CONSOLE_FIXUPS cutout.sed"
REF_CONSOLE_FIXUPS="$REF_CONSOLE_FIXUPS klips-debug-sanitize.sed"
REF_CONSOLE_FIXUPS="$REF_CONSOLE_FIXUPS ipsec-setup-sanitize.sed"
NICHOST=nic
NIC_INIT_SCRIPT=nic-init.sh
RWHOST=japan
RW_INIT_SCRIPT=rw-init.sh
RW_RUN_SCRIPT=rw-run.sh
RW_RUN2_SCRIPT=rw-run2.sh
REF_RW_CONSOLE_OUTPUT=rw-console.txt
REF26_RW_CONSOLE_OUTPUT=rw-console.txt
GWDHOST=west
GWD_INIT_SCRIPT=gwd-init.sh
GWD_RUN_SCRIPT=gwd-run.sh
GWD_RUN2_SCRIPT=gwd-run2.sh
REF_GWD_CONSOLE_OUTPUT=gwd-console.txt
REF26_GWD_CONSOLE_OUTPUT=gwd-console.txt
MRCHARLIEHOST=east
MRCHARLIE_RUN_SCRIPT=mrcharlie-init.sh
MRCHARLIE_RUN2_SCRIPT=rw-run2.sh
MRCHARLIE_FINAL_SCRIPT=mrcharlie-final.sh
REF_MRCHARLIE_CONSOLE_OUTPUT=mrcharlie-console.txt
REF26_MRCHARLIE_CONSOLE_OUTPUT=mrcharlie-console.txt
ADDITIONAL_HOSTS="sunrise"
|
mcr/bluerose
|
testing/pluto/co-terminal-01/testparams.sh
|
Shell
|
gpl-2.0
| 1,183 |
source $srcdir/sndrcv_drvr_noexit.sh $1 $2
|
rangochan/rsyslog
|
tests/sndrcv_drvr.sh
|
Shell
|
gpl-3.0
| 43 |
#!/bin/bash
set -eu -o pipefail
VERSION="0.1.13"
PSLIVAR_SHA256SUM="00ae0b0ca141af57aea7900183ef9ff0d2b8e1147a9d5ec6e0f8a4147ab40ce8"
mkdir -p $PREFIX/bin
chmod a+x slivar
cp slivar $PREFIX/bin/slivar
curl -L -s -o pslivar https://github.com/brentp/slivar/releases/download/v${VERSION}/pslivar
sha256sum pslivar | grep ${PSLIVAR_SHA256SUM}
chmod a+x pslivar
cp pslivar $PREFIX/bin/pslivar
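# Hedged alternative to the grep-based check above: "sha256sum -c" verifies
# the digest directly and, with "set -e" in effect, aborts the build on a
# mismatch.
#   echo "${PSLIVAR_SHA256SUM}  pslivar" | sha256sum -c -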
|
cokelaer/bioconda-recipes
|
recipes/slivar/build.sh
|
Shell
|
mit
| 390 |
#!/bin/bash
mkdir -p "$PREFIX/bin"
export MACHTYPE=x86_64
export BINDIR=$(pwd)/bin
mkdir -p "$BINDIR"
(cd kent/src/lib && make)
(cd kent/src/htslib && make)
(cd kent/src/jkOwnLib && make)
(cd kent/src/hg/lib && make)
(cd kent/src/utils/bigWigCorrelate && make)
cp bin/bigWigCorrelate "$PREFIX/bin"
chmod +x "$PREFIX/bin/bigWigCorrelate"
|
joachimwolff/bioconda-recipes
|
recipes/ucsc-bigwigcorrelate/build.sh
|
Shell
|
mit
| 337 |
#!/bin/bash
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
set -ex
if [ "$CONFIG" != "gcov" ] ; then exit ; fi
# change to gRPC repo root
cd $(dirname $0)/../..
# Generate the csharp extension coverage report
gcov objs/gcov/src/csharp/ext/*.o
lcov --base-directory . --directory . -c -o coverage.info
lcov -e coverage.info '**/src/csharp/ext/*' -o coverage.info
genhtml -o reports/csharp_ext_coverage --num-spaces 2 \
-t 'gRPC C# native extension test coverage' coverage.info \
--rc genhtml_hi_limit=95 --rc genhtml_med_limit=80 --no-prefix
|
arkmaxim/grpc
|
tools/run_tests/post_tests_csharp.sh
|
Shell
|
bsd-3-clause
| 2,030 |
#!/bin/bash
####################################################
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a symlink
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
SOURCE="$(readlink "$SOURCE")"
[[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE" # if $SOURCE was a relative symlink, we need to resolve it relative to the path where the symlink file was located
done
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
####################################################
set -eu -o pipefail
set -x
git init .
git clean -dfx
git reset --hard
git config --local user.name 'ACS Bot'
git config --local user.email '[email protected]'
git fetch --tags https://github.com/${REPO_OWNER}/${REPO_NAME} master +refs/pull/${PULL_NUMBER}/head:refs/pr/${PULL_NUMBER}
git checkout -B test "${PULL_BASE_SHA}"
git merge --no-ff -m "Merge +refs/pull/${PULL_NUMBER}/head:refs/pr/${PULL_NUMBER}" "${PULL_PULL_SHA}"
echo "----------------------------------------------------------"
env
echo "----------------------------------------------------------"
|
rjtsdl/acs-engine
|
test/bootstrap/checkout-pr.sh
|
Shell
|
mit
| 1,097 |
#!/usr/bin/env bash
set -e
# This script builds various binary artifacts from a checkout of the docker
# source code.
#
# Requirements:
# - The current directory should be a checkout of the docker source code
# (https://github.com/docker/docker). Whatever version is checked out
# will be built.
# - The VERSION file, at the root of the repository, should exist, and
# will be used as Docker binary version and package version.
# - The hash of the git commit will also be included in the Docker binary,
# with the suffix -dirty if the repository isn't clean.
# - The script is intended to be run inside the docker container specified
# in the Dockerfile at the root of the source. In other words:
# DO NOT CALL THIS SCRIPT DIRECTLY.
# - The right way to call this script is to invoke "make" from
# your checkout of the Docker repository.
# the Makefile will do a "docker build -t docker ." and then
# "docker run hack/make.sh" in the resulting image.
#
set -o pipefail
export DOCKER_PKG='github.com/docker/docker'
export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
export MAKEDIR="$SCRIPTDIR/make"
# We're a nice, sexy, little shell script, and people might try to run us;
# but really, they shouldn't. We want to be in a container!
if [ "$PWD" != "/go/src/$DOCKER_PKG" ] || [ -z "$DOCKER_CROSSPLATFORMS" ]; then
{
echo "# WARNING! I don't seem to be running in the Docker container."
echo "# The result of this command might be an incorrect build, and will not be"
echo "# officially supported."
echo "#"
echo "# Try this instead: make all"
echo "#"
} >&2
fi
echo
# List of bundles to create when no argument is passed
DEFAULT_BUNDLES=(
validate-dco
validate-gofmt
validate-pkg
validate-test
validate-toml
validate-vet
binary
test-unit
test-integration-cli
test-docker-py
dynbinary
cover
cross
tgz
ubuntu
)
VERSION=$(< ./VERSION)
if command -v git &> /dev/null && git rev-parse &> /dev/null; then
GITCOMMIT=$(git rev-parse --short HEAD)
if [ -n "$(git status --porcelain --untracked-files=no)" ]; then
GITCOMMIT="$GITCOMMIT-dirty"
fi
BUILDTIME=$(date -u)
elif [ "$DOCKER_GITCOMMIT" ]; then
GITCOMMIT="$DOCKER_GITCOMMIT"
else
echo >&2 'error: .git directory missing and DOCKER_GITCOMMIT not specified'
echo >&2 ' Please either build with the .git directory accessible, or specify the'
echo >&2 ' exact (--short) commit hash you are building using DOCKER_GITCOMMIT for'
echo >&2 ' future accountability in diagnosing build issues. Thanks!'
exit 1
fi
if [ "$AUTO_GOPATH" ]; then
rm -rf .gopath
mkdir -p .gopath/src/"$(dirname "${DOCKER_PKG}")"
ln -sf ../../../.. .gopath/src/"${DOCKER_PKG}"
export GOPATH="${PWD}/.gopath:${PWD}/vendor"
fi
if [ ! "$GOPATH" ]; then
echo >&2 'error: missing GOPATH; please see https://golang.org/doc/code.html#GOPATH'
echo >&2 ' alternatively, set AUTO_GOPATH=1'
exit 1
fi
if [ "$DOCKER_EXPERIMENTAL" ]; then
echo >&2 '# WARNING! DOCKER_EXPERIMENTAL is set: building experimental features'
echo >&2
DOCKER_BUILDTAGS+=" experimental"
fi
if [ -z "$DOCKER_CLIENTONLY" ]; then
DOCKER_BUILDTAGS+=" daemon"
fi
if [ "$DOCKER_EXECDRIVER" = 'lxc' ]; then
DOCKER_BUILDTAGS+=' test_no_exec'
fi
# test whether "btrfs/version.h" exists and apply btrfs_noversion appropriately
if \
command -v gcc &> /dev/null \
&& ! gcc -E - &> /dev/null <<<'#include <btrfs/version.h>' \
; then
DOCKER_BUILDTAGS+=' btrfs_noversion'
fi
# test whether "libdevmapper.h" is new enough to support deferred remove
# functionality.
if \
command -v gcc &> /dev/null \
&& ! ( echo -e '#include <libdevmapper.h>\nint main() { dm_task_deferred_remove(NULL); }'| gcc -ldevmapper -xc - &> /dev/null ) \
; then
DOCKER_BUILDTAGS+=' libdm_no_deferred_remove'
fi
# Use these flags when compiling the tests and final binary
IAMSTATIC='true'
source "$SCRIPTDIR/make/.go-autogen"
if [ -z "$DOCKER_DEBUG" ]; then
LDFLAGS='-w'
fi
LDFLAGS_STATIC='-linkmode external'
# Cgo -H windows is incompatible with -linkmode external.
if [ "$(go env GOOS)" == 'windows' ]; then
LDFLAGS_STATIC=''
fi
EXTLDFLAGS_STATIC='-static'
# ORIG_BUILDFLAGS is necessary for the cross target which cannot always build
# with options like -race.
ORIG_BUILDFLAGS=( -a -tags "netgo static_build $DOCKER_BUILDTAGS" -installsuffix netgo )
# see https://github.com/golang/go/issues/9369#issuecomment-69864440 for why -installsuffix is necessary here
BUILDFLAGS=( $BUILDFLAGS "${ORIG_BUILDFLAGS[@]}" )
# Test timeout.
: ${TIMEOUT:=60m}
TESTFLAGS+=" -test.timeout=${TIMEOUT}"
# A few more flags that are specific just to building a completely-static binary (see hack/make/binary)
# PLEASE do not use these anywhere else.
EXTLDFLAGS_STATIC_DOCKER="$EXTLDFLAGS_STATIC -lpthread -Wl,--unresolved-symbols=ignore-in-object-files"
LDFLAGS_STATIC_DOCKER="
$LDFLAGS_STATIC
-extldflags \"$EXTLDFLAGS_STATIC_DOCKER\"
"
if [ "$(uname -s)" = 'FreeBSD' ]; then
# Tell cgo the compiler is Clang, not GCC
# https://code.google.com/p/go/source/browse/src/cmd/cgo/gcc.go?spec=svne77e74371f2340ee08622ce602e9f7b15f29d8d3&r=e6794866ebeba2bf8818b9261b54e2eef1c9e588#752
export CC=clang
# "-extld clang" is a workaround for
# https://code.google.com/p/go/issues/detail?id=6845
LDFLAGS="$LDFLAGS -extld clang"
fi
# If sqlite3.h doesn't exist under /usr/include,
# check /usr/local/include also just in case
# (e.g. FreeBSD Ports installs it under the directory)
if [ ! -e /usr/include/sqlite3.h ] && [ -e /usr/local/include/sqlite3.h ]; then
export CGO_CFLAGS='-I/usr/local/include'
export CGO_LDFLAGS='-L/usr/local/lib'
fi
HAVE_GO_TEST_COVER=
if \
go help testflag | grep -- -cover > /dev/null \
&& go tool -n cover > /dev/null 2>&1 \
; then
HAVE_GO_TEST_COVER=1
fi
# If $TESTFLAGS is set in the environment, it is passed as extra arguments to 'go test'.
# You can use this to select certain tests to run, e.g.
#
# TESTFLAGS='-test.run ^TestBuild$' ./hack/make.sh test-unit
#
# For integration-cli test, we use [gocheck](https://labix.org/gocheck), if you want
# to run certain tests on your local host, you should run with command:
#
# TESTFLAGS='-check.f DockerSuite.TestBuild*' ./hack/make.sh binary test-integration-cli
#
go_test_dir() {
dir=$1
coverpkg=$2
testcover=()
if [ "$HAVE_GO_TEST_COVER" ]; then
# if our current go install has -cover, we want to use it :)
mkdir -p "$DEST/coverprofiles"
coverprofile="docker${dir#.}"
coverprofile="$ABS_DEST/coverprofiles/${coverprofile//\//-}"
testcover=( -cover -coverprofile "$coverprofile" $coverpkg )
fi
(
echo '+ go test' $TESTFLAGS "${DOCKER_PKG}${dir#.}"
cd "$dir"
export DEST="$ABS_DEST" # we're in a subshell, so this is safe -- our integration-cli tests need DEST, and "cd" screws it up
test_env go test ${testcover[@]} -ldflags "$LDFLAGS" "${BUILDFLAGS[@]}" $TESTFLAGS
)
}
test_env() {
# use "env -i" to tightly control the environment variables that bleed into the tests
env -i \
DEST="$DEST" \
DOCKER_EXECDRIVER="$DOCKER_EXECDRIVER" \
DOCKER_GRAPHDRIVER="$DOCKER_GRAPHDRIVER" \
DOCKER_USERLANDPROXY="$DOCKER_USERLANDPROXY" \
DOCKER_HOST="$DOCKER_HOST" \
GOPATH="$GOPATH" \
HOME="$ABS_DEST/fake-HOME" \
PATH="$PATH" \
TEMP="$TEMP" \
TEST_DOCKERINIT_PATH="$TEST_DOCKERINIT_PATH" \
"$@"
}
# a helper to provide ".exe" when it's appropriate
binary_extension() {
if [ "$(go env GOOS)" = 'windows' ]; then
echo -n '.exe'
fi
}
# This helper function walks the current directory looking for directories
# holding certain files ($1 parameter), and prints their paths on standard
# output, one per line.
find_dirs() {
find . -not \( \
\( \
-path './vendor/*' \
-o -path './integration-cli/*' \
-o -path './contrib/*' \
-o -path './pkg/mflag/example/*' \
-o -path './.git/*' \
-o -path './bundles/*' \
-o -path './docs/*' \
-o -path './pkg/libcontainer/nsinit/*' \
\) \
-prune \
\) -name "$1" -print0 | xargs -0n1 dirname | sort -u
}
hash_files() {
while [ $# -gt 0 ]; do
f="$1"
shift
dir="$(dirname "$f")"
base="$(basename "$f")"
for hashAlgo in md5 sha256; do
if command -v "${hashAlgo}sum" &> /dev/null; then
(
# subshell and cd so that we get output files like:
# $HASH docker-$VERSION
# instead of:
# $HASH /go/src/github.com/.../$VERSION/binary/docker-$VERSION
cd "$dir"
"${hashAlgo}sum" "$base" > "$base.$hashAlgo"
)
fi
done
done
}
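# Usage sketch (the path is an example): writes docker-$VERSION.md5 and
# docker-$VERSION.sha256 alongside the binary:
#   hash_files "bundles/$VERSION/binary/docker-$VERSION"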
bundle() {
local bundle="$1"; shift
echo "---> Making bundle: $(basename "$bundle") (in $DEST)"
source "$SCRIPTDIR/make/$bundle" "$@"
}
main() {
# We want this to fail if the bundles already exist and cannot be removed.
# This is to avoid mixing bundles from different versions of the code.
mkdir -p bundles
if [ -e "bundles/$VERSION" ]; then
echo "bundles/$VERSION already exists. Removing."
rm -fr "bundles/$VERSION" && mkdir "bundles/$VERSION" || exit 1
echo
fi
if [ "$(go env GOHOSTOS)" != 'windows' ]; then
# Windows and symlinks don't get along well
rm -f bundles/latest
ln -s "$VERSION" bundles/latest
fi
if [ $# -lt 1 ]; then
bundles=(${DEFAULT_BUNDLES[@]})
else
bundles=($@)
fi
for bundle in ${bundles[@]}; do
export DEST="bundles/$VERSION/$(basename "$bundle")"
# Cygdrive paths don't play well with go build -o.
if [[ "$(uname -s)" == CYGWIN* ]]; then
export DEST="$(cygpath -mw "$DEST")"
fi
mkdir -p "$DEST"
ABS_DEST="$(cd "$DEST" && pwd -P)"
bundle "$bundle"
echo
done
}
main "$@"
|
rhuss/gofabric8
|
vendor/github.com/docker/docker/hack/make.sh
|
Shell
|
apache-2.0
| 9,472 |
#!/usr/bin/env bash
source $(dirname $0)/reader.sh
source $(dirname $0)/printer.sh
source $(dirname $0)/env.sh
source $(dirname $0)/core.sh
# read
READ () {
[ "${1}" ] && r="${1}" || READLINE
READ_STR "${r}"
}
# eval
IS_PAIR () {
if _sequential? "${1}"; then
_count "${1}"
[[ "${r}" > 0 ]] && return 0
fi
return 1
}
QUASIQUOTE () {
if ! IS_PAIR "${1}"; then
_symbol quote
_list "${r}" "${1}"
return
else
_nth "${1}" 0; local a0="${r}"
if [[ "${ANON["${a0}"]}" == "unquote" ]]; then
_nth "${1}" 1
return
elif IS_PAIR "${a0}"; then
_nth "${a0}" 0; local a00="${r}"
if [[ "${ANON["${a00}"]}" == "splice-unquote" ]]; then
_symbol concat; local a="${r}"
_nth "${a0}" 1; local b="${r}"
_rest "${1}"
QUASIQUOTE "${r}"; local c="${r}"
_list "${a}" "${b}" "${c}"
return
fi
fi
fi
_symbol cons; local a="${r}"
QUASIQUOTE "${a0}"; local b="${r}"
_rest "${1}"
QUASIQUOTE "${r}"; local c="${r}"
_list "${a}" "${b}" "${c}"
return
}
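# Worked example (illustrative only, not part of the interpreter): the
# recursion above expands
#   (quasiquote (a (unquote b) (splice-unquote c)))
# pair by pair into
#   (cons (quote a) (cons b (concat c (quote ()))))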
EVAL_AST () {
local ast="${1}" env="${2}"
#_pr_str "${ast}"; echo "EVAL_AST '${ast}:${r} / ${env}'"
_obj_type "${ast}"; local ot="${r}"
case "${ot}" in
symbol)
ENV_GET "${env}" "${ast}"
return ;;
list)
_map_with_type _list EVAL "${ast}" "${env}" ;;
vector)
_map_with_type _vector EVAL "${ast}" "${env}" ;;
hash_map)
local res="" val="" hm="${ANON["${ast}"]}"
_hash_map; local new_hm="${r}"
eval local keys="\${!${hm}[@]}"
for key in ${keys}; do
eval val="\${${hm}[\"${key}\"]}"
EVAL "${val}" "${env}"
_assoc! "${new_hm}" "${key}" "${r}"
done
r="${new_hm}" ;;
*)
r="${ast}" ;;
esac
}
EVAL () {
local ast="${1}" env="${2}"
while true; do
r=
[[ "${__ERROR}" ]] && return 1
#_pr_str "${ast}"; echo "EVAL '${r} / ${env}'"
if ! _list? "${ast}"; then
EVAL_AST "${ast}" "${env}"
return
fi
# apply list
_nth "${ast}" 0; local a0="${r}"
_nth "${ast}" 1; local a1="${r}"
_nth "${ast}" 2; local a2="${r}"
case "${ANON["${a0}"]}" in
def!) EVAL "${a2}" "${env}"
[[ "${__ERROR}" ]] && return 1
ENV_SET "${env}" "${a1}" "${r}"
return ;;
let*) ENV "${env}"; local let_env="${r}"
local let_pairs=(${ANON["${a1}"]})
local idx=0
#echo "let: [${let_pairs[*]}] for ${a2}"
while [[ "${let_pairs["${idx}"]}" ]]; do
EVAL "${let_pairs[$(( idx + 1))]}" "${let_env}"
ENV_SET "${let_env}" "${let_pairs[${idx}]}" "${r}"
idx=$(( idx + 2))
done
ast="${a2}"
env="${let_env}"
# Continue loop
;;
quote)
r="${a1}"
return ;;
quasiquote)
QUASIQUOTE "${a1}"
ast="${r}"
# Continue loop
;;
do) _count "${ast}"
_slice "${ast}" 1 $(( ${r} - 2 ))
EVAL_AST "${r}" "${env}"
[[ "${__ERROR}" ]] && r= && return 1
_last "${ast}"
ast="${r}"
# Continue loop
;;
if) EVAL "${a1}" "${env}"
[[ "${__ERROR}" ]] && return 1
if [[ "${r}" == "${__false}" || "${r}" == "${__nil}" ]]; then
# eval false form
_nth "${ast}" 3; local a3="${r}"
if [[ "${a3}" ]]; then
ast="${a3}"
else
r="${__nil}"
return
fi
else
# eval true condition
ast="${a2}"
fi
# Continue loop
;;
fn*) _function "ENV \"${env}\" \"${a1}\" \"\${@}\"; \
EVAL \"${a2}\" \"\${r}\"" \
"${a2}" "${env}" "${a1}"
return ;;
*) EVAL_AST "${ast}" "${env}"
[[ "${__ERROR}" ]] && r= && return 1
local el="${r}"
_first "${el}"; local f="${ANON["${r}"]}"
_rest "${el}"; local args="${ANON["${r}"]}"
#echo "invoke: [${f}] ${args}"
if [[ "${f//@/ }" != "${f}" ]]; then
set -- ${f//@/ }
ast="${2}"
ENV "${3}" "${4}" ${args}
env="${r}"
else
eval ${f%%@*} ${args}
return
fi
# Continue loop
;;
esac
done
}
# print
PRINT () {
if [[ "${__ERROR}" ]]; then
_pr_str "${__ERROR}" yes
r="Error: ${r}"
__ERROR=
else
_pr_str "${1}" yes
fi
}
# repl
ENV; REPL_ENV="${r}"
REP () {
r=
READ "${1}"
EVAL "${r}" "${REPL_ENV}"
PRINT "${r}"
}
# core.sh: defined using bash
_fref () {
_symbol "${1}"; local sym="${r}"
_function "${2} \"\${@}\""
ENV_SET "${REPL_ENV}" "${sym}" "${r}"
}
for n in "${!core_ns[@]}"; do _fref "${n}" "${core_ns["${n}"]}"; done
_eval () { EVAL "${1}" "${REPL_ENV}"; }
_fref "eval" _eval
_list; argv="${r}"
for _arg in "${@:2}"; do _string "${_arg}"; _conj! "${argv}" "${r}"; done
_symbol "__STAR__ARGV__STAR__"
ENV_SET "${REPL_ENV}" "${r}" "${argv}";
# core.mal: defined using the language itself
REP "(def! not (fn* (a) (if a false true)))"
REP "(def! load-file (fn* (f) (eval (read-string (str \"(do \" (slurp f) \")\")))))"
# load/run file from command line (then exit)
if [[ "${1}" ]]; then
REP "(load-file \"${1}\")"
exit 0
fi
# repl loop
while true; do
READLINE "user> " || exit "$?"
[[ "${r}" ]] && REP "${r}" && echo "${r}"
done
|
czchen/mal
|
bash/step7_quote.sh
|
Shell
|
mpl-2.0
| 6,032 |
#!/bin/sh
test_description='basic work tree status reporting'
. ./test-lib.sh
test_expect_success setup '
git config --global advice.statusuoption false &&
test_commit A &&
test_commit B oneside added &&
git checkout A^0 &&
test_commit C oneside created
'
test_expect_success 'A/A conflict' '
git checkout B^0 &&
test_must_fail git merge C
'
test_expect_success 'Report path with conflict' '
git diff --cached --name-status >actual &&
echo "U oneside" >expect &&
test_cmp expect actual
'
test_expect_success 'Report new path with conflict' '
git diff --cached --name-status HEAD^ >actual &&
echo "U oneside" >expect &&
test_cmp expect actual
'
test_expect_success 'M/D conflict does not segfault' '
cat >expect <<EOF &&
On branch side
You have unmerged paths.
(fix conflicts and run "git commit")
Unmerged paths:
(use "git add/rm <file>..." as appropriate to mark resolution)
deleted by us: foo
no changes added to commit (use "git add" and/or "git commit -a")
EOF
mkdir mdconflict &&
(
cd mdconflict &&
git init &&
test_commit initial foo "" &&
test_commit modify foo foo &&
git checkout -b side HEAD^ &&
git rm foo &&
git commit -m delete &&
test_must_fail git merge master &&
test_must_fail git commit --dry-run >../actual &&
test_i18ncmp ../expect ../actual &&
git status >../actual &&
test_i18ncmp ../expect ../actual
)
'
test_expect_success 'rename & unmerged setup' '
git rm -f -r . &&
cat "$TEST_DIRECTORY/README" >ONE &&
git add ONE &&
test_tick &&
git commit -m "One commit with ONE" &&
echo Modified >TWO &&
cat ONE >>TWO &&
cat ONE >>THREE &&
git add TWO THREE &&
sha1=$(git rev-parse :ONE) &&
git rm --cached ONE &&
(
echo "100644 $sha1 1 ONE" &&
echo "100644 $sha1 2 ONE" &&
echo "100644 $sha1 3 ONE"
) | git update-index --index-info &&
echo Further >>THREE
'
test_expect_success 'rename & unmerged status' '
git status -suno >actual &&
cat >expect <<-EOF &&
UU ONE
AM THREE
A TWO
EOF
test_cmp expect actual
'
test_expect_success 'git diff-index --cached shows 2 added + 1 unmerged' '
cat >expected <<-EOF &&
U ONE
A THREE
A TWO
EOF
git diff-index --cached --name-status HEAD >actual &&
test_cmp expected actual
'
test_expect_success 'git diff-index --cached -M shows 2 added + 1 unmerged' '
cat >expected <<-EOF &&
U ONE
A THREE
A TWO
EOF
git diff-index --cached -M --name-status HEAD >actual &&
test_cmp expected actual
'
test_expect_success 'git diff-index --cached -C shows 2 copies + 1 unmerged' '
cat >expected <<-EOF &&
U ONE
C ONE THREE
C ONE TWO
EOF
git diff-index --cached -C --name-status HEAD |
sed "s/^C[0-9]*/C/g" >actual &&
test_cmp expected actual
'
test_expect_success 'status when conflicts with add and rm advice (deleted by them)' '
git reset --hard &&
git checkout master &&
test_commit init main.txt init &&
git checkout -b second_branch &&
git rm main.txt &&
git commit -m "main.txt deleted on second_branch" &&
test_commit second conflict.txt second &&
git checkout master &&
test_commit on_second main.txt on_second &&
test_commit master conflict.txt master &&
test_must_fail git merge second_branch &&
cat >expected <<\EOF &&
On branch master
You have unmerged paths.
(fix conflicts and run "git commit")
Unmerged paths:
(use "git add/rm <file>..." as appropriate to mark resolution)
both added: conflict.txt
deleted by them: main.txt
no changes added to commit (use "git add" and/or "git commit -a")
EOF
git status --untracked-files=no >actual &&
test_i18ncmp expected actual
'
test_expect_success 'prepare for conflicts' '
git reset --hard &&
git checkout -b conflict &&
test_commit one main.txt one &&
git branch conflict_second &&
git mv main.txt sub_master.txt &&
git commit -m "main.txt renamed in sub_master.txt" &&
git checkout conflict_second &&
git mv main.txt sub_second.txt &&
git commit -m "main.txt renamed in sub_second.txt"
'
test_expect_success 'status when conflicts with add and rm advice (both deleted)' '
test_must_fail git merge conflict &&
cat >expected <<\EOF &&
On branch conflict_second
You have unmerged paths.
(fix conflicts and run "git commit")
Unmerged paths:
(use "git add/rm <file>..." as appropriate to mark resolution)
both deleted: main.txt
added by them: sub_master.txt
added by us: sub_second.txt
no changes added to commit (use "git add" and/or "git commit -a")
EOF
git status --untracked-files=no >actual &&
test_i18ncmp expected actual
'
test_expect_success 'status when conflicts with only rm advice (both deleted)' '
git reset --hard conflict_second &&
test_must_fail git merge conflict &&
git add sub_master.txt &&
git add sub_second.txt &&
cat >expected <<\EOF &&
On branch conflict_second
You have unmerged paths.
(fix conflicts and run "git commit")
Changes to be committed:
new file: sub_master.txt
Unmerged paths:
(use "git rm <file>..." to mark resolution)
both deleted: main.txt
Untracked files not listed (use -u option to show untracked files)
EOF
git status --untracked-files=no >actual &&
test_i18ncmp expected actual &&
git reset --hard &&
git checkout master
'
test_done
|
xantage/git
|
t/t7060-wtstatus.sh
|
Shell
|
gpl-2.0
| 5,178 |
#!/bin/sh
#
# Copyright (C) 2016 OpenWrt.org
#
mediatek_board_detect() {
local machine
local name
machine=$(cat /proc/device-tree/model)
case "$machine" in
"MediaTek MT7623 evaluation board")
name="mt7623_evb"
;;
"MediaTek MT7623 eMMC evaluation board")
name="eMMC"
;;
"MediaTek MT7623 NAND evaluation board")
name="NAND"
;;
esac
[ -z "$name" ] && name="unknown"
[ -e "/tmp/sysinfo/" ] || mkdir -p "/tmp/sysinfo/"
echo "$name" > /tmp/sysinfo/board_name
echo "$machine" > /tmp/sysinfo/model
}
mediatek_board_name() {
local name
[ -f /tmp/sysinfo/board_name ] && name=$(cat /tmp/sysinfo/board_name)
[ -z "$name" ] && name="unknown"
echo "$name"
}
|
ZSL2000/openwrt
|
target/linux/mediatek/base-files/lib/mediatek.sh
|
Shell
|
gpl-2.0
| 682 |
#!/usr/bin/env bash
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
LOG_FILE="/var/log/cloudera-azure-initialize.log"
log() {
echo "$(date): $*" >> "${LOG_FILE}"
}
n=120
sleepInterval=10
internal_ip=$1
log "Verifying DNS configuration ..."
until grep "nameserver ${internal_ip}" /etc/resolv.conf || [ ${n} -le 0 ]
do
service network restart
log "Waiting for Azure DNS nameserver updates to propagate, this usually takes less than 2 minutes..."
n=$((n - sleepInterval))
sleep ${sleepInterval}
done
if [ "${n}" -le 0 ]; then
log "Failed to pick up dns server from VNET" & exit 1;
fi
# Verify DNS is working
hostname -f
if [ $? != 0 ]
then
log "Unable to run the command 'hostname -f' (check 1 of 4)"
exit 1
fi
hostname -i
if [ $? != 0 ]
then
log "Unable to run the command 'hostname -i' (check 2 of 4)"
exit 1
fi
host "$(hostname -f)"
if [ $? != 0 ]
then
log "Unable to run the command 'host \`hostname -f\`' (check 3 of 4)"
exit 1
fi
host "$(hostname -i)"
if [ $? != 0 ]
then
log "Unable to run the command 'host \`hostname -i\`' (check 4 of 4)"
exit 1
fi
log "Verifying DNS configuration ... Successful"
exit 0
|
simongdavies/azure-quickstart-templates
|
cloudera-director-on-centos/scripts/update-vm-dns.sh
|
Shell
|
mit
| 1,672 |
# ltmain.sh - Provide generalized library-building support services.
# NOTE: Changing this file will not affect anything until you rerun configure.
#
# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004
# Free Software Foundation, Inc.
# Originally by Gordon Matzigkeit <[email protected]>, 1996
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# As a special exception to the GNU General Public License, if you
# distribute this file as part of a program that contains a
# configuration script generated by Autoconf, you may include it under
# the same distribution terms that you use for the rest of that program.
basename="s,^.*/,,g"
# Work around backward compatibility issue on IRIX 6.5. On IRIX 6.4+, sh
# is ksh but when the shell is invoked as "sh" and the current value of
# the _XPG environment variable is not equal to 1 (one), the special
# positional parameter $0, within a function call, is the name of the
# function.
progpath="$0"
# The name of this program:
progname=`echo "$progpath" | $SED $basename`
modename="$progname"
# Global variables:
EXIT_SUCCESS=0
EXIT_FAILURE=1
PROGRAM=ltmain.sh
PACKAGE=libtool
VERSION=1.5.6
TIMESTAMP=" (1.1220.2.94 2004/04/10 16:27:27)"
# Check that we have a working $echo.
if test "X$1" = X--no-reexec; then
# Discard the --no-reexec flag, and continue.
shift
elif test "X$1" = X--fallback-echo; then
# Avoid inline document here, it may be left over
:
elif test "X`($echo '\t') 2>/dev/null`" = 'X\t'; then
# Yippee, $echo works!
:
else
# Restart under the correct shell, and then maybe $echo will work.
exec $SHELL "$progpath" --no-reexec ${1+"$@"}
fi
if test "X$1" = X--fallback-echo; then
# used as fallback echo
shift
cat <<EOF
$*
EOF
exit $EXIT_SUCCESS
fi
default_mode=
help="Try \`$progname --help' for more information."
magic="%%%MAGIC variable%%%"
mkdir="mkdir"
mv="mv -f"
rm="rm -f"
# Sed substitution that helps us do robust quoting. It backslashifies
# metacharacters that are still active within double-quoted strings.
Xsed="${SED}"' -e 1s/^X//'
sed_quote_subst='s/\([\\`\\"$\\\\]\)/\\\1/g'
# test EBCDIC or ASCII
case `echo A|tr A '\301'` in
A) # EBCDIC based system
SP2NL="tr '\100' '\n'"
NL2SP="tr '\r\n' '\100\100'"
;;
*) # Assume ASCII based system
SP2NL="tr '\040' '\012'"
NL2SP="tr '\015\012' '\040\040'"
;;
esac
# NLS nuisances.
# Only set LANG and LC_ALL to C if already set.
# These must not be set unconditionally because not all systems understand
# e.g. LANG=C (notably SCO).
# We save the old values to restore during execute mode.
if test "${LC_ALL+set}" = set; then
save_LC_ALL="$LC_ALL"; LC_ALL=C; export LC_ALL
fi
if test "${LANG+set}" = set; then
save_LANG="$LANG"; LANG=C; export LANG
fi
# Make sure IFS has a sensible default
: ${IFS="
"}
if test "$build_libtool_libs" != yes && test "$build_old_libs" != yes; then
$echo "$modename: not configured to build any kind of library" 1>&2
$echo "Fatal configuration error. See the $PACKAGE docs for more information." 1>&2
exit $EXIT_FAILURE
fi
# Global variables.
mode=$default_mode
nonopt=
prev=
prevopt=
run=
show="$echo"
show_help=
execute_dlfiles=
lo2o="s/\\.lo\$/.${objext}/"
o2lo="s/\\.${objext}\$/.lo/"
#####################################
# Shell function definitions:
# This seems to be the best place for them
# func_win32_libid arg
# return the library type of file 'arg'
#
# Need a lot of goo to handle *both* DLLs and import libs
# Has to be a shell function in order to 'eat' the argument
# that is supplied when $file_magic_command is called.
func_win32_libid () {
win32_libid_type="unknown"
win32_fileres=`file -L $1 2>/dev/null`
case $win32_fileres in
*ar\ archive\ import\ library*) # definitely import
win32_libid_type="x86 archive import"
;;
*ar\ archive*) # could be an import, or static
if eval $OBJDUMP -f $1 | $SED -e '10q' 2>/dev/null | \
$EGREP -e 'file format pe-i386(.*architecture: i386)?' >/dev/null ; then
win32_nmres=`eval $NM -f posix -A $1 | \
sed -n -e '1,100{/ I /{x;/import/!{s/^/import/;h;p;};x;};}'`
if test "X$win32_nmres" = "Ximport" ; then
win32_libid_type="x86 archive import"
else
win32_libid_type="x86 archive static"
fi
fi
;;
*DLL*)
win32_libid_type="x86 DLL"
;;
*executable*) # but shell scripts are "executable" too...
case $win32_fileres in
*MS\ Windows\ PE\ Intel*)
win32_libid_type="x86 DLL"
;;
esac
;;
esac
$echo $win32_libid_type
}
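# Usage sketch (the path is an example): prints one of "x86 DLL",
# "x86 archive import", "x86 archive static" or "unknown".
#   func_win32_libid /usr/i686-pc-mingw32/lib/libz.dll.a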
# func_infer_tag arg
# Infer tagged configuration to use if any are available and
# if one wasn't chosen via the "--tag" command line option.
# Only attempt this if the compiler in the base compile
# command doesn't match the default compiler.
# arg is usually of the form 'gcc ...'
func_infer_tag () {
if test -n "$available_tags" && test -z "$tagname"; then
CC_quoted=
for arg in $CC; do
case $arg in
*[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"")
arg="\"$arg\""
;;
esac
CC_quoted="$CC_quoted $arg"
done
case $@ in
# Blanks in the command may have been stripped by the calling shell,
# but not from the CC environment variable when configure was run.
" $CC "* | "$CC "* | " `$echo $CC` "* | "`$echo $CC` "* | " $CC_quoted"* | "$CC_quoted "* | " `$echo $CC_quoted` "* | "`$echo $CC_quoted` "*) ;;
# Blanks at the start of $base_compile will cause this to fail
# if we don't check for them as well.
*)
for z in $available_tags; do
if grep "^# ### BEGIN LIBTOOL TAG CONFIG: $z$" < "$progpath" > /dev/null; then
# Evaluate the configuration.
eval "`${SED} -n -e '/^# ### BEGIN LIBTOOL TAG CONFIG: '$z'$/,/^# ### END LIBTOOL TAG CONFIG: '$z'$/p' < $progpath`"
CC_quoted=
for arg in $CC; do
# Double-quote args containing other shell metacharacters.
case $arg in
*[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"")
arg="\"$arg\""
;;
esac
CC_quoted="$CC_quoted $arg"
done
case "$@ " in
" $CC "* | "$CC "* | " `$echo $CC` "* | "`$echo $CC` "* | " $CC_quoted"* | "$CC_quoted "* | " `$echo $CC_quoted` "* | "`$echo $CC_quoted` "*)
# The compiler in the base compile command matches
# the one in the tagged configuration.
# Assume this is the tagged configuration we want.
tagname=$z
break
;;
esac
fi
done
# If $tagname still isn't set, then no tagged configuration
# was found and let the user know that the "--tag" command
# line option must be used.
if test -z "$tagname"; then
$echo "$modename: unable to infer tagged configuration"
$echo "$modename: specify a tag with \`--tag'" 1>&2
exit $EXIT_FAILURE
# else
# $echo "$modename: using $tagname tagged configuration"
fi
;;
esac
fi
}
# End of Shell function definitions
#####################################
# Darwin sucks
eval std_shrext=\"$shrext_cmds\"
# Parse our command line options once, thoroughly.
while test "$#" -gt 0
do
arg="$1"
shift
case $arg in
-*=*) optarg=`$echo "X$arg" | $Xsed -e 's/[-_a-zA-Z0-9]*=//'` ;;
*) optarg= ;;
esac
# If the previous option needs an argument, assign it.
if test -n "$prev"; then
case $prev in
execute_dlfiles)
execute_dlfiles="$execute_dlfiles $arg"
;;
tag)
tagname="$arg"
preserve_args="${preserve_args}=$arg"
# Check whether tagname contains only valid characters
case $tagname in
*[!-_A-Za-z0-9,/]*)
$echo "$progname: invalid tag name: $tagname" 1>&2
exit $EXIT_FAILURE
;;
esac
case $tagname in
CC)
# Don't test for the "default" C tag, as we know, it's there, but
# not specially marked.
;;
*)
if grep "^# ### BEGIN LIBTOOL TAG CONFIG: $tagname$" < "$progpath" > /dev/null; then
taglist="$taglist $tagname"
# Evaluate the configuration.
eval "`${SED} -n -e '/^# ### BEGIN LIBTOOL TAG CONFIG: '$tagname'$/,/^# ### END LIBTOOL TAG CONFIG: '$tagname'$/p' < $progpath`"
else
$echo "$progname: ignoring unknown tag $tagname" 1>&2
fi
;;
esac
;;
*)
eval "$prev=\$arg"
;;
esac
prev=
prevopt=
continue
fi
# Have we seen a non-optional argument yet?
case $arg in
--help)
show_help=yes
;;
--version)
$echo "$PROGRAM (GNU $PACKAGE) $VERSION$TIMESTAMP"
$echo
$echo "Copyright (C) 2003 Free Software Foundation, Inc."
$echo "This is free software; see the source for copying conditions. There is NO"
$echo "warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
exit $EXIT_SUCCESS
;;
--config)
${SED} -e '1,/^# ### BEGIN LIBTOOL CONFIG/d' -e '/^# ### END LIBTOOL CONFIG/,$d' $progpath
# Now print the configurations for the tags.
for tagname in $taglist; do
${SED} -n -e "/^# ### BEGIN LIBTOOL TAG CONFIG: $tagname$/,/^# ### END LIBTOOL TAG CONFIG: $tagname$/p" < "$progpath"
done
exit $EXIT_SUCCESS
;;
--debug)
$echo "$progname: enabling shell trace mode"
set -x
preserve_args="$preserve_args $arg"
;;
--dry-run | -n)
run=:
;;
--features)
$echo "host: $host"
if test "$build_libtool_libs" = yes; then
$echo "enable shared libraries"
else
$echo "disable shared libraries"
fi
if test "$build_old_libs" = yes; then
$echo "enable static libraries"
else
$echo "disable static libraries"
fi
exit $EXIT_SUCCESS
;;
--finish) mode="finish" ;;
--mode) prevopt="--mode" prev=mode ;;
--mode=*) mode="$optarg" ;;
--preserve-dup-deps) duplicate_deps="yes" ;;
--quiet | --silent)
show=:
preserve_args="$preserve_args $arg"
;;
--tag) prevopt="--tag" prev=tag ;;
--tag=*)
set tag "$optarg" ${1+"$@"}
shift
prev=tag
preserve_args="$preserve_args --tag"
;;
-dlopen)
prevopt="-dlopen"
prev=execute_dlfiles
;;
-*)
$echo "$modename: unrecognized option \`$arg'" 1>&2
$echo "$help" 1>&2
exit $EXIT_FAILURE
;;
*)
nonopt="$arg"
break
;;
esac
done
if test -n "$prevopt"; then
$echo "$modename: option \`$prevopt' requires an argument" 1>&2
$echo "$help" 1>&2
exit $EXIT_FAILURE
fi
# If this variable is set in any of the actions, the command in it
# will be execed at the end. This prevents here-documents from being
# left over by shells.
exec_cmd=
if test -z "$show_help"; then
# Infer the operation mode.
if test -z "$mode"; then
$echo "*** Warning: inferring the mode of operation is deprecated." 1>&2
$echo "*** Future versions of Libtool will require -mode=MODE be specified." 1>&2
case $nonopt in
*cc | cc* | *++ | gcc* | *-gcc* | g++* | xlc*)
mode=link
for arg
do
case $arg in
-c)
mode=compile
break
;;
esac
done
;;
*db | *dbx | *strace | *truss)
mode=execute
;;
*install*|cp|mv)
mode=install
;;
*rm)
mode=uninstall
;;
*)
# If we have no mode, but dlfiles were specified, then do execute mode.
test -n "$execute_dlfiles" && mode=execute
# Just use the default operation mode.
if test -z "$mode"; then
if test -n "$nonopt"; then
$echo "$modename: warning: cannot infer operation mode from \`$nonopt'" 1>&2
else
$echo "$modename: warning: cannot infer operation mode without MODE-ARGS" 1>&2
fi
fi
;;
esac
fi
# Only execute mode is allowed to have -dlopen flags.
if test -n "$execute_dlfiles" && test "$mode" != execute; then
$echo "$modename: unrecognized option \`-dlopen'" 1>&2
$echo "$help" 1>&2
exit $EXIT_FAILURE
fi
# Change the help message to a mode-specific one.
generic_help="$help"
help="Try \`$modename --help --mode=$mode' for more information."
# These modes are in order of execution frequency so that they run quickly.
case $mode in
# libtool compile mode
compile)
modename="$modename: compile"
# Get the compilation command and the source file.
base_compile=
srcfile="$nonopt" # always keep a non-empty value in "srcfile"
suppress_opt=yes
suppress_output=
arg_mode=normal
libobj=
later=
for arg
do
case "$arg_mode" in
arg )
# do not "continue". Instead, add this to base_compile
lastarg="$arg"
arg_mode=normal
;;
target )
libobj="$arg"
arg_mode=normal
continue
;;
normal )
# Accept any command-line options.
case $arg in
-o)
if test -n "$libobj" ; then
$echo "$modename: you cannot specify \`-o' more than once" 1>&2
exit $EXIT_FAILURE
fi
arg_mode=target
continue
;;
-static | -prefer-pic | -prefer-non-pic)
later="$later $arg"
continue
;;
-no-suppress)
suppress_opt=no
continue
;;
-Xcompiler)
arg_mode=arg # the next one goes into the "base_compile" arg list
continue # The current "srcfile" will either be retained or
;; # replaced later. I would guess that would be a bug.
-Wc,*)
args=`$echo "X$arg" | $Xsed -e "s/^-Wc,//"`
lastarg=
save_ifs="$IFS"; IFS=','
for arg in $args; do
IFS="$save_ifs"
# Double-quote args containing other shell metacharacters.
# Many Bourne shells cannot handle close brackets correctly
# in scan sets, so we specify it separately.
case $arg in
*[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"")
arg="\"$arg\""
;;
esac
lastarg="$lastarg $arg"
done
IFS="$save_ifs"
lastarg=`$echo "X$lastarg" | $Xsed -e "s/^ //"`
# Add the arguments to base_compile.
base_compile="$base_compile $lastarg"
continue
;;
* )
# Accept the current argument as the source file.
# The previous "srcfile" becomes the current argument.
#
lastarg="$srcfile"
srcfile="$arg"
;;
esac # case $arg
;;
esac # case $arg_mode
# Aesthetically quote the previous argument.
lastarg=`$echo "X$lastarg" | $Xsed -e "$sed_quote_subst"`
case $lastarg in
# Double-quote args containing other shell metacharacters.
# Many Bourne shells cannot handle close brackets correctly
# in scan sets, so we specify it separately.
*[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"")
lastarg="\"$lastarg\""
;;
esac
base_compile="$base_compile $lastarg"
done # for arg
case $arg_mode in
arg)
$echo "$modename: you must specify an argument for -Xcompile"
exit $EXIT_FAILURE
;;
target)
$echo "$modename: you must specify a target with \`-o'" 1>&2
exit $EXIT_FAILURE
;;
*)
# Get the name of the library object.
[ -z "$libobj" ] && libobj=`$echo "X$srcfile" | $Xsed -e 's%^.*/%%'`
;;
esac
# Recognize several different file suffixes.
# If the user specifies -o file.o, it is replaced with file.lo
xform='[cCFSifmso]'
case $libobj in
*.ada) xform=ada ;;
*.adb) xform=adb ;;
*.ads) xform=ads ;;
*.asm) xform=asm ;;
*.c++) xform=c++ ;;
*.cc) xform=cc ;;
*.ii) xform=ii ;;
*.class) xform=class ;;
*.cpp) xform=cpp ;;
*.cxx) xform=cxx ;;
*.f90) xform=f90 ;;
*.for) xform=for ;;
*.java) xform=java ;;
esac
libobj=`$echo "X$libobj" | $Xsed -e "s/\.$xform$/.lo/"`
case $libobj in
*.lo) obj=`$echo "X$libobj" | $Xsed -e "$lo2o"` ;;
*)
$echo "$modename: cannot determine name of library object from \`$libobj'" 1>&2
exit $EXIT_FAILURE
;;
esac
func_infer_tag $base_compile
for arg in $later; do
case $arg in
-static)
build_old_libs=yes
continue
;;
-prefer-pic)
pic_mode=yes
continue
;;
-prefer-non-pic)
pic_mode=no
continue
;;
esac
done
objname=`$echo "X$obj" | $Xsed -e 's%^.*/%%'`
xdir=`$echo "X$obj" | $Xsed -e 's%/[^/]*$%%'`
if test "X$xdir" = "X$obj"; then
xdir=
else
xdir=$xdir/
fi
lobj=${xdir}$objdir/$objname
if test -z "$base_compile"; then
$echo "$modename: you must specify a compilation command" 1>&2
$echo "$help" 1>&2
exit $EXIT_FAILURE
fi
# Delete any leftover library objects.
if test "$build_old_libs" = yes; then
removelist="$obj $lobj $libobj ${libobj}T"
else
removelist="$lobj $libobj ${libobj}T"
fi
$run $rm $removelist
trap "$run $rm $removelist; exit $EXIT_FAILURE" 1 2 15
# On Cygwin there's no "real" PIC flag so we must build both object types
case $host_os in
cygwin* | mingw* | pw32* | os2*)
pic_mode=default
;;
esac
if test "$pic_mode" = no && test "$deplibs_check_method" != pass_all; then
# non-PIC code in shared libraries is not supported
pic_mode=default
fi
# Calculate the filename of the output object if compiler does
# not support -o with -c
if test "$compiler_c_o" = no; then
output_obj=`$echo "X$srcfile" | $Xsed -e 's%^.*/%%' -e 's%\.[^.]*$%%'`.${objext}
lockfile="$output_obj.lock"
removelist="$removelist $output_obj $lockfile"
trap "$run $rm $removelist; exit $EXIT_FAILURE" 1 2 15
else
output_obj=
need_locks=no
lockfile=
fi
# Lock this critical section if it is needed
# We use this script file to make the link, it avoids creating a new file
if test "$need_locks" = yes; then
until $run ln "$progpath" "$lockfile" 2>/dev/null; do
$show "Waiting for $lockfile to be removed"
sleep 2
done
elif test "$need_locks" = warn; then
if test -f "$lockfile"; then
$echo "\
*** ERROR, $lockfile exists and contains:
`cat $lockfile 2>/dev/null`
This indicates that another process is trying to use the same
temporary object file, and libtool could not work around it because
your compiler does not support \`-c' and \`-o' together. If you
repeat this compilation, it may succeed, by chance, but you had better
avoid parallel builds (make -j) in this platform, or get a better
compiler."
$run $rm $removelist
exit $EXIT_FAILURE
fi
$echo $srcfile > "$lockfile"
fi
if test -n "$fix_srcfile_path"; then
eval srcfile=\"$fix_srcfile_path\"
fi
$run $rm "$libobj" "${libobj}T"
# Create a libtool object file (analogous to a ".la" file),
# but don't create it if we're doing a dry run.
test -z "$run" && cat > ${libobj}T <<EOF
# $libobj - a libtool object file
# Generated by $PROGRAM - GNU $PACKAGE $VERSION$TIMESTAMP
#
# Please DO NOT delete this file!
# It is necessary for linking the library.
# Name of the PIC object.
EOF
# Only build a PIC object if we are building libtool libraries.
if test "$build_libtool_libs" = yes; then
# Without this assignment, base_compile gets emptied.
fbsd_hideous_sh_bug=$base_compile
if test "$pic_mode" != no; then
command="$base_compile $srcfile $pic_flag"
else
# Don't build PIC code
command="$base_compile $srcfile"
fi
if test ! -d "${xdir}$objdir"; then
$show "$mkdir ${xdir}$objdir"
$run $mkdir ${xdir}$objdir
status=$?
if test "$status" -ne 0 && test ! -d "${xdir}$objdir"; then
exit $status
fi
fi
if test -z "$output_obj"; then
# Place PIC objects in $objdir
command="$command -o $lobj"
fi
$run $rm "$lobj" "$output_obj"
$show "$command"
if $run eval "$command"; then :
else
test -n "$output_obj" && $run $rm $removelist
exit $EXIT_FAILURE
fi
if test "$need_locks" = warn &&
test "X`cat $lockfile 2>/dev/null`" != "X$srcfile"; then
$echo "\
*** ERROR, $lockfile contains:
`cat $lockfile 2>/dev/null`
but it should contain:
$srcfile
This indicates that another process is trying to use the same
temporary object file, and libtool could not work around it because
your compiler does not support \`-c' and \`-o' together. If you
repeat this compilation, it may succeed, by chance, but you had better
avoid parallel builds (make -j) in this platform, or get a better
compiler."
$run $rm $removelist
exit $EXIT_FAILURE
fi
# Just move the object if needed, then go on to compile the next one
if test -n "$output_obj" && test "X$output_obj" != "X$lobj"; then
$show "$mv $output_obj $lobj"
if $run $mv $output_obj $lobj; then :
else
error=$?
$run $rm $removelist
exit $error
fi
fi
# Append the name of the PIC object to the libtool object file.
test -z "$run" && cat >> ${libobj}T <<EOF
pic_object='$objdir/$objname'
EOF
# Allow error messages only from the first compilation.
if test "$suppress_opt" = yes; then
suppress_output=' >/dev/null 2>&1'
fi
else
# No PIC object so indicate it doesn't exist in the libtool
# object file.
test -z "$run" && cat >> ${libobj}T <<EOF
pic_object=none
EOF
fi
# Only build a position-dependent object if we build old libraries.
if test "$build_old_libs" = yes; then
if test "$pic_mode" != yes; then
# Don't build PIC code
command="$base_compile $srcfile"
else
command="$base_compile $srcfile $pic_flag"
fi
if test "$compiler_c_o" = yes; then
command="$command -o $obj"
fi
# Suppress compiler output if we already did a PIC compilation.
command="$command$suppress_output"
$run $rm "$obj" "$output_obj"
$show "$command"
if $run eval "$command"; then :
else
$run $rm $removelist
exit $EXIT_FAILURE
fi
if test "$need_locks" = warn &&
test "X`cat $lockfile 2>/dev/null`" != "X$srcfile"; then
$echo "\
*** ERROR, $lockfile contains:
`cat $lockfile 2>/dev/null`
but it should contain:
$srcfile
This indicates that another process is trying to use the same
temporary object file, and libtool could not work around it because
your compiler does not support \`-c' and \`-o' together. If you
repeat this compilation, it may succeed, by chance, but you had better
avoid parallel builds (make -j) in this platform, or get a better
compiler."
$run $rm $removelist
exit $EXIT_FAILURE
fi
# Just move the object if needed
if test -n "$output_obj" && test "X$output_obj" != "X$obj"; then
$show "$mv $output_obj $obj"
if $run $mv $output_obj $obj; then :
else
error=$?
$run $rm $removelist
exit $error
fi
fi
      # Append the name of the non-PIC object to the libtool object file.
# Only append if the libtool object file exists.
test -z "$run" && cat >> ${libobj}T <<EOF
# Name of the non-PIC object.
non_pic_object='$objname'
EOF
else
      # Append the name of the non-PIC object to the libtool object file.
# Only append if the libtool object file exists.
test -z "$run" && cat >> ${libobj}T <<EOF
# Name of the non-PIC object.
non_pic_object=none
EOF
fi
$run $mv "${libobj}T" "${libobj}"
# Unlock the critical section if it was locked
if test "$need_locks" != no; then
$run $rm "$lockfile"
fi
exit $EXIT_SUCCESS
;;
# libtool link mode
link | relink)
modename="$modename: link"
case $host in
*-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2*)
# It is impossible to link a dll without this setting, and
# we shouldn't force the makefile maintainer to figure out
# which system we are compiling for in order to pass an extra
# flag for every libtool invocation.
# allow_undefined=no
# FIXME: Unfortunately, there are problems with the above when trying
# to make a dll which has undefined symbols, in which case not
# even a static library is built. For now, we need to specify
# -no-undefined on the libtool link line when we can be certain
# that all symbols are satisfied, otherwise we get a static library.
allow_undefined=yes
;;
*)
allow_undefined=yes
;;
esac
libtool_args="$nonopt"
base_compile="$nonopt $@"
compile_command="$nonopt"
finalize_command="$nonopt"
compile_rpath=
finalize_rpath=
compile_shlibpath=
finalize_shlibpath=
convenience=
old_convenience=
deplibs=
old_deplibs=
compiler_flags=
linker_flags=
dllsearchpath=
lib_search_path=`pwd`
inst_prefix_dir=
avoid_version=no
dlfiles=
dlprefiles=
dlself=no
export_dynamic=no
export_symbols=
export_symbols_regex=
generated=
libobjs=
ltlibs=
module=no
no_install=no
objs=
non_pic_objects=
precious_files_regex=
prefer_static_libs=no
preload=no
prev=
prevarg=
release=
rpath=
xrpath=
perm_rpath=
temp_rpath=
thread_safe=no
vinfo=
vinfo_number=no
func_infer_tag $base_compile
# We need to know -static, to get the right output filenames.
for arg
do
case $arg in
-all-static | -static)
if test "X$arg" = "X-all-static"; then
if test "$build_libtool_libs" = yes && test -z "$link_static_flag"; then
$echo "$modename: warning: complete static linking is impossible in this configuration" 1>&2
fi
if test -n "$link_static_flag"; then
dlopen_self=$dlopen_self_static
fi
else
if test -z "$pic_flag" && test -n "$link_static_flag"; then
dlopen_self=$dlopen_self_static
fi
fi
build_libtool_libs=no
build_old_libs=yes
prefer_static_libs=yes
break
;;
esac
done
# See if our shared archives depend on static archives.
test -n "$old_archive_from_new_cmds" && build_old_libs=yes
# Go through the arguments, transforming them on the way.
while test "$#" -gt 0; do
arg="$1"
shift
case $arg in
*[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"")
qarg=\"`$echo "X$arg" | $Xsed -e "$sed_quote_subst"`\" ### testsuite: skip nested quoting test
;;
*) qarg=$arg ;;
esac
libtool_args="$libtool_args $qarg"
# If the previous option needs an argument, assign it.
if test -n "$prev"; then
case $prev in
output)
compile_command="$compile_command @OUTPUT@"
finalize_command="$finalize_command @OUTPUT@"
;;
esac
case $prev in
dlfiles|dlprefiles)
if test "$preload" = no; then
# Add the symbol object into the linking commands.
compile_command="$compile_command @SYMFILE@"
finalize_command="$finalize_command @SYMFILE@"
preload=yes
fi
case $arg in
*.la | *.lo) ;; # We handle these cases below.
force)
if test "$dlself" = no; then
dlself=needless
export_dynamic=yes
fi
prev=
continue
;;
self)
if test "$prev" = dlprefiles; then
dlself=yes
elif test "$prev" = dlfiles && test "$dlopen_self" != yes; then
dlself=yes
else
dlself=needless
export_dynamic=yes
fi
prev=
continue
;;
*)
if test "$prev" = dlfiles; then
dlfiles="$dlfiles $arg"
else
dlprefiles="$dlprefiles $arg"
fi
prev=
continue
;;
esac
;;
expsyms)
export_symbols="$arg"
if test ! -f "$arg"; then
$echo "$modename: symbol file \`$arg' does not exist"
exit $EXIT_FAILURE
fi
prev=
continue
;;
expsyms_regex)
export_symbols_regex="$arg"
prev=
continue
;;
inst_prefix)
inst_prefix_dir="$arg"
prev=
continue
;;
precious_regex)
precious_files_regex="$arg"
prev=
continue
;;
release)
release="-$arg"
prev=
continue
;;
objectlist)
if test -f "$arg"; then
save_arg=$arg
moreargs=
for fil in `cat $save_arg`
do
# moreargs="$moreargs $fil"
arg=$fil
# A libtool-controlled object.
# Check to see that this really is a libtool object.
if (${SED} -e '2q' $arg | grep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then
pic_object=
non_pic_object=
# Read the .lo file
# If there is no directory component, then add one.
case $arg in
*/* | *\\*) . $arg ;;
*) . ./$arg ;;
esac
if test -z "$pic_object" || \
test -z "$non_pic_object" ||
test "$pic_object" = none && \
test "$non_pic_object" = none; then
$echo "$modename: cannot find name of object for \`$arg'" 1>&2
exit $EXIT_FAILURE
fi
# Extract subdirectory from the argument.
xdir=`$echo "X$arg" | $Xsed -e 's%/[^/]*$%%'`
if test "X$xdir" = "X$arg"; then
xdir=
else
xdir="$xdir/"
fi
if test "$pic_object" != none; then
# Prepend the subdirectory the object is found in.
pic_object="$xdir$pic_object"
if test "$prev" = dlfiles; then
if test "$build_libtool_libs" = yes && test "$dlopen_support" = yes; then
dlfiles="$dlfiles $pic_object"
prev=
continue
else
# If libtool objects are unsupported, then we need to preload.
prev=dlprefiles
fi
fi
# CHECK ME: I think I busted this. -Ossama
if test "$prev" = dlprefiles; then
# Preload the old-style object.
dlprefiles="$dlprefiles $pic_object"
prev=
fi
# A PIC object.
libobjs="$libobjs $pic_object"
arg="$pic_object"
fi
# Non-PIC object.
if test "$non_pic_object" != none; then
# Prepend the subdirectory the object is found in.
non_pic_object="$xdir$non_pic_object"
# A standard non-PIC object
non_pic_objects="$non_pic_objects $non_pic_object"
if test -z "$pic_object" || test "$pic_object" = none ; then
arg="$non_pic_object"
fi
fi
else
# Only an error if not doing a dry-run.
if test -z "$run"; then
$echo "$modename: \`$arg' is not a valid libtool object" 1>&2
exit $EXIT_FAILURE
else
# Dry-run case.
# Extract subdirectory from the argument.
xdir=`$echo "X$arg" | $Xsed -e 's%/[^/]*$%%'`
if test "X$xdir" = "X$arg"; then
xdir=
else
xdir="$xdir/"
fi
pic_object=`$echo "X${xdir}${objdir}/${arg}" | $Xsed -e "$lo2o"`
non_pic_object=`$echo "X${xdir}${arg}" | $Xsed -e "$lo2o"`
libobjs="$libobjs $pic_object"
non_pic_objects="$non_pic_objects $non_pic_object"
fi
fi
done
else
$echo "$modename: link input file \`$save_arg' does not exist"
exit $EXIT_FAILURE
fi
arg=$save_arg
prev=
continue
;;
rpath | xrpath)
# We need an absolute path.
case $arg in
[\\/]* | [A-Za-z]:[\\/]*) ;;
*)
$echo "$modename: only absolute run-paths are allowed" 1>&2
exit $EXIT_FAILURE
;;
esac
if test "$prev" = rpath; then
case "$rpath " in
*" $arg "*) ;;
*) rpath="$rpath $arg" ;;
esac
else
case "$xrpath " in
*" $arg "*) ;;
*) xrpath="$xrpath $arg" ;;
esac
fi
prev=
continue
;;
xcompiler)
compiler_flags="$compiler_flags $qarg"
prev=
compile_command="$compile_command $qarg"
finalize_command="$finalize_command $qarg"
continue
;;
xlinker)
linker_flags="$linker_flags $qarg"
compiler_flags="$compiler_flags $wl$qarg"
prev=
compile_command="$compile_command $wl$qarg"
finalize_command="$finalize_command $wl$qarg"
continue
;;
xcclinker)
linker_flags="$linker_flags $qarg"
compiler_flags="$compiler_flags $qarg"
prev=
compile_command="$compile_command $qarg"
finalize_command="$finalize_command $qarg"
continue
;;
shrext)
shrext_cmds="$arg"
prev=
continue
;;
*)
eval "$prev=\"\$arg\""
prev=
continue
;;
esac
fi # test -n "$prev"
prevarg="$arg"
case $arg in
-all-static)
if test -n "$link_static_flag"; then
compile_command="$compile_command $link_static_flag"
finalize_command="$finalize_command $link_static_flag"
fi
continue
;;
-allow-undefined)
# FIXME: remove this flag sometime in the future.
$echo "$modename: \`-allow-undefined' is deprecated because it is the default" 1>&2
continue
;;
-avoid-version)
avoid_version=yes
continue
;;
-dlopen)
prev=dlfiles
continue
;;
-dlpreopen)
prev=dlprefiles
continue
;;
-export-dynamic)
export_dynamic=yes
continue
;;
-export-symbols | -export-symbols-regex)
if test -n "$export_symbols" || test -n "$export_symbols_regex"; then
$echo "$modename: more than one -export-symbols argument is not allowed"
exit $EXIT_FAILURE
fi
if test "X$arg" = "X-export-symbols"; then
prev=expsyms
else
prev=expsyms_regex
fi
continue
;;
-inst-prefix-dir)
prev=inst_prefix
continue
;;
# The native IRIX linker understands -LANG:*, -LIST:* and -LNO:*
# so, if we see these flags be careful not to treat them like -L
-L[A-Z][A-Z]*:*)
case $with_gcc/$host in
no/*-*-irix* | /*-*-irix*)
compile_command="$compile_command $arg"
finalize_command="$finalize_command $arg"
;;
esac
continue
;;
-L*)
dir=`$echo "X$arg" | $Xsed -e 's/^-L//'`
# We need an absolute path.
case $dir in
[\\/]* | [A-Za-z]:[\\/]*) ;;
*)
absdir=`cd "$dir" && pwd`
if test -z "$absdir"; then
$echo "$modename: cannot determine absolute directory name of \`$dir'" 1>&2
exit $EXIT_FAILURE
fi
dir="$absdir"
;;
esac
case "$deplibs " in
*" -L$dir "*) ;;
*)
deplibs="$deplibs -L$dir"
lib_search_path="$lib_search_path $dir"
;;
esac
case $host in
*-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2*)
case :$dllsearchpath: in
*":$dir:"*) ;;
*) dllsearchpath="$dllsearchpath:$dir";;
esac
;;
esac
continue
;;
-l*)
if test "X$arg" = "X-lc" || test "X$arg" = "X-lm"; then
case $host in
*-*-cygwin* | *-*-pw32* | *-*-beos*)
# These systems don't actually have a C or math library (as such)
continue
;;
*-*-mingw* | *-*-os2*)
# These systems don't actually have a C library (as such)
test "X$arg" = "X-lc" && continue
;;
*-*-openbsd* | *-*-freebsd*)
# Do not include libc due to us having libc/libc_r.
test "X$arg" = "X-lc" && continue
;;
*-*-rhapsody* | *-*-darwin1.[012])
# Rhapsody C and math libraries are in the System framework
deplibs="$deplibs -framework System"
continue
esac
elif test "X$arg" = "X-lc_r"; then
case $host in
*-*-openbsd* | *-*-freebsd*)
# Do not include libc_r directly, use -pthread flag.
continue
;;
esac
fi
deplibs="$deplibs $arg"
continue
;;
-mt|-mthreads|-kthread|-Kthread|-pthread|-pthreads|--thread-safe)
deplibs="$deplibs $arg"
continue
;;
-module)
module=yes
continue
;;
# gcc -m* arguments should be passed to the linker via $compiler_flags
# in order to pass architecture information to the linker
# (e.g. 32 vs 64-bit). This may also be accomplished via -Wl,-mfoo
# but this is not reliable with gcc because gcc may use -mfoo to
# select a different linker, different libraries, etc, while
# -Wl,-mfoo simply passes -mfoo to the linker.
-m*)
# Unknown arguments in both finalize_command and compile_command need
# to be aesthetically quoted because they are evaled later.
arg=`$echo "X$arg" | $Xsed -e "$sed_quote_subst"`
case $arg in
*[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"")
arg="\"$arg\""
;;
esac
compile_command="$compile_command $arg"
finalize_command="$finalize_command $arg"
if test "$with_gcc" = "yes" ; then
compiler_flags="$compiler_flags $arg"
fi
continue
;;
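# Hedged example of the -m* handling above (file names hypothetical):
#
#   libtool --mode=link gcc -m64 -o libfoo.la foo.lo -rpath /usr/local/lib
#
# appends -m64 to both compile_command and finalize_command, and for
# gcc also to compiler_flags, so the linker sees the same architecture
# selection as the compiler.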
-shrext)
prev=shrext
continue
;;
-no-fast-install)
fast_install=no
continue
;;
-no-install)
case $host in
*-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2*)
# The PATH hackery in wrapper scripts is required on Windows
# in order for the loader to find any dlls it needs.
$echo "$modename: warning: \`-no-install' is ignored for $host" 1>&2
$echo "$modename: warning: assuming \`-no-fast-install' instead" 1>&2
fast_install=no
;;
*) no_install=yes ;;
esac
continue
;;
-no-undefined)
allow_undefined=no
continue
;;
-objectlist)
prev=objectlist
continue
;;
-o) prev=output ;;
-precious-files-regex)
prev=precious_regex
continue
;;
-release)
prev=release
continue
;;
-rpath)
prev=rpath
continue
;;
-R)
prev=xrpath
continue
;;
-R*)
dir=`$echo "X$arg" | $Xsed -e 's/^-R//'`
# We need an absolute path.
case $dir in
[\\/]* | [A-Za-z]:[\\/]*) ;;
*)
$echo "$modename: only absolute run-paths are allowed" 1>&2
exit $EXIT_FAILURE
;;
esac
case "$xrpath " in
*" $dir "*) ;;
*) xrpath="$xrpath $dir" ;;
esac
continue
;;
-static)
# The effects of -static are defined in a previous loop.
# We used to do the same as -all-static on platforms that
# didn't have a PIC flag, but the assumption that the effects
# would be equivalent was wrong. It would break on at least
# Digital Unix and AIX.
continue
;;
-thread-safe)
thread_safe=yes
continue
;;
-version-info)
prev=vinfo
continue
;;
-version-number)
prev=vinfo
vinfo_number=yes
continue
;;
-Wc,*)
args=`$echo "X$arg" | $Xsed -e "$sed_quote_subst" -e 's/^-Wc,//'`
arg=
save_ifs="$IFS"; IFS=','
for flag in $args; do
IFS="$save_ifs"
case $flag in
*[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"")
flag="\"$flag\""
;;
esac
arg="$arg $wl$flag"
compiler_flags="$compiler_flags $flag"
done
IFS="$save_ifs"
arg=`$echo "X$arg" | $Xsed -e "s/^ //"`
;;
-Wl,*)
args=`$echo "X$arg" | $Xsed -e "$sed_quote_subst" -e 's/^-Wl,//'`
arg=
save_ifs="$IFS"; IFS=','
for flag in $args; do
IFS="$save_ifs"
case $flag in
*[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"")
flag="\"$flag\""
;;
esac
arg="$arg $wl$flag"
compiler_flags="$compiler_flags $wl$flag"
linker_flags="$linker_flags $flag"
done
IFS="$save_ifs"
arg=`$echo "X$arg" | $Xsed -e "s/^ //"`
;;
-Xcompiler)
prev=xcompiler
continue
;;
-Xlinker)
prev=xlinker
continue
;;
-XCClinker)
prev=xcclinker
continue
;;
# Some other compiler flag.
-* | +*)
# Unknown arguments in both finalize_command and compile_command need
# to be aesthetically quoted because they are evaled later.
arg=`$echo "X$arg" | $Xsed -e "$sed_quote_subst"`
case $arg in
*[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"")
arg="\"$arg\""
;;
esac
;;
*.$objext)
# A standard object.
objs="$objs $arg"
;;
*.lo)
# A libtool-controlled object.
# Check to see that this really is a libtool object.
if (${SED} -e '2q' $arg | grep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then
pic_object=
non_pic_object=
# Read the .lo file
# If there is no directory component, then add one.
case $arg in
*/* | *\\*) . $arg ;;
*) . ./$arg ;;
esac
if test -z "$pic_object" || \
test -z "$non_pic_object" ||
test "$pic_object" = none && \
test "$non_pic_object" = none; then
$echo "$modename: cannot find name of object for \`$arg'" 1>&2
exit $EXIT_FAILURE
fi
# Extract subdirectory from the argument.
xdir=`$echo "X$arg" | $Xsed -e 's%/[^/]*$%%'`
if test "X$xdir" = "X$arg"; then
xdir=
else
xdir="$xdir/"
fi
if test "$pic_object" != none; then
# Prepend the subdirectory the object is found in.
pic_object="$xdir$pic_object"
if test "$prev" = dlfiles; then
if test "$build_libtool_libs" = yes && test "$dlopen_support" = yes; then
dlfiles="$dlfiles $pic_object"
prev=
continue
else
# If libtool objects are unsupported, then we need to preload.
prev=dlprefiles
fi
fi
# CHECK ME: I think I busted this. -Ossama
if test "$prev" = dlprefiles; then
# Preload the old-style object.
dlprefiles="$dlprefiles $pic_object"
prev=
fi
# A PIC object.
libobjs="$libobjs $pic_object"
arg="$pic_object"
fi
# Non-PIC object.
if test "$non_pic_object" != none; then
# Prepend the subdirectory the object is found in.
non_pic_object="$xdir$non_pic_object"
# A standard non-PIC object
non_pic_objects="$non_pic_objects $non_pic_object"
if test -z "$pic_object" || test "$pic_object" = none ; then
arg="$non_pic_object"
fi
fi
else
# Only an error if not doing a dry-run.
if test -z "$run"; then
$echo "$modename: \`$arg' is not a valid libtool object" 1>&2
exit $EXIT_FAILURE
else
# Dry-run case.
# Extract subdirectory from the argument.
xdir=`$echo "X$arg" | $Xsed -e 's%/[^/]*$%%'`
if test "X$xdir" = "X$arg"; then
xdir=
else
xdir="$xdir/"
fi
pic_object=`$echo "X${xdir}${objdir}/${arg}" | $Xsed -e "$lo2o"`
non_pic_object=`$echo "X${xdir}${arg}" | $Xsed -e "$lo2o"`
libobjs="$libobjs $pic_object"
non_pic_objects="$non_pic_objects $non_pic_object"
fi
fi
;;
*.$libext)
# An archive.
deplibs="$deplibs $arg"
old_deplibs="$old_deplibs $arg"
continue
;;
*.la)
# A libtool-controlled library.
if test "$prev" = dlfiles; then
# This library was specified with -dlopen.
dlfiles="$dlfiles $arg"
prev=
elif test "$prev" = dlprefiles; then
# The library was specified with -dlpreopen.
dlprefiles="$dlprefiles $arg"
prev=
else
deplibs="$deplibs $arg"
fi
continue
;;
# Some other compiler argument.
*)
# Unknown arguments in both finalize_command and compile_command need
# to be aesthetically quoted because they are evaled later.
arg=`$echo "X$arg" | $Xsed -e "$sed_quote_subst"`
case $arg in
*[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"")
arg="\"$arg\""
;;
esac
;;
esac # arg
# Now actually substitute the argument into the commands.
if test -n "$arg"; then
compile_command="$compile_command $arg"
finalize_command="$finalize_command $arg"
fi
done # argument parsing loop
if test -n "$prev"; then
$echo "$modename: the \`$prevarg' option requires an argument" 1>&2
$echo "$help" 1>&2
exit $EXIT_FAILURE
fi
if test "$export_dynamic" = yes && test -n "$export_dynamic_flag_spec"; then
eval arg=\"$export_dynamic_flag_spec\"
compile_command="$compile_command $arg"
finalize_command="$finalize_command $arg"
fi
oldlibs=
# calculate the name of the file, without its directory
outputname=`$echo "X$output" | $Xsed -e 's%^.*/%%'`
libobjs_save="$libobjs"
if test -n "$shlibpath_var"; then
# get the directories listed in $shlibpath_var
eval shlib_search_path=\`\$echo \"X\${$shlibpath_var}\" \| \$Xsed -e \'s/:/ /g\'\`
else
shlib_search_path=
fi
eval sys_lib_search_path=\"$sys_lib_search_path_spec\"
eval sys_lib_dlsearch_path=\"$sys_lib_dlsearch_path_spec\"
output_objdir=`$echo "X$output" | $Xsed -e 's%/[^/]*$%%'`
if test "X$output_objdir" = "X$output"; then
output_objdir="$objdir"
else
output_objdir="$output_objdir/$objdir"
fi
# Create the object directory.
if test ! -d "$output_objdir"; then
$show "$mkdir $output_objdir"
$run $mkdir $output_objdir
status=$?
if test "$status" -ne 0 && test ! -d "$output_objdir"; then
exit $status
fi
fi
# Determine the type of output
case $output in
"")
$echo "$modename: you must specify an output file" 1>&2
$echo "$help" 1>&2
exit $EXIT_FAILURE
;;
*.$libext) linkmode=oldlib ;;
*.lo | *.$objext) linkmode=obj ;;
*.la) linkmode=lib ;;
*) linkmode=prog ;; # Anything else should be a program.
esac
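# Illustrative mapping for the case above: an $output of `libfoo.la'
# selects linkmode=lib, `foo.lo' or `foo.o' selects linkmode=obj,
# `libfoo.a' selects linkmode=oldlib, and anything else, e.g. `foo',
# is treated as a program (linkmode=prog).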
case $host in
*cygwin* | *mingw* | *pw32*)
# don't eliminate duplications in $postdeps and $predeps
duplicate_compiler_generated_deps=yes
;;
*)
duplicate_compiler_generated_deps=$duplicate_deps
;;
esac
specialdeplibs=
libs=
# Find all interdependent deplibs by searching for libraries
# that are linked more than once (e.g. -la -lb -la)
for deplib in $deplibs; do
if test "X$duplicate_deps" = "Xyes" ; then
case "$libs " in
*" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;;
esac
fi
libs="$libs $deplib"
done
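# Sketch of the scan above, with assumed input: given duplicate_deps=yes
# and deplibs="-la -lb -la", the second -la is already present in $libs,
# so -la lands in $specialdeplibs and its duplicates survive the later
# duplicate-removal pass.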
if test "$linkmode" = lib; then
libs="$predeps $libs $compiler_lib_search_path $postdeps"
# Compute libraries that are listed more than once in $predeps
# $postdeps and mark them as special (i.e., whose duplicates are
# not to be eliminated).
pre_post_deps=
if test "X$duplicate_compiler_generated_deps" = "Xyes" ; then
for pre_post_dep in $predeps $postdeps; do
case "$pre_post_deps " in
*" $pre_post_dep "*) specialdeplibs="$specialdeplibs $pre_post_deps" ;;
esac
pre_post_deps="$pre_post_deps $pre_post_dep"
done
fi
pre_post_deps=
fi
deplibs=
newdependency_libs=
newlib_search_path=
need_relink=no # whether we're linking any uninstalled libtool libraries
notinst_deplibs= # not-installed libtool libraries
notinst_path= # paths that contain not-installed libtool libraries
case $linkmode in
lib)
passes="conv link"
for file in $dlfiles $dlprefiles; do
case $file in
*.la) ;;
*)
$echo "$modename: libraries can \`-dlopen' only libtool libraries: $file" 1>&2
exit $EXIT_FAILURE
;;
esac
done
;;
prog)
compile_deplibs=
finalize_deplibs=
alldeplibs=no
newdlfiles=
newdlprefiles=
passes="conv scan dlopen dlpreopen link"
;;
*) passes="conv"
;;
esac
for pass in $passes; do
if test "$linkmode,$pass" = "lib,link" ||
test "$linkmode,$pass" = "prog,scan"; then
libs="$deplibs"
deplibs=
fi
if test "$linkmode" = prog; then
case $pass in
dlopen) libs="$dlfiles" ;;
dlpreopen) libs="$dlprefiles" ;;
link) libs="$deplibs %DEPLIBS% $dependency_libs" ;;
esac
fi
if test "$pass" = dlopen; then
# Collect dlpreopened libraries
save_deplibs="$deplibs"
deplibs=
fi
for deplib in $libs; do
lib=
found=no
case $deplib in
-mt|-mthreads|-kthread|-Kthread|-pthread|-pthreads|--thread-safe)
if test "$linkmode,$pass" = "prog,link"; then
compile_deplibs="$deplib $compile_deplibs"
finalize_deplibs="$deplib $finalize_deplibs"
else
deplibs="$deplib $deplibs"
fi
continue
;;
-l*)
if test "$linkmode" != lib && test "$linkmode" != prog; then
$echo "$modename: warning: \`-l' is ignored for archives/objects" 1>&2
continue
fi
if test "$pass" = conv; then
deplibs="$deplib $deplibs"
continue
fi
name=`$echo "X$deplib" | $Xsed -e 's/^-l//'`
for searchdir in $newlib_search_path $lib_search_path $sys_lib_search_path $shlib_search_path; do
for search_ext in .la $std_shrext .so .a; do
# Search the libtool library
lib="$searchdir/lib${name}${search_ext}"
if test -f "$lib"; then
if test "$search_ext" = ".la"; then
found=yes
else
found=no
fi
break 2
fi
done
done
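# Illustrative search for -lfoo (name hypothetical): each directory in
# $newlib_search_path, $lib_search_path, $sys_lib_search_path and
# $shlib_search_path is probed for libfoo.la, then the shared suffixes
# ($std_shrext, .so) and finally libfoo.a; only a .la hit sets found=yes,
# i.e. only that case is treated as a libtool library below.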
if test "$found" != yes; then
# deplib doesn't seem to be a libtool library
if test "$linkmode,$pass" = "prog,link"; then
compile_deplibs="$deplib $compile_deplibs"
finalize_deplibs="$deplib $finalize_deplibs"
else
deplibs="$deplib $deplibs"
test "$linkmode" = lib && newdependency_libs="$deplib $newdependency_libs"
fi
continue
else # deplib is a libtool library
# If $allow_libtool_libs_with_static_runtimes && $deplib is a stdlib,
# We need to do some special things here, and not later.
if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then
case " $predeps $postdeps " in
*" $deplib "*)
if (${SED} -e '2q' $lib |
grep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then
library_names=
old_library=
case $lib in
*/* | *\\*) . $lib ;;
*) . ./$lib ;;
esac
for l in $old_library $library_names; do
ll="$l"
done
if test "X$ll" = "X$old_library" ; then # only static version available
found=no
ladir=`$echo "X$lib" | $Xsed -e 's%/[^/]*$%%'`
test "X$ladir" = "X$lib" && ladir="."
lib=$ladir/$old_library
if test "$linkmode,$pass" = "prog,link"; then
compile_deplibs="$deplib $compile_deplibs"
finalize_deplibs="$deplib $finalize_deplibs"
else
deplibs="$deplib $deplibs"
test "$linkmode" = lib && newdependency_libs="$deplib $newdependency_libs"
fi
continue
fi
fi
;;
*) ;;
esac
fi
fi
;; # -l
-L*)
case $linkmode in
lib)
deplibs="$deplib $deplibs"
test "$pass" = conv && continue
newdependency_libs="$deplib $newdependency_libs"
newlib_search_path="$newlib_search_path "`$echo "X$deplib" | $Xsed -e 's/^-L//'`
;;
prog)
if test "$pass" = conv; then
deplibs="$deplib $deplibs"
continue
fi
if test "$pass" = scan; then
deplibs="$deplib $deplibs"
else
compile_deplibs="$deplib $compile_deplibs"
finalize_deplibs="$deplib $finalize_deplibs"
fi
newlib_search_path="$newlib_search_path "`$echo "X$deplib" | $Xsed -e 's/^-L//'`
;;
*)
$echo "$modename: warning: \`-L' is ignored for archives/objects" 1>&2
;;
esac # linkmode
continue
;; # -L
-R*)
if test "$pass" = link; then
dir=`$echo "X$deplib" | $Xsed -e 's/^-R//'`
# Make sure the xrpath contains only unique directories.
case "$xrpath " in
*" $dir "*) ;;
*) xrpath="$xrpath $dir" ;;
esac
fi
deplibs="$deplib $deplibs"
continue
;;
*.la) lib="$deplib" ;;
*.$libext)
if test "$pass" = conv; then
deplibs="$deplib $deplibs"
continue
fi
case $linkmode in
lib)
if test "$deplibs_check_method" != pass_all; then
$echo
$echo "*** Warning: Trying to link with static lib archive $deplib."
$echo "*** I have the capability to make that library automatically link in when"
$echo "*** you link to this library. But I can only do this if you have a"
$echo "*** shared version of the library, which you do not appear to have"
$echo "*** because the file extension .$libext of this argument makes me believe"
$echo "*** that it is just a static archive that I should not use here."
else
$echo
$echo "*** Warning: Linking the shared library $output against the"
$echo "*** static library $deplib is not portable!"
deplibs="$deplib $deplibs"
fi
continue
;;
prog)
if test "$pass" != link; then
deplibs="$deplib $deplibs"
else
compile_deplibs="$deplib $compile_deplibs"
finalize_deplibs="$deplib $finalize_deplibs"
fi
continue
;;
esac # linkmode
;; # *.$libext
*.lo | *.$objext)
if test "$pass" = conv; then
deplibs="$deplib $deplibs"
elif test "$linkmode" = prog; then
if test "$pass" = dlpreopen || test "$dlopen_support" != yes || test "$build_libtool_libs" = no; then
# If there is no dlopen support or we're linking statically,
# we need to preload.
newdlprefiles="$newdlprefiles $deplib"
compile_deplibs="$deplib $compile_deplibs"
finalize_deplibs="$deplib $finalize_deplibs"
else
newdlfiles="$newdlfiles $deplib"
fi
fi
continue
;;
%DEPLIBS%)
alldeplibs=yes
continue
;;
esac # case $deplib
if test "$found" = yes || test -f "$lib"; then :
else
$echo "$modename: cannot find the library \`$lib'" 1>&2
exit $EXIT_FAILURE
fi
# Check to see that this really is a libtool archive.
if (${SED} -e '2q' $lib | grep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then :
else
$echo "$modename: \`$lib' is not a valid libtool archive" 1>&2
exit $EXIT_FAILURE
fi
ladir=`$echo "X$lib" | $Xsed -e 's%/[^/]*$%%'`
test "X$ladir" = "X$lib" && ladir="."
dlname=
dlopen=
dlpreopen=
libdir=
library_names=
old_library=
# If the library was installed with an old release of libtool,
# it will not redefine the variables `installed' or `shouldnotlink'.
installed=yes
shouldnotlink=no
# Read the .la file
case $lib in
*/* | *\\*) . $lib ;;
*) . ./$lib ;;
esac
if test "$linkmode,$pass" = "lib,link" ||
test "$linkmode,$pass" = "prog,scan" ||
{ test "$linkmode" != prog && test "$linkmode" != lib; }; then
test -n "$dlopen" && dlfiles="$dlfiles $dlopen"
test -n "$dlpreopen" && dlprefiles="$dlprefiles $dlpreopen"
fi
if test "$pass" = conv; then
# Only check for convenience libraries
deplibs="$lib $deplibs"
if test -z "$libdir"; then
if test -z "$old_library"; then
$echo "$modename: cannot find name of link library for \`$lib'" 1>&2
exit $EXIT_FAILURE
fi
# It is a libtool convenience library, so add in its objects.
convenience="$convenience $ladir/$objdir/$old_library"
old_convenience="$old_convenience $ladir/$objdir/$old_library"
tmp_libs=
for deplib in $dependency_libs; do
deplibs="$deplib $deplibs"
if test "X$duplicate_deps" = "Xyes" ; then
case "$tmp_libs " in
*" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;;
esac
fi
tmp_libs="$tmp_libs $deplib"
done
elif test "$linkmode" != prog && test "$linkmode" != lib; then
$echo "$modename: \`$lib' is not a convenience library" 1>&2
exit $EXIT_FAILURE
fi
continue
fi # $pass = conv
# Get the name of the library we link against.
linklib=
for l in $old_library $library_names; do
linklib="$l"
done
if test -z "$linklib"; then
$echo "$modename: cannot find name of link library for \`$lib'" 1>&2
exit $EXIT_FAILURE
fi
# This library was specified with -dlopen.
if test "$pass" = dlopen; then
if test -z "$libdir"; then
$echo "$modename: cannot -dlopen a convenience library: \`$lib'" 1>&2
exit $EXIT_FAILURE
fi
if test -z "$dlname" ||
test "$dlopen_support" != yes ||
test "$build_libtool_libs" = no; then
# If there is no dlname, no dlopen support or we're linking
# statically, we need to preload. We also need to preload any
# dependent libraries so libltdl's deplib preloader doesn't
# bomb out in the load deplibs phase.
dlprefiles="$dlprefiles $lib $dependency_libs"
else
newdlfiles="$newdlfiles $lib"
fi
continue
fi # $pass = dlopen
# We need an absolute path.
case $ladir in
[\\/]* | [A-Za-z]:[\\/]*) abs_ladir="$ladir" ;;
*)
abs_ladir=`cd "$ladir" && pwd`
if test -z "$abs_ladir"; then
$echo "$modename: warning: cannot determine absolute directory name of \`$ladir'" 1>&2
$echo "$modename: passing it literally to the linker, although it might fail" 1>&2
abs_ladir="$ladir"
fi
;;
esac
laname=`$echo "X$lib" | $Xsed -e 's%^.*/%%'`
# Find the relevant object directory and library name.
if test "X$installed" = Xyes; then
if test ! -f "$libdir/$linklib" && test -f "$abs_ladir/$linklib"; then
$echo "$modename: warning: library \`$lib' was moved." 1>&2
dir="$ladir"
absdir="$abs_ladir"
libdir="$abs_ladir"
else
dir="$libdir"
absdir="$libdir"
fi
else
dir="$ladir/$objdir"
absdir="$abs_ladir/$objdir"
# Remove this search path later
notinst_path="$notinst_path $abs_ladir"
fi # $installed = yes
name=`$echo "X$laname" | $Xsed -e 's/\.la$//' -e 's/^lib//'`
# This library was specified with -dlpreopen.
if test "$pass" = dlpreopen; then
if test -z "$libdir"; then
$echo "$modename: cannot -dlpreopen a convenience library: \`$lib'" 1>&2
exit $EXIT_FAILURE
fi
# Prefer using a static library (so that no silly _DYNAMIC symbols
# are required to link).
if test -n "$old_library"; then
newdlprefiles="$newdlprefiles $dir/$old_library"
# Otherwise, use the dlname, so that lt_dlopen finds it.
elif test -n "$dlname"; then
newdlprefiles="$newdlprefiles $dir/$dlname"
else
newdlprefiles="$newdlprefiles $dir/$linklib"
fi
fi # $pass = dlpreopen
if test -z "$libdir"; then
# Link the convenience library
if test "$linkmode" = lib; then
deplibs="$dir/$old_library $deplibs"
elif test "$linkmode,$pass" = "prog,link"; then
compile_deplibs="$dir/$old_library $compile_deplibs"
finalize_deplibs="$dir/$old_library $finalize_deplibs"
else
deplibs="$lib $deplibs" # used for prog,scan pass
fi
continue
fi
if test "$linkmode" = prog && test "$pass" != link; then
newlib_search_path="$newlib_search_path $ladir"
deplibs="$lib $deplibs"
linkalldeplibs=no
if test "$link_all_deplibs" != no || test -z "$library_names" ||
test "$build_libtool_libs" = no; then
linkalldeplibs=yes
fi
tmp_libs=
for deplib in $dependency_libs; do
case $deplib in
-L*) newlib_search_path="$newlib_search_path "`$echo "X$deplib" | $Xsed -e 's/^-L//'`;; ### testsuite: skip nested quoting test
esac
# Need to link against all dependency_libs?
if test "$linkalldeplibs" = yes; then
deplibs="$deplib $deplibs"
else
# Need to hardcode shared library paths
# or/and link against static libraries
newdependency_libs="$deplib $newdependency_libs"
fi
if test "X$duplicate_deps" = "Xyes" ; then
case "$tmp_libs " in
*" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;;
esac
fi
tmp_libs="$tmp_libs $deplib"
done # for deplib
continue
fi # $linkmode = prog...
if test "$linkmode,$pass" = "prog,link"; then
if test -n "$library_names" &&
{ test "$prefer_static_libs" = no || test -z "$old_library"; }; then
# We need to hardcode the library path
if test -n "$shlibpath_var"; then
# Make sure the rpath contains only unique directories.
case "$temp_rpath " in
*" $dir "*) ;;
*" $absdir "*) ;;
*) temp_rpath="$temp_rpath $dir" ;;
esac
fi
# Hardcode the library path.
# Skip directories that are in the system default run-time
# search path.
case " $sys_lib_dlsearch_path " in
*" $absdir "*) ;;
*)
case "$compile_rpath " in
*" $absdir "*) ;;
*) compile_rpath="$compile_rpath $absdir"
esac
;;
esac
case " $sys_lib_dlsearch_path " in
*" $libdir "*) ;;
*)
case "$finalize_rpath " in
*" $libdir "*) ;;
*) finalize_rpath="$finalize_rpath $libdir"
esac
;;
esac
fi # $linkmode,$pass = prog,link...
if test "$alldeplibs" = yes &&
{ test "$deplibs_check_method" = pass_all ||
{ test "$build_libtool_libs" = yes &&
test -n "$library_names"; }; }; then
# We only need to search for static libraries
continue
fi
fi
link_static=no # Whether the deplib will be linked statically
if test -n "$library_names" &&
{ test "$prefer_static_libs" = no || test -z "$old_library"; }; then
if test "$installed" = no; then
notinst_deplibs="$notinst_deplibs $lib"
need_relink=yes
fi
# This is a shared library
# Warn about portability, can't link against -module's on
# some systems (darwin)
if test "$shouldnotlink" = yes && test "$pass" = link ; then
$echo
if test "$linkmode" = prog; then
$echo "*** Warning: Linking the executable $output against the loadable module"
else
$echo "*** Warning: Linking the shared library $output against the loadable module"
fi
$echo "*** $linklib is not portable!"
fi
if test "$linkmode" = lib &&
test "$hardcode_into_libs" = yes; then
# Hardcode the library path.
# Skip directories that are in the system default run-time
# search path.
case " $sys_lib_dlsearch_path " in
*" $absdir "*) ;;
*)
case "$compile_rpath " in
*" $absdir "*) ;;
*) compile_rpath="$compile_rpath $absdir"
esac
;;
esac
case " $sys_lib_dlsearch_path " in
*" $libdir "*) ;;
*)
case "$finalize_rpath " in
*" $libdir "*) ;;
*) finalize_rpath="$finalize_rpath $libdir"
esac
;;
esac
fi
if test -n "$old_archive_from_expsyms_cmds"; then
# figure out the soname
set dummy $library_names
realname="$2"
shift; shift
libname=`eval \\$echo \"$libname_spec\"`
# Use the dlname if we got it; it is a perfectly good soname.
if test -n "$dlname"; then
soname="$dlname"
elif test -n "$soname_spec"; then
# Windows hosts need the major version derived from current - age.
case $host in
*cygwin* | mingw*)
major=`expr $current - $age`
versuffix="-$major"
;;
esac
eval soname=\"$soname_spec\"
else
soname="$realname"
fi
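# Worked example with assumed values: if dlname="libfoo-1.dll", the
# soname is taken from it directly.  With no dlname on a mingw host
# and current=3, age=2, versuffix becomes "-1" (major = current - age)
# before soname_spec is evaluated.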
# Make a new name for the extract_expsyms_cmds to use
soroot="$soname"
soname=`$echo $soroot | ${SED} -e 's/^.*\///'`
newlib="libimp-`$echo $soname | ${SED} 's/^lib//;s/\.dll$//'`.a"
# If the library has no export list, then create one now
if test -f "$output_objdir/$soname-def"; then :
else
$show "extracting exported symbol list from \`$soname'"
save_ifs="$IFS"; IFS='~'
cmds=$extract_expsyms_cmds
for cmd in $cmds; do
IFS="$save_ifs"
eval cmd=\"$cmd\"
$show "$cmd"
$run eval "$cmd" || exit $?
done
IFS="$save_ifs"
fi
# Create $newlib
if test -f "$output_objdir/$newlib"; then :; else
$show "generating import library for \`$soname'"
save_ifs="$IFS"; IFS='~'
cmds=$old_archive_from_expsyms_cmds
for cmd in $cmds; do
IFS="$save_ifs"
eval cmd=\"$cmd\"
$show "$cmd"
$run eval "$cmd" || exit $?
done
IFS="$save_ifs"
fi
# make sure the library variables are pointing to the new library
dir=$output_objdir
linklib=$newlib
fi # test -n "$old_archive_from_expsyms_cmds"
if test "$linkmode" = prog || test "$mode" != relink; then
add_shlibpath=
add_dir=
add=
lib_linked=yes
case $hardcode_action in
immediate | unsupported)
if test "$hardcode_direct" = no; then
add="$dir/$linklib"
case $host in
*-*-sco3.2v5* ) add_dir="-L$dir" ;;
*-*-darwin* )
# If the lib is a module then we cannot link against it;
# the caller is ignoring the warnings issued earlier.
if /usr/bin/file -L $add 2> /dev/null | $EGREP "bundle" >/dev/null ; then
$echo "** Warning, lib $linklib is a module, not a shared library"
if test -z "$old_library" ; then
$echo
$echo "** And there doesn't seem to be a static archive available"
$echo "** The link will probably fail, sorry"
else
add="$dir/$old_library"
fi
fi
esac
elif test "$hardcode_minus_L" = no; then
case $host in
*-*-sunos*) add_shlibpath="$dir" ;;
esac
add_dir="-L$dir"
add="-l$name"
elif test "$hardcode_shlibpath_var" = no; then
add_shlibpath="$dir"
add="-l$name"
else
lib_linked=no
fi
;;
relink)
if test "$hardcode_direct" = yes; then
add="$dir/$linklib"
elif test "$hardcode_minus_L" = yes; then
add_dir="-L$dir"
# Try looking first in the location we're being installed to.
if test -n "$inst_prefix_dir"; then
case "$libdir" in
[\\/]*)
add_dir="$add_dir -L$inst_prefix_dir$libdir"
;;
esac
fi
add="-l$name"
elif test "$hardcode_shlibpath_var" = yes; then
add_shlibpath="$dir"
add="-l$name"
else
lib_linked=no
fi
;;
*) lib_linked=no ;;
esac
if test "$lib_linked" != yes; then
$echo "$modename: configuration error: unsupported hardcode properties"
exit $EXIT_FAILURE
fi
if test -n "$add_shlibpath"; then
case :$compile_shlibpath: in
*":$add_shlibpath:"*) ;;
*) compile_shlibpath="$compile_shlibpath$add_shlibpath:" ;;
esac
fi
if test "$linkmode" = prog; then
test -n "$add_dir" && compile_deplibs="$add_dir $compile_deplibs"
test -n "$add" && compile_deplibs="$add $compile_deplibs"
else
test -n "$add_dir" && deplibs="$add_dir $deplibs"
test -n "$add" && deplibs="$add $deplibs"
if test "$hardcode_direct" != yes && \
test "$hardcode_minus_L" != yes && \
test "$hardcode_shlibpath_var" = yes; then
case :$finalize_shlibpath: in
*":$libdir:"*) ;;
*) finalize_shlibpath="$finalize_shlibpath$libdir:" ;;
esac
fi
fi
fi
if test "$linkmode" = prog || test "$mode" = relink; then
add_shlibpath=
add_dir=
add=
# Finalize command for both is simple: just hardcode it.
if test "$hardcode_direct" = yes; then
add="$libdir/$linklib"
elif test "$hardcode_minus_L" = yes; then
add_dir="-L$libdir"
add="-l$name"
elif test "$hardcode_shlibpath_var" = yes; then
case :$finalize_shlibpath: in
*":$libdir:"*) ;;
*) finalize_shlibpath="$finalize_shlibpath$libdir:" ;;
esac
add="-l$name"
elif test "$hardcode_automatic" = yes; then
if test -n "$inst_prefix_dir" &&
test -f "$inst_prefix_dir$libdir/$linklib" ; then
add="$inst_prefix_dir$libdir/$linklib"
else
add="$libdir/$linklib"
fi
else
# We cannot seem to hardcode it; fake it with -L instead.
add_dir="-L$libdir"
# Try looking first in the location we're being installed to.
if test -n "$inst_prefix_dir"; then
case "$libdir" in
[\\/]*)
add_dir="$add_dir -L$inst_prefix_dir$libdir"
;;
esac
fi
add="-l$name"
fi
if test "$linkmode" = prog; then
test -n "$add_dir" && finalize_deplibs="$add_dir $finalize_deplibs"
test -n "$add" && finalize_deplibs="$add $finalize_deplibs"
else
test -n "$add_dir" && deplibs="$add_dir $deplibs"
test -n "$add" && deplibs="$add $deplibs"
fi
fi
elif test "$linkmode" = prog; then
# Here we assume that one of hardcode_direct or hardcode_minus_L
# is not unsupported. This is valid on all known static and
# shared platforms.
if test "$hardcode_direct" != unsupported; then
test -n "$old_library" && linklib="$old_library"
compile_deplibs="$dir/$linklib $compile_deplibs"
finalize_deplibs="$dir/$linklib $finalize_deplibs"
else
compile_deplibs="-l$name -L$dir $compile_deplibs"
finalize_deplibs="-l$name -L$dir $finalize_deplibs"
fi
elif test "$build_libtool_libs" = yes; then
# Not a shared library
if test "$deplibs_check_method" != pass_all; then
# We're trying to link a shared library against a static one
# but the system doesn't support it.
# Just print a warning and add the library to dependency_libs so
# that the program can be linked against the static library.
$echo
$echo "*** Warning: This system cannot link to static lib archive $lib."
$echo "*** I have the capability to make that library automatically link in when"
$echo "*** you link to this library. But I can only do this if you have a"
$echo "*** shared version of the library, which you do not appear to have."
if test "$module" = yes; then
$echo "*** But as you try to build a module library, libtool will still create "
$echo "*** a static module, which should work as long as the dlopening application"
$echo "*** is linked with the -dlopen flag to resolve symbols at runtime."
if test -z "$global_symbol_pipe"; then
$echo
$echo "*** However, this would only work if libtool was able to extract symbol"
$echo "*** lists from a program, using \`nm' or equivalent, but libtool could"
$echo "*** not find such a program. So, this module is probably useless."
$echo "*** \`nm' from GNU binutils and a full rebuild may help."
fi
if test "$build_old_libs" = no; then
build_libtool_libs=module
build_old_libs=yes
else
build_libtool_libs=no
fi
fi
else
convenience="$convenience $dir/$old_library"
old_convenience="$old_convenience $dir/$old_library"
deplibs="$dir/$old_library $deplibs"
link_static=yes
fi
fi # link shared/static library?
if test "$linkmode" = lib; then
if test -n "$dependency_libs" &&
{ test "$hardcode_into_libs" != yes ||
test "$build_old_libs" = yes ||
test "$link_static" = yes; }; then
# Extract -R from dependency_libs
temp_deplibs=
for libdir in $dependency_libs; do
case $libdir in
-R*) temp_xrpath=`$echo "X$libdir" | $Xsed -e 's/^-R//'`
case " $xrpath " in
*" $temp_xrpath "*) ;;
*) xrpath="$xrpath $temp_xrpath";;
esac;;
*) temp_deplibs="$temp_deplibs $libdir";;
esac
done
dependency_libs="$temp_deplibs"
fi
newlib_search_path="$newlib_search_path $absdir"
# Link against this library
test "$link_static" = no && newdependency_libs="$abs_ladir/$laname $newdependency_libs"
# ... and its dependency_libs
tmp_libs=
for deplib in $dependency_libs; do
newdependency_libs="$deplib $newdependency_libs"
if test "X$duplicate_deps" = "Xyes" ; then
case "$tmp_libs " in
*" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;;
esac
fi
tmp_libs="$tmp_libs $deplib"
done
if test "$link_all_deplibs" != no; then
# Add the search paths of all dependency libraries
for deplib in $dependency_libs; do
case $deplib in
-L*) path="$deplib" ;;
*.la)
dir=`$echo "X$deplib" | $Xsed -e 's%/[^/]*$%%'`
test "X$dir" = "X$deplib" && dir="."
# We need an absolute path.
case $dir in
[\\/]* | [A-Za-z]:[\\/]*) absdir="$dir" ;;
*)
absdir=`cd "$dir" && pwd`
if test -z "$absdir"; then
$echo "$modename: warning: cannot determine absolute directory name of \`$dir'" 1>&2
absdir="$dir"
fi
;;
esac
if grep "^installed=no" $deplib > /dev/null; then
path="$absdir/$objdir"
else
eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $deplib`
if test -z "$libdir"; then
$echo "$modename: \`$deplib' is not a valid libtool archive" 1>&2
exit $EXIT_FAILURE
fi
if test "$absdir" != "$libdir"; then
$echo "$modename: warning: \`$deplib' seems to be moved" 1>&2
fi
path="$absdir"
fi
depdepl=
case $host in
*-*-darwin*)
# we do not want to link against static libs,
# but need to link against shared
eval deplibrary_names=`${SED} -n -e 's/^library_names=\(.*\)$/\1/p' $deplib`
if test -n "$deplibrary_names" ; then
for tmp in $deplibrary_names ; do
depdepl=$tmp
done
if test -f "$path/$depdepl" ; then
depdepl="$path/$depdepl"
fi
# do not add paths which are already there
case " $newlib_search_path " in
*" $path "*) ;;
*) newlib_search_path="$newlib_search_path $path";;
esac
fi
path=""
;;
*)
path="-L$path"
;;
esac
;;
-l*)
case $host in
*-*-darwin*)
# Again, we only want to link against shared libraries
eval tmp_libs=`$echo "X$deplib" | $Xsed -e "s,^\-l,,"`
for tmp in $newlib_search_path ; do
if test -f "$tmp/lib$tmp_libs.dylib" ; then
eval depdepl="$tmp/lib$tmp_libs.dylib"
break
fi
done
path=""
;;
*) continue ;;
esac
;;
*) continue ;;
esac
case " $deplibs " in
*" $depdepl "*) ;;
*) deplibs="$depdepl $deplibs" ;;
esac
case " $deplibs " in
*" $path "*) ;;
*) deplibs="$deplibs $path" ;;
esac
done
fi # link_all_deplibs != no
fi # linkmode = lib
done # for deplib in $libs
dependency_libs="$newdependency_libs"
if test "$pass" = dlpreopen; then
# Link the dlpreopened libraries before other libraries
for deplib in $save_deplibs; do
deplibs="$deplib $deplibs"
done
fi
if test "$pass" != dlopen; then
if test "$pass" != conv; then
# Make sure lib_search_path contains only unique directories.
lib_search_path=
for dir in $newlib_search_path; do
case "$lib_search_path " in
*" $dir "*) ;;
*) lib_search_path="$lib_search_path $dir" ;;
esac
done
newlib_search_path=
fi
if test "$linkmode,$pass" != "prog,link"; then
vars="deplibs"
else
vars="compile_deplibs finalize_deplibs"
fi
for var in $vars dependency_libs; do
# Add libraries to $var in reverse order
eval tmp_libs=\"\$$var\"
new_libs=
for deplib in $tmp_libs; do
# FIXME: Pedantically, this is the right thing to do, so
# that some nasty dependency loop isn't accidentally
# broken:
#new_libs="$deplib $new_libs"
# Pragmatically, this seems to cause very few problems in
# practice:
case $deplib in
-L*) new_libs="$deplib $new_libs" ;;
-R*) ;;
*)
# And here is the reason: when a library appears more
# than once as an explicit dependence of a library, or
# is implicitly linked in more than once by the
# compiler, it is considered special, and multiple
# occurrences thereof are not removed. Compare this
# with having the same library being listed as a
# dependency of multiple other libraries: in this case,
# we know (pedantically, we assume) the library does not
# need to be listed more than once, so we keep only the
# last copy. This is not always right, but it is rare
# enough that we require users that really mean to play
# such unportable linking tricks to link the library
# using -Wl,-lname, so that libtool does not consider it
# for duplicate removal.
case " $specialdeplibs " in
*" $deplib "*) new_libs="$deplib $new_libs" ;;
*)
case " $new_libs " in
*" $deplib "*) ;;
*) new_libs="$deplib $new_libs" ;;
esac
;;
esac
;;
esac
done
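# Illustrative effect of the loop above, with assumed input: for
# tmp_libs="-la -lb -la", a non-special -la collapses to a single
# copy in $new_libs, while an -la listed in $specialdeplibs keeps
# both occurrences, as the long comment above explains.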
tmp_libs=
for deplib in $new_libs; do
case $deplib in
-L*)
case " $tmp_libs " in
*" $deplib "*) ;;
*) tmp_libs="$tmp_libs $deplib" ;;
esac
;;
*) tmp_libs="$tmp_libs $deplib" ;;
esac
done
eval $var=\"$tmp_libs\"
done # for var
fi
# Last step: remove runtime libs from dependency_libs
# (they stay in deplibs)
tmp_libs=
for i in $dependency_libs ; do
case " $predeps $postdeps $compiler_lib_search_path " in
*" $i "*)
i=""
;;
esac
if test -n "$i" ; then
tmp_libs="$tmp_libs $i"
fi
done
dependency_libs=$tmp_libs
done # for pass
if test "$linkmode" = prog; then
dlfiles="$newdlfiles"
dlprefiles="$newdlprefiles"
fi
case $linkmode in
oldlib)
if test -n "$deplibs"; then
$echo "$modename: warning: \`-l' and \`-L' are ignored for archives" 1>&2
fi
if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then
$echo "$modename: warning: \`-dlopen' is ignored for archives" 1>&2
fi
if test -n "$rpath"; then
$echo "$modename: warning: \`-rpath' is ignored for archives" 1>&2
fi
if test -n "$xrpath"; then
$echo "$modename: warning: \`-R' is ignored for archives" 1>&2
fi
if test -n "$vinfo"; then
$echo "$modename: warning: \`-version-info/-version-number' is ignored for archives" 1>&2
fi
if test -n "$release"; then
$echo "$modename: warning: \`-release' is ignored for archives" 1>&2
fi
if test -n "$export_symbols" || test -n "$export_symbols_regex"; then
$echo "$modename: warning: \`-export-symbols' is ignored for archives" 1>&2
fi
# Now set the variables for building old libraries.
build_libtool_libs=no
oldlibs="$output"
objs="$objs$old_deplibs"
;;
lib)
# Make sure we only generate libraries of the form `libNAME.la'.
case $outputname in
lib*)
name=`$echo "X$outputname" | $Xsed -e 's/\.la$//' -e 's/^lib//'`
eval shared_ext=\"$shrext_cmds\"
eval libname=\"$libname_spec\"
;;
*)
if test "$module" = no; then
$echo "$modename: libtool library \`$output' must begin with \`lib'" 1>&2
$echo "$help" 1>&2
exit $EXIT_FAILURE
fi
if test "$need_lib_prefix" != no; then
# Add the "lib" prefix for modules if required
name=`$echo "X$outputname" | $Xsed -e 's/\.la$//'`
eval shared_ext=\"$shrext_cmds\"
eval libname=\"$libname_spec\"
else
libname=`$echo "X$outputname" | $Xsed -e 's/\.la$//'`
fi
;;
esac
if test -n "$objs"; then
if test "$deplibs_check_method" != pass_all; then
$echo "$modename: cannot build libtool library \`$output' from non-libtool objects on this host:$objs" 1>&2
exit $EXIT_FAILURE
else
$echo
$echo "*** Warning: Linking the shared library $output against the non-libtool"
$echo "*** objects $objs is not portable!"
libobjs="$libobjs $objs"
fi
fi
if test "$dlself" != no; then
$echo "$modename: warning: \`-dlopen self' is ignored for libtool libraries" 1>&2
fi
set dummy $rpath
if test "$#" -gt 2; then
$echo "$modename: warning: ignoring multiple \`-rpath's for a libtool library" 1>&2
fi
install_libdir="$2"
oldlibs=
if test -z "$rpath"; then
if test "$build_libtool_libs" = yes; then
# Building a libtool convenience library.
# Some compilers have problems with a `.al' extension so
# convenience libraries should have the same extension that
# an archive normally would.
oldlibs="$output_objdir/$libname.$libext $oldlibs"
build_libtool_libs=convenience
build_old_libs=yes
fi
if test -n "$vinfo"; then
$echo "$modename: warning: \`-version-info/-version-number' is ignored for convenience libraries" 1>&2
fi
if test -n "$release"; then
$echo "$modename: warning: \`-release' is ignored for convenience libraries" 1>&2
fi
else
# Parse the version information argument.
save_ifs="$IFS"; IFS=':'
set dummy $vinfo 0 0 0
IFS="$save_ifs"
if test -n "$8"; then
$echo "$modename: too many parameters to \`-version-info'" 1>&2
$echo "$help" 1>&2
exit $EXIT_FAILURE
fi
# Convert absolute version numbers to libtool ages.  This retains
# compatibility with .la files and attempts to make the code below
# a bit more comprehensible.
case $vinfo_number in
yes)
number_major="$2"
number_minor="$3"
number_revision="$4"
#
# There are really only two kinds of version schemes: those that
# use the current revision as the major version, and those that
# subtract the age and use the age as a minor version.  IRIX is
# the exception, adding an extra 1 to the major version.
#
case $version_type in
darwin|linux|osf|windows)
current=`expr $number_major + $number_minor`
age="$number_minor"
revision="$number_revision"
;;
freebsd-aout|freebsd-elf|sunos)
current="$number_major"
revision="$number_minor"
age="0"
;;
irix|nonstopux)
current=`expr $number_major + $number_minor - 1`
age="$number_minor"
revision="$number_minor"
;;
esac
;;
no)
current="$2"
revision="$3"
age="$4"
;;
esac
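# Worked example of the conversion above, assuming `-version-number 3:2:1'
# with a linux version_type: current = 3 + 2 = 5, age = 2, revision = 1,
# i.e. the same triple that `-version-info 5:1:2' would have produced.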
# Check that each of the things are valid numbers.
case $current in
0 | [1-9] | [1-9][0-9] | [1-9][0-9][0-9]) ;;
*)
$echo "$modename: CURRENT \`$current' is not a nonnegative integer" 1>&2
$echo "$modename: \`$vinfo' is not valid version information" 1>&2
exit $EXIT_FAILURE
;;
esac
case $revision in
0 | [1-9] | [1-9][0-9] | [1-9][0-9][0-9]) ;;
*)
$echo "$modename: REVISION \`$revision' is not a nonnegative integer" 1>&2
$echo "$modename: \`$vinfo' is not valid version information" 1>&2
exit $EXIT_FAILURE
;;
esac
case $age in
0 | [1-9] | [1-9][0-9] | [1-9][0-9][0-9]) ;;
*)
$echo "$modename: AGE \`$age' is not a nonnegative integer" 1>&2
$echo "$modename: \`$vinfo' is not valid version information" 1>&2
exit $EXIT_FAILURE
;;
esac
if test "$age" -gt "$current"; then
$echo "$modename: AGE \`$age' is greater than the current interface number \`$current'" 1>&2
$echo "$modename: \`$vinfo' is not valid version information" 1>&2
exit $EXIT_FAILURE
fi
# Calculate the version variables.
major=
versuffix=
verstring=
case $version_type in
none) ;;
darwin)
# Like Linux, but with the current version available in
# verstring for coding it into the library header
major=.`expr $current - $age`
versuffix="$major.$age.$revision"
# Darwin ld doesn't like 0 for these options...
minor_current=`expr $current + 1`
verstring="-compatibility_version $minor_current -current_version $minor_current.$revision"
;;
freebsd-aout)
major=".$current"
versuffix=".$current.$revision";
;;
freebsd-elf)
major=".$current"
versuffix=".$current";
;;
irix | nonstopux)
major=`expr $current - $age + 1`
case $version_type in
nonstopux) verstring_prefix=nonstopux ;;
*) verstring_prefix=sgi ;;
esac
verstring="$verstring_prefix$major.$revision"
# Add in all the interfaces that we are compatible with.
loop=$revision
while test "$loop" -ne 0; do
iface=`expr $revision - $loop`
loop=`expr $loop - 1`
verstring="$verstring_prefix$major.$iface:$verstring"
done
# Before this point, $major must not contain `.'.
major=.$major
versuffix="$major.$revision"
;;
linux)
major=.`expr $current - $age`
versuffix="$major.$age.$revision"
;;
osf)
major=.`expr $current - $age`
versuffix=".$current.$age.$revision"
verstring="$current.$age.$revision"
# Add in all the interfaces that we are compatible with.
loop=$age
while test "$loop" -ne 0; do
iface=`expr $current - $loop`
loop=`expr $loop - 1`
verstring="$verstring:${iface}.0"
done
# Make executables depend on our current version.
verstring="$verstring:${current}.0"
;;
sunos)
major=".$current"
versuffix=".$current.$revision"
;;
windows)
# Use '-' rather than '.', since we only want one
# extension on DOS 8.3 filesystems.
major=`expr $current - $age`
versuffix="-$major"
;;
*)
$echo "$modename: unknown library version type \`$version_type'" 1>&2
$echo "Fatal configuration error. See the $PACKAGE docs for more information." 1>&2
exit $EXIT_FAILURE
;;
esac
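# Worked example with assumed values current=5, revision=1, age=2:
# a linux version_type yields major=.3 and versuffix=.3.2.1 (so e.g.
# libfoo.so.3.2.1), while a windows version_type yields major=3 and
# versuffix=-3, keeping a single extension for DOS 8.3 filesystems.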
# Clear the version info if we defaulted, and they specified a release.
if test -z "$vinfo" && test -n "$release"; then
major=
case $version_type in
darwin)
# we can't check for "0.0" in archive_cmds due to quoting
# problems, so we reset it completely
verstring=
;;
*)
verstring="0.0"
;;
esac
if test "$need_version" = no; then
versuffix=
else
versuffix=".0.0"
fi
fi
# Remove version info from name if versioning should be avoided
if test "$avoid_version" = yes && test "$need_version" = no; then
major=
versuffix=
verstring=""
fi
# Check to see if the archive will have undefined symbols.
if test "$allow_undefined" = yes; then
if test "$allow_undefined_flag" = unsupported; then
$echo "$modename: warning: undefined symbols not allowed in $host shared libraries" 1>&2
build_libtool_libs=no
build_old_libs=yes
fi
else
# Don't allow undefined symbols.
allow_undefined_flag="$no_undefined_flag"
fi
fi
if test "$mode" != relink; then
# Remove our outputs, but don't remove object files since they
# may have been created when compiling PIC objects.
removelist=
tempremovelist=`$echo "$output_objdir/*"`
for p in $tempremovelist; do
case $p in
*.$objext)
;;
$output_objdir/$outputname | $output_objdir/$libname.* | $output_objdir/${libname}${release}.*)
if test "X$precious_files_regex" != "X"; then
if echo $p | $EGREP -e "$precious_files_regex" >/dev/null 2>&1
then
continue
fi
fi
removelist="$removelist $p"
;;
*) ;;
esac
done
if test -n "$removelist"; then
$show "${rm}r $removelist"
$run ${rm}r $removelist
fi
fi
# Now set the variables for building old libraries.
if test "$build_old_libs" = yes && test "$build_libtool_libs" != convenience ; then
oldlibs="$oldlibs $output_objdir/$libname.$libext"
# Transform .lo files to .o files.
oldobjs="$objs "`$echo "X$libobjs" | $SP2NL | $Xsed -e '/\.'${libext}'$/d' -e "$lo2o" | $NL2SP`
fi
# Eliminate all temporary directories.
for path in $notinst_path; do
lib_search_path=`$echo "$lib_search_path " | ${SED} -e "s% $path % %g"`
deplibs=`$echo "$deplibs " | ${SED} -e "s% -L$path % %g"`
dependency_libs=`$echo "$dependency_libs " | ${SED} -e "s% -L$path % %g"`
done
if test -n "$xrpath"; then
# If the user specified any rpath flags, then add them.
temp_xrpath=
for libdir in $xrpath; do
temp_xrpath="$temp_xrpath -R$libdir"
case "$finalize_rpath " in
*" $libdir "*) ;;
*) finalize_rpath="$finalize_rpath $libdir" ;;
esac
done
if test "$hardcode_into_libs" != yes || test "$build_old_libs" = yes; then
dependency_libs="$temp_xrpath $dependency_libs"
fi
fi
# Make sure dlfiles contains only unique files that won't be dlpreopened
old_dlfiles="$dlfiles"
dlfiles=
for lib in $old_dlfiles; do
case " $dlprefiles $dlfiles " in
*" $lib "*) ;;
*) dlfiles="$dlfiles $lib" ;;
esac
done
# Make sure dlprefiles contains only unique files
old_dlprefiles="$dlprefiles"
dlprefiles=
for lib in $old_dlprefiles; do
case "$dlprefiles " in
*" $lib "*) ;;
*) dlprefiles="$dlprefiles $lib" ;;
esac
done
if test "$build_libtool_libs" = yes; then
if test -n "$rpath"; then
case $host in
*-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-*-beos*)
# These systems don't actually have a C library (as such).
;;
*-*-rhapsody* | *-*-darwin1.[012])
# Rhapsody C library is in the System framework
deplibs="$deplibs -framework System"
;;
*-*-netbsd*)
# Don't link with libc until the a.out ld.so is fixed.
;;
*-*-openbsd* | *-*-freebsd*)
# Do not include libc due to us having libc/libc_r.
test "X$arg" = "X-lc" && continue
;;
*)
# Add libc to deplibs on all other systems if necessary.
if test "$build_libtool_need_lc" = "yes"; then
deplibs="$deplibs -lc"
fi
;;
esac
fi
# Transform deplibs into only deplibs that can be linked in shared.
name_save=$name
libname_save=$libname
release_save=$release
versuffix_save=$versuffix
major_save=$major
# The release should already show up in the -l name (e.g. -lgmp5),
# so do not add it in a second time here.
release=""
versuffix=""
major=""
newdeplibs=
droppeddeps=no
case $deplibs_check_method in
pass_all)
# Don't check for shared/static. Everything works.
# This might be a little naive.  We might want to check
# whether the library actually exists, but that is only
# known to matter on osf3 and osf4, so just keep the
# behavior that was already in place.
newdeplibs=$deplibs
;;
test_compile)
# This code stresses the "libraries are programs" paradigm to its
# limits. Maybe even breaks it. We compile a program, linking it
# against the deplibs as a proxy for the library. Then we can check
# whether they linked in statically or dynamically with ldd.
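# Sketch of the check below, with assumed names: if $deplibs contains
# -lm, a conftest binary is linked against it and `ldd conftest' is
# expected to show a line matching the libm shared library names; if
# it does, -lm is kept in $newdeplibs, otherwise it is dropped with a
# warning.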
$rm conftest.c
cat > conftest.c <<EOF
int main() { return 0; }
EOF
$rm conftest
$LTCC -o conftest conftest.c $deplibs
if test "$?" -eq 0 ; then
ldd_output=`ldd conftest`
for i in $deplibs; do
name="`expr $i : '-l\(.*\)'`"
# If $name is empty we are operating on a -L argument.
if test "$name" != "" && test "$name" != "0"; then
if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then
case " $predeps $postdeps " in
*" $i "*)
newdeplibs="$newdeplibs $i"
i=""
;;
esac
fi
if test -n "$i" ; then
libname=`eval \\$echo \"$libname_spec\"`
deplib_matches=`eval \\$echo \"$library_names_spec\"`
set dummy $deplib_matches
deplib_match=$2
if test `expr "$ldd_output" : ".*$deplib_match"` -ne 0 ; then
newdeplibs="$newdeplibs $i"
else
droppeddeps=yes
$echo
$echo "*** Warning: dynamic linker does not accept needed library $i."
$echo "*** I have the capability to make that library automatically link in when"
$echo "*** you link to this library. But I can only do this if you have a"
$echo "*** shared version of the library, which I believe you do not have"
$echo "*** because a test compile revealed that the linker did not use it in"
$echo "*** the dynamic dependency list that programs get resolved with at runtime."
fi
fi
else
newdeplibs="$newdeplibs $i"
fi
done
else
# Error occurred in the first compile. Let's try to salvage
# the situation: Compile a separate program for each library.
for i in $deplibs; do
name="`expr $i : '-l\(.*\)'`"
# If $name is empty we are operating on a -L argument.
if test "$name" != "" && test "$name" != "0"; then
$rm conftest
$LTCC -o conftest conftest.c $i
# Did it work?
if test "$?" -eq 0 ; then
ldd_output=`ldd conftest`
if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then
case " $predeps $postdeps " in
*" $i "*)
newdeplibs="$newdeplibs $i"
i=""
;;
esac
fi
if test -n "$i" ; then
libname=`eval \\$echo \"$libname_spec\"`
deplib_matches=`eval \\$echo \"$library_names_spec\"`
set dummy $deplib_matches
deplib_match=$2
if test `expr "$ldd_output" : ".*$deplib_match"` -ne 0 ; then
newdeplibs="$newdeplibs $i"
else
droppeddeps=yes
$echo
$echo "*** Warning: dynamic linker does not accept needed library $i."
$echo "*** I have the capability to make that library automatically link in when"
$echo "*** you link to this library. But I can only do this if you have a"
$echo "*** shared version of the library, which you do not appear to have"
$echo "*** because a test_compile did reveal that the linker did not use this one"
$echo "*** as a dynamic dependency that programs can get resolved with at runtime."
fi
fi
else
droppeddeps=yes
$echo
$echo "*** Warning! Library $i is needed by this library but I was not able to"
$echo "*** make it link in! You will probably need to install it or some"
$echo "*** library that it depends on before this library will be fully"
$echo "*** functional. Installing it before continuing would be even better."
fi
else
newdeplibs="$newdeplibs $i"
fi
done
fi
;;
file_magic*)
set dummy $deplibs_check_method
file_magic_regex=`expr "$deplibs_check_method" : "$2 \(.*\)"`
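# $deplibs_check_method looks like, for example,
#   file_magic ELF [0-9][0-9]*-bit [LM]SB (shared object|dynamic lib)
# (the exact pattern is platform-dependent and set by configure);
# the expr above strips the leading "file_magic " word, leaving
# the regex that candidate library files must match.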
for a_deplib in $deplibs; do
name="`expr $a_deplib : '-l\(.*\)'`"
# If $name is empty we are operating on a -L argument.
if test "$name" != "" && test "$name" != "0"; then
if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then
case " $predeps $postdeps " in
*" $a_deplib "*)
newdeplibs="$newdeplibs $a_deplib"
a_deplib=""
;;
esac
fi
if test -n "$a_deplib" ; then
libname=`eval \\$echo \"$libname_spec\"`
for i in $lib_search_path $sys_lib_search_path $shlib_search_path; do
potential_libs=`ls $i/$libname[.-]* 2>/dev/null`
for potent_lib in $potential_libs; do
# Follow soft links.
if ls -lLd "$potent_lib" 2>/dev/null \
| grep " -> " >/dev/null; then
continue
fi
# The statement above tries to avoid entering an
# endless loop below, in case of cyclic links.
# We might still enter an endless loop, since a link
# loop can be closed while we follow links,
# but so what?
potlib="$potent_lib"
while test -h "$potlib" 2>/dev/null; do
potliblink=`ls -ld $potlib | ${SED} 's/.* -> //'`
case $potliblink in
[\\/]* | [A-Za-z]:[\\/]*) potlib="$potliblink";;
*) potlib=`$echo "X$potlib" | $Xsed -e 's,[^/]*$,,'`"$potliblink";;
esac
done
if eval $file_magic_cmd \"\$potlib\" 2>/dev/null \
| ${SED} 10q \
| $EGREP "$file_magic_regex" > /dev/null; then
newdeplibs="$newdeplibs $a_deplib"
a_deplib=""
break 2
fi
done
done
fi
if test -n "$a_deplib" ; then
droppeddeps=yes
$echo
$echo "*** Warning: linker path does not have real file for library $a_deplib."
$echo "*** I have the capability to make that library automatically link in when"
$echo "*** you link to this library. But I can only do this if you have a"
$echo "*** shared version of the library, which you do not appear to have"
$echo "*** because I did check the linker path looking for a file starting"
if test -z "$potlib" ; then
$echo "*** with $libname but no candidates were found. (...for file magic test)"
else
$echo "*** with $libname and none of the candidates passed a file format test"
$echo "*** using a file magic. Last file checked: $potlib"
fi
fi
else
# Add a -L argument.
newdeplibs="$newdeplibs $a_deplib"
fi
done # Gone through all deplibs.
;;
match_pattern*)
set dummy $deplibs_check_method
match_pattern_regex=`expr "$deplibs_check_method" : "$2 \(.*\)"`
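# Same idea as the file_magic branch above, except that the regex
# is matched against the candidate file name echoed below rather
# than against the $file_magic_cmd output for its contents.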
for a_deplib in $deplibs; do
name="`expr $a_deplib : '-l\(.*\)'`"
# If $name is empty we are operating on a -L argument.
if test -n "$name" && test "$name" != "0"; then
if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then
case " $predeps $postdeps " in
*" $a_deplib "*)
newdeplibs="$newdeplibs $a_deplib"
a_deplib=""
;;
esac
fi
if test -n "$a_deplib" ; then
libname=`eval \\$echo \"$libname_spec\"`
for i in $lib_search_path $sys_lib_search_path $shlib_search_path; do
potential_libs=`ls $i/$libname[.-]* 2>/dev/null`
for potent_lib in $potential_libs; do
potlib="$potent_lib" # see symlink-check above in file_magic test
if eval $echo \"$potent_lib\" 2>/dev/null \
| ${SED} 10q \
| $EGREP "$match_pattern_regex" > /dev/null; then
newdeplibs="$newdeplibs $a_deplib"
a_deplib=""
break 2
fi
done
done
fi
if test -n "$a_deplib" ; then
droppeddeps=yes
$echo
$echo "*** Warning: linker path does not have real file for library $a_deplib."
$echo "*** I have the capability to make that library automatically link in when"
$echo "*** you link to this library. But I can only do this if you have a"
$echo "*** shared version of the library, which you do not appear to have"
$echo "*** because I did check the linker path looking for a file starting"
if test -z "$potlib" ; then
$echo "*** with $libname but no candidates were found. (...for regex pattern test)"
else
$echo "*** with $libname and none of the candidates passed a file format test"
$echo "*** using a regex pattern. Last file checked: $potlib"
fi
fi
else
# Add a -L argument.
newdeplibs="$newdeplibs $a_deplib"
fi
done # Gone through all deplibs.
;;
none | unknown | *)
newdeplibs=""
tmp_deplibs=`$echo "X $deplibs" | $Xsed -e 's/ -lc$//' \
-e 's/ -[LR][^ ]*//g'`
if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then
for i in $predeps $postdeps ; do
# can't use Xsed below, because $i might contain '/'
tmp_deplibs=`$echo "X $tmp_deplibs" | ${SED} -e "1s,^X,," -e "s,$i,,"`
done
fi
if $echo "X $tmp_deplibs" | $Xsed -e 's/[ ]//g' \
| grep . >/dev/null; then
$echo
if test "X$deplibs_check_method" = "Xnone"; then
$echo "*** Warning: inter-library dependencies are not supported in this platform."
else
$echo "*** Warning: inter-library dependencies are not known to be supported."
fi
$echo "*** All declared inter-library dependencies are being dropped."
droppeddeps=yes
fi
;;
esac
versuffix=$versuffix_save
major=$major_save
release=$release_save
libname=$libname_save
name=$name_save
case $host in
*-*-rhapsody* | *-*-darwin1.[012])
# On Rhapsody, replace the C library with the System framework
newdeplibs=`$echo "X $newdeplibs" | $Xsed -e 's/ -lc / -framework System /'`
;;
esac
if test "$droppeddeps" = yes; then
if test "$module" = yes; then
$echo
$echo "*** Warning: libtool could not satisfy all declared inter-library"
$echo "*** dependencies of module $libname. Therefore, libtool will create"
$echo "*** a static module, that should work as long as the dlopening"
$echo "*** application is linked with the -dlopen flag."
if test -z "$global_symbol_pipe"; then
$echo
$echo "*** However, this would only work if libtool was able to extract symbol"
$echo "*** lists from a program, using \`nm' or equivalent, but libtool could"
$echo "*** not find such a program. So, this module is probably useless."
$echo "*** \`nm' from GNU binutils and a full rebuild may help."
fi
if test "$build_old_libs" = no; then
oldlibs="$output_objdir/$libname.$libext"
build_libtool_libs=module
build_old_libs=yes
else
build_libtool_libs=no
fi
else
$echo "*** The inter-library dependencies that have been dropped here will be"
$echo "*** automatically added whenever a program is linked with this library"
$echo "*** or is declared to -dlopen it."
if test "$allow_undefined" = no; then
$echo
$echo "*** Since this library must not contain undefined symbols,"
$echo "*** because either the platform does not support them or"
$echo "*** it was explicitly requested with -no-undefined,"
$echo "*** libtool will only create a static version of it."
if test "$build_old_libs" = no; then
oldlibs="$output_objdir/$libname.$libext"
build_libtool_libs=module
build_old_libs=yes
else
build_libtool_libs=no
fi
fi
fi
fi
# Done checking deplibs!
deplibs=$newdeplibs
fi
# All the library-specific variables (install_libdir is set above).
library_names=
old_library=
dlname=
# Test again, we may have decided not to build it any more
if test "$build_libtool_libs" = yes; then
if test "$hardcode_into_libs" = yes; then
# Hardcode the library paths
hardcode_libdirs=
dep_rpath=
rpath="$finalize_rpath"
test "$mode" != relink && rpath="$compile_rpath$rpath"
for libdir in $rpath; do
if test -n "$hardcode_libdir_flag_spec"; then
if test -n "$hardcode_libdir_separator"; then
if test -z "$hardcode_libdirs"; then
hardcode_libdirs="$libdir"
else
# Just accumulate the unique libdirs.
case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in
*"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*)
;;
*)
hardcode_libdirs="$hardcode_libdirs$hardcode_libdir_separator$libdir"
;;
esac
fi
else
eval flag=\"$hardcode_libdir_flag_spec\"
dep_rpath="$dep_rpath $flag"
fi
elif test -n "$runpath_var"; then
case "$perm_rpath " in
*" $libdir "*) ;;
*) perm_rpath="$perm_rpath $libdir" ;;
esac
fi
done
# Substitute the hardcoded libdirs into the rpath.
if test -n "$hardcode_libdir_separator" &&
test -n "$hardcode_libdirs"; then
libdir="$hardcode_libdirs"
if test -n "$hardcode_libdir_flag_spec_ld"; then
eval dep_rpath=\"$hardcode_libdir_flag_spec_ld\"
else
eval dep_rpath=\"$hardcode_libdir_flag_spec\"
fi
fi
if test -n "$runpath_var" && test -n "$perm_rpath"; then
# We should set the runpath_var.
rpath=
for dir in $perm_rpath; do
rpath="$rpath$dir:"
done
eval "$runpath_var='$rpath\$$runpath_var'; export $runpath_var"
fi
test -n "$dep_rpath" && deplibs="$dep_rpath $deplibs"
fi
shlibpath="$finalize_shlibpath"
test "$mode" != relink && shlibpath="$compile_shlibpath$shlibpath"
if test -n "$shlibpath"; then
eval "$shlibpath_var='$shlibpath\$$shlibpath_var'; export $shlibpath_var"
fi
# Get the real and link names of the library.
eval shared_ext=\"$shrext_cmds\"
eval library_names=\"$library_names_spec\"
set dummy $library_names
realname="$2"
shift; shift
if test -n "$soname_spec"; then
eval soname=\"$soname_spec\"
else
soname="$realname"
fi
if test -z "$dlname"; then
dlname=$soname
fi
lib="$output_objdir/$realname"
for link
do
linknames="$linknames $link"
done
# Use standard objects if they are pic
test -z "$pic_flag" && libobjs=`$echo "X$libobjs" | $SP2NL | $Xsed -e "$lo2o" | $NL2SP`
# Prepare the list of exported symbols
if test -z "$export_symbols"; then
if test "$always_export_symbols" = yes || test -n "$export_symbols_regex"; then
$show "generating symbol list for \`$libname.la'"
export_symbols="$output_objdir/$libname.exp"
$run $rm $export_symbols
cmds=$export_symbols_cmds
save_ifs="$IFS"; IFS='~'
for cmd in $cmds; do
IFS="$save_ifs"
eval cmd=\"$cmd\"
if len=`expr "X$cmd" : ".*"` &&
test "$len" -le "$max_cmd_len" || test "$max_cmd_len" -le -1; then
$show "$cmd"
$run eval "$cmd" || exit $?
skipped_export=false
else
# The command line is too long to execute in one step.
$show "using reloadable object file for export list..."
skipped_export=:
fi
done
IFS="$save_ifs"
if test -n "$export_symbols_regex"; then
$show "$EGREP -e \"$export_symbols_regex\" \"$export_symbols\" > \"${export_symbols}T\""
$run eval '$EGREP -e "$export_symbols_regex" "$export_symbols" > "${export_symbols}T"'
$show "$mv \"${export_symbols}T\" \"$export_symbols\""
$run eval '$mv "${export_symbols}T" "$export_symbols"'
fi
fi
fi
if test -n "$export_symbols" && test -n "$include_expsyms"; then
$run eval '$echo "X$include_expsyms" | $SP2NL >> "$export_symbols"'
fi
tmp_deplibs=
for test_deplib in $deplibs; do
case " $convenience " in
*" $test_deplib "*) ;;
*)
tmp_deplibs="$tmp_deplibs $test_deplib"
;;
esac
done
deplibs="$tmp_deplibs"
if test -n "$convenience"; then
if test -n "$whole_archive_flag_spec"; then
save_libobjs=$libobjs
eval libobjs=\"\$libobjs $whole_archive_flag_spec\"
else
gentop="$output_objdir/${outputname}x"
$show "${rm}r $gentop"
$run ${rm}r "$gentop"
$show "$mkdir $gentop"
$run $mkdir "$gentop"
status=$?
if test "$status" -ne 0 && test ! -d "$gentop"; then
exit $status
fi
generated="$generated $gentop"
for xlib in $convenience; do
# Extract the objects.
case $xlib in
[\\/]* | [A-Za-z]:[\\/]*) xabs="$xlib" ;;
*) xabs=`pwd`"/$xlib" ;;
esac
xlib=`$echo "X$xlib" | $Xsed -e 's%^.*/%%'`
xdir="$gentop/$xlib"
$show "${rm}r $xdir"
$run ${rm}r "$xdir"
$show "$mkdir $xdir"
$run $mkdir "$xdir"
status=$?
if test "$status" -ne 0 && test ! -d "$xdir"; then
exit $status
fi
# We will extract separately just the conflicting names and will not
# touch any unique names: it is faster to leave those to be extracted
# automatically by $AR in one run.
$show "(cd $xdir && $AR x $xabs)"
$run eval "(cd \$xdir && $AR x \$xabs)" || exit $?
if ($AR t "$xabs" | sort | sort -uc >/dev/null 2>&1); then
:
else
$echo "$modename: warning: object name conflicts; renaming object files" 1>&2
$echo "$modename: warning: to ensure that they will not overwrite" 1>&2
$AR t "$xabs" | sort | uniq -cd | while read -r count name
do
i=1
while test "$i" -le "$count"
do
# Put our $i before any first dot (extension)
# Never overwrite any file
name_to="$name"
while test "X$name_to" = "X$name" || test -f "$xdir/$name_to"
do
name_to=`$echo "X$name_to" | $Xsed -e "s/\([^.]*\)/\1-$i/"`
done
$show "(cd $xdir && $AR xN $i $xabs '$name' && $mv '$name' '$name_to')"
$run eval "(cd \$xdir && $AR xN $i \$xabs '$name' && $mv '$name' '$name_to')" || exit $?
i=`expr $i + 1`
done
done
fi
libobjs="$libobjs "`find $xdir -name \*.$objext -print -o -name \*.lo -print | $NL2SP`
done
fi
fi
if test "$thread_safe" = yes && test -n "$thread_safe_flag_spec"; then
eval flag=\"$thread_safe_flag_spec\"
linker_flags="$linker_flags $flag"
fi
# Make a backup of the uninstalled library when relinking
if test "$mode" = relink; then
$run eval '(cd $output_objdir && $rm ${realname}U && $mv $realname ${realname}U)' || exit $?
fi
# Do each of the archive commands.
if test "$module" = yes && test -n "$module_cmds" ; then
if test -n "$export_symbols" && test -n "$module_expsym_cmds"; then
eval test_cmds=\"$module_expsym_cmds\"
cmds=$module_expsym_cmds
else
eval test_cmds=\"$module_cmds\"
cmds=$module_cmds
fi
else
if test -n "$export_symbols" && test -n "$archive_expsym_cmds"; then
eval test_cmds=\"$archive_expsym_cmds\"
cmds=$archive_expsym_cmds
else
eval test_cmds=\"$archive_cmds\"
cmds=$archive_cmds
fi
fi
if test "X$skipped_export" != "X:" && len=`expr "X$test_cmds" : ".*"` &&
test "$len" -le "$max_cmd_len" || test "$max_cmd_len" -le -1; then
:
else
# The command line is too long to link in one step; link piecewise.
$echo "creating reloadable object files..."
# Save the value of $output and $libobjs because we want to
# use them later. If we have whole_archive_flag_spec, we
# want to use save_libobjs as it was before
# whole_archive_flag_spec was expanded, because we can't
# assume the linker understands whole_archive_flag_spec.
# This may have to be revisited, in case too many
# convenience libraries get linked in and end up exceeding
# the spec.
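# Sketch of the scheme: objects are grouped into reloadable files
#   $output_objdir/$save_output-1.$objext, -2.$objext, ...
# and every reloadable object after the first also links in its
# predecessor ($last_robj), so the final one transitively contains
# all objects and can stand in for the full $libobjs list.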
if test -z "$convenience" || test -z "$whole_archive_flag_spec"; then
save_libobjs=$libobjs
fi
save_output=$output
# Clear the reloadable object creation command queue and
# initialize k to one.
test_cmds=
concat_cmds=
objlist=
delfiles=
last_robj=
k=1
output=$output_objdir/$save_output-${k}.$objext
# Loop over the list of objects to be linked.
for obj in $save_libobjs
do
eval test_cmds=\"$reload_cmds $objlist $last_robj\"
if test "X$objlist" = X ||
{ len=`expr "X$test_cmds" : ".*"` &&
test "$len" -le "$max_cmd_len"; }; then
objlist="$objlist $obj"
else
# The command $test_cmds is almost too long, add a
# command to the queue.
if test "$k" -eq 1 ; then
# The first file doesn't have a previous command to add.
eval concat_cmds=\"$reload_cmds $objlist $last_robj\"
else
# All subsequent reloadable object files will link in
# the last one created.
eval concat_cmds=\"\$concat_cmds~$reload_cmds $objlist $last_robj\"
fi
last_robj=$output_objdir/$save_output-${k}.$objext
k=`expr $k + 1`
output=$output_objdir/$save_output-${k}.$objext
objlist=$obj
len=1
fi
done
# Handle the remaining objects by creating one last
# reloadable object file. All subsequent reloadable object
# files will link in the last one created.
test -z "$concat_cmds" || concat_cmds=$concat_cmds~
eval concat_cmds=\"\${concat_cmds}$reload_cmds $objlist $last_robj\"
if ${skipped_export-false}; then
$show "generating symbol list for \`$libname.la'"
export_symbols="$output_objdir/$libname.exp"
$run $rm $export_symbols
libobjs=$output
# Append the command to create the export file.
eval concat_cmds=\"\$concat_cmds~$export_symbols_cmds\"
fi
# Set up a command to remove the reloadable object files
# after they are used.
i=0
while test "$i" -lt "$k"
do
i=`expr $i + 1`
delfiles="$delfiles $output_objdir/$save_output-${i}.$objext"
done
$echo "creating a temporary reloadable object file: $output"
# Loop through the commands generated above and execute them.
save_ifs="$IFS"; IFS='~'
for cmd in $concat_cmds; do
IFS="$save_ifs"
$show "$cmd"
$run eval "$cmd" || exit $?
done
IFS="$save_ifs"
libobjs=$output
# Restore the value of output.
output=$save_output
if test -n "$convenience" && test -n "$whole_archive_flag_spec"; then
eval libobjs=\"\$libobjs $whole_archive_flag_spec\"
fi
# Expand the library linking commands again to reset the
# value of $libobjs for piecewise linking.
# Do each of the archive commands.
if test "$module" = yes && test -n "$module_cmds" ; then
if test -n "$export_symbols" && test -n "$module_expsym_cmds"; then
cmds=$module_expsym_cmds
else
cmds=$module_cmds
fi
else
if test -n "$export_symbols" && test -n "$archive_expsym_cmds"; then
cmds=$archive_expsym_cmds
else
cmds=$archive_cmds
fi
fi
# Append the command to remove the reloadable object files
# to the just-reset $cmds.
eval cmds=\"\$cmds~\$rm $delfiles\"
fi
save_ifs="$IFS"; IFS='~'
for cmd in $cmds; do
IFS="$save_ifs"
eval cmd=\"$cmd\"
$show "$cmd"
$run eval "$cmd" || exit $?
done
IFS="$save_ifs"
# Restore the uninstalled library and exit
if test "$mode" = relink; then
$run eval '(cd $output_objdir && $rm ${realname}T && $mv $realname ${realname}T && $mv "$realname"U $realname)' || exit $?
exit $EXIT_SUCCESS
fi
# Create links to the real library.
for linkname in $linknames; do
if test "$realname" != "$linkname"; then
$show "(cd $output_objdir && $rm $linkname && $LN_S $realname $linkname)"
$run eval '(cd $output_objdir && $rm $linkname && $LN_S $realname $linkname)' || exit $?
fi
done
# If -module or -export-dynamic was specified, set the dlname.
if test "$module" = yes || test "$export_dynamic" = yes; then
# On all known operating systems, these are identical.
dlname="$soname"
fi
fi
;;
obj)
if test -n "$deplibs"; then
$echo "$modename: warning: \`-l' and \`-L' are ignored for objects" 1>&2
fi
if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then
$echo "$modename: warning: \`-dlopen' is ignored for objects" 1>&2
fi
if test -n "$rpath"; then
$echo "$modename: warning: \`-rpath' is ignored for objects" 1>&2
fi
if test -n "$xrpath"; then
$echo "$modename: warning: \`-R' is ignored for objects" 1>&2
fi
if test -n "$vinfo"; then
$echo "$modename: warning: \`-version-info' is ignored for objects" 1>&2
fi
if test -n "$release"; then
$echo "$modename: warning: \`-release' is ignored for objects" 1>&2
fi
case $output in
*.lo)
if test -n "$objs$old_deplibs"; then
$echo "$modename: cannot build library object \`$output' from non-libtool objects" 1>&2
exit $EXIT_FAILURE
fi
libobj="$output"
obj=`$echo "X$output" | $Xsed -e "$lo2o"`
;;
*)
libobj=
obj="$output"
;;
esac
# Delete the old objects.
$run $rm $obj $libobj
# Objects from convenience libraries. This assumes
# single-version convenience libraries. Whenever we create
# different ones for PIC/non-PIC, we will have to duplicate
# the extraction.
reload_conv_objs=
gentop=
# reload_cmds runs $LD directly, so let us get rid of
# -Wl from whole_archive_flag_spec
wl=
if test -n "$convenience"; then
if test -n "$whole_archive_flag_spec"; then
eval reload_conv_objs=\"\$reload_objs $whole_archive_flag_spec\"
else
gentop="$output_objdir/${obj}x"
$show "${rm}r $gentop"
$run ${rm}r "$gentop"
$show "$mkdir $gentop"
$run $mkdir "$gentop"
status=$?
if test "$status" -ne 0 && test ! -d "$gentop"; then
exit $status
fi
generated="$generated $gentop"
for xlib in $convenience; do
# Extract the objects.
case $xlib in
[\\/]* | [A-Za-z]:[\\/]*) xabs="$xlib" ;;
*) xabs=`pwd`"/$xlib" ;;
esac
xlib=`$echo "X$xlib" | $Xsed -e 's%^.*/%%'`
xdir="$gentop/$xlib"
$show "${rm}r $xdir"
$run ${rm}r "$xdir"
$show "$mkdir $xdir"
$run $mkdir "$xdir"
status=$?
if test "$status" -ne 0 && test ! -d "$xdir"; then
exit $status
fi
# We will extract separately just the conflicting names and will not
# touch any unique names: it is faster to leave those to be extracted
# automatically by $AR in one run.
$show "(cd $xdir && $AR x $xabs)"
$run eval "(cd \$xdir && $AR x \$xabs)" || exit $?
if ($AR t "$xabs" | sort | sort -uc >/dev/null 2>&1); then
:
else
$echo "$modename: warning: object name conflicts; renaming object files" 1>&2
$echo "$modename: warning: to ensure that they will not overwrite" 1>&2
$AR t "$xabs" | sort | uniq -cd | while read -r count name
do
i=1
while test "$i" -le "$count"
do
# Put our $i before any first dot (extension)
# Never overwrite any file
name_to="$name"
while test "X$name_to" = "X$name" || test -f "$xdir/$name_to"
do
name_to=`$echo "X$name_to" | $Xsed -e "s/\([^.]*\)/\1-$i/"`
done
$show "(cd $xdir && $AR xN $i $xabs '$name' && $mv '$name' '$name_to')"
$run eval "(cd \$xdir && $AR xN $i \$xabs '$name' && $mv '$name' '$name_to')" || exit $?
i=`expr $i + 1`
done
done
fi
reload_conv_objs="$reload_objs "`find $xdir -name \*.$objext -print -o -name \*.lo -print | $NL2SP`
done
fi
fi
# Create the old-style object.
reload_objs="$objs$old_deplibs "`$echo "X$libobjs" | $SP2NL | $Xsed -e '/\.'${libext}$'/d' -e '/\.lib$/d' -e "$lo2o" | $NL2SP`" $reload_conv_objs" ### testsuite: skip nested quoting test
output="$obj"
cmds=$reload_cmds
save_ifs="$IFS"; IFS='~'
for cmd in $cmds; do
IFS="$save_ifs"
eval cmd=\"$cmd\"
$show "$cmd"
$run eval "$cmd" || exit $?
done
IFS="$save_ifs"
# Exit if we aren't doing a library object file.
if test -z "$libobj"; then
if test -n "$gentop"; then
$show "${rm}r $gentop"
$run ${rm}r $gentop
fi
exit $EXIT_SUCCESS
fi
if test "$build_libtool_libs" != yes; then
if test -n "$gentop"; then
$show "${rm}r $gentop"
$run ${rm}r $gentop
fi
# Create an invalid libtool object if no PIC, so that we don't
# accidentally link it into a program.
# $show "echo timestamp > $libobj"
# $run eval "echo timestamp > $libobj" || exit $?
exit $EXIT_SUCCESS
fi
if test -n "$pic_flag" || test "$pic_mode" != default; then
# Only do commands if we really have different PIC objects.
reload_objs="$libobjs $reload_conv_objs"
output="$libobj"
cmds=$reload_cmds
save_ifs="$IFS"; IFS='~'
for cmd in $cmds; do
IFS="$save_ifs"
eval cmd=\"$cmd\"
$show "$cmd"
$run eval "$cmd" || exit $?
done
IFS="$save_ifs"
fi
if test -n "$gentop"; then
$show "${rm}r $gentop"
$run ${rm}r $gentop
fi
exit $EXIT_SUCCESS
;;
prog)
case $host in
*cygwin*) output=`$echo $output | ${SED} -e 's,.exe$,,;s,$,.exe,'` ;;
esac
if test -n "$vinfo"; then
$echo "$modename: warning: \`-version-info' is ignored for programs" 1>&2
fi
if test -n "$release"; then
$echo "$modename: warning: \`-release' is ignored for programs" 1>&2
fi
if test "$preload" = yes; then
if test "$dlopen_support" = unknown && test "$dlopen_self" = unknown &&
test "$dlopen_self_static" = unknown; then
$echo "$modename: warning: \`AC_LIBTOOL_DLOPEN' not used. Assuming no dlopen support."
fi
fi
case $host in
*-*-rhapsody* | *-*-darwin1.[012])
# On Rhapsody, replace the C library with the System framework
compile_deplibs=`$echo "X $compile_deplibs" | $Xsed -e 's/ -lc / -framework System /'`
finalize_deplibs=`$echo "X $finalize_deplibs" | $Xsed -e 's/ -lc / -framework System /'`
;;
esac
case $host in
*darwin*)
# Don't allow lazy linking; it breaks C++ global constructors
if test "$tagname" = CXX ; then
compile_command="$compile_command ${wl}-bind_at_load"
finalize_command="$finalize_command ${wl}-bind_at_load"
fi
;;
esac
compile_command="$compile_command $compile_deplibs"
finalize_command="$finalize_command $finalize_deplibs"
if test -n "$rpath$xrpath"; then
# If the user specified any rpath flags, then add them.
for libdir in $rpath $xrpath; do
# This is the magic to use -rpath.
case "$finalize_rpath " in
*" $libdir "*) ;;
*) finalize_rpath="$finalize_rpath $libdir" ;;
esac
done
fi
# Now hardcode the library paths
rpath=
hardcode_libdirs=
for libdir in $compile_rpath $finalize_rpath; do
if test -n "$hardcode_libdir_flag_spec"; then
if test -n "$hardcode_libdir_separator"; then
if test -z "$hardcode_libdirs"; then
hardcode_libdirs="$libdir"
else
# Just accumulate the unique libdirs.
case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in
*"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*)
;;
*)
hardcode_libdirs="$hardcode_libdirs$hardcode_libdir_separator$libdir"
;;
esac
fi
else
eval flag=\"$hardcode_libdir_flag_spec\"
rpath="$rpath $flag"
fi
elif test -n "$runpath_var"; then
case "$perm_rpath " in
*" $libdir "*) ;;
*) perm_rpath="$perm_rpath $libdir" ;;
esac
fi
case $host in
*-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2*)
case :$dllsearchpath: in
*":$libdir:"*) ;;
*) dllsearchpath="$dllsearchpath:$libdir";;
esac
;;
esac
done
# Substitute the hardcoded libdirs into the rpath.
if test -n "$hardcode_libdir_separator" &&
test -n "$hardcode_libdirs"; then
libdir="$hardcode_libdirs"
eval rpath=\" $hardcode_libdir_flag_spec\"
fi
compile_rpath="$rpath"
rpath=
hardcode_libdirs=
for libdir in $finalize_rpath; do
if test -n "$hardcode_libdir_flag_spec"; then
if test -n "$hardcode_libdir_separator"; then
if test -z "$hardcode_libdirs"; then
hardcode_libdirs="$libdir"
else
# Just accumulate the unique libdirs.
case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in
*"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*)
;;
*)
hardcode_libdirs="$hardcode_libdirs$hardcode_libdir_separator$libdir"
;;
esac
fi
else
eval flag=\"$hardcode_libdir_flag_spec\"
rpath="$rpath $flag"
fi
elif test -n "$runpath_var"; then
case "$finalize_perm_rpath " in
*" $libdir "*) ;;
*) finalize_perm_rpath="$finalize_perm_rpath $libdir" ;;
esac
fi
done
# Substitute the hardcoded libdirs into the rpath.
if test -n "$hardcode_libdir_separator" &&
test -n "$hardcode_libdirs"; then
libdir="$hardcode_libdirs"
eval rpath=\" $hardcode_libdir_flag_spec\"
fi
finalize_rpath="$rpath"
if test -n "$libobjs" && test "$build_old_libs" = yes; then
# Transform all the library objects into standard objects.
compile_command=`$echo "X$compile_command" | $SP2NL | $Xsed -e "$lo2o" | $NL2SP`
finalize_command=`$echo "X$finalize_command" | $SP2NL | $Xsed -e "$lo2o" | $NL2SP`
fi
dlsyms=
if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then
if test -n "$NM" && test -n "$global_symbol_pipe"; then
dlsyms="${outputname}S.c"
else
$echo "$modename: not configured to extract global symbols from dlpreopened files" 1>&2
fi
fi
if test -n "$dlsyms"; then
case $dlsyms in
"") ;;
*.c)
# Discover the nlist of each of the dlfiles.
nlist="$output_objdir/${outputname}.nm"
$show "$rm $nlist ${nlist}S ${nlist}T"
$run $rm "$nlist" "${nlist}S" "${nlist}T"
# Parse the name list into a source file.
$show "creating $output_objdir/$dlsyms"
test -z "$run" && $echo > "$output_objdir/$dlsyms" "\
/* $dlsyms - symbol resolution table for \`$outputname' dlsym emulation. */
/* Generated by $PROGRAM - GNU $PACKAGE $VERSION$TIMESTAMP */
#ifdef __cplusplus
extern \"C\" {
#endif
/* Prevent the only kind of declaration conflicts we can make. */
#define lt_preloaded_symbols some_other_symbol
/* External symbol declarations for the compiler. */\
"
if test "$dlself" = yes; then
$show "generating symbol list for \`$output'"
test -z "$run" && $echo ': @PROGRAM@ ' > "$nlist"
# Add our own program objects to the symbol list.
progfiles=`$echo "X$objs$old_deplibs" | $SP2NL | $Xsed -e "$lo2o" | $NL2SP`
for arg in $progfiles; do
$show "extracting global C symbols from \`$arg'"
$run eval "$NM $arg | $global_symbol_pipe >> '$nlist'"
done
if test -n "$exclude_expsyms"; then
$run eval '$EGREP -v " ($exclude_expsyms)$" "$nlist" > "$nlist"T'
$run eval '$mv "$nlist"T "$nlist"'
fi
if test -n "$export_symbols_regex"; then
$run eval '$EGREP -e "$export_symbols_regex" "$nlist" > "$nlist"T'
$run eval '$mv "$nlist"T "$nlist"'
fi
# Prepare the list of exported symbols
if test -z "$export_symbols"; then
export_symbols="$output_objdir/$output.exp"
$run $rm $export_symbols
$run eval "${SED} -n -e '/^: @PROGRAM@$/d' -e 's/^.* \(.*\)$/\1/p' "'< "$nlist" > "$export_symbols"'
else
$run eval "${SED} -e 's/\([][.*^$]\)/\\\1/g' -e 's/^/ /' -e 's/$/$/'"' < "$export_symbols" > "$output_objdir/$output.exp"'
$run eval 'grep -f "$output_objdir/$output.exp" < "$nlist" > "$nlist"T'
$run eval 'mv "$nlist"T "$nlist"'
fi
fi
for arg in $dlprefiles; do
$show "extracting global C symbols from \`$arg'"
name=`$echo "$arg" | ${SED} -e 's%^.*/%%'`
$run eval '$echo ": $name " >> "$nlist"'
$run eval "$NM $arg | $global_symbol_pipe >> '$nlist'"
done
if test -z "$run"; then
# Make sure we have at least an empty file.
test -f "$nlist" || : > "$nlist"
if test -n "$exclude_expsyms"; then
$EGREP -v " ($exclude_expsyms)$" "$nlist" > "$nlist"T
$mv "$nlist"T "$nlist"
fi
# Try sorting and uniquifying the output.
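# `sort -k 3` is the POSIX spelling; very old sort(1)
# implementations only understand the obsolete `sort +2` form.
# Both select the third whitespace-separated field of the symbol
# pipe output as the sort key.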
if grep -v "^: " < "$nlist" |
if sort -k 3 </dev/null >/dev/null 2>&1; then
sort -k 3
else
sort +2
fi |
uniq > "$nlist"S; then
:
else
grep -v "^: " < "$nlist" > "$nlist"S
fi
if test -f "$nlist"S; then
eval "$global_symbol_to_cdecl"' < "$nlist"S >> "$output_objdir/$dlsyms"'
else
$echo '/* NONE */' >> "$output_objdir/$dlsyms"
fi
$echo >> "$output_objdir/$dlsyms" "\
#undef lt_preloaded_symbols
#if defined (__STDC__) && __STDC__
# define lt_ptr void *
#else
# define lt_ptr char *
# define const
#endif
/* The mapping between symbol names and symbols. */
const struct {
const char *name;
lt_ptr address;
}
lt_preloaded_symbols[] =
{\
"
eval "$global_symbol_to_c_name_address" < "$nlist" >> "$output_objdir/$dlsyms"
$echo >> "$output_objdir/$dlsyms" "\
{0, (lt_ptr) 0}
};
/* This works around a problem in the FreeBSD linker */
#ifdef FREEBSD_WORKAROUND
static const void *lt_preloaded_setup() {
return lt_preloaded_symbols;
}
#endif
#ifdef __cplusplus
}
#endif\
"
fi
pic_flag_for_symtable=
case $host in
# compiling the symbol table file with pic_flag works around
# a FreeBSD bug that causes programs to crash when -lm is
# linked before any other PIC object. But we must not use
# pic_flag when linking with -static. The problem exists in
# FreeBSD 2.2.6 and is fixed in FreeBSD 3.1.
*-*-freebsd2*|*-*-freebsd3.0*|*-*-freebsdelf3.0*)
case "$compile_command " in
*" -static "*) ;;
*) pic_flag_for_symtable=" $pic_flag -DFREEBSD_WORKAROUND";;
esac;;
*-*-hpux*)
case "$compile_command " in
*" -static "*) ;;
*) pic_flag_for_symtable=" $pic_flag";;
esac
esac
# Now compile the dynamic symbol file.
$show "(cd $output_objdir && $LTCC -c$no_builtin_flag$pic_flag_for_symtable \"$dlsyms\")"
$run eval '(cd $output_objdir && $LTCC -c$no_builtin_flag$pic_flag_for_symtable "$dlsyms")' || exit $?
# Clean up the generated files.
$show "$rm $output_objdir/$dlsyms $nlist ${nlist}S ${nlist}T"
$run $rm "$output_objdir/$dlsyms" "$nlist" "${nlist}S" "${nlist}T"
# Transform the symbol file into the correct name.
compile_command=`$echo "X$compile_command" | $Xsed -e "s%@SYMFILE@%$output_objdir/${outputname}S.${objext}%"`
finalize_command=`$echo "X$finalize_command" | $Xsed -e "s%@SYMFILE@%$output_objdir/${outputname}S.${objext}%"`
;;
*)
$echo "$modename: unknown suffix for \`$dlsyms'" 1>&2
exit $EXIT_FAILURE
;;
esac
else
# We keep going just in case the user didn't refer to
# lt_preloaded_symbols. The linker will fail if global_symbol_pipe
# really was required.
# Nullify the symbol file.
compile_command=`$echo "X$compile_command" | $Xsed -e "s% @SYMFILE@%%"`
finalize_command=`$echo "X$finalize_command" | $Xsed -e "s% @SYMFILE@%%"`
fi
if test "$need_relink" = no || test "$build_libtool_libs" != yes; then
# Replace the output file specification.
compile_command=`$echo "X$compile_command" | $Xsed -e 's%@OUTPUT@%'"$output"'%g'`
link_command="$compile_command$compile_rpath"
# We have no uninstalled library dependencies, so finalize right now.
$show "$link_command"
$run eval "$link_command"
status=$?
# Delete the generated files.
if test -n "$dlsyms"; then
$show "$rm $output_objdir/${outputname}S.${objext}"
$run $rm "$output_objdir/${outputname}S.${objext}"
fi
exit $status
fi
if test -n "$shlibpath_var"; then
# We should set the shlibpath_var
rpath=
for dir in $temp_rpath; do
case $dir in
[\\/]* | [A-Za-z]:[\\/]*)
# Absolute path.
rpath="$rpath$dir:"
;;
*)
# Relative path: add a thisdir entry.
rpath="$rpath\$thisdir/$dir:"
;;
esac
done
temp_rpath="$rpath"
fi
if test -n "$compile_shlibpath$finalize_shlibpath"; then
compile_command="$shlibpath_var=\"$compile_shlibpath$finalize_shlibpath\$$shlibpath_var\" $compile_command"
fi
if test -n "$finalize_shlibpath"; then
finalize_command="$shlibpath_var=\"$finalize_shlibpath\$$shlibpath_var\" $finalize_command"
fi
compile_var=
finalize_var=
if test -n "$runpath_var"; then
if test -n "$perm_rpath"; then
# We should set the runpath_var.
rpath=
for dir in $perm_rpath; do
rpath="$rpath$dir:"
done
compile_var="$runpath_var=\"$rpath\$$runpath_var\" "
fi
if test -n "$finalize_perm_rpath"; then
# We should set the runpath_var.
rpath=
for dir in $finalize_perm_rpath; do
rpath="$rpath$dir:"
done
finalize_var="$runpath_var=\"$rpath\$$runpath_var\" "
fi
fi
if test "$no_install" = yes; then
# We don't need to create a wrapper script.
link_command="$compile_var$compile_command$compile_rpath"
# Replace the output file specification.
link_command=`$echo "X$link_command" | $Xsed -e 's%@OUTPUT@%'"$output"'%g'`
# Delete the old output file.
$run $rm $output
# Link the executable and exit
$show "$link_command"
$run eval "$link_command" || exit $?
exit $EXIT_SUCCESS
fi
if test "$hardcode_action" = relink; then
# Fast installation is not supported
link_command="$compile_var$compile_command$compile_rpath"
relink_command="$finalize_var$finalize_command$finalize_rpath"
$echo "$modename: warning: this platform does not like uninstalled shared libraries" 1>&2
$echo "$modename: \`$output' will be relinked during installation" 1>&2
else
if test "$fast_install" != no; then
link_command="$finalize_var$compile_command$finalize_rpath"
if test "$fast_install" = yes; then
relink_command=`$echo "X$compile_var$compile_command$compile_rpath" | $Xsed -e 's%@OUTPUT@%\$progdir/\$file%g'`
else
# fast_install is set to needless
relink_command=
fi
else
link_command="$compile_var$compile_command$compile_rpath"
relink_command="$finalize_var$finalize_command$finalize_rpath"
fi
fi
# Replace the output file specification.
link_command=`$echo "X$link_command" | $Xsed -e 's%@OUTPUT@%'"$output_objdir/$outputname"'%g'`
# Delete the old output files.
$run $rm $output $output_objdir/$outputname $output_objdir/lt-$outputname
$show "$link_command"
$run eval "$link_command" || exit $?
# Now create the wrapper script.
$show "creating $output"
# Quote the relink command for shipping.
if test -n "$relink_command"; then
# Preserve any variables that may affect compiler behavior
for var in $variables_saved_for_relink; do
if eval test -z \"\${$var+set}\"; then
relink_command="{ test -z \"\${$var+set}\" || unset $var || { $var=; export $var; }; }; $relink_command"
elif eval var_value=\$$var; test -z "$var_value"; then
relink_command="$var=; export $var; $relink_command"
else
var_value=`$echo "X$var_value" | $Xsed -e "$sed_quote_subst"`
relink_command="$var=\"$var_value\"; export $var; $relink_command"
fi
done
relink_command="(cd `pwd`; $relink_command)"
relink_command=`$echo "X$relink_command" | $Xsed -e "$sed_quote_subst"`
fi
# Quote $echo for shipping.
if test "X$echo" = "X$SHELL $progpath --fallback-echo"; then
case $progpath in
[\\/]* | [A-Za-z]:[\\/]*) qecho="$SHELL $progpath --fallback-echo";;
*) qecho="$SHELL `pwd`/$progpath --fallback-echo";;
esac
qecho=`$echo "X$qecho" | $Xsed -e "$sed_quote_subst"`
else
qecho=`$echo "X$echo" | $Xsed -e "$sed_quote_subst"`
fi
# Only actually do things if our run command is non-null.
if test -z "$run"; then
# win32 will think the script is a binary if it has
# a .exe suffix, so we strip it off here.
case $output in
*.exe) output=`$echo $output|${SED} 's,.exe$,,'` ;;
esac
# test for cygwin because mv fails w/o .exe extensions
case $host in
*cygwin*)
exeext=.exe
outputname=`$echo $outputname|${SED} 's,.exe$,,'` ;;
*) exeext= ;;
esac
case $host in
*cygwin* | *mingw* )
cwrappersource=`$echo ${objdir}/lt-${output}.c`
cwrapper=`$echo ${output}.exe`
$rm $cwrappersource $cwrapper
trap "$rm $cwrappersource $cwrapper; exit $EXIT_FAILURE" 1 2 15
cat > $cwrappersource <<EOF
/* $cwrappersource - temporary wrapper executable for $objdir/$outputname
Generated by $PROGRAM - GNU $PACKAGE $VERSION$TIMESTAMP
The $output program cannot be directly executed until all the libtool
libraries that it depends on are installed.
This wrapper executable should never be moved out of the build directory.
If it is, it will not operate correctly.
Currently, it simply execs the wrapper *script* "/bin/sh $output",
but could eventually absorb all of the script's functionality and
exec $objdir/$outputname directly.
*/
EOF
cat >> $cwrappersource<<"EOF"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>   /* strcpy, strlen, strcmp */
#include <ctype.h>    /* isalpha */
#include <unistd.h>
#include <malloc.h>
#include <stdarg.h>
#include <assert.h>
#if defined(PATH_MAX)
# define LT_PATHMAX PATH_MAX
#elif defined(MAXPATHLEN)
# define LT_PATHMAX MAXPATHLEN
#else
# define LT_PATHMAX 1024
#endif
#ifndef DIR_SEPARATOR
#define DIR_SEPARATOR '/'
#endif
#if defined (_WIN32) || defined (__MSDOS__) || defined (__DJGPP__) || \
defined (__OS2__)
#define HAVE_DOS_BASED_FILE_SYSTEM
#ifndef DIR_SEPARATOR_2
#define DIR_SEPARATOR_2 '\\'
#endif
#endif
#ifndef DIR_SEPARATOR_2
# define IS_DIR_SEPARATOR(ch) ((ch) == DIR_SEPARATOR)
#else /* DIR_SEPARATOR_2 */
# define IS_DIR_SEPARATOR(ch) \
(((ch) == DIR_SEPARATOR) || ((ch) == DIR_SEPARATOR_2))
#endif /* DIR_SEPARATOR_2 */
#define XMALLOC(type, num) ((type *) xmalloc ((num) * sizeof(type)))
#define XFREE(stale) do { \
if (stale) { free ((void *) stale); stale = 0; } \
} while (0)
const char *program_name = NULL;
void * xmalloc (size_t num);
char * xstrdup (const char *string);
char * basename (const char *name);
char * fnqualify(const char *path);
char * strendzap(char *str, const char *pat);
void lt_fatal (const char *message, ...);
int
main (int argc, char *argv[])
{
char **newargz;
int i;
program_name = (char *) xstrdup ((char *) basename (argv[0]));
newargz = XMALLOC(char *, argc+2);
EOF
cat >> $cwrappersource <<EOF
newargz[0] = "$SHELL";
EOF
cat >> $cwrappersource <<"EOF"
newargz[1] = fnqualify(argv[0]);
/* we know the script has the same name, without the .exe */
/* so make sure newargz[1] doesn't end in .exe */
strendzap(newargz[1],".exe");
for (i = 1; i < argc; i++)
newargz[i+1] = xstrdup(argv[i]);
newargz[argc+1] = NULL;
EOF
cat >> $cwrappersource <<EOF
execv("$SHELL",newargz);
EOF
cat >> $cwrappersource <<"EOF"
}
void *
xmalloc (size_t num)
{
void * p = (void *) malloc (num);
if (!p)
lt_fatal ("Memory exhausted");
return p;
}
char *
xstrdup (const char *string)
{
return string ? strcpy ((char *) xmalloc (strlen (string) + 1), string) : NULL;
}
char *
basename (const char *name)
{
const char *base;
#if defined (HAVE_DOS_BASED_FILE_SYSTEM)
/* Skip over the disk name in MSDOS pathnames. */
if (isalpha (name[0]) && name[1] == ':')
name += 2;
#endif
for (base = name; *name; name++)
if (IS_DIR_SEPARATOR (*name))
base = name + 1;
return (char *) base;
}
char *
fnqualify(const char *path)
{
size_t size;
char *p;
char tmp[LT_PATHMAX + 1];
assert(path != NULL);
/* Is it qualified already? */
#if defined (HAVE_DOS_BASED_FILE_SYSTEM)
if (isalpha (path[0]) && path[1] == ':')
return xstrdup (path);
#endif
if (IS_DIR_SEPARATOR (path[0]))
return xstrdup (path);
/* prepend the current directory */
/* doesn't handle '~' */
if (getcwd (tmp, LT_PATHMAX) == NULL)
lt_fatal ("getcwd failed");
size = strlen(tmp) + 1 + strlen(path) + 1; /* +2 for '/' and '\0' */
p = XMALLOC(char, size);
sprintf(p, "%s%c%s", tmp, DIR_SEPARATOR, path);
return p;
}
char *
strendzap(char *str, const char *pat)
{
size_t len, patlen;
assert(str != NULL);
assert(pat != NULL);
len = strlen(str);
patlen = strlen(pat);
if (patlen <= len)
{
str += len - patlen;
if (strcmp(str, pat) == 0)
*str = '\0';
}
return str;
}
static void
lt_error_core (int exit_status, const char * mode,
const char * message, va_list ap)
{
fprintf (stderr, "%s: %s: ", program_name, mode);
vfprintf (stderr, message, ap);
fprintf (stderr, ".\n");
if (exit_status >= 0)
exit (exit_status);
}
void
lt_fatal (const char *message, ...)
{
va_list ap;
va_start (ap, message);
lt_error_core (EXIT_FAILURE, "FATAL", message, ap);
va_end (ap);
}
EOF
# We should really use a build-platform specific compiler
# here, but OTOH, the wrappers (shell script and this C one)
# are only useful if you want to execute the "real" binary.
# Since the "real" binary is built for $host, this wrapper
# might as well be built for $host, too.
$run $LTCC -s -o $cwrapper $cwrappersource
;;
esac
$rm $output
trap "$rm $output; exit $EXIT_FAILURE" 1 2 15
$echo > $output "\
#! $SHELL
# $output - temporary wrapper script for $objdir/$outputname
# Generated by $PROGRAM - GNU $PACKAGE $VERSION$TIMESTAMP
#
# The $output program cannot be directly executed until all the libtool
# libraries that it depends on are installed.
#
# This wrapper script should never be moved out of the build directory.
# If it is, it will not operate correctly.
# Sed substitution that helps us do robust quoting. It backslashifies
# metacharacters that are still active within double-quoted strings.
Xsed='${SED} -e 1s/^X//'
sed_quote_subst='$sed_quote_subst'
# The HP-UX ksh and POSIX shell print the target directory to stdout
# if CDPATH is set.
if test \"\${CDPATH+set}\" = set; then CDPATH=:; export CDPATH; fi
relink_command=\"$relink_command\"
# This environment variable determines our operation mode.
if test \"\$libtool_install_magic\" = \"$magic\"; then
# install mode needs the following variable:
notinst_deplibs='$notinst_deplibs'
else
# When we are sourced in execute mode, \$file and \$echo are already set.
if test \"\$libtool_execute_magic\" != \"$magic\"; then
echo=\"$qecho\"
file=\"\$0\"
# Make sure echo works.
if test \"X\$1\" = X--no-reexec; then
# Discard the --no-reexec flag, and continue.
shift
elif test \"X\`(\$echo '\t') 2>/dev/null\`\" = 'X\t'; then
# Yippee, \$echo works!
:
else
# Restart under the correct shell, and then maybe \$echo will work.
exec $SHELL \"\$0\" --no-reexec \${1+\"\$@\"}
fi
fi\
"
$echo >> $output "\
# Find the directory that this script lives in.
thisdir=\`\$echo \"X\$file\" | \$Xsed -e 's%/[^/]*$%%'\`
test \"x\$thisdir\" = \"x\$file\" && thisdir=.
# Follow symbolic links until we get to the real thisdir.
file=\`ls -ld \"\$file\" | ${SED} -n 's/.*-> //p'\`
while test -n \"\$file\"; do
destdir=\`\$echo \"X\$file\" | \$Xsed -e 's%/[^/]*\$%%'\`
# If there was a directory component, then change thisdir.
if test \"x\$destdir\" != \"x\$file\"; then
case \"\$destdir\" in
[\\\\/]* | [A-Za-z]:[\\\\/]*) thisdir=\"\$destdir\" ;;
*) thisdir=\"\$thisdir/\$destdir\" ;;
esac
fi
file=\`\$echo \"X\$file\" | \$Xsed -e 's%^.*/%%'\`
file=\`ls -ld \"\$thisdir/\$file\" | ${SED} -n 's/.*-> //p'\`
done
# Try to get the absolute directory name.
absdir=\`cd \"\$thisdir\" && pwd\`
test -n \"\$absdir\" && thisdir=\"\$absdir\"
"
if test "$fast_install" = yes; then
$echo >> $output "\
program=lt-'$outputname'$exeext
progdir=\"\$thisdir/$objdir\"
if test ! -f \"\$progdir/\$program\" || \\
{ file=\`ls -1dt \"\$progdir/\$program\" \"\$progdir/../\$program\" 2>/dev/null | ${SED} 1q\`; \\
test \"X\$file\" != \"X\$progdir/\$program\"; }; then
file=\"\$\$-\$program\"
if test ! -d \"\$progdir\"; then
$mkdir \"\$progdir\"
else
$rm \"\$progdir/\$file\"
fi"
$echo >> $output "\
# relink executable if necessary
if test -n \"\$relink_command\"; then
if relink_command_output=\`eval \$relink_command 2>&1\`; then :
else
$echo \"\$relink_command_output\" >&2
$rm \"\$progdir/\$file\"
exit $EXIT_FAILURE
fi
fi
$mv \"\$progdir/\$file\" \"\$progdir/\$program\" 2>/dev/null ||
{ $rm \"\$progdir/\$program\";
$mv \"\$progdir/\$file\" \"\$progdir/\$program\"; }
$rm \"\$progdir/\$file\"
fi"
else
$echo >> $output "\
program='$outputname'
progdir=\"\$thisdir/$objdir\"
"
fi
$echo >> $output "\
if test -f \"\$progdir/\$program\"; then"
# Export our shlibpath_var if we have one.
if test "$shlibpath_overrides_runpath" = yes && test -n "$shlibpath_var" && test -n "$temp_rpath"; then
$echo >> $output "\
# Add our own library path to $shlibpath_var
$shlibpath_var=\"$temp_rpath\$$shlibpath_var\"
# Some systems cannot cope with colon-terminated $shlibpath_var
# The second colon is a workaround for a bug in BeOS R4 sed
$shlibpath_var=\`\$echo \"X\$$shlibpath_var\" | \$Xsed -e 's/::*\$//'\`
export $shlibpath_var
"
fi
# fixup the dll searchpath if we need to.
if test -n "$dllsearchpath"; then
$echo >> $output "\
# Add the dll search path components to the executable PATH
PATH=$dllsearchpath:\$PATH
"
fi
$echo >> $output "\
if test \"\$libtool_execute_magic\" != \"$magic\"; then
# Run the actual program with our arguments.
"
case $host in
# Backslashes separate directories on plain windows
*-*-mingw | *-*-os2*)
$echo >> $output "\
exec \$progdir\\\\\$program \${1+\"\$@\"}
"
;;
*)
$echo >> $output "\
exec \$progdir/\$program \${1+\"\$@\"}
"
;;
esac
$echo >> $output "\
\$echo \"\$0: cannot exec \$program \${1+\"\$@\"}\"
exit $EXIT_FAILURE
fi
else
# The program doesn't exist.
\$echo \"\$0: error: \$progdir/\$program does not exist\" 1>&2
\$echo \"This script is just a wrapper for \$program.\" 1>&2
$echo \"See the $PACKAGE documentation for more information.\" 1>&2
exit $EXIT_FAILURE
fi
fi\
"
chmod +x $output
fi
exit $EXIT_SUCCESS
;;
esac
# See if we need to build an old-fashioned archive.
for oldlib in $oldlibs; do
if test "$build_libtool_libs" = convenience; then
oldobjs="$libobjs_save"
addlibs="$convenience"
build_libtool_libs=no
else
if test "$build_libtool_libs" = module; then
oldobjs="$libobjs_save"
build_libtool_libs=no
else
oldobjs="$old_deplibs $non_pic_objects"
fi
addlibs="$old_convenience"
fi
if test -n "$addlibs"; then
gentop="$output_objdir/${outputname}x"
$show "${rm}r $gentop"
$run ${rm}r "$gentop"
$show "$mkdir $gentop"
$run $mkdir "$gentop"
status=$?
if test "$status" -ne 0 && test ! -d "$gentop"; then
exit $status
fi
generated="$generated $gentop"
# Add in members from convenience archives.
for xlib in $addlibs; do
# Extract the objects.
case $xlib in
[\\/]* | [A-Za-z]:[\\/]*) xabs="$xlib" ;;
*) xabs=`pwd`"/$xlib" ;;
esac
xlib=`$echo "X$xlib" | $Xsed -e 's%^.*/%%'`
xdir="$gentop/$xlib"
$show "${rm}r $xdir"
$run ${rm}r "$xdir"
$show "$mkdir $xdir"
$run $mkdir "$xdir"
status=$?
if test "$status" -ne 0 && test ! -d "$xdir"; then
exit $status
fi
# We will extract separately just the conflicting names and will not
# touch any unique names: it is faster to leave those to be extracted
# automatically by $AR in one run.
$show "(cd $xdir && $AR x $xabs)"
$run eval "(cd \$xdir && $AR x \$xabs)" || exit $?
if ($AR t "$xabs" | sort | sort -uc >/dev/null 2>&1); then
:
else
$echo "$modename: warning: object name conflicts; renaming object files" 1>&2
$echo "$modename: warning: to ensure that they will not overwrite" 1>&2
$AR t "$xabs" | sort | uniq -cd | while read -r count name
do
i=1
while test "$i" -le "$count"
do
# Put our $i before any first dot (extension)
# Never overwrite any file
name_to="$name"
while test "X$name_to" = "X$name" || test -f "$xdir/$name_to"
do
name_to=`$echo "X$name_to" | $Xsed -e "s/\([^.]*\)/\1-$i/"`
done
$show "(cd $xdir && $AR xN $i $xabs '$name' && $mv '$name' '$name_to')"
$run eval "(cd \$xdir && $AR xN $i \$xabs '$name' && $mv '$name' '$name_to')" || exit $?
i=`expr $i + 1`
done
done
fi
oldobjs="$oldobjs "`find $xdir -name \*.${objext} -print -o -name \*.lo -print | $NL2SP`
done
fi
# Do each command in the archive commands.
if test -n "$old_archive_from_new_cmds" && test "$build_libtool_libs" = yes; then
cmds=$old_archive_from_new_cmds
else
eval cmds=\"$old_archive_cmds\"
if len=`expr "X$cmds" : ".*"` &&
test "$len" -le "$max_cmd_len" || test "$max_cmd_len" -le -1; then
cmds=$old_archive_cmds
else
# the command line is too long to link in one step; link in parts
$echo "using piecewise archive linking..."
save_RANLIB=$RANLIB
RANLIB=:
objlist=
concat_cmds=
save_oldobjs=$oldobjs
# GNU ar 2.10+ was changed to match POSIX; thus no paths are
# encoded into archives. This makes 'ar r' malfunction in
# this piecewise linking case whenever conflicting object
# names appear in distinct ar calls; check, warn and compensate.
if (for obj in $save_oldobjs
do
$echo "X$obj" | $Xsed -e 's%^.*/%%'
done | sort | sort -uc >/dev/null 2>&1); then
:
else
$echo "$modename: warning: object name conflicts; overriding AR_FLAGS to 'cq'" 1>&2
$echo "$modename: warning: to ensure that POSIX-compatible ar will work" 1>&2
AR_FLAGS=cq
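# 'c' creates the archive if necessary and 'q' quick-appends
# members without replacing same-named entries, so conflicting
# object names from the distinct ar calls are all preserved.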
fi
# Is there a better way of finding the last object in the list?
for obj in $save_oldobjs
do
last_oldobj=$obj
done
for obj in $save_oldobjs
do
oldobjs="$objlist $obj"
objlist="$objlist $obj"
eval test_cmds=\"$old_archive_cmds\"
if len=`expr "X$test_cmds" : ".*"` &&
test "$len" -le "$max_cmd_len"; then
:
else
# the above command should be used before it gets too long
oldobjs=$objlist
if test "$obj" = "$last_oldobj" ; then
RANLIB=$save_RANLIB
fi
test -z "$concat_cmds" || concat_cmds=$concat_cmds~
eval concat_cmds=\"\${concat_cmds}$old_archive_cmds\"
objlist=
fi
done
RANLIB=$save_RANLIB
oldobjs=$objlist
if test "X$oldobjs" = "X" ; then
eval cmds=\"\$concat_cmds\"
else
eval cmds=\"\$concat_cmds~\$old_archive_cmds\"
fi
fi
fi
save_ifs="$IFS"; IFS='~'
for cmd in $cmds; do
eval cmd=\"$cmd\"
IFS="$save_ifs"
$show "$cmd"
$run eval "$cmd" || exit $?
done
IFS="$save_ifs"
done
if test -n "$generated"; then
$show "${rm}r$generated"
$run ${rm}r$generated
fi
# Now create the libtool archive.
case $output in
*.la)
old_library=
test "$build_old_libs" = yes && old_library="$libname.$libext"
$show "creating $output"
# Preserve any variables that may affect compiler behavior
for var in $variables_saved_for_relink; do
if eval test -z \"\${$var+set}\"; then
relink_command="{ test -z \"\${$var+set}\" || unset $var || { $var=; export $var; }; }; $relink_command"
elif eval var_value=\$$var; test -z "$var_value"; then
relink_command="$var=; export $var; $relink_command"
else
var_value=`$echo "X$var_value" | $Xsed -e "$sed_quote_subst"`
relink_command="$var=\"$var_value\"; export $var; $relink_command"
fi
done
# Quote the link command for shipping.
relink_command="(cd `pwd`; $SHELL $progpath $preserve_args --mode=relink $libtool_args @inst_prefix_dir@)"
relink_command=`$echo "X$relink_command" | $Xsed -e "$sed_quote_subst"`
if test "$hardcode_automatic" = yes ; then
relink_command=
fi
# Only create the output if not a dry run.
if test -z "$run"; then
for installed in no yes; do
if test "$installed" = yes; then
if test -z "$install_libdir"; then
break
fi
output="$output_objdir/$outputname"i
# Replace all uninstalled libtool libraries with the installed ones
newdependency_libs=
for deplib in $dependency_libs; do
case $deplib in
*.la)
name=`$echo "X$deplib" | $Xsed -e 's%^.*/%%'`
eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $deplib`
if test -z "$libdir"; then
$echo "$modename: \`$deplib' is not a valid libtool archive" 1>&2
exit $EXIT_FAILURE
fi
newdependency_libs="$newdependency_libs $libdir/$name"
;;
*) newdependency_libs="$newdependency_libs $deplib" ;;
esac
done
dependency_libs="$newdependency_libs"
newdlfiles=
for lib in $dlfiles; do
name=`$echo "X$lib" | $Xsed -e 's%^.*/%%'`
eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $lib`
if test -z "$libdir"; then
$echo "$modename: \`$lib' is not a valid libtool archive" 1>&2
exit $EXIT_FAILURE
fi
newdlfiles="$newdlfiles $libdir/$name"
done
dlfiles="$newdlfiles"
newdlprefiles=
for lib in $dlprefiles; do
name=`$echo "X$lib" | $Xsed -e 's%^.*/%%'`
eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $lib`
if test -z "$libdir"; then
$echo "$modename: \`$lib' is not a valid libtool archive" 1>&2
exit $EXIT_FAILURE
fi
newdlprefiles="$newdlprefiles $libdir/$name"
done
dlprefiles="$newdlprefiles"
else
newdlfiles=
for lib in $dlfiles; do
case $lib in
[\\/]* | [A-Za-z]:[\\/]*) abs="$lib" ;;
*) abs=`pwd`"/$lib" ;;
esac
newdlfiles="$newdlfiles $abs"
done
dlfiles="$newdlfiles"
newdlprefiles=
for lib in $dlprefiles; do
case $lib in
[\\/]* | [A-Za-z]:[\\/]*) abs="$lib" ;;
*) abs=`pwd`"/$lib" ;;
esac
newdlprefiles="$newdlprefiles $abs"
done
dlprefiles="$newdlprefiles"
fi
$rm $output
# place dlname in correct position for cygwin
tdlname=$dlname
case $host,$output,$installed,$module,$dlname in
*cygwin*,*lai,yes,no,*.dll | *mingw*,*lai,yes,no,*.dll) tdlname=../bin/$dlname ;;
esac
$echo > $output "\
# $outputname - a libtool library file
# Generated by $PROGRAM - GNU $PACKAGE $VERSION$TIMESTAMP
#
# Please DO NOT delete this file!
# It is necessary for linking the library.
# The name that we can dlopen(3).
dlname='$tdlname'
# Names of this library.
library_names='$library_names'
# The name of the static archive.
old_library='$old_library'
# Libraries that this one depends upon.
dependency_libs='$dependency_libs'
# Version information for $libname.
current=$current
age=$age
revision=$revision
# Is this an already installed library?
installed=$installed
# Should we warn about portability when linking against -modules?
shouldnotlink=$module
# Files to dlopen/dlpreopen
dlopen='$dlfiles'
dlpreopen='$dlprefiles'
# Directory that this library needs to be installed in:
libdir='$install_libdir'"
if test "$installed" = no && test "$need_relink" = yes; then
$echo >> $output "\
relink_command=\"$relink_command\""
fi
done
fi
# Do a symbolic link so that the libtool archive can be found in
# LD_LIBRARY_PATH before the program is installed.
$show "(cd $output_objdir && $rm $outputname && $LN_S ../$outputname $outputname)"
$run eval '(cd $output_objdir && $rm $outputname && $LN_S ../$outputname $outputname)' || exit $?
;;
esac
exit $EXIT_SUCCESS
;;
# libtool install mode
install)
modename="$modename: install"
# There may be an optional sh(1) argument at the beginning of
# install_prog (especially on Windows NT).
if test "$nonopt" = "$SHELL" || test "$nonopt" = /bin/sh ||
# Allow the use of GNU shtool's install command.
$echo "X$nonopt" | $Xsed | grep shtool > /dev/null; then
# Aesthetically quote it.
arg=`$echo "X$nonopt" | $Xsed -e "$sed_quote_subst"`
case $arg in
*[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*)
arg="\"$arg\""
;;
esac
install_prog="$arg "
arg="$1"
shift
else
install_prog=
arg="$nonopt"
fi
# The real first argument should be the name of the installation program.
# Aesthetically quote it.
arg=`$echo "X$arg" | $Xsed -e "$sed_quote_subst"`
case $arg in
*[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*)
arg="\"$arg\""
;;
esac
install_prog="$install_prog$arg"
# We need to accept at least all the BSD install flags.
dest=
files=
opts=
prev=
install_type=
isdir=no
stripme=
for arg
do
if test -n "$dest"; then
files="$files $dest"
dest="$arg"
continue
fi
case $arg in
-d) isdir=yes ;;
-f) prev="-f" ;;
-g) prev="-g" ;;
-m) prev="-m" ;;
-o) prev="-o" ;;
-s)
stripme=" -s"
continue
;;
-*) ;;
*)
# If the previous option needed an argument, then skip it.
if test -n "$prev"; then
prev=
else
dest="$arg"
continue
fi
;;
esac
# Aesthetically quote the argument.
arg=`$echo "X$arg" | $Xsed -e "$sed_quote_subst"`
case $arg in
*[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*)
arg="\"$arg\""
;;
esac
install_prog="$install_prog $arg"
done
if test -z "$install_prog"; then
$echo "$modename: you must specify an install program" 1>&2
$echo "$help" 1>&2
exit $EXIT_FAILURE
fi
if test -n "$prev"; then
$echo "$modename: the \`$prev' option requires an argument" 1>&2
$echo "$help" 1>&2
exit $EXIT_FAILURE
fi
if test -z "$files"; then
if test -z "$dest"; then
$echo "$modename: no file or destination specified" 1>&2
else
$echo "$modename: you must specify a destination" 1>&2
fi
$echo "$help" 1>&2
exit $EXIT_FAILURE
fi
# Strip any trailing slash from the destination.
dest=`$echo "X$dest" | $Xsed -e 's%/$%%'`
# Check to see that the destination is a directory.
test -d "$dest" && isdir=yes
if test "$isdir" = yes; then
destdir="$dest"
destname=
else
destdir=`$echo "X$dest" | $Xsed -e 's%/[^/]*$%%'`
test "X$destdir" = "X$dest" && destdir=.
destname=`$echo "X$dest" | $Xsed -e 's%^.*/%%'`
# Not a directory, so check to see that there is only one file specified.
set dummy $files
if test "$#" -gt 2; then
$echo "$modename: \`$dest' is not a directory" 1>&2
$echo "$help" 1>&2
exit $EXIT_FAILURE
fi
fi
case $destdir in
[\\/]* | [A-Za-z]:[\\/]*) ;;
*)
for file in $files; do
case $file in
*.lo) ;;
*)
$echo "$modename: \`$destdir' must be an absolute directory name" 1>&2
$echo "$help" 1>&2
exit $EXIT_FAILURE
;;
esac
done
;;
esac
# This variable tells wrapper scripts just to set variables rather
# than running their programs.
libtool_install_magic="$magic"
staticlibs=
future_libdirs=
current_libdirs=
for file in $files; do
# Do each installation.
case $file in
*.$libext)
# Do the static libraries later.
staticlibs="$staticlibs $file"
;;
*.la)
# Check to see that this really is a libtool archive.
if (${SED} -e '2q' $file | grep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then :
else
$echo "$modename: \`$file' is not a valid libtool archive" 1>&2
$echo "$help" 1>&2
exit $EXIT_FAILURE
fi
library_names=
old_library=
relink_command=
# If there is no directory component, then add one.
case $file in
*/* | *\\*) . $file ;;
*) . ./$file ;;
esac
# Add the libdir to current_libdirs if it is the destination.
if test "X$destdir" = "X$libdir"; then
case "$current_libdirs " in
*" $libdir "*) ;;
*) current_libdirs="$current_libdirs $libdir" ;;
esac
else
# Note the libdir as a future libdir.
case "$future_libdirs " in
*" $libdir "*) ;;
*) future_libdirs="$future_libdirs $libdir" ;;
esac
fi
dir=`$echo "X$file" | $Xsed -e 's%/[^/]*$%%'`/
test "X$dir" = "X$file/" && dir=
dir="$dir$objdir"
if test -n "$relink_command"; then
# Determine the prefix the user has applied to our future dir.
inst_prefix_dir=`$echo "$destdir" | $SED "s%$libdir\$%%"`
# Don't allow the user to place us outside of our expected
# location because this prevents finding dependent libraries that
# are installed to the same prefix.
# At present, this check doesn't affect Windows DLLs that
# are installed into $libdir/../bin (currently, that works fine)
# but it's something to keep an eye on.
if test "$inst_prefix_dir" = "$destdir"; then
$echo "$modename: error: cannot install \`$file' to a directory not ending in $libdir" 1>&2
exit $EXIT_FAILURE
fi
if test -n "$inst_prefix_dir"; then
# Stick the inst_prefix_dir data into the link command.
relink_command=`$echo "$relink_command" | $SED "s%@inst_prefix_dir@%-inst-prefix-dir $inst_prefix_dir%"`
else
relink_command=`$echo "$relink_command" | $SED "s%@inst_prefix_dir@%%"`
fi
$echo "$modename: warning: relinking \`$file'" 1>&2
$show "$relink_command"
if $run eval "$relink_command"; then :
else
$echo "$modename: error: relink \`$file' with the above command before installing it" 1>&2
exit $EXIT_FAILURE
fi
fi
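# Editorial note (sketch): the @inst_prefix_dir@ substitution above supports
# DESTDIR-style staged installs, e.g. `make DESTDIR=/tmp/stage install', by
# relinking against dependent libraries already copied under the staging root.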
# Determine the names of the shared library.
set dummy $library_names
if test -n "$2"; then
realname="$2"
shift
shift
srcname="$realname"
test -n "$relink_command" && srcname="$realname"T
# Install the shared library and build the symlinks.
$show "$install_prog $dir/$srcname $destdir/$realname"
$run eval "$install_prog $dir/$srcname $destdir/$realname" || exit $?
if test -n "$stripme" && test -n "$striplib"; then
$show "$striplib $destdir/$realname"
$run eval "$striplib $destdir/$realname" || exit $?
fi
if test "$#" -gt 0; then
# Delete the old symlinks, and create new ones.
for linkname
do
if test "$linkname" != "$realname"; then
$show "(cd $destdir && $rm $linkname && $LN_S $realname $linkname)"
$run eval "(cd $destdir && $rm $linkname && $LN_S $realname $linkname)"
fi
done
fi
# Do each command in the postinstall commands.
lib="$destdir/$realname"
cmds=$postinstall_cmds
save_ifs="$IFS"; IFS='~'
for cmd in $cmds; do
IFS="$save_ifs"
eval cmd=\"$cmd\"
$show "$cmd"
$run eval "$cmd" || exit $?
done
IFS="$save_ifs"
fi
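# Editorial note: $postinstall_cmds is a `~'-separated command list taken from
# the libtool configuration (an illustrative value: 'chmod 644 $lib~$RANLIB $lib');
# the loop above splits it on IFS='~' and evaluates one command at a time.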
# Install the pseudo-library for informational purposes.
name=`$echo "X$file" | $Xsed -e 's%^.*/%%'`
instname="$dir/$name"i
$show "$install_prog $instname $destdir/$name"
$run eval "$install_prog $instname $destdir/$name" || exit $?
# Maybe install the static library, too.
test -n "$old_library" && staticlibs="$staticlibs $dir/$old_library"
;;
*.lo)
# Install (i.e. copy) a libtool object.
# Figure out destination file name, if it wasn't already specified.
if test -n "$destname"; then
destfile="$destdir/$destname"
else
destfile=`$echo "X$file" | $Xsed -e 's%^.*/%%'`
destfile="$destdir/$destfile"
fi
# Deduce the name of the destination old-style object file.
case $destfile in
*.lo)
staticdest=`$echo "X$destfile" | $Xsed -e "$lo2o"`
;;
*.$objext)
staticdest="$destfile"
destfile=
;;
*)
$echo "$modename: cannot copy a libtool object to \`$destfile'" 1>&2
$echo "$help" 1>&2
exit $EXIT_FAILURE
;;
esac
# Install the libtool object if requested.
if test -n "$destfile"; then
$show "$install_prog $file $destfile"
$run eval "$install_prog $file $destfile" || exit $?
fi
# Install the old object if enabled.
if test "$build_old_libs" = yes; then
# Deduce the name of the old-style object file.
staticobj=`$echo "X$file" | $Xsed -e "$lo2o"`
$show "$install_prog $staticobj $staticdest"
$run eval "$install_prog \$staticobj \$staticdest" || exit $?
fi
exit $EXIT_SUCCESS
;;
*)
# Figure out destination file name, if it wasn't already specified.
if test -n "$destname"; then
destfile="$destdir/$destname"
else
destfile=`$echo "X$file" | $Xsed -e 's%^.*/%%'`
destfile="$destdir/$destfile"
fi
# If the file is missing, and there is a .exe on the end, strip it
# because it is most likely a libtool script we actually want to
# install
stripped_ext=""
case $file in
*.exe)
if test ! -f "$file"; then
file=`$echo $file|${SED} 's,.exe$,,'`
stripped_ext=".exe"
fi
;;
esac
# Do a test to see if this is really a libtool program.
case $host in
*cygwin*|*mingw*)
wrapper=`$echo $file | ${SED} -e 's,.exe$,,'`
;;
*)
wrapper=$file
;;
esac
if (${SED} -e '4q' $wrapper | grep "^# Generated by .*$PACKAGE")>/dev/null 2>&1; then
notinst_deplibs=
relink_command=
# To ensure that "foo" is sourced, and not "foo.exe",
# finesse the cygwin/MSYS system by explicitly sourcing "foo."
# which disallows the automatic-append-.exe behavior.
case $build in
*cygwin* | *mingw*) wrapperdot=${wrapper}. ;;
*) wrapperdot=${wrapper} ;;
esac
# If there is no directory component, then add one.
case $file in
*/* | *\\*) . ${wrapperdot} ;;
*) . ./${wrapperdot} ;;
esac
# Check the variables that should have been set.
if test -z "$notinst_deplibs"; then
$echo "$modename: invalid libtool wrapper script \`$wrapper'" 1>&2
exit $EXIT_FAILURE
fi
finalize=yes
for lib in $notinst_deplibs; do
# Check to see that each library is installed.
libdir=
if test -f "$lib"; then
# If there is no directory component, then add one.
case $lib in
*/* | *\\*) . $lib ;;
*) . ./$lib ;;
esac
fi
libfile="$libdir/"`$echo "X$lib" | $Xsed -e 's%^.*/%%g'` ### testsuite: skip nested quoting test
if test -n "$libdir" && test ! -f "$libfile"; then
$echo "$modename: warning: \`$lib' has not been installed in \`$libdir'" 1>&2
finalize=no
fi
done
relink_command=
# To ensure that "foo" is sourced, and not "foo.exe",
# finesse the cygwin/MSYS system by explicitly sourcing "foo."
# which disallows the automatic-append-.exe behavior.
case $build in
*cygwin* | *mingw*) wrapperdot=${wrapper}. ;;
*) wrapperdot=${wrapper} ;;
esac
# If there is no directory component, then add one.
case $file in
*/* | *\\*) . ${wrapperdot} ;;
*) . ./${wrapperdot} ;;
esac
outputname=
if test "$fast_install" = no && test -n "$relink_command"; then
if test "$finalize" = yes && test -z "$run"; then
tmpdir="/tmp"
test -n "$TMPDIR" && tmpdir="$TMPDIR"
tmpdir="$tmpdir/libtool-$$"
save_umask=`umask`
umask 0077
if $mkdir "$tmpdir"; then
umask $save_umask
else
umask $save_umask
$echo "$modename: error: cannot create temporary directory \`$tmpdir'" 1>&2
continue
fi
file=`$echo "X$file$stripped_ext" | $Xsed -e 's%^.*/%%'`
outputname="$tmpdir/$file"
# Replace the output file specification.
relink_command=`$echo "X$relink_command" | $Xsed -e 's%@OUTPUT@%'"$outputname"'%g'`
$show "$relink_command"
if $run eval "$relink_command"; then :
else
$echo "$modename: error: relink \`$file' with the above command before installing it" 1>&2
${rm}r "$tmpdir"
continue
fi
file="$outputname"
else
$echo "$modename: warning: cannot relink \`$file'" 1>&2
fi
else
# Install the binary that we compiled earlier.
file=`$echo "X$file$stripped_ext" | $Xsed -e "s%\([^/]*\)$%$objdir/\1%"`
fi
fi
# Remove .exe since Cygwin /usr/bin/install will append another
# one anyway.
case $install_prog,$host in
*/usr/bin/install*,*cygwin*)
case $file:$destfile in
*.exe:*.exe)
# this is ok
;;
*.exe:*)
destfile=$destfile.exe
;;
*:*.exe)
destfile=`$echo $destfile | ${SED} -e 's,.exe$,,'`
;;
esac
;;
esac
$show "$install_prog$stripme $file $destfile"
$run eval "$install_prog\$stripme \$file \$destfile" || exit $?
test -n "$outputname" && ${rm}r "$tmpdir"
;;
esac
done
for file in $staticlibs; do
name=`$echo "X$file" | $Xsed -e 's%^.*/%%'`
# Set up the ranlib parameters.
oldlib="$destdir/$name"
$show "$install_prog $file $oldlib"
$run eval "$install_prog \$file \$oldlib" || exit $?
if test -n "$stripme" && test -n "$old_striplib"; then
$show "$old_striplib $oldlib"
$run eval "$old_striplib $oldlib" || exit $?
fi
# Do each command in the postinstall commands.
cmds=$old_postinstall_cmds
save_ifs="$IFS"; IFS='~'
for cmd in $cmds; do
IFS="$save_ifs"
eval cmd=\"$cmd\"
$show "$cmd"
$run eval "$cmd" || exit $?
done
IFS="$save_ifs"
done
if test -n "$future_libdirs"; then
$echo "$modename: warning: remember to run \`$progname --finish$future_libdirs'" 1>&2
fi
if test -n "$current_libdirs"; then
# Maybe just do a dry run.
test -n "$run" && current_libdirs=" -n$current_libdirs"
exec_cmd='$SHELL $progpath $preserve_args --finish$current_libdirs'
else
exit $EXIT_SUCCESS
fi
;;
# libtool finish mode
finish)
modename="$modename: finish"
libdirs="$nonopt"
admincmds=
if test -n "$finish_cmds$finish_eval" && test -n "$libdirs"; then
for dir
do
libdirs="$libdirs $dir"
done
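# Editorial example: `libtool --finish /usr/local/lib' lands here with
# libdirs='/usr/local/lib'; on GNU/Linux the configured finish_cmds typically
# run `ldconfig -n $libdir' for each directory (illustrative, config-dependent).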
for libdir in $libdirs; do
if test -n "$finish_cmds"; then
# Do each command in the finish commands.
cmds=$finish_cmds
save_ifs="$IFS"; IFS='~'
for cmd in $cmds; do
IFS="$save_ifs"
eval cmd=\"$cmd\"
$show "$cmd"
$run eval "$cmd" || admincmds="$admincmds
$cmd"
done
IFS="$save_ifs"
fi
if test -n "$finish_eval"; then
# Do the single finish_eval.
eval cmds=\"$finish_eval\"
$run eval "$cmds" || admincmds="$admincmds
$cmds"
fi
done
fi
# Exit here if they wanted silent mode.
test "$show" = : && exit $EXIT_SUCCESS
$echo "----------------------------------------------------------------------"
$echo "Libraries have been installed in:"
for libdir in $libdirs; do
$echo " $libdir"
done
$echo
$echo "If you ever happen to want to link against installed libraries"
$echo "in a given directory, LIBDIR, you must either use libtool, and"
$echo "specify the full pathname of the library, or use the \`-LLIBDIR'"
$echo "flag during linking and do at least one of the following:"
if test -n "$shlibpath_var"; then
$echo " - add LIBDIR to the \`$shlibpath_var' environment variable"
$echo " during execution"
fi
if test -n "$runpath_var"; then
$echo " - add LIBDIR to the \`$runpath_var' environment variable"
$echo " during linking"
fi
if test -n "$hardcode_libdir_flag_spec"; then
libdir=LIBDIR
eval flag=\"$hardcode_libdir_flag_spec\"
$echo " - use the \`$flag' linker flag"
fi
if test -n "$admincmds"; then
$echo " - have your system administrator run these commands:$admincmds"
fi
if test -f /etc/ld.so.conf; then
$echo " - have your system administrator add LIBDIR to \`/etc/ld.so.conf'"
fi
$echo
$echo "See any operating system documentation about shared libraries for"
$echo "more information, such as the ld(1) and ld.so(8) manual pages."
$echo "----------------------------------------------------------------------"
exit $EXIT_SUCCESS
;;
# libtool execute mode
execute)
modename="$modename: execute"
# The first argument is the command name.
cmd="$nonopt"
if test -z "$cmd"; then
$echo "$modename: you must specify a COMMAND" 1>&2
$echo "$help"
exit $EXIT_FAILURE
fi
# Handle -dlopen flags immediately.
for file in $execute_dlfiles; do
if test ! -f "$file"; then
$echo "$modename: \`$file' is not a file" 1>&2
$echo "$help" 1>&2
exit $EXIT_FAILURE
fi
dir=
case $file in
*.la)
# Check to see that this really is a libtool archive.
if (${SED} -e '2q' $file | grep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then :
else
$echo "$modename: \`$file' is not a valid libtool archive" 1>&2
$echo "$help" 1>&2
exit $EXIT_FAILURE
fi
# Read the libtool library.
dlname=
library_names=
# If there is no directory component, then add one.
case $file in
*/* | *\\*) . $file ;;
*) . ./$file ;;
esac
# Skip this library if it cannot be dlopened.
if test -z "$dlname"; then
# Warn if it was a shared library.
test -n "$library_names" && $echo "$modename: warning: \`$file' was not linked with \`-export-dynamic'"
continue
fi
dir=`$echo "X$file" | $Xsed -e 's%/[^/]*$%%'`
test "X$dir" = "X$file" && dir=.
if test -f "$dir/$objdir/$dlname"; then
dir="$dir/$objdir"
else
$echo "$modename: cannot find \`$dlname' in \`$dir' or \`$dir/$objdir'" 1>&2
exit $EXIT_FAILURE
fi
;;
*.lo)
# Just add the directory containing the .lo file.
dir=`$echo "X$file" | $Xsed -e 's%/[^/]*$%%'`
test "X$dir" = "X$file" && dir=.
;;
*)
$echo "$modename: warning: \`-dlopen' is ignored for non-libtool libraries and objects" 1>&2
continue
;;
esac
# Get the absolute pathname.
absdir=`cd "$dir" && pwd`
test -n "$absdir" && dir="$absdir"
# Now add the directory to shlibpath_var.
if eval "test -z \"\$$shlibpath_var\""; then
eval "$shlibpath_var=\"\$dir\""
else
eval "$shlibpath_var=\"\$dir:\$$shlibpath_var\""
fi
done
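# Editorial example (from common libtool usage, not original text):
#   libtool --mode=execute -dlopen libfoo.la gdb myprog
# prepends libfoo.la's .libs directory to $shlibpath_var before running gdb.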
# This variable tells wrapper scripts just to set shlibpath_var
# rather than running their programs.
libtool_execute_magic="$magic"
# Check if any of the arguments is a wrapper script.
args=
for file
do
case $file in
-*) ;;
*)
# Do a test to see if this is really a libtool program.
if (${SED} -e '4q' $file | grep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then
# If there is no directory component, then add one.
case $file in
*/* | *\\*) . $file ;;
*) . ./$file ;;
esac
# Transform arg to wrapped name.
file="$progdir/$program"
fi
;;
esac
# Quote arguments (to preserve shell metacharacters).
file=`$echo "X$file" | $Xsed -e "$sed_quote_subst"`
args="$args \"$file\""
done
if test -z "$run"; then
if test -n "$shlibpath_var"; then
# Export the shlibpath_var.
eval "export $shlibpath_var"
fi
# Restore saved environment variables
if test "${save_LC_ALL+set}" = set; then
LC_ALL="$save_LC_ALL"; export LC_ALL
fi
if test "${save_LANG+set}" = set; then
LANG="$save_LANG"; export LANG
fi
# Now prepare to actually exec the command.
exec_cmd="\$cmd$args"
else
# Display what would be done.
if test -n "$shlibpath_var"; then
eval "\$echo \"\$shlibpath_var=\$$shlibpath_var\""
$echo "export $shlibpath_var"
fi
$echo "$cmd$args"
exit $EXIT_SUCCESS
fi
;;
# libtool clean and uninstall mode
clean | uninstall)
modename="$modename: $mode"
rm="$nonopt"
files=
rmforce=
exit_status=0
# This variable tells wrapper scripts just to set variables rather
# than running their programs.
libtool_install_magic="$magic"
for arg
do
case $arg in
-f) rm="$rm $arg"; rmforce=yes ;;
-*) rm="$rm $arg" ;;
*) files="$files $arg" ;;
esac
done
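# Editorial example: `libtool --mode=clean rm -f foo.lo' arrives here with
# rm='rm -f', rmforce=yes and files='foo.lo'; the .lo file and the hidden
# PIC/non-PIC objects recorded in it are collected into $rmfiles below.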
if test -z "$rm"; then
$echo "$modename: you must specify an RM program" 1>&2
$echo "$help" 1>&2
exit $EXIT_FAILURE
fi
rmdirs=
origobjdir="$objdir"
for file in $files; do
dir=`$echo "X$file" | $Xsed -e 's%/[^/]*$%%'`
if test "X$dir" = "X$file"; then
dir=.
objdir="$origobjdir"
else
objdir="$dir/$origobjdir"
fi
name=`$echo "X$file" | $Xsed -e 's%^.*/%%'`
test "$mode" = uninstall && objdir="$dir"
# Remember objdir for removal later, being careful to avoid duplicates
if test "$mode" = clean; then
case " $rmdirs " in
*" $objdir "*) ;;
*) rmdirs="$rmdirs $objdir" ;;
esac
fi
# Don't error if the file doesn't exist and rm -f was used.
if (test -L "$file") >/dev/null 2>&1 \
|| (test -h "$file") >/dev/null 2>&1 \
|| test -f "$file"; then
:
elif test -d "$file"; then
exit_status=1
continue
elif test "$rmforce" = yes; then
continue
fi
rmfiles="$file"
case $name in
*.la)
# Possibly a libtool archive, so verify it.
if (${SED} -e '2q' $file | grep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then
. $dir/$name
# Delete the libtool libraries and symlinks.
for n in $library_names; do
rmfiles="$rmfiles $objdir/$n"
done
test -n "$old_library" && rmfiles="$rmfiles $objdir/$old_library"
test "$mode" = clean && rmfiles="$rmfiles $objdir/$name $objdir/${name}i"
if test "$mode" = uninstall; then
if test -n "$library_names"; then
# Do each command in the postuninstall commands.
cmds=$postuninstall_cmds
save_ifs="$IFS"; IFS='~'
for cmd in $cmds; do
IFS="$save_ifs"
eval cmd=\"$cmd\"
$show "$cmd"
$run eval "$cmd"
if test "$?" -ne 0 && test "$rmforce" != yes; then
exit_status=1
fi
done
IFS="$save_ifs"
fi
if test -n "$old_library"; then
# Do each command in the old_postuninstall commands.
cmds=$old_postuninstall_cmds
save_ifs="$IFS"; IFS='~'
for cmd in $cmds; do
IFS="$save_ifs"
eval cmd=\"$cmd\"
$show "$cmd"
$run eval "$cmd"
if test "$?" -ne 0 && test "$rmforce" != yes; then
exit_status=1
fi
done
IFS="$save_ifs"
fi
# FIXME: should reinstall the best remaining shared library.
fi
fi
;;
*.lo)
# Possibly a libtool object, so verify it.
if (${SED} -e '2q' $file | grep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then
# Read the .lo file
. $dir/$name
# Add PIC object to the list of files to remove.
if test -n "$pic_object" \
&& test "$pic_object" != none; then
rmfiles="$rmfiles $dir/$pic_object"
fi
# Add non-PIC object to the list of files to remove.
if test -n "$non_pic_object" \
&& test "$non_pic_object" != none; then
rmfiles="$rmfiles $dir/$non_pic_object"
fi
fi
;;
*)
if test "$mode" = clean ; then
noexename=$name
case $file in
*.exe)
file=`$echo $file|${SED} 's,.exe$,,'`
noexename=`$echo $name|${SED} 's,.exe$,,'`
# $file with .exe has already been added to rmfiles,
# add $file without .exe
rmfiles="$rmfiles $file"
;;
esac
# Do a test to see if this is a libtool program.
if (${SED} -e '4q' $file | grep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then
relink_command=
. $dir/$noexename
# Note: $name still contains .exe if it was in $file originally,
# as does the version of $file that was added into $rmfiles.
rmfiles="$rmfiles $objdir/$name $objdir/${name}S.${objext}"
if test "$fast_install" = yes && test -n "$relink_command"; then
rmfiles="$rmfiles $objdir/lt-$name"
fi
if test "X$noexename" != "X$name" ; then
rmfiles="$rmfiles $objdir/lt-${noexename}.c"
fi
fi
fi
;;
esac
$show "$rm $rmfiles"
$run $rm $rmfiles || exit_status=1
done
objdir="$origobjdir"
# Try to remove the ${objdir}s in the directories where we deleted files
for dir in $rmdirs; do
if test -d "$dir"; then
$show "rmdir $dir"
$run rmdir $dir >/dev/null 2>&1
fi
done
exit $exit_status
;;
"")
$echo "$modename: you must specify a MODE" 1>&2
$echo "$generic_help" 1>&2
exit $EXIT_FAILURE
;;
esac
if test -z "$exec_cmd"; then
$echo "$modename: invalid operation mode \`$mode'" 1>&2
$echo "$generic_help" 1>&2
exit $EXIT_FAILURE
fi
fi # test -z "$show_help"
if test -n "$exec_cmd"; then
eval exec $exec_cmd
exit $EXIT_FAILURE
fi
# We need to display help for each of the modes.
case $mode in
"") $echo \
"Usage: $modename [OPTION]... [MODE-ARG]...
Provide generalized library-building support services.
--config show all configuration variables
--debug enable verbose shell tracing
-n, --dry-run display commands without modifying any files
--features display basic configuration information and exit
--finish same as \`--mode=finish'
--help display this help message and exit
--mode=MODE use operation mode MODE [default=inferred from MODE-ARGS]
--quiet same as \`--silent'
--silent don't print informational messages
--tag=TAG use configuration variables from tag TAG
--version print version information
MODE must be one of the following:
clean remove files from the build directory
compile compile a source file into a libtool object
execute automatically set library path, then run a program
finish complete the installation of libtool libraries
install install libraries or executables
link create a library or an executable
uninstall remove libraries from an installed directory
MODE-ARGS vary depending on the MODE. Try \`$modename --help --mode=MODE' for
a more detailed description of MODE.
Report bugs to <[email protected]>."
exit $EXIT_SUCCESS
;;
clean)
$echo \
"Usage: $modename [OPTION]... --mode=clean RM [RM-OPTION]... FILE...
Remove files from the build directory.
RM is the name of the program to use to delete files associated with each FILE
(typically \`/bin/rm'). RM-OPTIONS are options (such as \`-f') to be passed
to RM.
If FILE is a libtool library, object or program, all the files associated
with it are deleted. Otherwise, only FILE itself is deleted using RM."
;;
compile)
$echo \
"Usage: $modename [OPTION]... --mode=compile COMPILE-COMMAND... SOURCEFILE
Compile a source file into a libtool library object.
This mode accepts the following additional options:
-o OUTPUT-FILE set the output file name to OUTPUT-FILE
-prefer-pic try to build PIC objects only
-prefer-non-pic try to build non-PIC objects only
-static always build a \`.o' file suitable for static linking
COMPILE-COMMAND is a command to be used in creating a \`standard' object file
from the given SOURCEFILE.
The output file name is determined by removing the directory component from
SOURCEFILE, then substituting the C source code suffix \`.c' with the
library object suffix, \`.lo'."
;;
execute)
$echo \
"Usage: $modename [OPTION]... --mode=execute COMMAND [ARGS]...
Automatically set library path, then run a program.
This mode accepts the following additional options:
-dlopen FILE add the directory containing FILE to the library path
This mode sets the library path environment variable according to \`-dlopen'
flags.
If any of the ARGS are libtool executable wrappers, then they are translated
into their corresponding uninstalled binary, and any of their required library
directories are added to the library path.
Then, COMMAND is executed, with ARGS as arguments."
;;
finish)
$echo \
"Usage: $modename [OPTION]... --mode=finish [LIBDIR]...
Complete the installation of libtool libraries.
Each LIBDIR is a directory that contains libtool libraries.
The commands that this mode executes may require superuser privileges. Use
the \`--dry-run' option if you just want to see what would be executed."
;;
install)
$echo \
"Usage: $modename [OPTION]... --mode=install INSTALL-COMMAND...
Install executables or libraries.
INSTALL-COMMAND is the installation command. The first component should be
either the \`install' or \`cp' program.
The rest of the components are interpreted as arguments to that command (only
BSD-compatible install options are recognized)."
;;
link)
$echo \
"Usage: $modename [OPTION]... --mode=link LINK-COMMAND...
Link object files or libraries together to form another library, or to
create an executable program.
LINK-COMMAND is a command using the C compiler that you would use to create
a program from several object files.
The following components of LINK-COMMAND are treated specially:
-all-static do not do any dynamic linking at all
-avoid-version do not add a version suffix if possible
-dlopen FILE \`-dlpreopen' FILE if it cannot be dlopened at runtime
-dlpreopen FILE link in FILE and add its symbols to lt_preloaded_symbols
-export-dynamic allow symbols from OUTPUT-FILE to be resolved with dlsym(3)
-export-symbols SYMFILE
try to export only the symbols listed in SYMFILE
-export-symbols-regex REGEX
try to export only the symbols matching REGEX
-LLIBDIR search LIBDIR for required installed libraries
-lNAME OUTPUT-FILE requires the installed library libNAME
-module build a library that can be dlopened
-no-fast-install disable the fast-install mode
-no-install link a non-installable executable
-no-undefined declare that a library does not refer to external symbols
-o OUTPUT-FILE create OUTPUT-FILE from the specified objects
-objectlist FILE Use a list of object files found in FILE to specify objects
-precious-files-regex REGEX
don't remove output files matching REGEX
-release RELEASE specify package release information
-rpath LIBDIR the created library will eventually be installed in LIBDIR
-R[ ]LIBDIR add LIBDIR to the runtime path of programs and libraries
-static do not do any dynamic linking of libtool libraries
-version-info CURRENT[:REVISION[:AGE]]
specify library version info [each variable defaults to 0]
All other options (arguments beginning with \`-') are ignored.
Every other argument is treated as a filename. Files ending in \`.la' are
treated as uninstalled libtool libraries, other files are standard or library
object files.
If the OUTPUT-FILE ends in \`.la', then a libtool library is created,
only library objects (\`.lo' files) may be specified, and \`-rpath' is
required, except when creating a convenience library.
If OUTPUT-FILE ends in \`.a' or \`.lib', then a standard library is created
using \`ar' and \`ranlib', or on Windows using \`lib'.
If OUTPUT-FILE ends in \`.lo' or \`.${objext}', then a reloadable object file
is created, otherwise an executable program is created."
;;
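# Editorial note on -version-info (sketch, platform-dependent): with
# CURRENT:REVISION:AGE given as 3:0:1, a typical ELF linker produces
# libfoo.so.2.1.0, since the SONAME major version is CURRENT - AGE.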
uninstall)
$echo \
"Usage: $modename [OPTION]... --mode=uninstall RM [RM-OPTION]... FILE...
Remove libraries from an installation directory.
RM is the name of the program to use to delete files associated with each FILE
(typically \`/bin/rm'). RM-OPTIONS are options (such as \`-f') to be passed
to RM.
If FILE is a libtool library, all the files associated with it are deleted.
Otherwise, only FILE itself is deleted using RM."
;;
*)
$echo "$modename: invalid operation mode \`$mode'" 1>&2
$echo "$help" 1>&2
exit $EXIT_FAILURE
;;
esac
$echo
$echo "Try \`$modename --help' for more information about other modes."
exit $EXIT_SUCCESS
# The TAGs below are defined such that we never get into a situation
# in which we disable both kinds of libraries. Given conflicting
# choices, we go for a static library, which is the most portable,
# since we can't tell whether shared libraries were disabled because
# the user asked for that or because the platform doesn't support
# them. This is particularly important on AIX, because we don't
# support having both static and shared libraries enabled at the same
# time on that platform, so we default to a shared-only configuration.
# If a disable-shared tag is given, we'll fall back to a static-only
# configuration. But we'll never go from static-only to shared-only.
# ### BEGIN LIBTOOL TAG CONFIG: disable-shared
build_libtool_libs=no
build_old_libs=yes
# ### END LIBTOOL TAG CONFIG: disable-shared
# ### BEGIN LIBTOOL TAG CONFIG: disable-static
build_old_libs=`case $build_libtool_libs in yes) $echo no;; *) $echo yes;; esac`
# ### END LIBTOOL TAG CONFIG: disable-static
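# Editorial note (sketch): a tag block such as the two above is selected with
#   libtool --tag=disable-static --mode=link $CC -o libfoo.la foo.lo -rpath /usr/lib
# which re-reads the variables between that tag's BEGIN/END markers for the run.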
# Local Variables:
# mode:shell-script
# sh-indentation:2
# End:
|
xbmc/atv2
|
xbmc/cores/paplayer/SIDCodec/libsidplay/unix/ltmain.sh
|
Shell
|
gpl-2.0
| 183,740 |
#!/usr/bin/env bash
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail

KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/../../../../..
source "${KUBE_ROOT}/hack/lib/util.sh"
# Register function to be called on EXIT to remove generated binary.
function cleanup {
rm "${KUBE_ROOT}/vendor/k8s.io/sample-apiserver/artifacts/simple-image/kube-sample-apiserver"
}
trap cleanup EXIT
pushd "${KUBE_ROOT}/vendor/k8s.io/sample-apiserver"
cp -v ../../../../_output/local/bin/linux/amd64/sample-apiserver ./artifacts/simple-image/kube-sample-apiserver
docker build -t kube-sample-apiserver:latest ./artifacts/simple-image
popd
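# Editorial usage sketch (assumed workflow, not in the original script): build
# the binary first from the Kubernetes root, e.g.
#   make WHAT=vendor/k8s.io/sample-apiserver
# then run this script to produce the kube-sample-apiserver:latest image.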
|
liangxia/origin
|
vendor/k8s.io/kubernetes/staging/src/k8s.io/sample-apiserver/hack/build-image.sh
|
Shell
|
apache-2.0
| 1,147 |
#!/bin/bash
# Configuration script for data population
# The server to login to when provisioning users
export OPENSHIFT_SERVER="${OPENSHIFT_SERVER:-https://10.0.2.15:8443}"
# The admin user to populate
export OPENSHIFT_ADMIN_CONFIG="${OPENSHIFT_ADMIN_CONFIG:-./openshift.local.config/master/admin.kubeconfig}"
# The ca cert to present when provisioning users
export OPENSHIFT_CA_CERT="${OPENSHIFT_CA_CERT:-./openshift.local.config/master/ca.crt}"
# The number of users that are in the system
export NUM_USERS="${NUM_USERS:-10}"
# The number of applications to create across all projects
export NUM_APPS="${NUM_APPS:-100}"
# The user name prefix
export USER_NAME_PREFIX=hal-
# The number of projects that are in the system
export NUM_PROJECTS="${NUM_PROJECTS:-3}"
# The project name prefix
export PROJECT_NAME_PREFIX=project-
# How many concurrent CLI requests to make
export MAX_PROCS=4
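# Editorial usage sketch (hypothetical consumer script name): override the
# defaults at invocation time, e.g.
#   NUM_USERS=25 NUM_PROJECTS=5 ./populate.sh
# where populate.sh sources this file to pick up the exported settings.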
|
cdrage/kedge
|
vendor/github.com/openshift/origin/examples/data-population/common.sh
|
Shell
|
apache-2.0
| 898 |
#!/bin/sh
/usr/lib/rpm/perl.req $* | grep -E -v '(Net::LDAP|Crypt::SmbHash|CGI|Unicode::MapUTF8)'
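# Editorial sketch (assumption about the packaging hookup): the spec file
# points rpmbuild's perl dependency generator at this filter, e.g.
#   %define __perl_requires %{_sourcedir}/filter-requires-samba.sh
# so the grep above drops the listed perl modules from auto-generated Requires.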
|
scs/uclinux
|
user/samba/samba-3.0.25a/packaging/RHEL/setup/filter-requires-samba.sh
|
Shell
|
gpl-2.0
| 100 |