code (stringlengths 2 to 1.05M) | repo_name (stringlengths 5 to 110) | path (stringlengths 3 to 922) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 2 to 1.05M)
---|---|---|---|---|---|
#!/bin/sh
# Installs/updates the FPGA server
# Run from the fpga-api dir
# Parameter 1: server path
# Parameter 2: user
if [ "$#" -ne 2 ]; then
echo "Illegal number of parameters (server_path and user)"
exit 1;
fi
FOLDER_EXP='s|dir=\".*\"|dir=\"'${1}'\"|g'
SERVER_EXP='s|user=\".*\"|user=\"'${2}'\"|g'
sed -i -e ${FOLDER_EXP} -e ${SERVER_EXP} ./scripts/fpga-api
# Nodemon
sudo chmod +x ./node_modules/nodemon/bin/nodemon.js
# Install the service
sudo cp -f ./scripts/fpga-api /etc/init.d
sudo chmod +x /etc/init.d/fpga-api
# Set the service as a default on startup
sudo ln -sf /etc/init.d/fpga-api /etc/rc0.d/K01fpga-api
sudo ln -sf /etc/init.d/fpga-api /etc/rc1.d/K01fpga-api
sudo ln -sf /etc/init.d/fpga-api /etc/rc2.d/S03fpga-api
sudo ln -sf /etc/init.d/fpga-api /etc/rc3.d/S03fpga-api
sudo ln -sf /etc/init.d/fpga-api /etc/rc4.d/S03fpga-api
sudo ln -sf /etc/init.d/fpga-api /etc/rc5.d/S03fpga-api
sudo ln -sf /etc/init.d/fpga-api /etc/rc6.d/K01fpga-api
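# Example invocation from the fpga-api directory (server path and user are illustrative):
# sudo ./scripts/install_service.sh /srv/fpga-api fpgauser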
|
EduardoMiravalls/NetWatcher
|
fpga-api/scripts/install_service.sh
|
Shell
|
mit
| 972 |
#!/bin/sh
yum groupinstall -yq "development tools"
|
mitchellh/vagrant-installers
|
packer/vagrant/scripts/centos/install-dev-tools.sh
|
Shell
|
mit
| 52 |
#!/bin/bash
if [[ -z $QUERY_PORT_9411_TCP_ADDR ]]; then
echo "** ERROR: You need to link the query service as query."
exit 1
fi
QUERY_ADDR="${QUERY_PORT_9411_TCP_ADDR}:9411"
SERVICE_NAME="zipkin-web"
DEFAULT_ROOTURL=http://localhost:8080/
ROOTURL="-zipkin.web.rootUrl=${ROOTURL:-DEFAULT_ROOTURL}"
echo "** Starting ${SERVICE_NAME}..."
cd zipkin
bin/sbt 'project zipkin-web' "run -zipkin.web.query.dest=${QUERY_ADDR} $ROOTURL"
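# Illustrative way to provide the QUERY_* variables via legacy Docker links
# (container and image names are assumptions):
# docker run -d --name query <zipkin-query-image>
# docker run --link query:query <zipkin-web-image>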
|
radiomix/zipkin-infrastructure
|
web/run.sh
|
Shell
|
mit
| 433 |
#!/bin/bash
SCRIPT_DIR="$(dirname "$(readlink -f "$0")")"
echo "[*] Building and running vulnerable FineUploader/php-traditional-server v1.1.0."
docker build -t vuln ${SCRIPT_DIR}
echo "[*] Running docker"
docker run -p 8000:80 -d vuln
echo "[*] Test now the following:"
echo "./CVE-2018-9209.sh http://localhost:8000/"
|
lcashdol/Exploits
|
generic_docker/docker/install.sh
|
Shell
|
mit
| 315 |
#!/bin/sh
find . -name \*.html -exec svn propset svn:mime-type text/html {} \;
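# Spot-check one file afterwards (file name is illustrative):
# svn propget svn:mime-type docs/index.html   # expect: text/html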
|
jdurbin/durbinlib
|
fixmime.sh
|
Shell
|
mit
| 79 |
#!/bin/bash
FN="silva128.1MgDb_1.00.0.tar.gz"
URLS=(
"https://bioconductor.org/packages/3.11/data/annotation/src/contrib/silva128.1MgDb_1.00.0.tar.gz"
"https://bioarchive.galaxyproject.org/silva128.1MgDb_1.00.0.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-silva128.1mgdb/bioconductor-silva128.1mgdb_1.00.0_src_all.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-silva128.1mgdb/bioconductor-silva128.1mgdb_1.00.0_src_all.tar.gz"
)
MD5="f73cc038441a0a166c07582eec9a77b4"
# Use a staging area in the conda dir rather than temp dirs, both to avoid
# permission issues as well as to have things downloaded in a predictable
# manner.
STAGING=$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM
mkdir -p $STAGING
TARBALL=$STAGING/$FN
SUCCESS=0
for URL in ${URLS[@]}; do
curl $URL > $TARBALL
[[ $? == 0 ]] || continue
# Platform-specific md5sum checks.
if [[ $(uname -s) == "Linux" ]]; then
if md5sum -c <<<"$MD5 $TARBALL"; then
SUCCESS=1
break
fi
else if [[ $(uname -s) == "Darwin" ]]; then
if [[ $(md5 $TARBALL | cut -f4 -d " ") == "$MD5" ]]; then
SUCCESS=1
break
fi
fi
fi
done
if [[ $SUCCESS != 1 ]]; then
echo "ERROR: post-link.sh was unable to download any of the following URLs with the md5sum $MD5:"
printf '%s\n' "${URLS[@]}"
exit 1
fi
# Install and clean up
R CMD INSTALL --library=$PREFIX/lib/R/library $TARBALL
rm $TARBALL
rmdir $STAGING
|
roryk/recipes
|
recipes/bioconductor-silva128.1mgdb/post-link.sh
|
Shell
|
mit
| 1,445 |
#!/bin/bash
set -e
#######################################
# load environment variables for LRSDAY
source ./../../env.sh
#######################################
# set project-specific variables
prefix="SK1" # The file name prefix for the processing sample. Default = "SK1" for the testing example.
genome="./../07.Supervised_Final_Assembly/$prefix.assembly.final.fa" # The file path of the input genome assembly.
centromere_gff3="./../08.Centromere_Annotation/$prefix.centromere.gff3" # The path of the final centromere annotation gff3 generated by task 08.Centromere_Annotation_for_FinalAssembly, use "" if not existing.
TE_gff3="./../11.TE_Annotation/$prefix.TE.gff3" # The file path of the TE annotation gff3 generated by task 11.TE_Annotation, use "" if not existing.
X_element_gff3="./../12.Core_X_Element_Annotation/$prefix.X_element.gff3" # The file path of the X_element annotation gff3 generated by task 12.CoreX_Annotation, use "" if not existing.
Y_prime_element_gff3="./../13.Y_Prime_Element_Annotation/$prefix.Y_prime_element.gff3" # The file path of the Y_prime_element annotation gff3 generated by task 13.Yprime_Annotation, use "" if not existing.
nuclear_gene_gff3="./../14.Gene_Orthology_Identification/$prefix.nuclear_gene.updated.gff3" # The file path of the nuclear gene annotation gff3 generated by task 14.GeneOrthology_Identification (which is better) or 09.Nuclear_Gene_Annotation, use "" if not existing.
mitochondrial_gene_gff3="./../14.Gene_Orthology_Identification/$prefix.mitochondrial_gene.updated.gff3" # The file path of the mitochondrial gene annotation gff3 generated by task 14.GeneOrthology_Identification (which is better) or 10.Mitochondrial_Gene_Annotation, use "" if not existing.
chrMT_tag="chrMT" # The sequence name for the mitochondrial genome in the input genome assembly, if there are multiple corresponding contigs/scaffolds, use a single ';' to separate them. e.g. "chrMT_1;chrMT_2". Default = "chrMT".
chrMT_genetic_code_table=3 # The NCBI genetic code table (https://www.ncbi.nlm.nih.gov/Taxonomy/Utils/wprintgc.cgi) for the annotated mitochondrial genome. Default = 3 (i.e. Yeast Mitochondria)
debug="no" # use "yes" if prefer to keep intermediate files, otherwise use "no".
######################################
# process the pipeline
cat $centromere_gff3 $TE_gff3 $X_element_gff3 $Y_prime_element_gff3 $nuclear_gene_gff3 $mitochondrial_gene_gff3 >$prefix.concatenated.gff3
perl $LRSDAY_HOME/scripts/sort_gff3.pl -i $prefix.concatenated.gff3 -t $prefix -o $prefix.final.gff3 -r $genome
perl $LRSDAY_HOME/scripts/extract_cds_from_tidy_gff3.pl -r $genome -g $prefix.final.gff3 -o $prefix.final.cds.fa
cp $genome $prefix.assembly.final.fa
echo $chrMT_tag | sed -e "s/;/\n/g" > $prefix.assembly.chrMT.list
$LRSDAY_HOME/scripts/select_fasta_by_list.pl -i $genome -l $prefix.assembly.chrMT.list -m reverse -o $prefix.assembly.nuclear_genome.fa
$LRSDAY_HOME/scripts/select_fasta_by_list.pl -i $genome -l $prefix.assembly.chrMT.list -m normal -o $prefix.assembly.mitochondrial_genome.fa
if [[ ! -z $nuclear_gene_gff3 ]]
then
perl $LRSDAY_HOME/scripts/extract_cds_from_tidy_gff3.pl -r $prefix.assembly.nuclear_genome.fa -g $prefix.final.gff3 -o $prefix.nuclear_genome.cds.fa
perl $LRSDAY_HOME/scripts/cds2protein.pl -i $prefix.nuclear_genome.cds.fa -p $prefix.nuclear_genome -t 1
fi
if [[ ! -z $mitochondrial_gene_gff3 ]]
then
perl $LRSDAY_HOME/scripts/extract_cds_from_tidy_gff3.pl -r $prefix.assembly.mitochondrial_genome.fa -g $prefix.final.gff3 -o $prefix.mitochondrial_genome.cds.fa
perl $LRSDAY_HOME/scripts/cds2protein.pl -i $prefix.mitochondrial_genome.cds.fa -p $prefix.mitochondrial_genome -t $chrMT_genetic_code_table
fi
if [[ ! -z $nuclear_gene_gff3 && ! -z $mitochondrial_gene_gff3 ]]
then
cat $prefix.nuclear_genome.trimmed_cds.fa $prefix.mitochondrial_genome.trimmed_cds.fa > $prefix.final.trimmed_cds.fa
cat $prefix.nuclear_genome.pep.fa $prefix.mitochondrial_genome.pep.fa >$prefix.final.pep.fa
cat $prefix.nuclear_genome.manual_check.list $prefix.mitochondrial_genome.manual_check.list > $prefix.final.manual_check.list
fi
# clean up intermediate files
if [[ $debug == "no" ]]
then
rm $prefix.concatenated.gff3
fi
############################
# checking bash exit status
if [[ $? -eq 0 ]]
then
echo ""
echo "LRSDAY message: This bash script has been successfully processed! :)"
echo ""
echo ""
exit 0
fi
############################
|
yjx1217/LRSDAY
|
pipelines/LRSDAY.15.Annotation_Integration.sh
|
Shell
|
mit
| 4,461 |
#!/bin/bash -x
#
# Generated - do not edit!
#
# Macros
TOP=`pwd`
CND_CONF=default
CND_DISTDIR=dist
TMPDIR=build/${CND_CONF}/${IMAGE_TYPE}/tmp-packaging
TMPDIRNAME=tmp-packaging
OUTPUT_PATH=dist/${CND_CONF}/${IMAGE_TYPE}/Timer-ADC.X.${IMAGE_TYPE}.${OUTPUT_SUFFIX}
OUTPUT_BASENAME=Timer-ADC.X.${IMAGE_TYPE}.${OUTPUT_SUFFIX}
PACKAGE_TOP_DIR=timer-adc.x/
# Functions
function checkReturnCode
{
rc=$?
if [ $rc != 0 ]
then
exit $rc
fi
}
function makeDirectory
# $1 directory path
# $2 permission (optional)
{
mkdir -p "$1"
checkReturnCode
if [ "$2" != "" ]
then
chmod $2 "$1"
checkReturnCode
fi
}
function copyFileToTmpDir
# $1 from-file path
# $2 to-file path
# $3 permission
{
cp "$1" "$2"
checkReturnCode
if [ "$3" != "" ]
then
chmod $3 "$2"
checkReturnCode
fi
}
# Setup
cd "${TOP}"
mkdir -p ${CND_DISTDIR}/${CND_CONF}/package
rm -rf ${TMPDIR}
mkdir -p ${TMPDIR}
# Copy files and create directories and links
cd "${TOP}"
makeDirectory ${TMPDIR}/timer-adc.x/bin
copyFileToTmpDir "${OUTPUT_PATH}" "${TMPDIR}/${PACKAGE_TOP_DIR}bin/${OUTPUT_BASENAME}" 0755
# Generate tar file
cd "${TOP}"
rm -f ${CND_DISTDIR}/${CND_CONF}/package/timer-adc.x.tar
cd ${TMPDIR}
tar -vcf ../../../../${CND_DISTDIR}/${CND_CONF}/package/timer-adc.x.tar *
checkReturnCode
# Cleanup
cd "${TOP}"
rm -rf ${TMPDIR}
|
mzeitler/openstrom
|
firmware/testing - prototype/TimerADC/Timer-ADC.X/nbproject/Package-default.bash
|
Shell
|
mit
| 1,381 |
#! /bin/sh -e
# tup - A file-based build system
#
# Copyright (C) 2009-2017 Mike Shal <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
. ./tup.sh
check_monitor_supported
# Apparently changing a Tupfile in between monitor invocations doesn't work
# properly (it doesn't get re-parsed).
monitor
cat > Tupfile << HERE
: |> echo hey |>
HERE
update
stop_monitor
tup_object_exist . 'echo hey'
cat > Tupfile << HERE
: |> echo yo |>
HERE
sleep 1
touch Tupfile
monitor
update
stop_monitor
tup_object_exist . 'echo yo'
tup_object_no_exist . 'echo hey'
eotup
|
fasterthanlime/tup-fuseless
|
test/t7017-delayed-tupfile.sh
|
Shell
|
gpl-2.0
| 1,166 |
#!/bin/sh
diff -aurN old new -x .svn > patch.patch
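# The patch can later be applied to a copy of "old" (illustrative):
# cd old && patch -p1 < ../patch.patch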
|
oat-sa/tao-core
|
test/integration/updateData/diff.sh
|
Shell
|
gpl-2.0
| 51 |
# $XConsortium: substring.sh /main/3 1995/11/01 16:56:33 rswiston $
function err_exit
{
print -u2 -n "\t"
print -u2 -r $Command: "$@"
let Errors+=1
}
Command=$0
integer Errors=0 j=4
base=/home/dgk/foo//bar
string1=$base/abcabcabc
if [[ ${string1:0} != "$string1" ]]
then err_exit "string1:0"
fi
if [[ ${string1:0:1000} != "$string1" ]]
then err_exit "string1:0"
fi
if [[ ${string1:1} != "${string1#?}" ]]
then err_exit "string1:1"
fi
if [[ ${string1:1:4} != home ]]
then err_exit "string1:1:4"
fi
if [[ ${string1:1:j} != home ]]
then err_exit "string1:1:j"
fi
if [[ ${string1:(j?1:0):j} != home ]]
then err_exit "string1:(j?1:0):j"
fi
if [[ ${string1%*zzz*} != "$string1" ]]
then err_exit "string1%*zzz*"
fi
if [[ ${string1%%*zzz*} != "$string1" ]]
then err_exit "string1%%*zzz*"
fi
if [[ ${string1#*zzz*} != "$string1" ]]
then err_exit "string1#*zzz*"
fi
if [[ ${string1##*zzz*} != "$string1" ]]
then err_exit "string1##*zzz*"
fi
if [[ ${string1%+(abc)} != "$base/abcabc" ]]
then err_exit "string1%+(abc)"
fi
if [[ ${string1%%+(abc)} != "$base/" ]]
then err_exit "string1%%+(abc)"
fi
if [[ ${string1%/*} != "$base" ]]
then err_exit "string1%/*"
fi
if [[ "${string1%/*}" != "$base" ]]
then err_exit '"string1%/*"'
fi
if [[ ${string1%"/*"} != "$string1" ]]
then err_exit 'string1%"/*"'
fi
if [[ ${string1%%/*} != "" ]]
then err_exit "string1%%/*"
fi
if [[ ${string1#*/bar} != /abcabcabc ]]
then err_exit "string1#*bar"
fi
if [[ ${string1##*/bar} != /abcabcabc ]]
then err_exit "string1#*bar"
fi
if [[ "${string1#@(*/bar|*/foo)}" != //bar/abcabcabc ]]
then err_exit "string1#@(*/bar|*/foo)"
fi
if [[ ${string1##@(*/bar|*/foo)} != /abcabcabc ]]
then err_exit "string1##@(*/bar|*/foo)"
fi
if [[ ${string1##*/@(bar|foo)} != /abcabcabc ]]
then err_exit "string1##*/@(bar|foo)"
fi
foo=abc
if [[ ${foo#a[b*} != abc ]]
then err_exit "abc#a[b*} != abc"
fi
if [[ ${foo//[0-9]/bar} != abc ]]
then err_exit '${foo//[0-9]/bar} not expanding correctly'
fi
foo='(abc)'
if [[ ${foo#'('} != 'abc)' ]]
then err_exit "(abc)#( != abc)"
fi
if [[ ${foo%')'} != '(abc' ]]
then err_exit "(abc)%) != (abc"
fi
foo=a123b456c
if [[ ${foo/[0-9]?/""} != a3b456c ]]
then err_exit '${foo/[0-9]?/""} not expanding correctly'
fi
if [[ ${foo//[0-9]/""} != abc ]]
then err_exit '${foo//[0-9]/""} not expanding correctly'
fi
if [[ ${foo/#a/b} != b123b456c ]]
then err_exit '${foo/#a/b} not expanding correctly'
fi
if [[ ${foo/#?/b} != b123b456c ]]
then err_exit '${foo/#?/b} not expanding correctly'
fi
if [[ ${foo/%c/b} != a123b456b ]]
then err_exit '${foo/%c/b} not expanding correctly'
fi
if [[ ${foo/%?/b} != a123b456b ]]
then err_exit '${foo/%?/b} not expanding correctly'
fi
while read -r pattern string expected
do if (( expected ))
then if [[ $string != $pattern ]]
then err_exit "$pattern does not match $string"
fi
if [[ ${string##$pattern} != "" ]]
then err_exit "\${$string##$pattern} not null"
fi
if [ "${string##$pattern}" != '' ]
then err_exit "\"\${$string##$pattern}\" not null"
fi
if [[ ${string/$pattern} != "" ]]
then err_exit "\${$string/$pattern} not null"
fi
else if [[ $string == $pattern ]]
then err_exit "$pattern matches $string"
fi
fi
done <<- \EOF
+(a)*+(a) aabca 1
!(*.o) foo.o 0
!(*.o) foo.c 1
EOF
xx=a/b/c/d/e
yy=${xx#*/}
if [[ $yy != b/c/d/e ]]
then err_exit '${xx#*/} != a/b/c/d/e when xx=a/b/c/d/e'
fi
if [[ ${xx//\//\\} != 'a\b\c\d\e' ]]
then err_exit '${xx//\//\\} not working'
fi
exit $((Errors))
|
sTeeLM/MINIME
|
toolkit/srpm/SOURCES/cde-2.2.4/programs/dtksh/ksh93/src/cmd/ksh93/tests/substring.sh
|
Shell
|
gpl-2.0
| 3,435 |
#! /bin/bash
set -x
set -e
trap 'previous_command=$this_command; this_command=$BASH_COMMAND' DEBUG
trap 'echo FAILED COMMAND: $previous_command' EXIT
#-------------------------------------------------------------------------------------------
# This script will download packages for, configure, build and install a QEMU.
# NOTES: On Debian
# - Install packages : apt-get install g++ make libglib2.0-dev libfdt-dev libpixman-1-dev zlib1g-dev ncurses-dev
#
#-------------------------------------------------------------------------------------------
INSTALL_PATH=/opt/qemu
SRC_PATH=/opt/sources
QEMU_VERSION=qemu-2.6.2
TARGET=i386-softmmu
PARALLEL_MAKE=-j4
export PATH=$INSTALL_PATH/bin:$PATH
mkdir -p $INSTALL_PATH
mkdir -p $SRC_PATH
cd $SRC_PATH
# Download packages
download_qemu () {
echo "Downloading qemu sources from http://wiki.qemu.org/Download"
exit 1
}
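# Illustrative manual download step (exact tarball URL is an assumption; see http://wiki.qemu.org/Download):
# wget -P $SRC_PATH https://download.qemu.org/$QEMU_VERSION.tar.xz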
extract_qemu () {
tar xvfJ $QEMU_VERSION.tar.xz
}
build_qemu () {
echo "### build-qemu #####"
mkdir -p build-qemu
cd build-qemu
../$QEMU_VERSION/configure --enable-debug --disable-kvm --enable-curses --prefix=$INSTALL_PATH --target-list=$TARGET --disable-vnc
make $PARALLEL_MAKE
make install
cd ..
}
## MAIN ##
#download_qemu
extract_qemu
build_qemu
trap - EXIT
echo 'Success!'
|
munix2/munix
|
tools/build-qemu.sh
|
Shell
|
gpl-3.0
| 1,280 |
#!/bin/sh
# Test "rm -i".
# Copyright (C) 1997-2017 Free Software Foundation, Inc.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
. "${srcdir=.}/tests/init.sh"; path_prepend_ ./src
print_ver_ rm
t=t
mkdir -p $t || framework_failure_
echo > $t/a || framework_failure_
test -f $t/a || framework_failure_
echo y > $t/in-y
echo n > $t/in-n
rm -i $t/a < $t/in-n > /dev/null 2>&1 || fail=1
# The file should not have been removed.
test -f $t/a || fail=1
rm -i $t/a < $t/in-y > /dev/null 2>&1 || fail=1
# The file should have been removed this time.
test -f $t/a && fail=1
rm -rf $t
Exit $fail
|
adtools/coreutils
|
tests/rm/i-1.sh
|
Shell
|
gpl-3.0
| 1,181 |
#!/bin/bash
source /etc/profile
# CloudScape Agent
#
# This is a server designed to handle system polling for a managed CloudScape
# host. Typically used to submit periodic reports of system data.
# \\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\ #
# Init Info \\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\ #
#
# Provides: cloudscape-agent
# Default-Start: 3 4 5
# Short-Description: CloudScape Agent
# Description: Service designed to submit polling data to the API server
# \\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\ #
# Chkconfig Info \\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\ #
#
# cloudscape-agent: CloudScape Agent
# chkconfig: 345 97 03
# description: Service designed to submit polling data to the API server
# processname: cloudscape-agent
# pidfile: /opt/cloudscape/run/cloudscape-agent.pid
# Run the agent handler
python /usr/bin/cloudscape-agent "$@"
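# Illustrative registration on CentOS (assumes this file is installed as /etc/init.d/cloudscape-agent):
# chkconfig --add cloudscape-agent
# service cloudscape-agent start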
|
djtaylor/cloudscape
|
_local/etc/init.d/agent.centos.sh
|
Shell
|
gpl-3.0
| 997 |
#!/bin/bash
# Author: Ryan Young
# Description: Builds all binaries in the repo at $1 and places them into the folder $2 of your choosing. Got tired of the cumbersome GUI and
# dragging all compiled binaries into the right location.
#
# $3 is an optional argument indicating whether or not we ought to clean the folder
# $4-$n can be names of specific modules to compile, but if not provided,
# it assumes you want the entire repo compiled.
#
# If you want it to automatically open up the freshly compiled trodes repo after finishing,
# type 'export RUNTRODES=1' before executing this tool. After all make instances stop, your
# freshly compiled trodes opens.
#
# I wrote this because I wanted a one button push experience when making changes to
# Trodes or modules, and seeing the resulting change in action.
function executeCompile()
{
_numberOfJobs=8
# HANDLE THE ARGUMENTS
if [[ $2 == *qmake ]]
then
echo executeCompile: First argument to execute compile wrong
return 1
fi
if [[ $3 == *qmake ]]
then
echo executeCompile: Second argument to execute compile wrong
return 2
fi
_qmake=$1;
# Acquire the binary name
binary_name=$(echo $2 | grep -o '[A-Za-z0-9]*$')
if [ $binary_name = "Source" ]
then
binary_name=Trodes
fi
if [ $binary_name = "exportPHY" ]
then
binary_name=exportphy
fi
# Move into the folder, generate makefile, and compile
cd $2; #echo executeCompile: running $_qmake in $(pwd) with ${binary_name}...; sleep 10;
$_qmake
mkdir $3/MakeLog 2> /dev/null
make -j${_numberOfJobs} 1> $3/MakeLog/${binary_name}.stdout.makelog 2> $3/MakeLog/${binary_name}.stderr.makelog
# Move binary into desired folder
mv $binary_name $3
return 0
}
#################
# Parsing arguments 4-end ...
# should we specifically only compile some subset of programs
#################
gitfolder=$1
targetfolder=$2
clean=$3
targetlist=""
greplist=""
while [[ -n $4 ]]
do
targetlist="$4 ${targetlist}"
greplist="-e $4 ${greplist}"
shift
done
if [[ -n $targetlist ]]
then
echo "Programs targeted for compilation -> " $targetlist
else
echo "All programs targeted for compilation"
fi
sleep 1
################
# Should we clean before building?
################
if [[ -n $clean ]] && [[ $clean == 1 ]]
then
# If user requests we clean directories before compiling, remove all compiled objects, moc, obj, and generated Make files, then we do that here
echo Cleaning before we start ..; sleep 1
find $gitfolder -name moc -type d -exec rm -rf {} \; 2> /dev/null
find $gitfolder -name obj -type d -exec rm -rf {} \; 2> /dev/null
find $gitfolder -name '*.o' -type f -exec rm -rf {} \; 2> /dev/null
find $gitfolder -name Makefile -type f -exec rm -rf {} \;
else
echo Not cleaning out makefile and object files before starting ..; sleep 1
fi
export _qmake="/opt/Qt/5.4/gcc_64/bin/qmake"
currdir=$(pwd)
#################
# Compile Modules
#################
echo
echo ============================================================================
echo Compiling modules!;
ListOfModules="cameraModule FSGui FSGui/FSData RewardGui stateScript stateScript/stateScriptEditor";sleep 2;
echo ListOfModules = $ListOfModules;
for m in $ListOfModules
do
if [[ -z $targetlist ]] || [[ -n $(echo $m | grep $greplist) ]]
then
echo MakeTrodeBinaries: Making $m in "${gitfolder}/Modules/${m}" ...
executeCompile $_qmake "${gitfolder}/Modules/${m}" ${targetfolder} &
fi
done;
#################
# Compile Export Functions
#################
echo
echo ============================================================================
echo Compiling exports!;
ListOfExports="exportdio exportLFP exportPHY exportspikes exporttime exportanalog";sleep 2;
echo ListOfExports = $ListOfExports;
for i in $ListOfExports
do
if [[ -z $targetlist ]] || [[ -n $(echo $i | grep $greplist) ]]
then
echo MakeTrodeBinaries: Making $i in ${gitfolder}/Export/$i ${targetfolder} ...
executeCompile $_qmake "${gitfolder}/Export/${i}" ${targetfolder} &
fi
done;
#################
# Compile Trodes
#################
echo ============================================================================
echo Compiling Trodes main!; sleep 2;
if [[ -z $targetlist ]] || [[ -n $(echo "Trodes" | grep $greplist) ]]
then
echo Making Trodes ...
executeCompile $_qmake "${gitfolder}/Source" ${targetfolder} &
fi
cd $currdir
##################
# Run completed instance if proper env variable set
###################
if [[ -n $RUNTRODES ]] && ( [[ $RUNTRODES == "1" ]] || [[ $RUNTRODES == "true" ]] )
then
makeprocesses=$(ps -A | grep make)
while [[ -n $makeprocesses ]]
do
clear
echo "Waiting for make to complete"
makeprocesses=$(ps -A | grep make)
done
cd ${targetfolder}
./Trodes.sh
cd ${currdir}
fi
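# Example invocation (paths and module names are illustrative):
# export RUNTRODES=1                    # optional: launch Trodes when all builds finish
# ./MakeTrodeBinaries.sh ~/src/Trodes ~/TrodesBinaries 1 cameraModule FSGui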
|
JadhavLab/TrodesDataProcess
|
OtherTools/MakeTrodeBinaries.sh
|
Shell
|
gpl-3.0
| 4,770 |
# shellcheck shell=sh
# Compatible with ranger 1.5.3 through 1.9.*
#
# Change the prompt when you open a shell from inside ranger
#
# Source this file from your shell startup file (.bashrc, .zshrc etc) for it to
# work.
[ -n "$RANGER_LEVEL" ] && PS1="$PS1"'(in ranger) '
|
ranger/ranger
|
examples/shell_subshell_notice.sh
|
Shell
|
gpl-3.0
| 273 |
#!/bin/bash
source ./../setup.bash
strCategory="node"
strName="5-form"
strFile="5-form.js"
wget -q -O /dev/stdout --auth-no-challenge --user="${strUser}" --password="${strPassword}" --post-file="${strFile}" "${strServer}/test?strCategory=${strCategory}&strAssignment=${strName}"
|
Gabib32/webdev
|
node/5-form.bash
|
Shell
|
gpl-3.0
| 281 |
#!/bin/bash
set -e -x
cd $(dirname $0)/../..
make -f Makefile.dune coq coqide-server
|
Matafou/coq
|
dev/ci/azure-build.sh
|
Shell
|
lgpl-2.1
| 88 |
#!/bin/bash
# Needs help!!
# Populate Apache configuration.
# You will need to enable the sites after running this script.
HOME_DIR=/home/ubuntu
TUKEY_MIDDLEWARE=$HOME_DIR/tukey-middleware
#TUKEY_SITE=$HOME_DIR/tukey-site
APACHE_SITES_AVAILABLE=/etc/apache2/sites-available
for site_name in auth glance nova
do
sudo ln -s $TUKEY_MIDDLEWARE/bin/${site_name}-apache.conf $APACHE_SITES_AVAILABLE/${site_name}
done
# This action will be performed by the tukey-portal install.sh
#ln -s $TUKEY_SITE/tukey/openstack-dashboard.conf $APACHE_SITES_AVAILABLE/openstack-dashboard.conf
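# Enabling the sites afterwards (illustrative):
# sudo a2ensite auth glance nova && sudo service apache2 reload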
|
opencloudconsortium/tukey-middleware
|
tools/create_apache.sh
|
Shell
|
apache-2.0
| 581 |
#!/bin/bash
echo "FILES TO MERGE : $*" 1>&2
cat $* | sort -k2 | awk -F\ '{a[$2]+=$1} END {for (i in a) print i" "a[i]}' | sort
echo "" > wrapper.error
|
swift-lang/swift-k
|
tests/aimes_testing/wordcount/merge.sh
|
Shell
|
apache-2.0
| 155 |
#
# Set runtime for kernel.dmesg_restrict
#
sysctl -q -n -w kernel.dmesg_restrict=1
#
# If kernel.dmesg_restrict present in /etc/sysctl.conf, change value to "1"
# else, add "kernel.dmesg_restrict = 1" to /etc/sysctl.conf
#
if grep --silent ^kernel.dmesg_restrict /etc/sysctl.conf ; then
sed -i 's/^kernel.dmesg_restrict.*/kernel.dmesg_restrict = 1/g' /etc/sysctl.conf
else
echo -e "\n# Set kernel.dmesg_restrict to 1 per security requirements" >> /etc/sysctl.conf
echo "kernel.dmesg_restrict = 1" >> /etc/sysctl.conf
fi
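# Verification (illustrative):
# sysctl -n kernel.dmesg_restrict                  # expect 1
# grep '^kernel.dmesg_restrict' /etc/sysctl.conf   # expect "kernel.dmesg_restrict = 1"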
|
rprevette/clip
|
packages/scap-security-guide/scap-security-guide-0.1.25/RHEL/7/input/fixes/bash/sysctl_kernel_dmesg_restrict.sh
|
Shell
|
apache-2.0
| 525 |
#!/bin/bash
sudo dtruss -f -t open "/Applications/Adobe\ Reader.app/Contents/MacOS/AdobeReader" $1 > dtruss-adobereader.log 2>&1
|
openpreserve/scape
|
pc-cc-strender/osx-dtruss/dtruss-AdobeReader.sh
|
Shell
|
apache-2.0
| 130 |
#!/bin/bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
# This command builds and runs a local kubernetes cluster.
# You may need to run this as root to allow kubelet to open docker's socket,
# and to write the test CA in /var/run/kubernetes.
DOCKER_OPTS=${DOCKER_OPTS:-""}
DOCKER=(docker ${DOCKER_OPTS})
DOCKERIZE_KUBELET=${DOCKERIZE_KUBELET:-""}
ALLOW_PRIVILEGED=${ALLOW_PRIVILEGED:-""}
ALLOW_SECURITY_CONTEXT=${ALLOW_SECURITY_CONTEXT:-""}
PSP_ADMISSION=${PSP_ADMISSION:-""}
RUNTIME_CONFIG=${RUNTIME_CONFIG:-""}
KUBELET_AUTHORIZATION_WEBHOOK=${KUBELET_AUTHORIZATION_WEBHOOK:-""}
KUBELET_AUTHENTICATION_WEBHOOK=${KUBELET_AUTHENTICATION_WEBHOOK:-""}
POD_MANIFEST_PATH=${POD_MANIFEST_PATH:-"/var/run/kubernetes/static-pods"}
KUBELET_FLAGS=${KUBELET_FLAGS:-""}
# Name of the network plugin, eg: "kubenet"
NET_PLUGIN=${NET_PLUGIN:-""}
# Place the binaries required by NET_PLUGIN in this directory, eg: "/home/kubernetes/bin".
NET_PLUGIN_DIR=${NET_PLUGIN_DIR:-""}
SERVICE_CLUSTER_IP_RANGE=${SERVICE_CLUSTER_IP_RANGE:-10.0.0.0/24}
FIRST_SERVICE_CLUSTER_IP=${FIRST_SERVICE_CLUSTER_IP:-10.0.0.1}
# if enabled, must set CGROUP_ROOT
CGROUPS_PER_QOS=${CGROUPS_PER_QOS:-true}
# name of the cgroup driver, i.e. cgroupfs or systemd
CGROUP_DRIVER=${CGROUP_DRIVER:-""}
# owner of client certs, default to current user if not specified
USER=${USER:-$(whoami)}
# enables testing eviction scenarios locally.
EVICTION_HARD=${EVICTION_HARD:-"memory.available<100Mi"}
EVICTION_SOFT=${EVICTION_SOFT:-""}
EVICTION_PRESSURE_TRANSITION_PERIOD=${EVICTION_PRESSURE_TRANSITION_PERIOD:-"1m"}
# We disable cluster DNS by default because this script uses docker0 (or whatever
# container bridge docker is currently using) and we don't know the IP of the
# DNS pod to pass in as --cluster-dns. To set this up by hand, set this flag
# and change DNS_SERVER_IP to the appropriate IP.
ENABLE_CLUSTER_DNS=${KUBE_ENABLE_CLUSTER_DNS:-false}
DNS_SERVER_IP=${KUBE_DNS_SERVER_IP:-10.0.0.10}
DNS_DOMAIN=${KUBE_DNS_NAME:-"cluster.local"}
KUBECTL=${KUBECTL:-cluster/kubectl.sh}
WAIT_FOR_URL_API_SERVER=${WAIT_FOR_URL_API_SERVER:-10}
ENABLE_DAEMON=${ENABLE_DAEMON:-false}
HOSTNAME_OVERRIDE=${HOSTNAME_OVERRIDE:-"127.0.0.1"}
CLOUD_PROVIDER=${CLOUD_PROVIDER:-""}
CLOUD_CONFIG=${CLOUD_CONFIG:-""}
FEATURE_GATES=${FEATURE_GATES:-"AllAlpha=true"}
STORAGE_BACKEND=${STORAGE_BACKEND:-"etcd3"}
# enable swagger ui
ENABLE_SWAGGER_UI=${ENABLE_SWAGGER_UI:-false}
# enable audit log
ENABLE_APISERVER_BASIC_AUDIT=${ENABLE_APISERVER_BASIC_AUDIT:-false}
# RBAC Mode options
ALLOW_ANY_TOKEN=${ALLOW_ANY_TOKEN:-false}
ENABLE_RBAC=${ENABLE_RBAC:-false}
KUBECONFIG_TOKEN=${KUBECONFIG_TOKEN:-""}
AUTH_ARGS=${AUTH_ARGS:-""}
# Install a default storage class (enabled by default)
DEFAULT_STORAGE_CLASS=${KUBE_DEFAULT_STORAGE_CLASS:-true}
# start the cache mutation detector by default so that cache mutators will be found
KUBE_CACHE_MUTATION_DETECTOR="${KUBE_CACHE_MUTATION_DETECTOR:-true}"
export KUBE_CACHE_MUTATION_DETECTOR
ADMISSION_CONTROL_CONFIG_FILE=${ADMISSION_CONTROL_CONFIG_FILE:-""}
# START_MODE can be 'all', 'kubeletonly', or 'nokubelet'
START_MODE=${START_MODE:-"all"}
# sanity check for OpenStack provider
if [ "${CLOUD_PROVIDER}" == "openstack" ]; then
if [ "${CLOUD_CONFIG}" == "" ]; then
echo "Missing CLOUD_CONFIG env for OpenStack provider!"
exit 1
fi
if [ ! -f "${CLOUD_CONFIG}" ]; then
echo "Cloud config ${CLOUD_CONFIG} doesn't exist"
exit 1
fi
fi
if [ "$(id -u)" != "0" ]; then
echo "WARNING : This script MAY be run as root for docker socket / iptables functionality; if failures occur, retry as root." 2>&1
fi
# Stop right away if the build fails
set -e
source "${KUBE_ROOT}/hack/lib/init.sh"
function usage {
echo "This script starts a local kube cluster. "
echo "Example 0: hack/local-up-cluster.sh -h (this 'help' usage description)"
echo "Example 1: hack/local-up-cluster.sh -o _output/dockerized/bin/linux/amd64/ (run from docker output)"
echo "Example 2: hack/local-up-cluster.sh -O (auto-guess the bin path for your platform)"
echo "Example 3: hack/local-up-cluster.sh (build a local copy of the source)"
}
# This function guesses where the existing cached binary build is for the `-O`
# flag
function guess_built_binary_path {
local hyperkube_path=$(kube::util::find-binary "hyperkube")
if [[ -z "${hyperkube_path}" ]]; then
return
fi
echo -n "$(dirname "${hyperkube_path}")"
}
### Allow user to supply the source directory.
GO_OUT=${GO_OUT:-}
while getopts "ho:O" OPTION
do
case $OPTION in
o)
echo "skipping build"
GO_OUT="$OPTARG"
echo "using source $GO_OUT"
;;
O)
GO_OUT=$(guess_built_binary_path)
if [ "$GO_OUT" == "" ]; then
echo "Could not guess the correct output directory to use."
exit 1
fi
;;
h)
usage
exit
;;
?)
usage
exit
;;
esac
done
if [ "x$GO_OUT" == "x" ]; then
make -C "${KUBE_ROOT}" WHAT="cmd/kubectl cmd/hyperkube"
else
echo "skipped the build."
fi
function test_rkt {
if [[ -n "${RKT_PATH}" ]]; then
${RKT_PATH} list 2> /dev/null 1> /dev/null
if [ "$?" != "0" ]; then
echo "Failed to successfully run 'rkt list', please verify that ${RKT_PATH} is the path of rkt binary."
exit 1
fi
else
rkt list 2> /dev/null 1> /dev/null
if [ "$?" != "0" ]; then
echo "Failed to successfully run 'rkt list', please verify that rkt is in \$PATH."
exit 1
fi
fi
}
# Shut down anyway if there's an error.
set +e
API_PORT=${API_PORT:-8080}
API_SECURE_PORT=${API_SECURE_PORT:-6443}
API_HOST=${API_HOST:-localhost}
API_HOST_IP=${API_HOST_IP:-"127.0.0.1"}
API_BIND_ADDR=${API_BIND_ADDR:-"0.0.0.0"}
KUBELET_HOST=${KUBELET_HOST:-"127.0.0.1"}
# By default only allow CORS for requests on localhost
API_CORS_ALLOWED_ORIGINS=${API_CORS_ALLOWED_ORIGINS:-/127.0.0.1(:[0-9]+)?$,/localhost(:[0-9]+)?$}
KUBELET_PORT=${KUBELET_PORT:-10250}
LOG_LEVEL=${LOG_LEVEL:-3}
LOG_DIR=${LOG_DIR:-"/tmp"}
CONTAINER_RUNTIME=${CONTAINER_RUNTIME:-"docker"}
CONTAINER_RUNTIME_ENDPOINT=${CONTAINER_RUNTIME_ENDPOINT:-""}
IMAGE_SERVICE_ENDPOINT=${IMAGE_SERVICE_ENDPOINT:-""}
ENABLE_CRI=${ENABLE_CRI:-"true"}
RKT_PATH=${RKT_PATH:-""}
RKT_STAGE1_IMAGE=${RKT_STAGE1_IMAGE:-""}
CHAOS_CHANCE=${CHAOS_CHANCE:-0.0}
CPU_CFS_QUOTA=${CPU_CFS_QUOTA:-true}
ENABLE_HOSTPATH_PROVISIONER=${ENABLE_HOSTPATH_PROVISIONER:-"false"}
CLAIM_BINDER_SYNC_PERIOD=${CLAIM_BINDER_SYNC_PERIOD:-"15s"} # current k8s default
ENABLE_CONTROLLER_ATTACH_DETACH=${ENABLE_CONTROLLER_ATTACH_DETACH:-"true"} # current default
# This is the default dir and filename where the apiserver will generate a self-signed cert
# which should be able to be used as the CA to verify itself
CERT_DIR=${CERT_DIR:-"/var/run/kubernetes"}
ROOT_CA_FILE=${CERT_DIR}/server-ca.crt
# name of the cgroup driver, i.e. cgroupfs or systemd
if [[ ${CONTAINER_RUNTIME} == "docker" ]]; then
# default cgroup driver to match what is reported by docker to simplify local development
if [[ -z ${CGROUP_DRIVER} ]]; then
# match driver with docker runtime reported value (they must match)
CGROUP_DRIVER=$(docker info | grep "Cgroup Driver:" | cut -f3- -d' ')
echo "Kubelet cgroup driver defaulted to use: ${CGROUP_DRIVER}"
fi
fi
# Ensure CERT_DIR is created for auto-generated crt/key and kubeconfig
mkdir -p "${CERT_DIR}" &>/dev/null || sudo mkdir -p "${CERT_DIR}"
CONTROLPLANE_SUDO=$(test -w "${CERT_DIR}" || echo "sudo -E")
function test_apiserver_off {
# For the common local scenario, fail fast if server is already running.
# this can happen if you run local-up-cluster.sh twice and kill etcd in between.
if [[ "${API_PORT}" -gt "0" ]]; then
curl --silent -g $API_HOST:$API_PORT
if [ ! $? -eq 0 ]; then
echo "API SERVER insecure port is free, proceeding..."
else
echo "ERROR starting API SERVER, exiting. Some process on $API_HOST is serving already on $API_PORT"
exit 1
fi
fi
curl --silent -k -g $API_HOST:$API_SECURE_PORT
if [ ! $? -eq 0 ]; then
echo "API SERVER secure port is free, proceeding..."
else
echo "ERROR starting API SERVER, exiting. Some process on $API_HOST is serving already on $API_SECURE_PORT"
exit 1
fi
}
function detect_binary {
# Detect the OS name/arch so that we can find our binary
case "$(uname -s)" in
Darwin)
host_os=darwin
;;
Linux)
host_os=linux
;;
*)
echo "Unsupported host OS. Must be Linux or Mac OS X." >&2
exit 1
;;
esac
case "$(uname -m)" in
x86_64*)
host_arch=amd64
;;
i?86_64*)
host_arch=amd64
;;
amd64*)
host_arch=amd64
;;
aarch64*)
host_arch=arm64
;;
arm64*)
host_arch=arm64
;;
arm*)
host_arch=arm
;;
i?86*)
host_arch=x86
;;
s390x*)
host_arch=s390x
;;
ppc64le*)
host_arch=ppc64le
;;
*)
echo "Unsupported host arch. Must be x86_64, 386, arm, arm64, s390x or ppc64le." >&2
exit 1
;;
esac
GO_OUT="${KUBE_ROOT}/_output/local/bin/${host_os}/${host_arch}"
}
cleanup_dockerized_kubelet()
{
if [[ -e $KUBELET_CIDFILE ]]; then
docker kill $(<$KUBELET_CIDFILE) > /dev/null
rm -f $KUBELET_CIDFILE
fi
}
cleanup()
{
echo "Cleaning up..."
# delete running images
# if [[ "${ENABLE_CLUSTER_DNS}" = true ]]; then
# Still need to figure why this commands throw an error: Error from server: client: etcd cluster is unavailable or misconfigured
# ${KUBECTL} --namespace=kube-system delete service kube-dns
# And this one hang forever:
# ${KUBECTL} --namespace=kube-system delete rc kube-dns-v10
# fi
# Check if the API server is still running
[[ -n "${APISERVER_PID-}" ]] && APISERVER_PIDS=$(pgrep -P ${APISERVER_PID} ; ps -o pid= -p ${APISERVER_PID})
[[ -n "${APISERVER_PIDS-}" ]] && sudo kill ${APISERVER_PIDS}
# Check if the controller-manager is still running
[[ -n "${CTLRMGR_PID-}" ]] && CTLRMGR_PIDS=$(pgrep -P ${CTLRMGR_PID} ; ps -o pid= -p ${CTLRMGR_PID})
[[ -n "${CTLRMGR_PIDS-}" ]] && sudo kill ${CTLRMGR_PIDS}
if [[ -n "$DOCKERIZE_KUBELET" ]]; then
cleanup_dockerized_kubelet
else
# Check if the kubelet is still running
[[ -n "${KUBELET_PID-}" ]] && KUBELET_PIDS=$(pgrep -P ${KUBELET_PID} ; ps -o pid= -p ${KUBELET_PID})
[[ -n "${KUBELET_PIDS-}" ]] && sudo kill ${KUBELET_PIDS}
fi
# Check if the proxy is still running
[[ -n "${PROXY_PID-}" ]] && PROXY_PIDS=$(pgrep -P ${PROXY_PID} ; ps -o pid= -p ${PROXY_PID})
[[ -n "${PROXY_PIDS-}" ]] && sudo kill ${PROXY_PIDS}
# Check if the scheduler is still running
[[ -n "${SCHEDULER_PID-}" ]] && SCHEDULER_PIDS=$(pgrep -P ${SCHEDULER_PID} ; ps -o pid= -p ${SCHEDULER_PID})
[[ -n "${SCHEDULER_PIDS-}" ]] && sudo kill ${SCHEDULER_PIDS}
# Check if the etcd is still running
[[ -n "${ETCD_PID-}" ]] && kube::etcd::stop
[[ -n "${ETCD_DIR-}" ]] && kube::etcd::clean_etcd_dir
exit 0
}
function warning {
message=$1
echo $(tput bold)$(tput setaf 1)
echo "WARNING: ${message}"
echo $(tput sgr0)
}
function start_etcd {
echo "Starting etcd"
kube::etcd::start
}
function set_service_accounts {
SERVICE_ACCOUNT_LOOKUP=${SERVICE_ACCOUNT_LOOKUP:-false}
SERVICE_ACCOUNT_KEY=${SERVICE_ACCOUNT_KEY:-/tmp/kube-serviceaccount.key}
# Generate ServiceAccount key if needed
if [[ ! -f "${SERVICE_ACCOUNT_KEY}" ]]; then
mkdir -p "$(dirname ${SERVICE_ACCOUNT_KEY})"
openssl genrsa -out "${SERVICE_ACCOUNT_KEY}" 2048 2>/dev/null
fi
}
function start_apiserver {
security_admission=""
if [[ -z "${ALLOW_SECURITY_CONTEXT}" ]]; then
security_admission=",SecurityContextDeny"
fi
if [[ -n "${PSP_ADMISSION}" ]]; then
security_admission=",PodSecurityPolicy"
fi
# Admission Controllers to invoke prior to persisting objects in cluster
ADMISSION_CONTROL=NamespaceLifecycle,LimitRanger,ServiceAccount${security_admission},ResourceQuota,DefaultStorageClass,DefaultTolerationSeconds
# This is the default dir and filename where the apiserver will generate a self-signed cert
# which should be able to be used as the CA to verify itself
audit_arg=""
APISERVER_BASIC_AUDIT_LOG=""
if [[ "${ENABLE_APISERVER_BASIC_AUDIT:-}" = true ]]; then
# We currently only support enabling with a fixed path and with built-in log
# rotation "disabled" (large value) so it behaves like kube-apiserver.log.
# External log rotation should be set up the same as for kube-apiserver.log.
APISERVER_BASIC_AUDIT_LOG=/tmp/kube-apiserver-audit.log
audit_arg=" --audit-log-path=${APISERVER_BASIC_AUDIT_LOG}"
audit_arg+=" --audit-log-maxage=0"
audit_arg+=" --audit-log-maxbackup=0"
# Lumberjack doesn't offer any way to disable size-based rotation. It also
# has an in-memory counter that doesn't notice if you truncate the file.
# 2000000000 (in MiB) is a large number that fits in 31 bits. If the log
# grows at 10MiB/s (~30K QPS), it will rotate after ~6 years if apiserver
# never restarts. Please manually restart apiserver before this time.
audit_arg+=" --audit-log-maxsize=2000000000"
fi
swagger_arg=""
if [[ "${ENABLE_SWAGGER_UI}" = true ]]; then
swagger_arg="--enable-swagger-ui=true "
fi
anytoken_arg=""
if [[ "${ALLOW_ANY_TOKEN}" = true ]]; then
anytoken_arg="--insecure-allow-any-token "
KUBECONFIG_TOKEN="${KUBECONFIG_TOKEN:-system:admin/system:masters}"
fi
authorizer_arg=""
if [[ "${ENABLE_RBAC}" = true ]]; then
authorizer_arg="--authorization-mode=RBAC "
fi
priv_arg=""
if [[ -n "${ALLOW_PRIVILEGED}" ]]; then
priv_arg="--allow-privileged "
fi
runtime_config=""
if [[ -n "${RUNTIME_CONFIG}" ]]; then
runtime_config="--runtime-config=${RUNTIME_CONFIG}"
fi
# Let the API server pick a default address when API_HOST_IP
# is set to 127.0.0.1
advertise_address=""
if [[ "${API_HOST_IP}" != "127.0.0.1" ]]; then
advertise_address="--advertise_address=${API_HOST_IP}"
fi
# Create CA signers
kube::util::create_signing_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" server '"server auth"'
kube::util::create_signing_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" client '"client auth"'
# Create auth proxy client ca
kube::util::create_signing_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" request-header '"client auth"'
# serving cert for kube-apiserver
kube::util::create_serving_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "server-ca" kube-apiserver kubernetes.default kubernetes.default.svc "localhost" ${API_HOST_IP} ${API_HOST} ${FIRST_SERVICE_CLUSTER_IP}
# Create client certs signed with client-ca, given id, given CN and a number of groups
kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" 'client-ca' kubelet system:node:${HOSTNAME_OVERRIDE} system:nodes
kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" 'client-ca' kube-proxy system:kube-proxy system:nodes
kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" 'client-ca' controller system:kube-controller-manager
kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" 'client-ca' scheduler system:kube-scheduler
kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" 'client-ca' admin system:admin system:masters
# Create matching certificates for kube-aggregator
kube::util::create_serving_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "server-ca" kube-aggregator api.kube-public.svc "localhost" ${API_HOST_IP}
kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" request-header-ca auth-proxy system:auth-proxy
# TODO remove masters and add rolebinding
kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" 'client-ca' kube-aggregator system:kube-aggregator system:masters
kube::util::write_client_kubeconfig "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "${ROOT_CA_FILE}" "${API_HOST}" "${API_SECURE_PORT}" kube-aggregator
APISERVER_LOG=${LOG_DIR}/kube-apiserver.log
${CONTROLPLANE_SUDO} "${GO_OUT}/hyperkube" apiserver ${swagger_arg} ${audit_arg} ${anytoken_arg} ${authorizer_arg} ${priv_arg} ${runtime_config}\
${advertise_address} \
--v=${LOG_LEVEL} \
--cert-dir="${CERT_DIR}" \
--client-ca-file="${CERT_DIR}/client-ca.crt" \
--service-account-key-file="${SERVICE_ACCOUNT_KEY}" \
--service-account-lookup="${SERVICE_ACCOUNT_LOOKUP}" \
--admission-control="${ADMISSION_CONTROL}" \
--admission-control-config-file="${ADMISSION_CONTROL_CONFIG_FILE}" \
--bind-address="${API_BIND_ADDR}" \
--secure-port="${API_SECURE_PORT}" \
--tls-cert-file="${CERT_DIR}/serving-kube-apiserver.crt" \
--tls-private-key-file="${CERT_DIR}/serving-kube-apiserver.key" \
--tls-ca-file="${CERT_DIR}/server-ca.crt" \
--insecure-bind-address="${API_HOST_IP}" \
--insecure-port="${API_PORT}" \
--storage-backend=${STORAGE_BACKEND} \
--etcd-servers="http://${ETCD_HOST}:${ETCD_PORT}" \
--service-cluster-ip-range="${SERVICE_CLUSTER_IP_RANGE}" \
--feature-gates="${FEATURE_GATES}" \
--cloud-provider="${CLOUD_PROVIDER}" \
--cloud-config="${CLOUD_CONFIG}" \
--requestheader-username-headers=X-Remote-User \
--requestheader-group-headers=X-Remote-Group \
--requestheader-extra-headers-prefix=X-Remote-Extra- \
--requestheader-client-ca-file="${CERT_DIR}/request-header-ca.crt" \
--requestheader-allowed-names=system:auth-proxy \
--proxy-client-cert-file="${CERT_DIR}/client-auth-proxy.crt" \
--proxy-client-key-file="${CERT_DIR}/client-auth-proxy.key" \
--cors-allowed-origins="${API_CORS_ALLOWED_ORIGINS}" >"${APISERVER_LOG}" 2>&1 &
APISERVER_PID=$!
# Wait for kube-apiserver to come up before launching the rest of the components.
echo "Waiting for apiserver to come up"
# this uses the API port because if you don't have any authenticator, you can't seem to use the secure port at all.
# this matches what happened with the combination in 1.4.
# TODO change this conditionally based on whether API_PORT is on or off
kube::util::wait_for_url "http://${API_HOST_IP}:${API_SECURE_PORT}/healthz" "apiserver: " 1 ${WAIT_FOR_URL_API_SERVER} \
|| { echo "check apiserver logs: ${APISERVER_LOG}" ; exit 1 ; }
# Create kubeconfigs for all components, using client certs
kube::util::write_client_kubeconfig "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "${ROOT_CA_FILE}" "${API_HOST}" "${API_SECURE_PORT}" admin
${CONTROLPLANE_SUDO} chown "${USER}" "${CERT_DIR}/client-admin.key" # make readable for kubectl
kube::util::write_client_kubeconfig "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "${ROOT_CA_FILE}" "${API_HOST}" "${API_SECURE_PORT}" kubelet
kube::util::write_client_kubeconfig "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "${ROOT_CA_FILE}" "${API_HOST}" "${API_SECURE_PORT}" kube-proxy
kube::util::write_client_kubeconfig "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "${ROOT_CA_FILE}" "${API_HOST}" "${API_SECURE_PORT}" controller
kube::util::write_client_kubeconfig "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "${ROOT_CA_FILE}" "${API_HOST}" "${API_SECURE_PORT}" scheduler
if [[ -z "${AUTH_ARGS}" ]]; then
if [[ "${ALLOW_ANY_TOKEN}" = true ]]; then
# use token authentication
if [[ -n "${KUBECONFIG_TOKEN}" ]]; then
AUTH_ARGS="--token=${KUBECONFIG_TOKEN}"
else
AUTH_ARGS="--token=system:admin/system:masters"
fi
else
# default to the admin client cert/key
AUTH_ARGS="--client-key=${CERT_DIR}/client-admin.key --client-certificate=${CERT_DIR}/client-admin.crt"
fi
fi
${CONTROLPLANE_SUDO} cp "${CERT_DIR}/admin.kubeconfig" "${CERT_DIR}/admin-kube-aggregator.kubeconfig"
${CONTROLPLANE_SUDO} chown $(whoami) "${CERT_DIR}/admin-kube-aggregator.kubeconfig"
${KUBECTL} config set-cluster local-up-cluster --kubeconfig="${CERT_DIR}/admin-kube-aggregator.kubeconfig" --server="https://${API_HOST_IP}:31090"
echo "use 'kubectl --kubeconfig=${CERT_DIR}/admin-kube-aggregator.kubeconfig' to use the aggregated API server"
}
function start_controller_manager {
node_cidr_args=""
if [[ "${NET_PLUGIN}" == "kubenet" ]]; then
node_cidr_args="--allocate-node-cidrs=true --cluster-cidr=10.1.0.0/16 "
fi
CTLRMGR_LOG=${LOG_DIR}/kube-controller-manager.log
${CONTROLPLANE_SUDO} "${GO_OUT}/hyperkube" controller-manager \
--v=${LOG_LEVEL} \
--service-account-private-key-file="${SERVICE_ACCOUNT_KEY}" \
--root-ca-file="${ROOT_CA_FILE}" \
--enable-hostpath-provisioner="${ENABLE_HOSTPATH_PROVISIONER}" \
${node_cidr_args} \
--pvclaimbinder-sync-period="${CLAIM_BINDER_SYNC_PERIOD}" \
--feature-gates="${FEATURE_GATES}" \
--cloud-provider="${CLOUD_PROVIDER}" \
--cloud-config="${CLOUD_CONFIG}" \
--kubeconfig "$CERT_DIR"/controller.kubeconfig \
--use-service-account-credentials \
--master="https://${API_HOST}:${API_SECURE_PORT}" >"${CTLRMGR_LOG}" 2>&1 &
CTLRMGR_PID=$!
}
function start_kubelet {
KUBELET_LOG=${LOG_DIR}/kubelet.log
mkdir -p ${POD_MANIFEST_PATH} || true
priv_arg=""
if [[ -n "${ALLOW_PRIVILEGED}" ]]; then
priv_arg="--allow-privileged "
fi
mkdir -p /var/lib/kubelet
if [[ -z "${DOCKERIZE_KUBELET}" ]]; then
# Enable dns
if [[ "${ENABLE_CLUSTER_DNS}" = true ]]; then
dns_args="--cluster-dns=${DNS_SERVER_IP} --cluster-domain=${DNS_DOMAIN}"
else
# To start a private DNS server set ENABLE_CLUSTER_DNS and
# DNS_SERVER_IP/DOMAIN. This will at least provide a working
# DNS server for real world hostnames.
dns_args="--cluster-dns=8.8.8.8"
fi
net_plugin_args=""
if [[ -n "${NET_PLUGIN}" ]]; then
net_plugin_args="--network-plugin=${NET_PLUGIN}"
fi
auth_args=""
if [[ -n "${KUBELET_AUTHORIZATION_WEBHOOK:-}" ]]; then
auth_args="${auth_args} --authorization-mode=Webhook"
fi
if [[ -n "${KUBELET_AUTHENTICATION_WEBHOOK:-}" ]]; then
auth_args="${auth_args} --authentication-token-webhook"
fi
if [[ -n "${CLIENT_CA_FILE:-}" ]]; then
auth_args="${auth_args} --client-ca-file=${CLIENT_CA_FILE}"
fi
net_plugin_dir_args=""
if [[ -n "${NET_PLUGIN_DIR}" ]]; then
net_plugin_dir_args="--network-plugin-dir=${NET_PLUGIN_DIR}"
fi
container_runtime_endpoint_args=""
if [[ -n "${CONTAINER_RUNTIME_ENDPOINT}" ]]; then
container_runtime_endpoint_args="--container-runtime-endpoint=${CONTAINER_RUNTIME_ENDPOINT}"
fi
image_service_endpoint_args=""
if [[ -n "${IMAGE_SERVICE_ENDPOINT}" ]]; then
image_service_endpoint_args="--image-service-endpoint=${IMAGE_SERVICE_ENDPOINT}"
fi
sudo -E "${GO_OUT}/hyperkube" kubelet ${priv_arg}\
--enable-cri="${ENABLE_CRI}" \
--v=${LOG_LEVEL} \
--chaos-chance="${CHAOS_CHANCE}" \
--container-runtime="${CONTAINER_RUNTIME}" \
--rkt-path="${RKT_PATH}" \
--rkt-stage1-image="${RKT_STAGE1_IMAGE}" \
--hostname-override="${HOSTNAME_OVERRIDE}" \
--cloud-provider="${CLOUD_PROVIDER}" \
--cloud-config="${CLOUD_CONFIG}" \
--address="${KUBELET_HOST}" \
--require-kubeconfig \
--kubeconfig "$CERT_DIR"/kubelet.kubeconfig \
--feature-gates="${FEATURE_GATES}" \
--cpu-cfs-quota=${CPU_CFS_QUOTA} \
--enable-controller-attach-detach="${ENABLE_CONTROLLER_ATTACH_DETACH}" \
--cgroups-per-qos=${CGROUPS_PER_QOS} \
--cgroup-driver=${CGROUP_DRIVER} \
--keep-terminated-pod-volumes=true \
--eviction-hard=${EVICTION_HARD} \
--eviction-soft=${EVICTION_SOFT} \
--eviction-pressure-transition-period=${EVICTION_PRESSURE_TRANSITION_PERIOD} \
--pod-manifest-path="${POD_MANIFEST_PATH}" \
${auth_args} \
${dns_args} \
${net_plugin_dir_args} \
${net_plugin_args} \
${container_runtime_endpoint_args} \
${image_service_endpoint_args} \
--port="$KUBELET_PORT" \
${KUBELET_FLAGS} >"${KUBELET_LOG}" 2>&1 &
KUBELET_PID=$!
# Quick check that kubelet is running.
if ps -p $KUBELET_PID > /dev/null ; then
echo "kubelet ( $KUBELET_PID ) is running."
else
cat ${KUBELET_LOG} ; exit 1
fi
else
# Docker won't run a container with a cidfile (container id file)
# unless that file does not already exist; clean up an existing
# dockerized kubelet that might be running.
cleanup_dockerized_kubelet
cred_bind=""
# path to cloud credentials.
cloud_cred=""
if [ "${CLOUD_PROVIDER}" == "aws" ]; then
cloud_cred="${HOME}/.aws/credentials"
fi
if [ "${CLOUD_PROVIDER}" == "gce" ]; then
cloud_cred="${HOME}/.config/gcloud"
fi
if [ "${CLOUD_PROVIDER}" == "openstack" ]; then
cloud_cred="${CLOUD_CONFIG}"
fi
if [[ -n "${cloud_cred}" ]]; then
cred_bind="--volume=${cloud_cred}:${cloud_cred}:ro"
fi
docker run \
--volume=/:/rootfs:ro \
--volume=/var/run:/var/run:rw \
--volume=/sys:/sys:ro \
--volume=/var/lib/docker/:/var/lib/docker:ro \
--volume=/var/lib/kubelet/:/var/lib/kubelet:rw \
--volume=/dev:/dev \
${cred_bind} \
--net=host \
--privileged=true \
-i \
--cidfile=$KUBELET_CIDFILE \
gcr.io/google_containers/kubelet \
/kubelet --v=${LOG_LEVEL} --containerized ${priv_arg}--chaos-chance="${CHAOS_CHANCE}" --pod-manifest-path="${POD_MANIFEST_PATH}" --hostname-override="${HOSTNAME_OVERRIDE}" --cloud-provider="${CLOUD_PROVIDER}" --cloud-config="${CLOUD_CONFIG}" --address="127.0.0.1" --require-kubeconfig --kubeconfig "$CERT_DIR"/kubelet.kubeconfig --api-servers="https://${API_HOST}:${API_SECURE_PORT}" --port="$KUBELET_PORT" --enable-controller-attach-detach="${ENABLE_CONTROLLER_ATTACH_DETACH}" &> $KUBELET_LOG &
fi
}
function start_kubeproxy {
PROXY_LOG=${LOG_DIR}/kube-proxy.log
sudo "${GO_OUT}/hyperkube" proxy \
--v=${LOG_LEVEL} \
--hostname-override="${HOSTNAME_OVERRIDE}" \
--feature-gates="${FEATURE_GATES}" \
--kubeconfig "$CERT_DIR"/kube-proxy.kubeconfig \
--master="https://${API_HOST}:${API_SECURE_PORT}" >"${PROXY_LOG}" 2>&1 &
PROXY_PID=$!
SCHEDULER_LOG=${LOG_DIR}/kube-scheduler.log
${CONTROLPLANE_SUDO} "${GO_OUT}/hyperkube" scheduler \
--v=${LOG_LEVEL} \
--kubeconfig "$CERT_DIR"/scheduler.kubeconfig \
--master="https://${API_HOST}:${API_SECURE_PORT}" >"${SCHEDULER_LOG}" 2>&1 &
SCHEDULER_PID=$!
}
function start_kubedns {
if [[ "${ENABLE_CLUSTER_DNS}" = true ]]; then
echo "Creating kube-system namespace"
sed -e "s/{{ pillar\['dns_domain'\] }}/${DNS_DOMAIN}/g" "${KUBE_ROOT}/cluster/addons/dns/kubedns-controller.yaml.in" >| kubedns-deployment.yaml
if [[ "${FEDERATION:-}" == "true" ]]; then
FEDERATIONS_DOMAIN_MAP="${FEDERATIONS_DOMAIN_MAP:-}"
if [[ -z "${FEDERATIONS_DOMAIN_MAP}" && -n "${FEDERATION_NAME:-}" && -n "${DNS_ZONE_NAME:-}" ]]; then
FEDERATIONS_DOMAIN_MAP="${FEDERATION_NAME}=${DNS_ZONE_NAME}"
fi
if [[ -n "${FEDERATIONS_DOMAIN_MAP}" ]]; then
sed -i -e "s/{{ pillar\['federations_domain_map'\] }}/- --federations=${FEDERATIONS_DOMAIN_MAP}/g" kubedns-deployment.yaml
else
sed -i -e "/{{ pillar\['federations_domain_map'\] }}/d" kubedns-deployment.yaml
fi
else
sed -i -e "/{{ pillar\['federations_domain_map'\] }}/d" kubedns-deployment.yaml
fi
sed -e "s/{{ pillar\['dns_server'\] }}/${DNS_SERVER_IP}/g" "${KUBE_ROOT}/cluster/addons/dns/kubedns-svc.yaml.in" >| kubedns-svc.yaml
# TODO update to dns role once we have one.
${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" create clusterrolebinding system:kube-dns --clusterrole=cluster-admin --serviceaccount=kube-system:default
# use kubectl to create kubedns deployment and service
${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" --namespace=kube-system create -f ${KUBE_ROOT}/cluster/addons/dns/kubedns-sa.yaml
${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" --namespace=kube-system create -f ${KUBE_ROOT}/cluster/addons/dns/kubedns-cm.yaml
${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" --namespace=kube-system create -f kubedns-deployment.yaml
${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" --namespace=kube-system create -f kubedns-svc.yaml
echo "Kube-dns deployment and service successfully deployed."
rm kubedns-deployment.yaml kubedns-svc.yaml
fi
}
function create_psp_policy {
echo "Create podsecuritypolicy policies for RBAC."
${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" create -f ${KUBE_ROOT}/examples/podsecuritypolicy/rbac/policies.yaml
${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" create -f ${KUBE_ROOT}/examples/podsecuritypolicy/rbac/roles.yaml
${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" create -f ${KUBE_ROOT}/examples/podsecuritypolicy/rbac/bindings.yaml
}
function create_storage_class {
if [ -z "$CLOUD_PROVIDER" ]; then
# No cloud provider -> no default storage class
return
fi
CLASS_FILE=${KUBE_ROOT}/cluster/addons/storage-class/${CLOUD_PROVIDER}/default.yaml
if [ -e $CLASS_FILE ]; then
echo "Create default storage class for $CLOUD_PROVIDER"
${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" create -f $CLASS_FILE
else
echo "No storage class available for $CLOUD_PROVIDER."
fi
}
function print_success {
if [[ "${START_MODE}" != "kubeletonly" ]]; then
cat <<EOF
Local Kubernetes cluster is running. Press Ctrl-C to shut it down.
Logs:
${APISERVER_LOG:-}
${CTLRMGR_LOG:-}
${PROXY_LOG:-}
${SCHEDULER_LOG:-}
EOF
fi
if [[ "${ENABLE_APISERVER_BASIC_AUDIT:-}" = true ]]; then
echo " ${APISERVER_BASIC_AUDIT_LOG}"
fi
if [[ "${START_MODE}" == "all" ]]; then
echo " ${KUBELET_LOG}"
elif [[ "${START_MODE}" == "nokubelet" ]]; then
echo
echo "No kubelet was started because you set START_MODE=nokubelet"
echo "Run this script again with START_MODE=kubeletonly to run a kubelet"
fi
if [[ "${START_MODE}" != "kubeletonly" ]]; then
echo
cat <<EOF
To start using your cluster, you can open up another terminal/tab and run:
export KUBECONFIG=${CERT_DIR}/admin.kubeconfig
cluster/kubectl.sh
Alternatively, you can write to the default kubeconfig:
export KUBERNETES_PROVIDER=local
cluster/kubectl.sh config set-cluster local --server=https://${API_HOST}:${API_SECURE_PORT} --certificate-authority=${ROOT_CA_FILE}
cluster/kubectl.sh config set-credentials myself ${AUTH_ARGS}
cluster/kubectl.sh config set-context local --cluster=local --user=myself
cluster/kubectl.sh config use-context local
cluster/kubectl.sh
EOF
else
cat <<EOF
The kubelet was started.
Logs:
${KUBELET_LOG}
EOF
fi
}
# validate that etcd is: not running, in path, and has minimum required version.
kube::etcd::validate
if [ "${CONTAINER_RUNTIME}" == "docker" ] && ! kube::util::ensure_docker_daemon_connectivity; then
exit 1
fi
if [[ "${CONTAINER_RUNTIME}" == "rkt" ]]; then
test_rkt
fi
if [[ "${START_MODE}" != "kubeletonly" ]]; then
test_apiserver_off
fi
kube::util::test_openssl_installed
kube::util::test_cfssl_installed
### IF the user didn't supply an output/ for the build... Then we detect.
if [ "$GO_OUT" == "" ]; then
detect_binary
fi
echo "Detected host and ready to start services. Doing some housekeeping first..."
echo "Using GO_OUT $GO_OUT"
KUBELET_CIDFILE=/tmp/kubelet.cid
if [[ "${ENABLE_DAEMON}" = false ]]; then
trap cleanup EXIT
fi
echo "Starting services now!"
if [[ "${START_MODE}" != "kubeletonly" ]]; then
start_etcd
set_service_accounts
start_apiserver
start_controller_manager
start_kubeproxy
start_kubedns
fi
if [[ "${START_MODE}" != "nokubelet" ]]; then
## TODO remove this check if/when kubelet is supported on darwin
# Detect the OS name/arch and display appropriate error.
case "$(uname -s)" in
Darwin)
warning "kubelet is not currently supported in darwin, kubelet aborted."
KUBELET_LOG=""
;;
Linux)
start_kubelet
;;
*)
warning "Unsupported host OS. Must be Linux or Mac OS X, kubelet aborted."
;;
esac
fi
if [[ -n "${PSP_ADMISSION}" && "${ENABLE_RBAC}" = true ]]; then
create_psp_policy
fi
if [[ "$DEFAULT_STORAGE_CLASS" = "true" ]]; then
create_storage_class
fi
print_success
if [[ "${ENABLE_DAEMON}" = false ]]; then
while true; do sleep 1; done
fi
|
haveatry/kubernetes
|
hack/local-up-cluster.sh
|
Shell
|
apache-2.0
| 34,144 |
ck replay experiment:demo-autotune-flags-susan-linux-best @replay_base_best.json --subpoint=$1
|
supriyantomaftuh/ck-autotuning
|
demo/autotuning-compiler-flags-susan-linux/replay_base_best.sh
|
Shell
|
bsd-3-clause
| 95 |
# load tpch data into db2
# load_tpch.sh <database> <datadir>
db2 connect to $1
for tab in customer lineitem nation orders partsupp part region supplier; do
echo $tab;
db2 "load from $2/$tab.tbl of del modified by coldel| insert into $tab";
done
|
ulricha/dsh-tpc-h
|
scripts/db2/load_tpch.sh
|
Shell
|
bsd-3-clause
| 263 |
#!/bin/sh
set -e
UNSIGNED=$1
SIGNATURE=$2
ARCH=x86_64
ROOTDIR=dist
BUNDLE=${ROOTDIR}/Dash-Qt.app
TEMPDIR=signed.temp
OUTDIR=signed-app
if [ -z "$UNSIGNED" ]; then
echo "usage: $0 <unsigned app> <signature>"
exit 1
fi
if [ -z "$SIGNATURE" ]; then
echo "usage: $0 <unsigned app> <signature>"
exit 1
fi
rm -rf ${TEMPDIR} && mkdir -p ${TEMPDIR}
tar -C ${TEMPDIR} -xf ${UNSIGNED}
tar -C ${TEMPDIR} -xf ${SIGNATURE}
if [ -z "${PAGESTUFF}" ]; then
PAGESTUFF=${TEMPDIR}/pagestuff
fi
if [ -z "${CODESIGN_ALLOCATE}" ]; then
CODESIGN_ALLOCATE=${TEMPDIR}/codesign_allocate
fi
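# For each detached signature (*.sign): grow the matching binary with codesign_allocate, locate the embedded signature offset with pagestuff, then splice the signature bytes in place with dd.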
for i in `find ${TEMPDIR} -name "*.sign"`; do
SIZE=`stat -c %s ${i}`
TARGET_FILE=`echo ${i} | sed 's/\.sign$//'`
echo "Allocating space for the signature of size ${SIZE} in ${TARGET_FILE}"
${CODESIGN_ALLOCATE} -i ${TARGET_FILE} -a ${ARCH} ${SIZE} -o ${i}.tmp
OFFSET=`${PAGESTUFF} ${i}.tmp -p | tail -2 | grep offset | sed 's/[^0-9]*//g'`
if [ -z "${QUIET}" ]; then
echo "Attaching signature at offset ${OFFSET}"
fi
dd if=$i of=${i}.tmp bs=1 seek=${OFFSET} count=${SIZE} 2>/dev/null
mv ${i}.tmp ${TARGET_FILE}
rm ${i}
echo "Success."
done
mv ${TEMPDIR}/${ROOTDIR} ${OUTDIR}
rm -rf ${TEMPDIR}
echo "Signed: ${OUTDIR}"
|
mytestcoin/mytestcoin
|
contrib/macdeploy/detached-sig-apply.sh
|
Shell
|
mit
| 1,226 |
#! /bin/sh
# Copyright (C) 2011-2014 Free Software Foundation, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# TAP support:
# - log file creation
# - log file removal
# - stdout and stderr of a script go in its log file
# - TEST_SUITE_LOG redefinition, at either automake or make time
# - VERBOSE environment variable support
# Keep in sync with 'test-log.sh'.
. test-init.sh
cat > Makefile.am << 'END'
TESTS = pass.test skip.test xfail.test fail.test xpass.test error.test
TEST_SUITE_LOG = global.log
END
. tap-setup.sh
# Custom markers, for use in grepping checks.
cmarker=::: # comment marker
pmarker=%%% # plain marker
cat > pass.test <<END
#! /bin/sh -e
echo 1..1
echo "$pmarker pass $pmarker" >&2
echo "# $cmarker pass $cmarker" >&2
echo "ok 1"
END
cat > skip.test <<END
#! /bin/sh -e
echo 1..1
echo "$pmarker skip $pmarker"
echo "# $cmarker skip $cmarker"
echo "ok 1 # SKIP"
END
cat > xfail.test <<END
#! /bin/sh -e
echo 1..1
echo "$pmarker xfail $pmarker" >&2
echo "# $cmarker xfail $cmarker" >&2
echo "not ok 1 # TODO"
END
cat > fail.test <<END
#! /bin/sh -e
echo 1..1
echo "$pmarker fail $pmarker"
echo "# $cmarker fail $cmarker"
echo "not ok 1"
END
cat > xpass.test <<END
#! /bin/sh -e
echo 1..1
echo "$pmarker xpass $pmarker" >&2
echo "# $cmarker xpass $cmarker" >&2
echo "ok 1 # TODO"
END
cat > error.test <<END
#! /bin/sh -e
echo 1..1
echo "$pmarker error $pmarker"
echo "# $cmarker error $cmarker"
echo 'Bail out!'
END
chmod a+x *.test
run_make -e FAIL TEST_SUITE_LOG=my.log check
ls -l # For debugging.
test ! -e test-suite.log
test ! -e global.log
test -f my.log
st=0
for result in pass fail xfail xpass skip error; do
cat $result.log # For debugging.
$FGREP "$pmarker $result $pmarker" $result.log || st=1
$FGREP "$cmarker $result $cmarker" $result.log || st=1
done
test $st -eq 0 || exit 1
cat my.log # For debugging.
for result in xfail fail xpass skip error; do
cat $result.log # For debugging.
$FGREP "$pmarker $result $pmarker" my.log || st=1
$FGREP "$cmarker $result $cmarker" my.log || st=1
done
test $($FGREP -c "$pmarker" my.log) -eq 5
test $($FGREP -c "$cmarker" my.log) -eq 5
# Passed test scripts shouldn't be mentioned in the global log.
$EGREP '(^pass|[^x]pass)\.test' my.log && exit 1
# But failing (expectedly or not) and skipped ones should.
$FGREP 'xfail.test' my.log
$FGREP 'skip.test' my.log
$FGREP 'fail.test' my.log
$FGREP 'xpass.test' my.log
$FGREP 'error.test' my.log
touch error2.log test-suite.log global.log
run_make TEST_SUITE_LOG=my.log mostlyclean
ls -l # For debugging.
test ! -e my.log
test ! -e pass.log
test ! -e fail.log
test ! -e xfail.log
test ! -e xpass.log
test ! -e skip.log
test ! -e error.log
# "make mostlyclean" shouldn't remove unrelated log files.
test -f error2.log
test -f test-suite.log
test -f global.log
rm -f *.log
run_make -O -e FAIL check VERBOSE=yes
cat global.log
test ! -e my.log
test ! -e test-suite.log
# Check that VERBOSE causes the global testsuite log to be
# emitted on stdout.
out=$(cat stdout)
log=$(cat global.log)
case $out in *"$log"*) ;; *) exit 1;; esac
touch error2.log test-suite.log my.log
$MAKE clean
ls -l # For debugging.
test ! -e global.log
test ! -e pass.log
test ! -e fail.log
test ! -e xfail.log
test ! -e xpass.log
test ! -e skip.log
test ! -e error.log
# "make clean" shouldn't remove unrelated log files.
test -f error2.log
test -f test-suite.log
test -f my.log
rm -f *.log
:
|
kuym/openocd
|
tools/automake-1.15/t/tap-log.sh
|
Shell
|
gpl-2.0
| 4,007 |
#!/bin/sh
if [ "$#" -lt 1 ]
then
echo "Usage: $0 <version> [destination]"
exit 1
fi
APP="kanboard"
VERSION=$1
DESTINATION=$2
if [ -z "$2" ]
then
DESTINATION=~/Devel/websites/$APP
fi
echo "Build package for version $VERSION => $DESTINATION"
# Cleanup
rm -rf /tmp/$APP /tmp/$APP-*.zip 2>/dev/null
# Download source code
cd /tmp
git clone --depth 1 -q https://github.com/fguillot/$APP.git >/dev/null
# Install vendors
cd /tmp/$APP
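# Production install: --no-dev skips development dependencies and --optimize-autoloader pre-generates the class map.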
composer --prefer-dist --no-dev --optimize-autoloader --quiet install
# Remove useless files
rm -rf data/*.sqlite \
.git \
.gitignore \
scripts \
tests \
Vagrantfile \
.*.yml \
README.markdown \
docs \
Dockerfile \
composer.* \
app.json
find ./vendor -name doc -type d -exec rm -rf {} +;
find ./vendor -name notes -type d -exec rm -rf {} +;
find ./vendor -name test -type d -exec rm -rf {} +;
find ./vendor -name tests -type d -exec rm -rf {} +;
find ./vendor -name composer.json -delete
find ./vendor -name phpunit.xml -delete
find ./vendor -name .travis.yml -delete
find ./vendor -name README.* -delete
find ./vendor -name .gitignore -delete
# Set the version number
sed -i.bak s/master/$VERSION/g app/constants.php && rm -f app/*.bak
# Make the archive
cd /tmp
zip -r $APP-$VERSION.zip $APP > /dev/null
mv $APP-$VERSION.zip $DESTINATION
cd $DESTINATION
# Make symlink for generic archive
if [ -L $APP-latest.zip ]
then
unlink $APP-latest.zip
ln -s $APP-$VERSION.zip $APP-latest.zip
fi
rm -rf /tmp/$APP 2>/dev/null
|
crwilliams/kanboard
|
scripts/make-archive.sh
|
Shell
|
agpl-3.0
| 1,556 |
#!/bin/bash
# Author: Fred Brooker <[email protected]>
# URL: http://fredbrooker.cz/
if [ $# -eq 0 ]
then
echo -e "\nConvert WAV files to MP3 recursively.\n\nSyntax: $(basename $0) <folder>\n"
exit 1
fi
if [ -n "$1" ]
then
if [ -d "$1" ]
then
cd "$1"
else
echo -e "Invalid folder: $1\n"
exit 1
fi
fi
which lame >/dev/null 2>&1
if [ $? -eq 1 ]
then
echo -e "Installing lame package...\n"
sudo apt-get install lame
fi
which lame >/dev/null 2>&1
if [ $? -eq 1 ]
then
echo -e "Lame is not installed!\n"
exit 1
fi
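# Recurse into sub-directories first, then convert each .wav in the current directory to a 320 kbps MP3 with the same base name.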
for i in *
do
if [ -d "$i" ]
then
echo "Recursing into directory: $i"
$0 "$i"
fi
done
for i in *.wav
do
if [ -d "$i" ]
then
echo "Recursing into directory: $i"
$0 "$i"
fi
if [ -f "$i" ]
then
echo "Converting: $i"
lame -b 320 "$i" "${i%.wav}.mp3"
fi
done
sync
echo -e "\nDone.\n"
exit 0
|
gauravdatir/linux-bash-scripts
|
wav2mp3.sh
|
Shell
|
unlicense
| 846 |
#!/bin/bash -eux
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
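# Build the shared base images first; the later images build on the earlier ones (e.g. the base-builder-* images extend base-builder), so the order below matters.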
docker build --pull -t gcr.io/oss-fuzz-base/base-image "$@" infra/base-images/base-image
docker build -t gcr.io/oss-fuzz-base/base-clang "$@" infra/base-images/base-clang
docker build -t gcr.io/oss-fuzz-base/base-builder "$@" infra/base-images/base-builder
docker build -t gcr.io/oss-fuzz-base/base-builder-go "$@" infra/base-images/base-builder-go
docker build -t gcr.io/oss-fuzz-base/base-builder-jvm "$@" infra/base-images/base-builder-jvm
docker build -t gcr.io/oss-fuzz-base/base-builder-python "$@" infra/base-images/base-builder-python
docker build -t gcr.io/oss-fuzz-base/base-builder-rust "$@" infra/base-images/base-builder-rust
docker build -t gcr.io/oss-fuzz-base/base-builder-swift "$@" infra/base-images/base-builder-swift
docker build -t gcr.io/oss-fuzz-base/base-runner "$@" infra/base-images/base-runner
docker build -t gcr.io/oss-fuzz-base/base-runner-debug "$@" infra/base-images/base-runner-debug
|
skia-dev/oss-fuzz
|
infra/base-images/all.sh
|
Shell
|
apache-2.0
| 1,594 |
#!/bin/bash
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
set -e
set -x
source tensorflow/tools/ci_build/release/common.sh
install_ubuntu_16_pip_deps pip3.7
# Update bazel
update_bazel_linux
# Export required variables for running pip.sh
export OS_TYPE="UBUNTU"
export CONTAINER_TYPE="CPU"
export TF_PYTHON_VERSION='python3.7'
# Run configure.
export TF_NEED_GCP=1
export TF_NEED_HDFS=1
export TF_NEED_S3=1
export TF_NEED_CUDA=0
export CC_OPT_FLAGS='-mavx'
export PYTHON_BIN_PATH=$(which ${TF_PYTHON_VERSION})
yes "" | "$PYTHON_BIN_PATH" configure.py
# Get the default test targets for bazel.
source tensorflow/tools/ci_build/build_scripts/PRESUBMIT_BUILD_TARGETS.sh
# Export optional variables for running pip.sh
export TF_BUILD_FLAGS="--config=opt --config=v2 --crosstool_top=//third_party/toolchains/preconfig/ubuntu16.04/gcc7_manylinux2010-nvcc-cuda10.1:toolchain"
export TF_TEST_FLAGS="--define=no_tensorflow_py_deps=true --test_lang_filters=py --test_output=errors --verbose_failures=true --keep_going --test_env=TF2_BEHAVIOR=1"
export TF_TEST_TARGETS="${DEFAULT_BAZEL_TARGETS} -//tensorflow/lite/... "
export TF_PIP_TESTS="test_pip_virtualenv_non_clean test_pip_virtualenv_clean"
export TF_TEST_FILTER_TAGS='-no_oss,-oss_serial,-no_oss_py37,-v1only'
export IS_NIGHTLY=0 # Not nightly
export TF_PROJECT_NAME="tensorflow_cpu"
export TF_PIP_TEST_ROOT="pip_test"
./tensorflow/tools/ci_build/builds/pip_new.sh
|
ppwwyyxx/tensorflow
|
tensorflow/tools/ci_build/release/ubuntu_16/cpu_py37_full/pip.sh
|
Shell
|
apache-2.0
| 2,062 |
#!/bin/sh
printf "\033[01;38msequential write only channel\033[00m test has"
make clean all >/dev/null
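# result.log is written by the make run above; the test passes when the log exists, is non-empty and contains no FAILED entries.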
result=$(grep "FAILED" result.log | awk '{print $4}')
if [ "" = "$result" ] && [ -s result.log ]; then
echo " \033[01;32mpassed\033[00m"
make clean>/dev/null
else
echo " \033[01;31mfailed with $result errors\033[00m"
fi
|
zerovm/zerovm
|
tests/functional/channels/seqwo/test.sh
|
Shell
|
apache-2.0
| 347 |
#!/usr/bin/perl
# Copyright 2009 The Go Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
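# Reads NACL_sys_* defines from the header(s) passed on the command line and prints a Go source fragment defining the matching SYS_* constants for package syscall.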
my $command = "mksysnum_nacl.sh ". join(' ', @ARGV);
print <<EOF;
// $command
// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT
package syscall
const(
EOF
while(<>){
if(/^#define NACL_sys_(\w+)\s+([0-9]+)/){
my $name = "SYS_$1";
my $num = $2;
$name =~ y/a-z/A-Z/;
print " $name = $num;\n";
}
}
print <<EOF;
)
EOF
|
8l/go-learn
|
src/pkg/syscall/mksysnum_nacl.sh
|
Shell
|
bsd-3-clause
| 507 |
#! /bin/sh
killall -9 chunkserver
killall -9 nameserver
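# Restart a local test cluster: one nameserver plus two chunkservers (ports 8021 and 8022), each writing its PID to a "pid" file.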
cd nameserver;
./bin/nameserver 1>nlog1 2>&1 &
echo $! > pid
cd ../chunkserver1;
./bin/chunkserver --chunkserver_port=8021 1>clog1 2>&1 &
echo $! > pid
cd ../chunkserver2;
./bin/chunkserver --chunkserver_port=8022 1>clog2 2>&1 &
echo $! > pid
|
00k/ToyDFS
|
sandbox/restart_bfs.sh
|
Shell
|
bsd-3-clause
| 304 |
#!/bin/bash
FN="hgu133plus2cdf_2.18.0.tar.gz"
URLS=(
"https://bioconductor.org/packages/3.12/data/annotation/src/contrib/hgu133plus2cdf_2.18.0.tar.gz"
"https://bioarchive.galaxyproject.org/hgu133plus2cdf_2.18.0.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-hgu133plus2cdf/bioconductor-hgu133plus2cdf_2.18.0_src_all.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-hgu133plus2cdf/bioconductor-hgu133plus2cdf_2.18.0_src_all.tar.gz"
)
MD5="284fef2f0b777d7b53451538ddd53de3"
# Use a staging area in the conda dir rather than temp dirs, both to avoid
# permission issues as well as to have things downloaded in a predictable
# manner.
STAGING=$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM
mkdir -p $STAGING
TARBALL=$STAGING/$FN
SUCCESS=0
for URL in ${URLS[@]}; do
curl $URL > $TARBALL
[[ $? == 0 ]] || continue
# Platform-specific md5sum checks.
if [[ $(uname -s) == "Linux" ]]; then
  if md5sum -c <<<"$MD5  $TARBALL"; then
    SUCCESS=1
    break
  fi
elif [[ $(uname -s) == "Darwin" ]]; then
  if [[ $(md5 $TARBALL | cut -f4 -d " ") == "$MD5" ]]; then
    SUCCESS=1
    break
  fi
fi
done
if [[ $SUCCESS != 1 ]]; then
echo "ERROR: post-link.sh was unable to download any of the following URLs with the md5sum $MD5:"
printf '%s\n' "${URLS[@]}"
exit 1
fi
# Install and clean up
R CMD INSTALL --library=$PREFIX/lib/R/library $TARBALL
rm $TARBALL
rmdir $STAGING
|
ostrokach/bioconda-recipes
|
recipes/bioconductor-hgu133plus2cdf/post-link.sh
|
Shell
|
mit
| 1,445 |
#!/bin/bash
FN="gskb_1.18.0.tar.gz"
URLS=(
"https://bioconductor.org/packages/3.10/data/experiment/src/contrib/gskb_1.18.0.tar.gz"
"https://bioarchive.galaxyproject.org/gskb_1.18.0.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-gskb/bioconductor-gskb_1.18.0_src_all.tar.gz"
)
MD5="9c8267af08b0eeeb90758aea28120241"
# Use a staging area in the conda dir rather than temp dirs, both to avoid
# permission issues as well as to have things downloaded in a predictable
# manner.
STAGING=$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM
mkdir -p $STAGING
TARBALL=$STAGING/$FN
SUCCESS=0
for URL in ${URLS[@]}; do
curl $URL > $TARBALL
[[ $? == 0 ]] || continue
# Platform-specific md5sum checks.
if [[ $(uname -s) == "Linux" ]]; then
  if md5sum -c <<<"$MD5  $TARBALL"; then
    SUCCESS=1
    break
  fi
elif [[ $(uname -s) == "Darwin" ]]; then
  if [[ $(md5 $TARBALL | cut -f4 -d " ") == "$MD5" ]]; then
    SUCCESS=1
    break
  fi
fi
done
if [[ $SUCCESS != 1 ]]; then
echo "ERROR: post-link.sh was unable to download any of the following URLs with the md5sum $MD5:"
printf '%s\n' "${URLS[@]}"
exit 1
fi
# Install and clean up
R CMD INSTALL --library=$PREFIX/lib/R/library $TARBALL
rm $TARBALL
rmdir $STAGING
|
Luobiny/bioconda-recipes
|
recipes/bioconductor-gskb/post-link.sh
|
Shell
|
mit
| 1,272 |
#!/bin/bash
FN="BSgenome.Mmusculus.UCSC.mm10.masked_1.3.99.tar.gz"
URLS=(
"https://bioconductor.org/packages/3.10/data/annotation/src/contrib/BSgenome.Mmusculus.UCSC.mm10.masked_1.3.99.tar.gz"
"https://bioarchive.galaxyproject.org/BSgenome.Mmusculus.UCSC.mm10.masked_1.3.99.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-bsgenome.mmusculus.ucsc.mm10.masked/bioconductor-bsgenome.mmusculus.ucsc.mm10.masked_1.3.99_src_all.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-bsgenome.mmusculus.ucsc.mm10.masked/bioconductor-bsgenome.mmusculus.ucsc.mm10.masked_1.3.99_src_all.tar.gz"
)
MD5="c6a5365fa1a0d2a5ecb63752960eb3d7"
# Use a staging area in the conda dir rather than temp dirs, both to avoid
# permission issues as well as to have things downloaded in a predictable
# manner.
STAGING=$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM
mkdir -p $STAGING
TARBALL=$STAGING/$FN
SUCCESS=0
for URL in ${URLS[@]}; do
curl $URL > $TARBALL
[[ $? == 0 ]] || continue
# Platform-specific md5sum checks.
if [[ $(uname -s) == "Linux" ]]; then
  if md5sum -c <<<"$MD5  $TARBALL"; then
    SUCCESS=1
    break
  fi
elif [[ $(uname -s) == "Darwin" ]]; then
  if [[ $(md5 $TARBALL | cut -f4 -d " ") == "$MD5" ]]; then
    SUCCESS=1
    break
  fi
fi
done
if [[ $SUCCESS != 1 ]]; then
echo "ERROR: post-link.sh was unable to download any of the following URLs with the md5sum $MD5:"
printf '%s\n' "${URLS[@]}"
exit 1
fi
# Install and clean up
R CMD INSTALL --library=$PREFIX/lib/R/library $TARBALL
rm $TARBALL
rmdir $STAGING
|
Luobiny/bioconda-recipes
|
recipes/bioconductor-bsgenome.mmusculus.ucsc.mm10.masked/post-link.sh
|
Shell
|
mit
| 1,592 |
#!/bin/sh
#
# This program launches a web browser on the html page
# describing a git command.
#
# Copyright (c) 2007 Christian Couder
# Copyright (c) 2006 Theodore Y. Ts'o
#
# This file is heavily stolen from git-mergetool.sh, by
# Theodore Y. Ts'o (thanks) that is:
#
# Copyright (c) 2006 Theodore Y. Ts'o
#
# This file is licensed under the GPL v2, or a later version
# at the discretion of Junio C Hamano or any other official
# git maintainer.
#
USAGE='[--browser=browser|--tool=browser] [--config=conf.var] url/file ...'
# This must be capable of running outside of git directory, so
# the vanilla git-sh-setup should not be used.
NONGIT_OK=Yes
. git-sh-setup
valid_custom_tool()
{
browser_cmd="$(git config "browser.$1.cmd")"
test -n "$browser_cmd"
}
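# A browser is valid if it is one of the well-known tools below or has a browser.<tool>.cmd entry in the git configuration.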
valid_tool() {
case "$1" in
firefox | iceweasel | konqueror | w3m | links | lynx | dillo | open | start)
;; # happy
*)
valid_custom_tool "$1" || return 1
;;
esac
}
init_browser_path() {
browser_path=$(git config "browser.$1.path")
test -z "$browser_path" && browser_path="$1"
}
while test $# != 0
do
case "$1" in
-b|--browser*|-t|--tool*)
case "$#,$1" in
*,*=*)
browser=`expr "z$1" : 'z-[^=]*=\(.*\)'`
;;
1,*)
usage ;;
*)
browser="$2"
shift ;;
esac
;;
-c|--config*)
case "$#,$1" in
*,*=*)
conf=`expr "z$1" : 'z-[^=]*=\(.*\)'`
;;
1,*)
usage ;;
*)
conf="$2"
shift ;;
esac
;;
--)
break
;;
-*)
usage
;;
*)
break
;;
esac
shift
done
test $# = 0 && usage
if test -z "$browser"
then
for opt in "$conf" "web.browser"
do
test -z "$opt" && continue
browser="`git config $opt`"
test -z "$browser" || break
done
if test -n "$browser" && ! valid_tool "$browser"; then
echo >&2 "git config option $opt set to unknown browser: $browser"
echo >&2 "Resetting to default..."
unset browser
fi
fi
if test -z "$browser" ; then
if test -n "$DISPLAY"; then
browser_candidates="firefox iceweasel konqueror w3m links lynx dillo"
if test "$KDE_FULL_SESSION" = "true"; then
browser_candidates="konqueror $browser_candidates"
fi
else
browser_candidates="w3m links lynx"
fi
# SECURITYSESSIONID indicates an OS X GUI login session
if test -n "$SECURITYSESSIONID" \
-o "$TERM_PROGRAM" = "Apple_Terminal" ; then
browser_candidates="open $browser_candidates"
fi
# /bin/start indicates MinGW
if test -x /bin/start; then
browser_candidates="start $browser_candidates"
fi
for i in $browser_candidates; do
init_browser_path $i
if type "$browser_path" > /dev/null 2>&1; then
browser=$i
break
fi
done
test -z "$browser" && die "No known browser available."
else
valid_tool "$browser" || die "Unknown browser '$browser'."
init_browser_path "$browser"
if test -z "$browser_cmd" && ! type "$browser_path" > /dev/null 2>&1; then
die "The browser $browser is not available as '$browser_path'."
fi
fi
case "$browser" in
firefox|iceweasel)
# Check version because firefox < 2.0 does not support "-new-tab".
vers=$(expr "$($browser_path -version)" : '.* \([0-9][0-9]*\)\..*')
NEWTAB='-new-tab'
test "$vers" -lt 2 && NEWTAB=''
"$browser_path" $NEWTAB "$@" &
;;
konqueror)
case "$(basename "$browser_path")" in
konqueror)
# It's simpler to use kfmclient to open a new tab in konqueror.
browser_path="$(echo "$browser_path" | sed -e 's/konqueror$/kfmclient/')"
type "$browser_path" > /dev/null 2>&1 || die "No '$browser_path' found."
eval "$browser_path" newTab "$@"
;;
kfmclient)
eval "$browser_path" newTab "$@"
;;
*)
"$browser_path" "$@" &
;;
esac
;;
w3m|links|lynx|open)
eval "$browser_path" "$@"
;;
start)
exec "$browser_path" '"web-browse"' "$@"
;;
dillo)
"$browser_path" "$@" &
;;
*)
if test -n "$browser_cmd"; then
( eval $browser_cmd "$@" )
fi
;;
esac
|
samv/git
|
git-web--browse.sh
|
Shell
|
gpl-2.0
| 3,961 |
#!/usr/bin/env bash
#
# Process (API) docs after a successful build on Travis (via ../.travis.yml).
#
# Updated/changed documentation for "master" is pushed to gh-pages.
# In case of pull requests or other branches, it will get added to a separate branch.
# In case of a pull request, a compare view comment will be posted.
#
# NOTE: stdout/stderr might/should be discarded to not leak sensitive information.
echo "Post-processing (API) documentation."
echo "TRAVIS_PULL_REQUEST: $TRAVIS_PULL_REQUEST"
echo "TRAVIS_BRANCH: $TRAVIS_BRANCH"
if [ -z "$GH_APIDOC_TOKEN" ]; then
echo "No GH_APIDOC_TOKEN available. Skipping."
exit
fi
# NOTE: DO NOT USE "set -x", or anything else that would reveal GH_APIDOC_TOKEN!
set -e
set +x
# Display the exit code in case of failure (probably due to 'set -x').
trap '[ "$?" = 0 ] || echo "EXIT CODE: $?"' EXIT
REPO_APIDOC="https://${GH_APIDOC_TOKEN}@github.com/awesomeWM/apidoc"
REPO_DIR="$PWD"
export GIT_AUTHOR_NAME="awesome-robot on Travis CI"
export GIT_AUTHOR_EMAIL="[email protected]"
export GIT_COMMITTER_NAME="$GIT_AUTHOR_NAME"
export GIT_COMMITTER_EMAIL="$GIT_AUTHOR_EMAIL"
git clone --depth 1 --branch gh-pages "$REPO_APIDOC" build/apidoc \
2>&1 | sed "s/$GH_APIDOC_TOKEN/GH_APIDOC_TOKEN/g"
cd build/apidoc
# This will re-use already existing branches (updated PR).
if [ "$TRAVIS_PULL_REQUEST" != false ]; then
BRANCH="pr-$TRAVIS_PULL_REQUEST"
elif [ "$TRAVIS_BRANCH" != master ]; then
# Use merge-base of master in branch name, to keep different branches with
# the same name apart.
# shellcheck disable=SC2015
BRANCH="$TRAVIS_BRANCH-$(cd "$REPO_DIR" \
&& git fetch --unshallow origin master \
&& git rev-parse --short "$(git merge-base HEAD FETCH_HEAD || true)" || true)"
else
BRANCH="gh-pages"
fi
if [ "$BRANCH" != "gh-pages" ]; then
git checkout -b "$BRANCH" "origin/${BRANCH}" || git checkout -b "$BRANCH"
fi
# Use a temporary branch for the two commits, which allows for a better UI.
git checkout -b merged-update
# Create the README for the Git repo (https://github.com/awesomeWM/apidoc).
cat > ../doc/README.md <<END
# Awesome API documentation
This repository contains the built API documentation for the
[awesome](https://github.com/awesomeWM/awesome) window manager. It is
automatically updated via Travis when the master branch changes. Hence:
## Do NOT send pull requests here
Instead, please update the source code of
[awesome](https://github.com/awesomeWM/awesome).
END
# Create a patch without irrelevant changes (version / timestamp).
diff -Nur . ../doc -I "Last updated" -I "<strong>Release</strong>:" \
-I "<h2>API documentation for awesome, a highly configurable X window manager (version .*)\.</h2>" \
-x .git | patch -p1
git add --all .
if git diff --cached --exit-code --quiet; then
echo "Documentation has not changed."
exit
fi
LAST_COMMIT_MSG="$(cd "$REPO_DIR" && git log -1 --pretty=format:%s)"
LAST_COMMIT="$(cd "$REPO_DIR" && git rev-parse --short HEAD)"
# Commit the relevant changes.
COMMIT_MSG="Update docs for $AWESOME_VERSION via Travis
Last commit message:
$LAST_COMMIT_MSG
Commits: https://github.com/awesomeWM/awesome/compare/${TRAVIS_COMMIT_RANGE/.../..}
Build URL: https://travis-ci.org/awesomeWM/awesome/builds/${TRAVIS_BUILD_ID}"
git commit -m "[relevant] $COMMIT_MSG"
# Commit the irrelevant changes.
mv .git ../doc
cd ../doc
git add --all .
git commit -m "[boilerplate] $COMMIT_MSG"
# Reorder/swap commits, to have "relevant" after "boilerplate".
# This makes it show up earlier in the Github interface etc.
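# _old marks the boilerplate commit (current HEAD); resetting two commits back and cherry-picking _old then _old~1 replays them in swapped order, leaving the relevant commit on top.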
git tag _old
git reset --hard HEAD~2
git cherry-pick _old _old~1
RELEVANT_REV="$(git rev-parse --short HEAD)"
git tag -d _old
git checkout "$BRANCH"
OLD_REV="$(git rev-parse --short HEAD)"
if [ "$TRAVIS_PULL_REQUEST" != false ]; then
MERGE_COMMIT_MSG="$COMMIT_MSG
Pull request: https://github.com/awesomeWM/awesome/pull/${TRAVIS_PULL_REQUEST}"
else
PR_OR_ISSUE="$(echo "$COMMIT_MSG" | head -n 1 | grep -o '#[0-9]\+' || true)"
if [ -n "$PR_OR_ISSUE" ]; then
MERGE_COMMIT_MSG="$COMMIT_MSG
Ref: https://github.com/awesomeWM/awesome/pull/${PR_OR_ISSUE}"
else
PR_OR_ISSUE_URL="$(echo "$COMMIT_MSG" \
| grep -Eo 'https://github.com/awesomeWM/awesome/(issues|pull)/[0-9]+' || true)"
if [ -n "$PR_OR_ISSUE_URL" ]; then
MERGE_COMMIT_MSG="$COMMIT_MSG
Ref: $PR_OR_ISSUE_URL"
else
MERGE_COMMIT_MSG="$COMMIT_MSG
Commit: https://github.com/awesomeWM/awesome/commit/${LAST_COMMIT}
Tree: https://github.com/awesomeWM/awesome/commits/${LAST_COMMIT}"
fi
fi
fi
git merge --no-ff -m "$MERGE_COMMIT_MSG" merged-update
NEW_REV="$(git rev-parse --short HEAD)"
git push origin "$BRANCH" 2>&1 | sed "s/$GH_APIDOC_TOKEN/GH_APIDOC_TOKEN/g"
# Generate compare view links.
# NOTE: use "\n" for line endings, not real ones for valid json!
COMPARE_LINKS="Compare view: https://github.com/awesomeWM/apidoc/compare/${OLD_REV}...${NEW_REV}"
COMPARE_LINKS="$COMPARE_LINKS\nRelevant changes: https://github.com/awesomeWM/apidoc/commit/${RELEVANT_REV}"
if [ "$BRANCH" != "gh-pages" ]; then
COMPARE_LINKS="$COMPARE_LINKS\nComparison against master (gh-pages): https://github.com/awesomeWM/apidoc/compare/gh-pages...${NEW_REV}"
fi
# shellcheck disable=SC2028
echo "Compare links:\n$COMPARE_LINKS"
# Post a comment to the PR.
if [ "$TRAVIS_PULL_REQUEST" != false ]; then
curl -H "Authorization: token $GH_APIDOC_TOKEN" \
-d "{\"body\": \"Documentation has been updated for this PR.\n\n$COMPARE_LINKS\"}" \
"https://api.github.com/repos/awesomeWM/awesome/issues/${TRAVIS_PULL_REQUEST}/comments" \
2>&1 | sed "s/$GH_APIDOC_TOKEN/GH_APIDOC_TOKEN/g"
fi
# vim: filetype=sh:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:textwidth=80
|
blueyed/awesome
|
build-utils/travis-apidoc.sh
|
Shell
|
gpl-2.0
| 5,785 |
#!/bin/bash
printf "choose a file from the list: \n"
ls *.mol
printf "> "
read firstarg
filename=${firstarg%%.*}
awk -f check_1_foe.awk $firstarg
awk -f foe_bubbles_09_10.awk $firstarg
cp graph_before.json "$filename"_0.json
cp graph_before.json file_0.json
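# Each pass below re-runs the two awk scripts on temp_final_nodes, applying one more reduction step and saving the resulting graph as "$filename"_<step>.json / file_<step>.json for look.html.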
for ((a=1; a <= 20 ; a++))
do
awk -f check_1_foe.awk temp_final_nodes
cp graph_before.json "$filename"_"$a".json
cp graph_before.json file_"$a".json
cp temp_final_nodes continue_"$a".mol
awk -f foe_bubbles_09_10.awk temp_final_nodes
done
cp temp_final_nodes continue.mol
printf "20 reduction steps available, look at them with look.html in your browser"
printf "\n ... if you want the next 20 steps then: \n - write again \"bash main_viral.sh\" \n - choose the file continue.mol from the list \n - reload look.html in your browser \n"
|
zitterbewegung/chemlambda-gui
|
scripts_and_mol_files/main_viral_foe_bubbles.sh
|
Shell
|
gpl-2.0
| 823 |
#!/bin/sh
test_description='test smart fetching over http via http-backend'
. ./test-lib.sh
. "$TEST_DIRECTORY"/lib-httpd.sh
start_httpd
test_expect_success 'setup repository' '
git config push.default matching &&
echo content >file &&
git add file &&
git commit -m one
'
test_expect_success 'create http-accessible bare repository' '
mkdir "$HTTPD_DOCUMENT_ROOT_PATH/repo.git" &&
(cd "$HTTPD_DOCUMENT_ROOT_PATH/repo.git" &&
git --bare init
) &&
git remote add public "$HTTPD_DOCUMENT_ROOT_PATH/repo.git" &&
git push public master:master
'
setup_askpass_helper
test_expect_success 'clone http repository' '
cat >exp <<-\EOF &&
> GET /smart/repo.git/info/refs?service=git-upload-pack HTTP/1.1
> Accept: */*
> Accept-Encoding: ENCODINGS
> Pragma: no-cache
< HTTP/1.1 200 OK
< Pragma: no-cache
< Cache-Control: no-cache, max-age=0, must-revalidate
< Content-Type: application/x-git-upload-pack-advertisement
> POST /smart/repo.git/git-upload-pack HTTP/1.1
> Accept-Encoding: ENCODINGS
> Content-Type: application/x-git-upload-pack-request
> Accept: application/x-git-upload-pack-result
> Content-Length: xxx
< HTTP/1.1 200 OK
< Pragma: no-cache
< Cache-Control: no-cache, max-age=0, must-revalidate
< Content-Type: application/x-git-upload-pack-result
EOF
GIT_TRACE_CURL=true GIT_TEST_PROTOCOL_VERSION= \
git clone --quiet $HTTPD_URL/smart/repo.git clone 2>err &&
test_cmp file clone/file &&
tr '\''\015'\'' Q <err |
sed -e "
s/Q\$//
/^[*] /d
/^== Info:/d
/^=> Send header, /d
/^=> Send header:$/d
/^<= Recv header, /d
/^<= Recv header:$/d
s/=> Send header: //
s/= Recv header://
/^<= Recv data/d
/^=> Send data/d
/^$/d
/^< $/d
/^[^><]/{
s/^/> /
}
/^> User-Agent: /d
/^> Host: /d
/^> POST /,$ {
/^> Accept: [*]\\/[*]/d
}
s/^> Content-Length: .*/> Content-Length: xxx/
/^> 00..want /d
/^> 00.*done/d
/^< Server: /d
/^< Expires: /d
/^< Date: /d
/^< Content-Length: /d
/^< Transfer-Encoding: /d
" >actual &&
# NEEDSWORK: If the overspecification of the expected result is reduced, we
# might be able to run this test in all protocol versions.
if test -z "$GIT_TEST_PROTOCOL_VERSION"
then
sed -e "s/^> Accept-Encoding: .*/> Accept-Encoding: ENCODINGS/" \
actual >actual.smudged &&
test_cmp exp actual.smudged &&
grep "Accept-Encoding:.*gzip" actual >actual.gzip &&
test_line_count = 2 actual.gzip
fi
'
test_expect_success 'fetch changes via http' '
echo content >>file &&
git commit -a -m two &&
git push public &&
(cd clone && git pull) &&
test_cmp file clone/file
'
test_expect_success 'used upload-pack service' '
cat >exp <<-\EOF &&
GET /smart/repo.git/info/refs?service=git-upload-pack HTTP/1.1 200
POST /smart/repo.git/git-upload-pack HTTP/1.1 200
GET /smart/repo.git/info/refs?service=git-upload-pack HTTP/1.1 200
POST /smart/repo.git/git-upload-pack HTTP/1.1 200
EOF
# NEEDSWORK: If the overspecification of the expected result is reduced, we
# might be able to run this test in all protocol versions.
if test -z "$GIT_TEST_PROTOCOL_VERSION"
then
check_access_log exp
fi
'
test_expect_success 'follow redirects (301)' '
git clone $HTTPD_URL/smart-redir-perm/repo.git --quiet repo-p
'
test_expect_success 'follow redirects (302)' '
git clone $HTTPD_URL/smart-redir-temp/repo.git --quiet repo-t
'
test_expect_success 'redirects re-root further requests' '
git clone $HTTPD_URL/smart-redir-limited/repo.git repo-redir-limited
'
test_expect_success 're-rooting dies on insane schemes' '
test_must_fail git clone $HTTPD_URL/insane-redir/repo.git insane
'
test_expect_success 'clone from password-protected repository' '
echo two >expect &&
set_askpass user@host pass@host &&
git clone --bare "$HTTPD_URL/auth/smart/repo.git" smart-auth &&
expect_askpass both user@host &&
git --git-dir=smart-auth log -1 --format=%s >actual &&
test_cmp expect actual
'
test_expect_success 'clone from auth-only-for-push repository' '
echo two >expect &&
set_askpass wrong &&
git clone --bare "$HTTPD_URL/auth-push/smart/repo.git" smart-noauth &&
expect_askpass none &&
git --git-dir=smart-noauth log -1 --format=%s >actual &&
test_cmp expect actual
'
test_expect_success 'clone from auth-only-for-objects repository' '
echo two >expect &&
set_askpass user@host pass@host &&
git clone --bare "$HTTPD_URL/auth-fetch/smart/repo.git" half-auth &&
expect_askpass both user@host &&
git --git-dir=half-auth log -1 --format=%s >actual &&
test_cmp expect actual
'
test_expect_success 'no-op half-auth fetch does not require a password' '
set_askpass wrong &&
# NEEDSWORK: When using HTTP(S), protocol v0 supports a "half-auth"
# configuration with authentication required only when downloading
# objects and not refs, by having the HTTP server only require
# authentication for the "git-upload-pack" path and not "info/refs".
# This is not possible with protocol v2, since both objects and refs
# are obtained from the "git-upload-pack" path. A solution to this is
# to teach the server and client to be able to inline ls-refs requests
# as an Extra Parameter (see pack-protocol.txt), so that "info/refs"
# can serve refs, just like it does in protocol v0.
GIT_TEST_PROTOCOL_VERSION=0 git --git-dir=half-auth fetch &&
expect_askpass none
'
test_expect_success 'redirects send auth to new location' '
set_askpass user@host pass@host &&
git -c credential.useHttpPath=true \
clone $HTTPD_URL/smart-redir-auth/repo.git repo-redir-auth &&
expect_askpass both user@host auth/smart/repo.git
'
test_expect_success 'disable dumb http on server' '
git --git-dir="$HTTPD_DOCUMENT_ROOT_PATH/repo.git" \
config http.getanyfile false
'
test_expect_success 'GIT_SMART_HTTP can disable smart http' '
(GIT_SMART_HTTP=0 &&
export GIT_SMART_HTTP &&
cd clone &&
test_must_fail git fetch)
'
test_expect_success 'invalid Content-Type rejected' '
test_must_fail git clone $HTTPD_URL/broken_smart/repo.git 2>actual &&
test_i18ngrep "not valid:" actual
'
test_expect_success 'create namespaced refs' '
test_commit namespaced &&
git push public HEAD:refs/namespaces/ns/refs/heads/master &&
git --git-dir="$HTTPD_DOCUMENT_ROOT_PATH/repo.git" \
symbolic-ref refs/namespaces/ns/HEAD refs/namespaces/ns/refs/heads/master
'
test_expect_success 'smart clone respects namespace' '
git clone "$HTTPD_URL/smart_namespace/repo.git" ns-smart &&
echo namespaced >expect &&
git --git-dir=ns-smart/.git log -1 --format=%s >actual &&
test_cmp expect actual
'
test_expect_success 'dumb clone via http-backend respects namespace' '
git --git-dir="$HTTPD_DOCUMENT_ROOT_PATH/repo.git" \
config http.getanyfile true &&
GIT_SMART_HTTP=0 git clone \
"$HTTPD_URL/smart_namespace/repo.git" ns-dumb &&
echo namespaced >expect &&
git --git-dir=ns-dumb/.git log -1 --format=%s >actual &&
test_cmp expect actual
'
test_expect_success 'cookies stored in http.cookiefile when http.savecookies set' '
cat >cookies.txt <<-\EOF &&
127.0.0.1 FALSE /smart_cookies/ FALSE 0 othername othervalue
EOF
sort >expect_cookies.txt <<-\EOF &&
127.0.0.1 FALSE /smart_cookies/ FALSE 0 othername othervalue
127.0.0.1 FALSE /smart_cookies/repo.git/info/ FALSE 0 name value
EOF
git config http.cookiefile cookies.txt &&
git config http.savecookies true &&
git ls-remote $HTTPD_URL/smart_cookies/repo.git master &&
# NEEDSWORK: If the overspecification of the expected result is reduced, we
# might be able to run this test in all protocol versions.
if test -z "$GIT_TEST_PROTOCOL_VERSION"
then
tail -3 cookies.txt | sort >cookies_tail.txt &&
test_cmp expect_cookies.txt cookies_tail.txt
fi
'
test_expect_success 'transfer.hiderefs works over smart-http' '
test_commit hidden &&
test_commit visible &&
git push public HEAD^:refs/heads/a HEAD:refs/heads/b &&
git --git-dir="$HTTPD_DOCUMENT_ROOT_PATH/repo.git" \
config transfer.hiderefs refs/heads/a &&
git clone --bare "$HTTPD_URL/smart/repo.git" hidden.git &&
test_must_fail git -C hidden.git rev-parse --verify a &&
git -C hidden.git rev-parse --verify b
'
# create an arbitrary number of tags, numbered from tag-$1 to tag-$2
create_tags () {
rm -f marks &&
for i in $(test_seq "$1" "$2")
do
# don't use here-doc, because it requires a process
# per loop iteration
echo "commit refs/heads/too-many-refs-$1" &&
echo "mark :$i" &&
echo "committer git <[email protected]> $i +0000" &&
echo "data 0" &&
echo "M 644 inline bla.txt" &&
echo "data 4" &&
echo "bla" &&
# make every commit dangling by always
# rewinding the branch after each commit
echo "reset refs/heads/too-many-refs-$1" &&
echo "from :$1"
done | git fast-import --export-marks=marks &&
# now assign tags to all the dangling commits we created above
tag=$(perl -e "print \"bla\" x 30") &&
sed -e "s|^:\([^ ]*\) \(.*\)$|\2 refs/tags/$tag-\1|" <marks >>packed-refs
}
test_expect_success 'create 2,000 tags in the repo' '
(
cd "$HTTPD_DOCUMENT_ROOT_PATH/repo.git" &&
create_tags 1 2000
)
'
test_expect_success CMDLINE_LIMIT \
'clone the 2,000 tag repo to check OS command line overflow' '
run_with_limited_cmdline git clone $HTTPD_URL/smart/repo.git too-many-refs &&
(
cd too-many-refs &&
git for-each-ref refs/tags >actual &&
test_line_count = 2000 actual
)
'
test_expect_success 'large fetch-pack requests can be sent using chunked encoding' '
GIT_TRACE_CURL=true git -c http.postbuffer=65536 \
clone --bare "$HTTPD_URL/smart/repo.git" split.git 2>err &&
grep "^=> Send header: Transfer-Encoding: chunked" err
'
test_expect_success 'test allowreachablesha1inwant' '
test_when_finished "rm -rf test_reachable.git" &&
server="$HTTPD_DOCUMENT_ROOT_PATH/repo.git" &&
master_sha=$(git -C "$server" rev-parse refs/heads/master) &&
git -C "$server" config uploadpack.allowreachablesha1inwant 1 &&
git init --bare test_reachable.git &&
git -C test_reachable.git remote add origin "$HTTPD_URL/smart/repo.git" &&
git -C test_reachable.git fetch origin "$master_sha"
'
test_expect_success 'test allowreachablesha1inwant with unreachable' '
test_when_finished "rm -rf test_reachable.git; git reset --hard $(git rev-parse HEAD)" &&
#create unreachable sha
echo content >file2 &&
git add file2 &&
git commit -m two &&
git push public HEAD:refs/heads/doomed &&
git push public :refs/heads/doomed &&
server="$HTTPD_DOCUMENT_ROOT_PATH/repo.git" &&
master_sha=$(git -C "$server" rev-parse refs/heads/master) &&
git -C "$server" config uploadpack.allowreachablesha1inwant 1 &&
git init --bare test_reachable.git &&
git -C test_reachable.git remote add origin "$HTTPD_URL/smart/repo.git" &&
# Some protocol versions (e.g. 2) support fetching
# unadvertised objects, so restrict this test to v0.
test_must_fail env GIT_TEST_PROTOCOL_VERSION= \
git -C test_reachable.git fetch origin "$(git rev-parse HEAD)"
'
test_expect_success 'test allowanysha1inwant with unreachable' '
test_when_finished "rm -rf test_reachable.git; git reset --hard $(git rev-parse HEAD)" &&
#create unreachable sha
echo content >file2 &&
git add file2 &&
git commit -m two &&
git push public HEAD:refs/heads/doomed &&
git push public :refs/heads/doomed &&
server="$HTTPD_DOCUMENT_ROOT_PATH/repo.git" &&
master_sha=$(git -C "$server" rev-parse refs/heads/master) &&
git -C "$server" config uploadpack.allowreachablesha1inwant 1 &&
git init --bare test_reachable.git &&
git -C test_reachable.git remote add origin "$HTTPD_URL/smart/repo.git" &&
# Some protocol versions (e.g. 2) support fetching
# unadvertised objects, so restrict this test to v0.
test_must_fail env GIT_TEST_PROTOCOL_VERSION= \
git -C test_reachable.git fetch origin "$(git rev-parse HEAD)" &&
git -C "$server" config uploadpack.allowanysha1inwant 1 &&
git -C test_reachable.git fetch origin "$(git rev-parse HEAD)"
'
test_expect_success EXPENSIVE 'http can handle enormous ref negotiation' '
(
cd "$HTTPD_DOCUMENT_ROOT_PATH/repo.git" &&
create_tags 2001 50000
) &&
git -C too-many-refs fetch -q --tags &&
(
cd "$HTTPD_DOCUMENT_ROOT_PATH/repo.git" &&
create_tags 50001 100000
) &&
git -C too-many-refs fetch -q --tags &&
git -C too-many-refs for-each-ref refs/tags >tags &&
test_line_count = 100000 tags
'
test_expect_success 'custom http headers' '
test_must_fail git -c http.extraheader="x-magic-two: cadabra" \
fetch "$HTTPD_URL/smart_headers/repo.git" &&
git -c http.extraheader="x-magic-one: abra" \
-c http.extraheader="x-magic-two: cadabra" \
fetch "$HTTPD_URL/smart_headers/repo.git" &&
git update-index --add --cacheinfo 160000,$(git rev-parse HEAD),sub &&
git config -f .gitmodules submodule.sub.path sub &&
git config -f .gitmodules submodule.sub.url \
"$HTTPD_URL/smart_headers/repo.git" &&
git submodule init sub &&
test_must_fail git submodule update sub &&
git -c http.extraheader="x-magic-one: abra" \
-c http.extraheader="x-magic-two: cadabra" \
submodule update sub
'
test_expect_success 'using fetch command in remote-curl updates refs' '
SERVER="$HTTPD_DOCUMENT_ROOT_PATH/twobranch" &&
rm -rf "$SERVER" client &&
git init "$SERVER" &&
test_commit -C "$SERVER" foo &&
git -C "$SERVER" update-ref refs/heads/anotherbranch foo &&
git clone $HTTPD_URL/smart/twobranch client &&
test_commit -C "$SERVER" bar &&
git -C client -c protocol.version=0 fetch &&
git -C "$SERVER" rev-parse master >expect &&
git -C client rev-parse origin/master >actual &&
test_cmp expect actual
'
test_expect_success 'fetch by SHA-1 without tag following' '
SERVER="$HTTPD_DOCUMENT_ROOT_PATH/server" &&
rm -rf "$SERVER" client &&
git init "$SERVER" &&
test_commit -C "$SERVER" foo &&
git clone $HTTPD_URL/smart/server client &&
test_commit -C "$SERVER" bar &&
git -C "$SERVER" rev-parse bar >bar_hash &&
git -C client -c protocol.version=0 fetch \
--no-tags origin $(cat bar_hash)
'
test_expect_success 'GIT_REDACT_COOKIES redacts cookies' '
rm -rf clone &&
echo "Set-Cookie: Foo=1" >cookies &&
echo "Set-Cookie: Bar=2" >>cookies &&
GIT_TRACE_CURL=true GIT_REDACT_COOKIES=Bar,Baz \
git -c "http.cookieFile=$(pwd)/cookies" clone \
$HTTPD_URL/smart/repo.git clone 2>err &&
grep "Cookie:.*Foo=1" err &&
grep "Cookie:.*Bar=<redacted>" err &&
! grep "Cookie:.*Bar=2" err
'
test_expect_success 'GIT_REDACT_COOKIES handles empty values' '
rm -rf clone &&
echo "Set-Cookie: Foo=" >cookies &&
GIT_TRACE_CURL=true GIT_REDACT_COOKIES=Foo \
git -c "http.cookieFile=$(pwd)/cookies" clone \
$HTTPD_URL/smart/repo.git clone 2>err &&
grep "Cookie:.*Foo=<redacted>" err
'
test_expect_success 'GIT_TRACE_CURL_NO_DATA prevents data from being traced' '
rm -rf clone &&
GIT_TRACE_CURL=true \
git clone $HTTPD_URL/smart/repo.git clone 2>err &&
grep "=> Send data" err &&
rm -rf clone &&
GIT_TRACE_CURL=true GIT_TRACE_CURL_NO_DATA=1 \
git clone $HTTPD_URL/smart/repo.git clone 2>err &&
! grep "=> Send data" err
'
test_expect_success 'server-side error detected' '
test_must_fail git clone $HTTPD_URL/error_smart/repo.git 2>actual &&
test_i18ngrep "server-side error" actual
'
test_done
|
devzero2000/git-core
|
t/t5551-http-fetch-smart.sh
|
Shell
|
gpl-2.0
| 15,072 |
#! /bin/sh
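# Extract translatable strings from the C++ sources into the kfile_ics.pot message catalog.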
$XGETTEXT *.cpp -o $podir/kfile_ics.pot
|
chusopr/kdepim-ktimetracker-akonadi
|
strigi-analyzer/ics/Messages.sh
|
Shell
|
gpl-2.0
| 51 |
#!/bin/bash -eu
# Copyright 2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
# The real script is maintained in the Samba repo
exec lib/fuzzing/oss-fuzz/build_samba.sh
|
skia-dev/oss-fuzz
|
projects/samba/build.sh
|
Shell
|
apache-2.0
| 767 |
#!/bin/bash
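# Sync the local working tree into the VM, then upload the Chef environments, roles and cookbooks from inside it.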
vagrant ssh -c "rsync -avP --exclude vbox --exclude .chef /chef-bcpc-host/ /home/vagrant/chef-bcpc/"
vagrant ssh -c "cd chef-bcpc && knife environment from file environments/*.json && knife role from file roles/*.json && knife role from file roles/*.rb && knife cookbook upload -a -o cookbooks"
|
mlongob/chef-bach
|
vbox_update.sh
|
Shell
|
apache-2.0
| 307 |
conda config --set anaconda_upload yes
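# anaconda_upload makes every successful "conda build" below upload the resulting package automatically.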
conda build flann
conda build --py all pyflann
conda build --python 2.7 --python 3.4 --python 3.5 --numpy 1.9 --numpy 1.10 pyamg
conda build --python 2.7 --python 3.4 --python 3.5 --numpy 1.10 megaman
|
jakevdp/megaman
|
conda_recipes/build_all.sh
|
Shell
|
bsd-2-clause
| 240 |
#!/bin/bash
storage_account_name=$1
storage_account_key=$2
curl --silent https://raw.githubusercontent.com/spinnaker/spinnaker/master/InstallSpinnaker.sh | sudo bash -s -- --quiet --noinstall_cassandra
# Enable Azure storage
sudo /opt/spinnaker/install/change_cassandra.sh --echo=inMemory --front50=azs
sudo sed -i "s|storageAccountName:|storageAccountName: ${storage_account_name}|" /opt/spinnaker/config/spinnaker-local.yml
sudo sed -i "s|storageAccountKey:|storageAccountKey: ${storage_account_key}|" /opt/spinnaker/config/spinnaker-local.yml
# Restart spinnaker so that config changes take effect
sudo service spinnaker restart
|
AbelHu/azure-quickstart-templates
|
spinnaker-vm-simple/scripts/set_spinnaker.sh
|
Shell
|
mit
| 635 |
#!/bin/bash
# Copyright (c) 2012-2016 Codenvy, S.A.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v1.0
# which accompanies this distribution, and is available at
# http://www.eclipse.org/legal/epl-v10.html
#
# Contributors:
# Tyler Jewell - Initial Implementation
#
help_cmd_rmi() {
text "\n"
text "USAGE: ${CHE_IMAGE_FULLNAME} rmi\n"
text "\n"
text "Removes bootstrap, utility, and system Docker images used to run ${CHE_MINI_PRODUCT_NAME}\n"
text "\n"
}
pre_cmd_rmi() {
:
}
post_cmd_rmi() {
:
}
cmd_rmi() {
info "rmi" "Checking registry for version '$CHE_VERSION' images"
if ! has_version_registry $CHE_VERSION; then
version_error $CHE_VERSION
return 1;
fi
WARNING="rmi !!! Removing images disables ${CHE_FORMAL_PRODUCT_NAME} and forces a pull !!!"
if ! confirm_operation "${WARNING}" "$@"; then
return;
fi
IMAGE_LIST=$(cat "$CHE_MANIFEST_DIR"/$CHE_VERSION/images)
IMAGE_LIST+=$'\n'${BOOTSTRAP_IMAGE_LIST}
IMAGE_LIST+=$'\n'${UTILITY_IMAGE_LIST}
IFS=$'\n'
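# IMAGE_LIST entries have the form NAME=IMAGE; only the image reference after '=' is passed to "docker rmi".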
info "rmi" "Removing ${CHE_MINI_PRODUCT_NAME} Docker images..."
for SINGLE_IMAGE in $IMAGE_LIST; do
VALUE_IMAGE=$(echo $SINGLE_IMAGE | cut -d'=' -f2)
info "rmi" "Removing $VALUE_IMAGE..."
log "docker rmi -f ${VALUE_IMAGE} >> \"${LOGS}\" 2>&1 || true"
docker rmi -f $VALUE_IMAGE >> "${LOGS}" 2>&1 || true
done
}
|
bartlomiej-laczkowski/che
|
dockerfiles/base/scripts/base/commands/cmd_rmi.sh
|
Shell
|
epl-1.0
| 1,434 |
#! /bin/sh
# Copyright (C) 2007-2013 Free Software Foundation, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Make sure the "deleted header file" issue is fixed w.r.t. aclocal.m4
# dependencies. See also related the tests 'remake-deleted-m4-file.sh'
# and 'remake-renamed-m4-macro-and-file.sh'.
. test-init.sh
cat >>configure.ac <<EOF
AC_CONFIG_MACRO_DIR([.])
FOO
AC_OUTPUT
EOF
cat >foo.m4 <<EOF
AC_DEFUN([FOO], [AC_SUBST([GREPFOO])])
EOF
cat >bar.m4 <<EOF
AC_DEFUN([BAR], [AC_SUBST([GREPBAR])])
EOF
: >Makefile.am
$ACLOCAL
$AUTOMAKE
$AUTOCONF
./configure
$MAKE
grep GREPFOO Makefile
grep GREPBAR Makefile && exit 1
sed 's/FOO/BAR/' < configure.ac > t
mv -f t configure.ac
rm -f foo.m4
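# Rebuilding after the macro file was removed must regenerate aclocal.m4 without the stale foo.m4 dependency, so Makefile now contains GREPBAR instead of GREPFOO.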
$MAKE
grep GREPFOO Makefile && exit 1
grep GREPBAR Makefile
:
|
DDTChen/CookieVLC
|
vlc/extras/tools/automake/t/aclocal-deleted-header.sh
|
Shell
|
gpl-2.0
| 1,342 |
#!/usr/bin/env bash
HT_HOME=${INSTALL_DIR:-"/opt/hypertable/current"}
SCRIPT_DIR=`dirname $0`
echo "======================="
echo "Defect #783 - OFFLOAD 2"
echo "======================="
function wait_for_file()
{
file=$1
echo "wait_for_file $file"
while [ ! -f $file ]
do
sleep 2
done
ls -l $file
}
function grep_or_exit_if_found()
{
needle=$1
haystack=$2
grep "$needle" $haystack
if [ $? -eq 0 ];
then
kill `cat rs1.pid`
kill `cat rs2.pid`
kill `cat rs3.pid`
echo "found '$1' in $2 but it shouldn't be there"
exit -1
fi
}
function grep_or_exit_if_not_found()
{
needle=$1
haystack=$2
grep "$needle" $haystack
if [ $? -ne 0 ];
then
kill `cat rs1.pid`
kill `cat rs2.pid`
kill `cat rs3.pid`
echo "did not find '$1' in $2 but it should be there"
exit -1
fi
}
# delete old monitoring data
\rm -rf $HT_HOME/run/monitoring
\rm -rf $HT_HOME/log/*
# start the cluster with 3 RangeServers and load them with data
$HT_HOME/bin/ht-start-test-servers.sh --clear --no-thriftbroker --no-rangeserver \
--Hypertable.Monitoring.Interval=3000
sleep 5
$HT_HOME/bin/ht RangeServer --verbose --pidfile=rs1.pid \
--Hypertable.RangeServer.ProxyName=rs1 \
--Hypertable.RangeServer.Port=15870 \
--Hypertable.RangeServer.Maintenance.Interval 100 \
--Hypertable.RangeServer.Range.SplitSize=400K > rangeserver.rs1.output 2>&1 &
$HT_HOME/bin/ht RangeServer --verbose --pidfile=rs2.pid \
--Hypertable.RangeServer.ProxyName=rs2 \
--Hypertable.RangeServer.Port=15871 \
--induce-failure=fsstat-disk-full:signal:0 \
--Hypertable.RangeServer.Maintenance.Interval 100 \
--Hypertable.RangeServer.Range.SplitSize=400K > rangeserver.rs2.output 2>&1 &
$HT_HOME/bin/ht RangeServer --verbose --pidfile=rs3.pid \
--Hypertable.RangeServer.ProxyName=rs3 \
--Hypertable.RangeServer.Port=15872 \
--induce-failure=fsstat-disk-full:signal:0 \
--Hypertable.RangeServer.Maintenance.Interval 100 \
--Hypertable.RangeServer.Range.SplitSize=400K > rangeserver.rs3.output 2>&1 &
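# rs2 and rs3 are started with induce-failure=fsstat-disk-full, so the master should treat them as full and refuse to move ranges onto them during the OFFLOAD below.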
sleep 3
$HT_HOME/bin/ht shell --no-prompt < $SCRIPT_DIR/create-table.hql
$HT_HOME/bin/ht ht_load_generator update --spec-file=${SCRIPT_DIR}/data.spec \
--table=BalanceTest --max-keys=100000 2>&1
# wait till the rrd data from both RangeServers is available
wait_for_file $HT_HOME/run/monitoring/rangeservers/rs1_stats_v0.rrd
wait_for_file $HT_HOME/run/monitoring/rangeservers/rs2_stats_v0.rrd
wait_for_file $HT_HOME/run/monitoring/rangeservers/rs3_stats_v0.rrd
# dump all keys
${HT_HOME}/bin/ht shell --no-prompt --Hypertable.Request.Timeout=30000 --exec "USE '/'; SELECT * FROM BalanceTest KEYS_ONLY INTO FILE 'dump.pre';"
# offload ranges from rs1
${HT_HOME}/bin/ht shell --no-prompt --exec "BALANCE ALGORITHM='OFFLOAD rs1';"
sleep 15
# make sure that no range was moved to rs2
grep_or_exit_if_found "dest_location=rs2" $HT_HOME/log/Master.log
grep_or_exit_if_found "dest_location=rs3" $HT_HOME/log/Master.log
grep_or_exit_if_not_found "RangeServer rs2: disk use 100% exceeds threshold" \
$HT_HOME/log/Master.log
grep_or_exit_if_not_found "RangeServer rs3: disk use 100% exceeds threshold" \
$HT_HOME/log/Master.log
# once more dump all keys
${HT_HOME}/bin/ht shell --no-prompt --Hypertable.Request.Timeout=30000 --exec "USE '/'; SELECT * FROM BalanceTest KEYS_ONLY INTO FILE 'dump.post';"
# clean up before leaving
kill `cat rs1.pid`
kill `cat rs2.pid`
kill `cat rs3.pid`
# make sure that before/after we have the same keys
diff dump.pre dump.post
if [ $? -ne 0 ];
then
echo "keys differ; exiting"
exit -1
fi
echo "SUCCESS"
|
nkhuyu/hypertable
|
tests/integration/defects/issue783/run2.sh
|
Shell
|
gpl-3.0
| 3,636 |
#!/bin/bash
# exit on errors
set -e
if test x$donotrun != xtrue; then
for omega in 0 1; do
gerris2D -DOMEGA=$omega nonlinear.gfs
gerris2D -DOMEGA=$omega river.gfs
gerris3D -DOMEGA=$omega ocean.gfs
done
fi
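# For each value of OMEGA (0 and 1), plot the maximum relative error of the three solvers over time and render the final solution states with gfsview-batch.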
for omega in 0 1; do
gnuplot <<EOF
set term postscript eps color solid lw 2 18
set output 'error-$omega.eps'
set xlabel 'Time'
set ylabel 'Maximum relative error'
set xrange [0.01:]
set logscale y
plot 'error-$omega' u 3:9 w l t 'Incompressible', \
'error-ocean-$omega' u 3:9 w l t 'Linearised free surface', \
'error-river-$omega' u 3:9 w l t 'Saint-Venant'
EOF
echo "Save end-$omega.eps { format = EPS }" | gfsview-batch2D end-$omega.gfs error.gfv
echo "Save end-river-$omega.eps { format = EPS }" | \
gfsview-batch2D end-river-$omega.gfs error.gfv
echo "Save end-ocean-$omega.eps { format = EPS }" | \
gfsview-batch3D end-ocean-$omega.gfs error-ocean.gfv
done
python <<EOF
from check import *
from sys import *
if (Curve('error-1',3,9) - Curve('error-1.ref',3,9)).max() > 1e-7 or\
(Curve('error-river-1',3,9) - Curve('error-river-1.ref',3,9)).max() > 1e-7 or\
(Curve('error-ocean-1',3,9) - Curve('error-ocean-1.ref',3,9)).max() > 1e-7:
print (Curve('error-1',3,9) - Curve('error-1.ref',3,9)).max()
print (Curve('error-river-1',3,9) - Curve('error-river-1.ref',3,9)).max()
print (Curve('error-ocean-1',3,9) - Curve('error-ocean-1.ref',3,9)).max()
exit(1)
EOF
|
peterlzhang/gerris
|
test/nonlinear/nonlinear.sh
|
Shell
|
gpl-3.0
| 1,433 |
#!/bin/sh
# Run this to generate all the initial makefiles, etc.
srcdir=`dirname $0`
test -z "$srcdir" && srcdir=.
PKG_NAME="libmatemixer"
(test -f $srcdir/configure.ac) || {
echo -n "**Error**: Directory "\`$srcdir\'" does not look like the"
echo " top-level $PKG_NAME directory"
exit 1
}
which mate-autogen || {
echo "You need to install mate-common from the MATE Git"
exit 1
}
REQUIRED_AUTOMAKE_VERSION=1.9
REQUIRED_INTLTOOL_VERSION=0.35
REQUIRED_GTK_DOC_VERSION=1.9
USE_COMMON_DOC_BUILD=yes
. mate-autogen
|
monsta/libmatemixer
|
autogen.sh
|
Shell
|
lgpl-2.1
| 536 |
#!/bin/bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO(jbeda): Provide a way to override project
# gcloud multiplexing for shared GCE/GKE tests.
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${KUBE_ROOT}/cluster/gce/config-common.sh"
GCLOUD=gcloud
ZONE=${KUBE_GCE_ZONE:-us-central1-b}
REGION=${ZONE%-*}
RELEASE_REGION_FALLBACK=${RELEASE_REGION_FALLBACK:-false}
REGIONAL_KUBE_ADDONS=${REGIONAL_KUBE_ADDONS:-true}
NODE_SIZE=${NODE_SIZE:-n1-standard-2}
NUM_NODES=${NUM_NODES:-3}
MASTER_SIZE=${MASTER_SIZE:-n1-standard-$(get-master-size)}
MASTER_DISK_TYPE=pd-ssd
MASTER_DISK_SIZE=${MASTER_DISK_SIZE:-20GB}
NODE_DISK_TYPE=${NODE_DISK_TYPE:-pd-standard}
NODE_DISK_SIZE=${NODE_DISK_SIZE:-100GB}
REGISTER_MASTER_KUBELET=${REGISTER_MASTER:-true}
KUBE_APISERVER_REQUEST_TIMEOUT=300
PREEMPTIBLE_NODE=${PREEMPTIBLE_NODE:-false}
PREEMPTIBLE_MASTER=${PREEMPTIBLE_MASTER:-false}
KUBE_DELETE_NODES=${KUBE_DELETE_NODES:-true}
KUBE_DELETE_NETWORK=${KUBE_DELETE_NETWORK:-true}
MASTER_OS_DISTRIBUTION=${KUBE_MASTER_OS_DISTRIBUTION:-${KUBE_OS_DISTRIBUTION:-gci}}
NODE_OS_DISTRIBUTION=${KUBE_NODE_OS_DISTRIBUTION:-${KUBE_OS_DISTRIBUTION:-debian}}
if [[ "${MASTER_OS_DISTRIBUTION}" == "coreos" ]]; then
MASTER_OS_DISTRIBUTION="container-linux"
fi
if [[ "${NODE_OS_DISTRIBUTION}" == "coreos" ]]; then
NODE_OS_DISTRIBUTION="container-linux"
fi
if [[ "${MASTER_OS_DISTRIBUTION}" == "cos" ]]; then
MASTER_OS_DISTRIBUTION="gci"
fi
if [[ "${NODE_OS_DISTRIBUTION}" == "cos" ]]; then
NODE_OS_DISTRIBUTION="gci"
fi
# By default a cluster will be started with the master on GCI and nodes on
# containervm. If you are updating the containervm version, update this
# variable. Also please update corresponding image for node e2e at:
# https://github.com/kubernetes/kubernetes/blob/master/test/e2e_node/jenkins/image-config.yaml
CVM_VERSION=${CVM_VERSION:-container-vm-v20170214}
GCI_VERSION=${KUBE_GCI_VERSION:-gci-stable-56-9000-84-2}
MASTER_IMAGE=${KUBE_GCE_MASTER_IMAGE:-}
MASTER_IMAGE_PROJECT=${KUBE_GCE_MASTER_PROJECT:-google-containers}
NODE_IMAGE=${KUBE_GCE_NODE_IMAGE:-${CVM_VERSION}}
NODE_IMAGE_PROJECT=${KUBE_GCE_NODE_PROJECT:-google-containers}
CONTAINER_RUNTIME=${KUBE_CONTAINER_RUNTIME:-docker}
GCI_DOCKER_VERSION=${KUBE_GCI_DOCKER_VERSION:-}
RKT_VERSION=${KUBE_RKT_VERSION:-1.23.0}
RKT_STAGE1_IMAGE=${KUBE_RKT_STAGE1_IMAGE:-coreos.com/rkt/stage1-coreos}
NETWORK=${KUBE_GCE_NETWORK:-e2e}
INSTANCE_PREFIX="${KUBE_GCE_INSTANCE_PREFIX:-e2e-test-${USER}}"
CLUSTER_NAME="${CLUSTER_NAME:-${INSTANCE_PREFIX}}"
MASTER_NAME="${INSTANCE_PREFIX}-master"
INITIAL_ETCD_CLUSTER="${MASTER_NAME}"
ETCD_QUORUM_READ="${ENABLE_ETCD_QUORUM_READ:-false}"
MASTER_TAG="${INSTANCE_PREFIX}-master"
NODE_TAG="${INSTANCE_PREFIX}-minion"
CLUSTER_IP_RANGE="${CLUSTER_IP_RANGE:-10.180.0.0/14}"
MASTER_IP_RANGE="${MASTER_IP_RANGE:-10.246.0.0/24}"
RUNTIME_CONFIG="${KUBE_RUNTIME_CONFIG:-}"
# Optional: set feature gates
FEATURE_GATES="${KUBE_FEATURE_GATES:-ExperimentalCriticalPodAnnotation=true}"
TERMINATED_POD_GC_THRESHOLD=${TERMINATED_POD_GC_THRESHOLD:-100}
# Extra docker options for nodes.
EXTRA_DOCKER_OPTS="${EXTRA_DOCKER_OPTS:-}"
# Enable the docker debug mode.
EXTRA_DOCKER_OPTS="${EXTRA_DOCKER_OPTS} --debug"
SERVICE_CLUSTER_IP_RANGE="10.0.0.0/16" # formerly PORTAL_NET
# When set to true, Docker Cache is enabled by default as part of the cluster bring up.
ENABLE_DOCKER_REGISTRY_CACHE=true
# Optional: Deploy a L7 loadbalancer controller to fulfill Ingress requests:
# glbc - GCE L7 Load Balancer Controller
ENABLE_L7_LOADBALANCING="${KUBE_ENABLE_L7_LOADBALANCING:-glbc}"
# Optional: Cluster monitoring to setup as part of the cluster bring up:
# none - No cluster monitoring setup
# influxdb - Heapster, InfluxDB, and Grafana
# google - Heapster, Google Cloud Monitoring, and Google Cloud Logging
# googleinfluxdb - Enable influxdb and google (except GCM)
# standalone - Heapster only. Metrics available via Heapster REST API.
ENABLE_CLUSTER_MONITORING="${KUBE_ENABLE_CLUSTER_MONITORING:-influxdb}"
# Set etcd image (e.g. 3.0.17-alpha.1) and version (e.g. 3.0.17) if you need
# non-default version.
ETCD_IMAGE="${TEST_ETCD_IMAGE:-}"
ETCD_VERSION="${TEST_ETCD_VERSION:-}"
# Default Log level for all components in test clusters and variables to override it in specific components.
TEST_CLUSTER_LOG_LEVEL="${TEST_CLUSTER_LOG_LEVEL:---v=4}"
KUBELET_TEST_LOG_LEVEL="${KUBELET_TEST_LOG_LEVEL:-$TEST_CLUSTER_LOG_LEVEL}"
DOCKER_TEST_LOG_LEVEL="${DOCKER_TEST_LOG_LEVEL:---log-level=info}"
API_SERVER_TEST_LOG_LEVEL="${API_SERVER_TEST_LOG_LEVEL:-$TEST_CLUSTER_LOG_LEVEL}"
CONTROLLER_MANAGER_TEST_LOG_LEVEL="${CONTROLLER_MANAGER_TEST_LOG_LEVEL:-$TEST_CLUSTER_LOG_LEVEL}"
SCHEDULER_TEST_LOG_LEVEL="${SCHEDULER_TEST_LOG_LEVEL:-$TEST_CLUSTER_LOG_LEVEL}"
KUBEPROXY_TEST_LOG_LEVEL="${KUBEPROXY_TEST_LOG_LEVEL:-$TEST_CLUSTER_LOG_LEVEL}"
TEST_CLUSTER_DELETE_COLLECTION_WORKERS="${TEST_CLUSTER_DELETE_COLLECTION_WORKERS:---delete-collection-workers=1}"
TEST_CLUSTER_MAX_REQUESTS_INFLIGHT="${TEST_CLUSTER_MAX_REQUESTS_INFLIGHT:-}"
TEST_CLUSTER_RESYNC_PERIOD="${TEST_CLUSTER_RESYNC_PERIOD:---min-resync-period=3m}"
# ContentType used by all components to communicate with apiserver.
TEST_CLUSTER_API_CONTENT_TYPE="${TEST_CLUSTER_API_CONTENT_TYPE:-}"
KUBELET_TEST_ARGS="${KUBELET_TEST_ARGS:-} --max-pods=110 --serialize-image-pulls=false --outofdisk-transition-frequency=0 ${TEST_CLUSTER_API_CONTENT_TYPE}"
APISERVER_TEST_ARGS="${APISERVER_TEST_ARGS:-} --runtime-config=extensions/v1beta1 ${TEST_CLUSTER_DELETE_COLLECTION_WORKERS} ${TEST_CLUSTER_MAX_REQUESTS_INFLIGHT}"
CONTROLLER_MANAGER_TEST_ARGS="${CONTROLLER_MANAGER_TEST_ARGS:-} ${TEST_CLUSTER_RESYNC_PERIOD} ${TEST_CLUSTER_API_CONTENT_TYPE}"
SCHEDULER_TEST_ARGS="${SCHEDULER_TEST_ARGS:-} ${TEST_CLUSTER_API_CONTENT_TYPE}"
KUBEPROXY_TEST_ARGS="${KUBEPROXY_TEST_ARGS:-} ${TEST_CLUSTER_API_CONTENT_TYPE}"
# Optional: Enable node logging.
ENABLE_NODE_LOGGING="${KUBE_ENABLE_NODE_LOGGING:-true}"
LOGGING_DESTINATION="${KUBE_LOGGING_DESTINATION:-gcp}" # options: elasticsearch, gcp
# Optional: When set to true, Elasticsearch and Kibana will be setup as part of the cluster bring up.
ENABLE_CLUSTER_LOGGING="${KUBE_ENABLE_CLUSTER_LOGGING:-true}"
ELASTICSEARCH_LOGGING_REPLICAS=1
# Optional: Don't require https for registries in our local RFC1918 network
if [[ ${KUBE_ENABLE_INSECURE_REGISTRY:-false} == "true" ]]; then
EXTRA_DOCKER_OPTS="${EXTRA_DOCKER_OPTS} --insecure-registry 10.0.0.0/8"
fi
# Optional: Install cluster DNS.
ENABLE_CLUSTER_DNS="${KUBE_ENABLE_CLUSTER_DNS:-true}"
DNS_SERVER_IP="10.0.0.10"
DNS_DOMAIN="cluster.local"
# Optional: Enable DNS horizontal autoscaler
ENABLE_DNS_HORIZONTAL_AUTOSCALER="${KUBE_ENABLE_DNS_HORIZONTAL_AUTOSCALER:-true}"
# Optional: Install cluster docker registry.
ENABLE_CLUSTER_REGISTRY="${KUBE_ENABLE_CLUSTER_REGISTRY:-false}"
CLUSTER_REGISTRY_DISK="${CLUSTER_REGISTRY_DISK:-${INSTANCE_PREFIX}-kube-system-kube-registry}"
CLUSTER_REGISTRY_DISK_SIZE="${CLUSTER_REGISTRY_DISK_SIZE:-200GB}"
CLUSTER_REGISTRY_DISK_TYPE_GCE="${CLUSTER_REGISTRY_DISK_TYPE_GCE:-pd-standard}"
# Optional: Install Kubernetes UI
ENABLE_CLUSTER_UI="${KUBE_ENABLE_CLUSTER_UI:-true}"
# Optional: Install node problem detector.
# none - Do not run node problem detector.
# daemonset - Run node problem detector as daemonset.
# standalone - Run node problem detector as standalone system daemon.
if [[ "${NODE_OS_DISTRIBUTION}" == "gci" ]]; then
# Enable standalone mode by default for gci.
# TODO: Consider upgrade test.
ENABLE_NODE_PROBLEM_DETECTOR="${KUBE_ENABLE_NODE_PROBLEM_DETECTOR:-standalone}"
else
ENABLE_NODE_PROBLEM_DETECTOR="${KUBE_ENABLE_NODE_PROBLEM_DETECTOR:-daemonset}"
fi
# Optional: Create autoscaler for cluster's nodes.
ENABLE_CLUSTER_AUTOSCALER="${KUBE_ENABLE_CLUSTER_AUTOSCALER:-false}"
if [[ "${ENABLE_CLUSTER_AUTOSCALER}" == "true" ]]; then
AUTOSCALER_MIN_NODES="${KUBE_AUTOSCALER_MIN_NODES:-}"
AUTOSCALER_MAX_NODES="${KUBE_AUTOSCALER_MAX_NODES:-}"
AUTOSCALER_ENABLE_SCALE_DOWN="${KUBE_AUTOSCALER_ENABLE_SCALE_DOWN:-false}"
fi
# Optional: Enable Rescheduler
ENABLE_RESCHEDULER="${KUBE_ENABLE_RESCHEDULER:-true}"
# If we included ResourceQuota, we should keep it at the end of the list to prevent incrementing quota usage prematurely.
ADMISSION_CONTROL="${KUBE_ADMISSION_CONTROL:-NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota}"
# Optional: if set to true kube-up will automatically check for existing resources and clean them up.
KUBE_UP_AUTOMATIC_CLEANUP=${KUBE_UP_AUTOMATIC_CLEANUP:-false}
# Optional: setting it to true denotes this is a testing cluster,
# so that we can use pulled kubernetes binaries, even if binaries
# are pre-installed in the image. Note that currently this logic
# is only supported in trusty or GCI.
TEST_CLUSTER="${TEST_CLUSTER:-true}"
# Storage backend. 'etcd2' and 'etcd3' are supported.
STORAGE_BACKEND=${STORAGE_BACKEND:-}
# Storage media type: application/json and application/vnd.kubernetes.protobuf are supported.
STORAGE_MEDIA_TYPE=${STORAGE_MEDIA_TYPE:-}
# OpenContrail networking plugin specific settings
NETWORK_PROVIDER="${NETWORK_PROVIDER:-kubenet}" # none, opencontrail, kubenet
OPENCONTRAIL_TAG="${OPENCONTRAIL_TAG:-R2.20}"
OPENCONTRAIL_KUBERNETES_TAG="${OPENCONTRAIL_KUBERNETES_TAG:-master}"
OPENCONTRAIL_PUBLIC_SUBNET="${OPENCONTRAIL_PUBLIC_SUBNET:-10.1.0.0/16}"
# Network Policy plugin specific settings.
NETWORK_POLICY_PROVIDER="${NETWORK_POLICY_PROVIDER:-none}" # calico
# How should the kubelet configure hairpin mode?
HAIRPIN_MODE="${HAIRPIN_MODE:-promiscuous-bridge}" # promiscuous-bridge, hairpin-veth, none
# Optional: if set to true, kube-up will configure the cluster to run e2e tests.
E2E_STORAGE_TEST_ENVIRONMENT=${KUBE_E2E_STORAGE_TEST_ENVIRONMENT:-false}
# Optional: if set to true, an image puller is deployed. Only for use in e2e clusters.
# TODO: Pipe this through GKE e2e clusters once we know it helps.
PREPULL_E2E_IMAGES="${PREPULL_E2E_IMAGES:-true}"
# Evict pods whenever compute resource availability on the nodes gets below a threshold.
EVICTION_HARD="${EVICTION_HARD:-memory.available<250Mi,nodefs.available<10%,nodefs.inodesFree<5%}"
# Optional: custom scheduling algorithm
SCHEDULING_ALGORITHM_PROVIDER="${SCHEDULING_ALGORITHM_PROVIDER:-}"
# Optional: install a default StorageClass
ENABLE_DEFAULT_STORAGE_CLASS="${ENABLE_DEFAULT_STORAGE_CLASS:-true}"
# TODO(dawn1107): Remove this once the flag is built into CVM image.
# Kernel panic upon soft lockup issue
SOFTLOCKUP_PANIC="${SOFTLOCKUP_PANIC:-true}" # true, false
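# Illustrative usage (a sketch for documentation only, not part of this config):
# the test defaults above can be overridden via environment variables before
# invoking kube-up, e.g.
#   KUBE_GCE_ZONE=europe-west1-b NUM_NODES=5 KUBE_OS_DISTRIBUTION=gci \
#     cluster/kube-up.sh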
|
elipapa/kubernetes
|
cluster/gce/config-test.sh
|
Shell
|
apache-2.0
| 11,174 |
#!/usr/bin/env node
/*
* Copyright (c) 2015-present, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree. An additional grant
* of patent rights can be found in the PATENTS file in the same directory.
*
*/
'use strict';
var Runner = require('../dist/Runner.js');
var path = require('path');
var opts = require('nomnom')
.script('jscodeshift')
.options({
path: {
position: 0,
help: 'Files or directory to transform',
list: true,
metavar: 'FILE',
required: true
},
transform: {
abbr: 't',
default: './transform.js',
help: 'Path to the transform file',
metavar: 'FILE'
},
cpus: {
abbr: 'c',
help: '(all by default) Determines the number of processes started.'
},
verbose: {
abbr: 'v',
choices: [0, 1, 2],
default: 0,
help: 'Show more information about the transform process'
},
dry: {
abbr: 'd',
flag: true,
help: 'Dry run (no changes are made to files)'
},
print: {
abbr: 'p',
flag: true,
help: 'Print output, useful for development'
},
babel: {
flag: true,
default: true,
help: 'Apply Babel to transform files'
},
extensions: {
default: 'js',
help: 'File extensions the transform file should be applied to'
}
})
.parse();
Runner.run(
path.resolve(opts.transform),
opts.path,
opts
);
|
kentaromiura/RPGMV-Destructure
|
node_modules/reshape/node_modules/jscodeshift/bin/jscodeshift.sh
|
Shell
|
mit
| 1,550 |
#!/bin/sh
# Copyright (C) 2005-2012 MaNGOS project <http://getmangos.com/>
#
# This file is free software; as a special exception the author gives
# unlimited permission to copy and/or distribute it, with or without
# modifications, as long as this notice is preserved.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY, to the extent permitted by law; without even the
# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
## Expected param 1 to be 'a' for all, else ask some questions
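## Illustrative invocations (assumed to be run from the directory holding the
## extractor binaries):
##   ./ExtractResources.sh a    # extract everything without asking questions
##   ./ExtractResources.sh      # interactive mode, asks what to extract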
## Normal log file (if not overwritten by second param)
LOG_FILE="MaNGOSExtractor.log"
## Detailed log file
DETAIL_LOG_FILE="MaNGOSExtractor_detailed.log"
## Change this to a value fitting for your sys!
NUM_CPU="2"
## ! Use below only for finetuning or if you know what you are doing !
USE_AD="0"
USE_VMAPS="0"
USE_MMAPS="0"
USE_MMAPS_OFFMESH="0"
if [ "$1" == "a" ]
then
## extract all
USE_AD="1"
USE_VMAPS="1"
USE_MMAPS="1"
else
## do some questioning!
echo
echo "Welcome to helper script to extract required dataz for MaNGOS!"
echo "Should all dataz (dbc, maps, vmaps and mmaps be extracted? (y/n)"
read line
if [ "$line" == "y" ]
then
## extract all
USE_AD="1"
USE_VMAPS="1"
USE_MMAPS="1"
else
echo
echo "Should dbc and maps be extracted? (y/n)"
read line
if [ "$line" == "y" ]; then USE_AD="1"; fi
echo
echo "Should vmaps be extracted? (y/n)"
read line
if [ "$line" == "y" ]; then USE_VMAPS="1"; fi
echo
echo "Should mmaps be extracted? (y/n)"
echo "WARNING! This will take several hours!"
read line
if [ "$line" == "y" ]
then
USE_MMAPS="1";
else
echo "Only reextract offmesh tiles for mmaps?"
read line
if [ "$line" == "y" ]
then
USE_MMAPS_OFFMESH="1";
fi
fi
fi
fi
## Special case: Only reextract offmesh tiles
if [ "$USE_MMAPS_OFFMESH" == "1" ]
then
echo "Only extracting offmesh meshes"
MovemapGen.sh offmesh $LOG_FILE $DETAIL_LOG_FILE
exit 0
fi
## Obtain number of processes
if [ "$USE_MMAPS" == "1" ]
then
echo "How many CPUs should be used for extracting mmaps? (1-4)"
read line
if [ "$line" -ge "1" -a "$line" -le "4" ]
then
NUM_CPU=$line
else
echo "Only number between 1 and 4 supported!"
exit 1
fi
fi
## Give some status
echo "Current Settings: Extract DBCs/maps: $USE_AD, Extract vmaps: $USE_VMAPS, Extract mmaps: $USE_MMAPS on $NUM_CPU processes"
if [ "$1" != "a" ]
then
echo "If you don't like this settings, interrupt with CTRL+C"
read line
fi
echo "`date`: Start extracting dataz for MaNGOS" | tee $LOG_FILE
## Handle log messages
if [ "$USE_AD" == "1" ];
then
echo "DBC and map files will be extracted" | tee -a $LOG_FILE
else
echo "DBC and map files won't be extracted!" | tee -a $LOG_FILE
fi
if [ "$USE_VMAPS" == "1" ]
then
echo "Vmaps will be extracted" | tee -a $LOG_FILE
else
echo "Vmaps won't be extracted!" | tee -a $LOG_FILE
fi
if [ "$USE_MMAPS" == "1" ]
then
echo "Mmaps will be extracted with $NUM_CPU processes" | tee -a $LOG_FILE
else
echo "Mmaps files won't be extracted!" | tee -a $LOG_FILE
fi
echo | tee -a $LOG_FILE
echo "`date`: Start extracting dataz for MaNGOS, DBCs/maps $USE_AD, vmaps $USE_VMAPS, mmaps $USE_MMAPS on $NUM_CPU processes" | tee $DETAIL_LOG_FILE
echo | tee -a $DETAIL_LOG_FILE
## Extract dbcs and maps
if [ "$USE_AD" == "1" ]
then
echo "`date`: Start extraction of DBCs and map files..." | tee -a $LOG_FILE
ad | tee -a $DETAIL_LOG_FILE
echo "`date`: Extracting of DBCs and map files finished" | tee -a $LOG_FILE
echo | tee -a $LOG_FILE
echo | tee -a $DETAIL_LOG_FILE
fi
## Extract vmaps
if [ "$USE_VMAPS" == "1" ]
then
echo "`date`: Start extraction of vmaps..." | tee -a $LOG_FILE
vmapExtractor3 | tee -a $DETAIL_LOG_FILE
echo "`date`: Extracting of vmaps finished" | tee -a $LOG_FILE
mkdir vmaps
echo "`date`: Start assembling of vmaps..." | tee -a $LOG_FILE
vmap_assembler.exe buildings vmaps | tee -a $DETAIL_LOG_FILE
echo "`date`: Assembling of vmaps finished" | tee -a $LOG_FILE
echo | tee -a $LOG_FILE
echo | tee -a $DETAIL_LOG_FILE
fi
## Extract mmaps
if [ "$USE_MMAPS" == "1" ]
then
MovemapGen.sh $NUM_CPU $LOG_FILE $DETAIL_LOG_FILE
fi
|
Xadras-01/looking4group-core
|
tools/extractor_binary/ExtractResources.sh
|
Shell
|
gpl-2.0
| 4,290 |
#!/bin/sh
# Tests for ln -L/-P.
# Copyright (C) 2009-2013 Free Software Foundation, Inc.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
. "${srcdir=.}/tests/init.sh"; path_prepend_ ./src
print_ver_ ln
# ===================================================
# ensure -s silently overrides -L, -P
touch a || framework_failure_
ln -L -s a symlink1 || fail=1
ln -P -s symlink1 symlink2 || fail=1
ln -s -L -P symlink2 symlink3 || fail=1
# ===================================================
# ensure that -L follows symlinks, and overrides -P
ln -P -L symlink3 hard-to-a || fail=1
ls=$(ls -lG hard-to-a)x
case "$ls" in
*'hard-to-ax') ;;
*'hard-to-a -> '*x) fail=1 ;;
*) framework_failure_ ;;
esac
# ===================================================
# ensure that -P links (or at least duplicates) symlinks, and overrides -L
ln -L -P symlink3 hard-to-3 || fail=1
ls=$(ls -lG hard-to-3)x
case "$ls" in
*'hard-to-3 -> symlink2x') ;;
*'hard-to-3x') fail=1 ;;
*'hard-to-3 -> '*x) fail=1 ;;
*) framework_failure_ ;;
esac
# ===================================================
# Create a hard link to a dangling symlink.
ln -s /no-such-dir || framework_failure_
ln -L no-such-dir hard-to-dangle 2>err && fail=1
case $(cat err) in
*" failed to access 'no-such-dir'":*) ;;
*) fail=1 ;;
esac
ln -P no-such-dir hard-to-dangle || fail=1
# ===================================================
# Create a hard link to a symlink to a directory.
mkdir d || framework_failure_
ln -s d link-to-dir || framework_failure_
ln -L link-to-dir hard-to-dir-link 2>err && fail=1
case $(cat err) in
*": 'link-to-dir': hard link not allowed for directory"*) ;;
*) fail=1 ;;
esac
ln -P link-to-dir/ hard-to-dir-link 2>err && fail=1
case $(cat err) in
*": 'link-to-dir/': hard link not allowed for directory"*) ;;
*) fail=1 ;;
esac
ln -P link-to-dir hard-to-dir-link || fail=1
Exit $fail
|
mmayer/coreutils
|
tests/ln/hard-to-sym.sh
|
Shell
|
gpl-3.0
| 2,472 |
#!/bin/bash
# This file is part of darktable.
#
# darktable is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# darktable is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with darktable. If not, see <http://www.gnu.org/licenses/>.
# Continuous Integration script for darktable
# Author: Peter Budai <[email protected]>
# it is supposed to be run by appveyor-ci
# Enable colors
normal=$(tput sgr0)
red=$(tput setaf 1)
green=$(tput setaf 2)
cyan=$(tput setaf 6)
# Basic status function
_status() {
local type="${1}"
local status="${package:+${package}: }${2}"
local items=("${@:3}")
case "${type}" in
failure) local -n nameref_color='red'; title='[DARKTABLE CI] FAILURE:' ;;
success) local -n nameref_color='green'; title='[DARKTABLE CI] SUCCESS:' ;;
message) local -n nameref_color='cyan'; title='[DARKTABLE CI]'
esac
printf "\n${nameref_color}${title}${normal} ${status}\n\n"
}
# Run command with status
execute(){
local status="${1}"
local command="${2}"
local arguments=("${@:3}")
cd "${package:-.}"
message "${status}"
if [[ "${command}" != *:* ]]
then ${command} ${arguments[@]}
else ${command%%:*} | ${command#*:} ${arguments[@]}
fi || failure "${status} failed"
cd - > /dev/null
}
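# Illustrative calls of execute() (hypothetical commands, shown only to
# document the "cmd:pipe" convention handled above):
#   execute 'Listing packages' 'pacman -Q'
#   execute 'Counting packages' 'pacman -Q:wc -l'   # runs: pacman -Q | wc -l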
# Build
build_darktable() {
cd $(cygpath ${APPVEYOR_BUILD_FOLDER})
mkdir build && cd build
cmake -G "MSYS Makefiles" -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$(cygpath ${APPVEYOR_BUILD_FOLDER})/build $(cygpath ${APPVEYOR_BUILD_FOLDER})
cmake --build .
cmake --build . --target package
}
# Status functions
failure() { local status="${1}"; local items=("${@:2}"); _status failure "${status}." "${items[@]}"; exit 1; }
success() { local status="${1}"; local items=("${@:2}"); _status success "${status}." "${items[@]}"; exit 0; }
message() { local status="${1}"; local items=("${@:2}"); _status message "${status}" "${items[@]}"; }
# Install build environment and build
PATH=/c/msys64/mingw64/bin:$PATH
# reduce time required to install packages by disabling pacman's disk space checking
sed -i 's/^CheckSpace/#CheckSpace/g' /etc/pacman.conf
# write a custom fonts.conf to speed up fc-cache
export FONTCONFIG_FILE=$(cygpath -a fonts.conf)
cat > "$FONTCONFIG_FILE" <<EOF
<?xml version="1.0"?>
<!DOCTYPE fontconfig SYSTEM "fonts.dtd">
<fontconfig><dir>$(cygpath -aw fonts)</dir></fontconfig>
EOF
execute 'Installing base-devel and toolchain' pacman -S --needed --noconfirm mingw-w64-x86_64-{toolchain,clang,cmake}
execute 'Installing dependencies' pacman -S --needed --noconfirm mingw-w64-x86_64-{exiv2,lcms2,lensfun,dbus-glib,openexr,sqlite3,libxslt,libsoup,libwebp,libsecret,lua,graphicsmagick,openjpeg2,gtk3,pugixml,libexif,osm-gps-map,libgphoto2,flickcurl,drmingw,gettext,python3,iso-codes}
execute 'Updating lensfun database' lensfun-update-data
execute 'Building darktable' build_darktable
|
edgardoh/darktable
|
.ci/ci-script-windows.sh
|
Shell
|
gpl-3.0
| 3,401 |
#!/bin/bash
# Update Doxygen documentation after push to 'master'.
# Author: @pah
set -e
SUDO=sudo
DOXYGEN_VER=doxygen-1.8.7
DOXYGEN_TAR=${DOXYGEN_VER}.linux.bin.tar.gz
DOXYGEN_URL="http://ftp.stack.nl/pub/users/dimitri/${DOXYGEN_TAR}"
DOXYGEN_BIN="/usr/local/bin/doxygen"
: ${GITHUB_REPO:="miloyip/rapidjson"}
GITHUB_HOST="github.com"
GITHUB_CLONE="git://${GITHUB_HOST}/${GITHUB_REPO}"
GITHUB_URL="https://${GITHUB_HOST}/${GITHUB_PUSH-${GITHUB_REPO}}"
# if not set, ignore password
#GIT_ASKPASS="${TRAVIS_BUILD_DIR}/gh_ignore_askpass.sh"
skip() {
echo "$@" 1>&2
echo "Exiting..." 1>&2
exit 0
}
abort() {
echo "Error: $@" 1>&2
echo "Exiting..." 1>&2
exit 1
}
# TRAVIS_BUILD_DIR not set, exiting
[ -d "${TRAVIS_BUILD_DIR-/nonexistent}" ] || \
abort '${TRAVIS_BUILD_DIR} not set or nonexistent.'
# check for pull-requests
[ "${TRAVIS_PULL_REQUEST}" = "false" ] || \
skip "Not running Doxygen for pull-requests."
# check for branch name
[ "${TRAVIS_BRANCH}" = "master" ] || \
skip "Running Doxygen only for updates on 'master' branch (current: ${TRAVIS_BRANCH})."
# check for job number
[ "${TRAVIS_JOB_NUMBER}" = "${TRAVIS_BUILD_NUMBER}.1" ] || \
skip "Running Doxygen only on first job of build ${TRAVIS_BUILD_NUMBER} (current: ${TRAVIS_JOB_NUMBER})."
# install doxygen binary distribution
doxygen_install()
{
wget -O - "${DOXYGEN_URL}" | \
tar xz -C ${TMPDIR-/tmp} ${DOXYGEN_VER}/bin/doxygen
$SUDO install -m 755 ${TMPDIR-/tmp}/${DOXYGEN_VER}/bin/doxygen \
${DOXYGEN_BIN};
}
doxygen_run()
{
cd "${TRAVIS_BUILD_DIR}";
doxygen build/Doxyfile;
}
gh_pages_prepare()
{
cd "${TRAVIS_BUILD_DIR}/doc";
[ ! -d "html" ] || \
abort "Doxygen target directory already exists."
git --version
git clone --single-branch -b gh-pages "${GITHUB_CLONE}" html
cd html
# setup git config (with defaults)
git config user.name "${GIT_NAME-travis}"
git config user.email "${GIT_EMAIL-"travis@localhost"}"
# clean working dir
rm -f .git/index
git clean -df
}
gh_pages_commit() {
cd "${TRAVIS_BUILD_DIR}/doc/html";
git add --all;
git diff-index --quiet HEAD || git commit -m "Automatic doxygen build";
}
gh_setup_askpass() {
cat > ${GIT_ASKPASS} <<EOF
#!/bin/bash
echo
exit 0
EOF
chmod a+x "$GIT_ASKPASS"
}
gh_pages_push() {
# check for secure variables
[ "${TRAVIS_SECURE_ENV_VARS}" = "true" ] || \
skip "Secure variables not available, not updating GitHub pages."
# check for GitHub access token
[ "${GH_TOKEN+set}" = set ] || \
skip "GitHub access token not available, not updating GitHub pages."
[ "${#GH_TOKEN}" -eq 40 ] || \
abort "GitHub token invalid: found ${#GH_TOKEN} characters, expected 40."
cd "${TRAVIS_BUILD_DIR}/doc/html";
# setup credentials (hide in "set -x" mode)
git remote set-url --push origin "${GITHUB_URL}"
git config credential.helper 'store'
# ( set +x ; git config credential.username "${GH_TOKEN}" )
( set +x ; [ -f ${HOME}/.git-credentials ] || \
( echo "https://${GH_TOKEN}:@${GITHUB_HOST}" > ${HOME}/.git-credentials ; \
chmod go-rw ${HOME}/.git-credentials ) )
# push to GitHub
git push origin gh-pages
}
doxygen_install
gh_pages_prepare
doxygen_run
gh_pages_commit
gh_pages_push
|
flingone/frameworks_base_cmds_remoted
|
libs/rapidjson/build/travis-doxygen.sh
|
Shell
|
apache-2.0
| 3,168 |
#! /bin/sh
# $FreeBSD$
. $(dirname $0)/../../common.sh
# Description
DESC="New path for builtin shells (2)."
# Setup
TEST_COPY_FILES="shell 755"
# Run
TEST_N=3
TEST_1="sh_test"
TEST_2="csh_test"
TEST_3="ksh_test"
TEST_3_SKIP="no ksh on FreeBSD"
eval_cmd $*
|
jrobhoward/SCADAbase
|
usr.bin/bmake/tests/shell/path_select/legacy_test.sh
|
Shell
|
bsd-3-clause
| 262 |
#!/bin/sh
# $FreeBSD$
base=`basename $0`
echo "1..2"
name="pkill -U <uid>"
ruid=`id -ur`
sleep=$(pwd)/sleep.txt
ln -sf /bin/sleep $sleep
$sleep 5 &
sleep 0.3
pkill -f -U $ruid $sleep
ec=$?
case $ec in
0)
echo "ok 1 - $name"
;;
*)
echo "not ok 1 - $name"
;;
esac
rm -f $sleep
name="pkill -U <user>"
ruid=`id -urn`
sleep=$(pwd)/sleep.txt
ln -sf /bin/sleep $sleep
$sleep 5 &
sleep 0.3
pkill -f -U $ruid $sleep
ec=$?
case $ec in
0)
echo "ok 2 - $name"
;;
*)
echo "not ok 2 - $name"
;;
esac
rm -f $sleep
|
jrobhoward/SCADAbase
|
bin/pkill/tests/pkill-U_test.sh
|
Shell
|
bsd-3-clause
| 511 |
#!/bin/bash
FN="mAPKLData_1.26.0.tar.gz"
URLS=(
"https://bioconductor.org/packages/3.14/data/experiment/src/contrib/mAPKLData_1.26.0.tar.gz"
"https://bioarchive.galaxyproject.org/mAPKLData_1.26.0.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-mapkldata/bioconductor-mapkldata_1.26.0_src_all.tar.gz"
)
MD5="e4b59441109316d1b2c4e85294c8f8f2"
# Use a staging area in the conda dir rather than temp dirs, both to avoid
# permission issues as well as to have things downloaded in a predictable
# manner.
STAGING=$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM
mkdir -p $STAGING
TARBALL=$STAGING/$FN
SUCCESS=0
for URL in ${URLS[@]}; do
curl $URL > $TARBALL
[[ $? == 0 ]] || continue
# Platform-specific md5sum checks.
if [[ $(uname -s) == "Linux" ]]; then
if md5sum -c <<<"$MD5 $TARBALL"; then
SUCCESS=1
break
fi
else if [[ $(uname -s) == "Darwin" ]]; then
if [[ $(md5 $TARBALL | cut -f4 -d " ") == "$MD5" ]]; then
SUCCESS=1
break
fi
fi
fi
done
if [[ $SUCCESS != 1 ]]; then
echo "ERROR: post-link.sh was unable to download any of the following URLs with the md5sum $MD5:"
printf '%s\n' "${URLS[@]}"
exit 1
fi
# Install and clean up
R CMD INSTALL --library=$PREFIX/lib/R/library $TARBALL
rm $TARBALL
rmdir $STAGING
|
cokelaer/bioconda-recipes
|
recipes/bioconductor-mapkldata/post-link.sh
|
Shell
|
mit
| 1,297 |
#!/usr/bin/env bash
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
set -o errexit
set -o nounset
set -o pipefail
while (( "${#}" ))
do
case "${1}" in
--servo)
engine="--engine servo"
;;
--gecko)
engine="--engine gecko"
;;
--submit)
submit=1
;;
*)
echo "Unknown option ${1}."
exit
;;
esac
shift
done
if [[ -z "${engine:-}" ]];
then echo "You didn't specify the engine to run: --servo or --gecko."; exit;
fi
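# Illustrative invocations (derived from the options handled above):
#   ./test_all.sh --servo            # run the suite against Servo only
#   ./test_all.sh --gecko --submit   # run against Gecko and push to Perfherder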
echo "Starting the local server"
python3 -m http.server > /dev/null 2>&1 &
# TODO: enable the full manifest when #11087 is fixed
# https://github.com/servo/servo/issues/11087
# MANIFEST="page_load_test/test.manifest"
MANIFEST="page_load_test/tp5n/20160509.manifest" # A manifest that excludes
# timeout test cases
PERF_FILE="output/perf-$(date --iso-8601=seconds).json"
echo "Running tests"
python3 runner.py ${engine} --runs 3 "${MANIFEST}" "${PERF_FILE}"
if [[ "${submit:-}" ]];
then
echo "Submitting to Perfherder"
# Perfherder SSL check will fail if time is not accurate,
# sync time before you submit
# TODO: we are using Servo's revision hash for Gecko's result to make both
# results appear on the same date. Use the correct result when Perfherder
# allows us to change the date.
python3 submit_to_perfherder.py \
"${output:-}" "${engine}" "${PERF_FILE}" servo/revision.json
fi
echo "Stopping the local server"
trap 'kill $(jobs -pr)' SIGINT SIGTERM EXIT
|
6112/servo
|
etc/ci/performance/test_all.sh
|
Shell
|
mpl-2.0
| 1,662 |
#!/bin/sh
# uzbl window detection
. "$UZBL_UTIL_DIR/uzbl-util.sh"
readonly UZBL_WIN_POS="$( xwininfo -id "$UZBL_XID" | \
sed -n -e 's/[ ]*Corners:[ ]*[+-]\([0-9]*\)[+-]\([0-9]*\).*$/\1 \2/p' )"
readonly UZBL_WIN_SIZE="$( xwininfo -id "$UZBL_XID" | \
sed -n -e 's/[ ]*-geometry[ ]*\([0-9]*\)x\([0-9]*\).*$/\1 \2/p' )"
readonly UZBL_WIN_POS_X="$( print "$UZBL_WIN_POS" | cut -d ' ' -f 1 )"
readonly UZBL_WIN_POS_Y="$( print "$UZBL_WIN_POS" | cut -d ' ' -f 2 )"
readonly UZBL_WIN_WIDTH="$( print "$UZBL_WIN_SIZE" | cut -d ' ' -f 1 )"
readonly UZBL_WIN_HEIGHT="$( print "$UZBL_WIN_SIZE" | cut -d ' ' -f 2 )"
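# For reference, the xwininfo lines parsed above look roughly like this
# (illustrative output; exact spacing and values vary):
#   Corners:  +10+20  -1590+20  -1590-860  +10-860
#   -geometry 320x200+10+20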
|
GSI/uzbl
|
examples/data/scripts/util/uzbl-window.sh
|
Shell
|
gpl-3.0
| 659 |
#!/bin/sh
# Collect the file changes of a PR
set -e
################################################################################
# Input:  (none)
# Output:
# - /tmp/changes # list of changed files
################################################################################
echo "[收集] 计算 PR 分支与目标分支的分叉点……"
TARGET_BRANCH="${GITHUB_BASE_REF:-master}"
echo "[收集] 目标分支设定为:${TARGET_BRANCH}"
MERGE_BASE='HEAD^'
[ "$PULL_REQUEST_ID" != 'false' ] \
&& MERGE_BASE="$(git merge-base "$TARGET_BRANCH" HEAD)"
echo "[收集] 找到分叉节点:${MERGE_BASE}"
echo "[收集] 变更摘要:"
git --no-pager show --summary "${MERGE_BASE}..HEAD"
{
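# The pattern below matches the Chinese phrase for "bypass checks" in commit
# messages; it is kept verbatim because it is what contributors actually write.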
git --no-pager log --oneline "${MERGE_BASE}..HEAD" | grep -Eq '绕过检查' && {
touch /tmp/bypass
echo "[收集] 已标记为绕过检查项"
}
} || true
echo "[收集] 写出文件变更列表……"
RAW_CHANGES="$(git diff "$MERGE_BASE" HEAD --no-renames --name-status -z \
| tr '\0' '\n')"
[ -z "$RAW_CHANGES" ] && {
echo "[收集] 无变更,退出……"
exit 1
}
echo "$RAW_CHANGES" | while read -r STAT; do
read -r NAME
echo "${STAT} ${NAME}"
done > /tmp/changes
echo "[收集] 已写出文件变更列表:"
cat /tmp/changes
{ [ -z "$(cat /tmp/changes)" ] && echo "(无变更)"; } || true
|
runningwater/TranslateProject
|
scripts/check/collect.sh
|
Shell
|
apache-2.0
| 1,332 |
#!/bin/bash
#Bash disables history in noninteractive shells by default, but we turn it on here.
HISTFILE=~/.bash_history
set -o history
cd /opt/QA/sources/suricata/ && \
/usr/bin/suricata --engine-analysis -l /var/log/suricata/
cp /var/log/suricata/*.{txt,json} /opt/QA/results/logs/
|
pevma/SQARD-S
|
staging-area/tests/ubuntu/sequence/01-generic/60-verify/08-verify.sh
|
Shell
|
gpl-2.0
| 287 |
#! /bin/sh
# Copyright 2010 Ovidiu Predescu <[email protected]>
# Date: June 2010
ARDUINO=192.42.172.237
if [ $# != 0 ]; then
FILES="$*"
else
FILES=html/*
fi
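# Illustrative usage (paths are examples):
#   ./upload.sh                  # upload everything under html/
#   ./upload.sh html/index.htm   # upload selected files only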
for f in $FILES; do
if [[ $(echo $f | egrep "~|CVS") ]]; then
echo Skipping $f
else
size=`ls -l $f | awk '{print $5}'`
echo "Uploading $f ($size bytes)"
curl -0 -T $f http://$ARDUINO/upload/
fi
done
|
ldv46/Lichtschakelsysteem
|
libraries/TinyWebServer/examples/FileUpload/upload.sh
|
Shell
|
gpl-3.0
| 391 |
#!/bin/bash
set -o nounset # Treat unset variables as an error
if [ -a $1 ]
then
awk '/tc state/ {print $5,$8,$11}' $1 > state_data.log
fi
|
SebKuzminsky/linuxcnc-mirror
|
tests/trajectory-planner/circular-arcs/util/save_state.sh
|
Shell
|
lgpl-2.1
| 174 |
#
# Common variables that can be used by xbps-src.
#
# SITE used for distfiles mirrors. For use in $distfiles.
set -a
SOURCEFORGE_SITE="http://downloads.sourceforge.net/sourceforge"
NONGNU_SITE="http://download.savannah.nongnu.org/releases"
UBUNTU_SITE="http://archive.ubuntu.com/ubuntu/pool"
XORG_SITE="http://xorg.freedesktop.org/releases/individual"
DEBIAN_SITE="http://ftp.debian.org/debian/pool"
GNOME_SITE="http://ftp.gnome.org/pub/GNOME/sources"
KERNEL_SITE="http://www.kernel.org/pub/linux"
#KERNEL_SITE="http://mirror.be.gbxs.net/pub/linux"
CPAN_SITE="http://cpan.perl.org/modules/by-module"
PYPI_SITE="https://files.pythonhosted.org/packages/source"
MOZILLA_SITE="http://ftp.mozilla.org/pub"
GNU_SITE="http://ftp.gnu.org/gnu"
FREEDESKTOP_SITE="http://freedesktop.org/software"
# Repetitive sub-homepages with no real project page
# ie. some gnome and xorg projects. For use in $homepage.
XORG_HOME="http://xorg.freedesktop.org/wiki/"
set +a
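# Illustrative template usage (a sketch, not part of this file): a package
# template's distfiles line can reference one of the mirrors above, e.g.
#   distfiles="${SOURCEFORGE_SITE}/examplepkg/examplepkg-1.0.tar.gz"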
|
k00mi/void-packages
|
common/environment/setup/misc.sh
|
Shell
|
bsd-2-clause
| 954 |
#!/bin/bash
#Error
func_error() {
[ $error == "0" ] && return 0
echo "
Unexpected Error:
=================
at: $current
... Please try again."
exit 1
}
#Get current architecture
current="1:Set architecture"
if [[ $(uname --machine) == "x86_64" ]] ; then
arch="64" && error=0
elif [[ $(uname --machine) == "i"*"86" ]] ; then
arch="32" && error=0
else
error=1
fi
func_error
#Variables
version="BT_VERSION"
tos="http://butterproject.org/tos.html"
#Disclaimer
clear
echo "
Butter $version - Linux $arch bits
==================================
Please read our Terms of service:
$tos
This installer will install Butter in:
~/.Butter
~/.local/share/applications
~/.local/share/icons
"
{ read -p "To continue, type 'I agree': " r </dev/tty ; if [ "$r" != "I agree" ] || [ ! "$r" ] ; then echo "
Did not get the user agreement. Exiting." && exit 0 ; fi ; }
#if agreed, start install
clear
echo "
Butter $version - Linux $arch bits
=================================="
#extract archive
current="1: Copy files"
echo "
- Copying files to ~/.Butter"
mkdir -p "$HOME/.Butter"
cp -r locales node_modules src .git.json CHANGELOG.md icudtl.dat libffmpegsumo.so LICENSE.txt nw.pak package.nw package.json Butter README.md "$HOME/.Butter" &> /dev/null && error=0 || error=1
#move icon
mkdir -p "$HOME/.local/share/icons"
cp butter.png "$HOME/.local/share/icons/butter.png" &> /dev/null && error=0 || error=1
func_error
#create .desktop in home
echo "
- Creating new configuration files..."
current="2: Desktop file"
mkdir -p "$HOME/.local/share/applications"
echo "[Desktop Entry]
Comment=Watch Movies and TV Shows instantly
Name=Butter
Exec=$HOME/.Butter/Butter
Icon=butter.png
MimeType=application/x-bittorrent;x-scheme-handler/magnet;
StartupNotify=false
Categories=AudioVideo;Video;Network;Player;P2P;
Type=Application" > "$HOME/.local/share/applications/Butter.desktop" && error=0 || error=1
func_error
# Work-around for missing libudev.so.1 on Ubuntu 12.04
if [ ! -e /lib/$(uname --machine)-linux-gnu/libudev.so.1 ]; then
ln -s /lib/$(uname --machine)-linux-gnu/libudev.so.0 $HOME/.Butter/libudev.so.1
sed -i 's,Exec=,Exec=env LD_LIBRARY_PATH='"$HOME"'/.Butter ,g' $HOME/.local/share/applications/Butter.desktop
fi
#chmod .desktop
current="3: Chmod files"
chmod +x "$HOME/.Butter/Butter/Butter" &> /dev/null && error=0 || error=1
chmod +x "$HOME/.local/share/applications/Butter.desktop" &> /dev/null && error=0 || error=1
func_error
#uninstaller
echo "How to uninstall Butter ?
===============================
1) Main application:
- Delete ~/.Butter
- Delete ~/.local/share/applications/Butter.desktop
- Delete ~/.local/share/icons/butter.png
2) Configuration files and databases:
- Delete ~/.config/Butter" > "$HOME/.Butter/Uninstall.txt"
#installation success
echo "
Butter is now installed in:
«$HOME/.Butter»
"
|
KnirkeBarken/butter
|
dist/linux/exec_basefile.sh
|
Shell
|
gpl-3.0
| 2,836 |
#!/bin/bash
# A simple script to compute translation stats for each locale
#
# T. Sutton June, 2012
cd safe_qgis
REPORT=`lrelease inasafe.pro 2>/dev/null`
echo $REPORT| grep -o '[0-9]*\ finished\ and\ [0-9]*\ unfinished\|_[a-z][a-z]\.qm'|sed 's/_/Locale: /g'| sed 's/.qm//g'
cd ..
|
opengeogroep/inasafe
|
scripts/string-stats.sh
|
Shell
|
gpl-3.0
| 284 |
#!/bin/bash
export CPP_INCLUDE_PATH=${PREFIX}/include
export CXX_INCLUDE_PATH=${PREFIX}/include
export CPLUS_INCLUDE_PATH=${PREFIX}/include
export LIBRARY_PATH=${PREFIX}/lib
outdir=$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM
mkdir -p "${outdir}/libexec" "$PREFIX/bin"
chmod u+x install_kraken.sh
./install_kraken.sh "${outdir}/libexec"
for bin in livekraken livekraken-build livekraken-filter livekraken-mpa-report livekraken-report livekraken-translate; do
chmod +x "${outdir}/libexec/$bin"
ln -s "${outdir}/libexec/$bin" "$PREFIX/bin/$bin"
# Change from double quotes to single in case of special chars
sed -i.bak "s#my \$KRAKEN_DIR = \"${outdir}/libexec\";#my \$KRAKEN_DIR = '${outdir}/libexec';#g" "${outdir}/libexec/${bin}"
rm -rf "${outdir}/libexec/${bin}.bak"
done
cp "visualisation/livekraken_sankey_diagram.py" "$PREFIX/bin/"
|
gvlproject/bioconda-recipes
|
recipes/livekraken/build.sh
|
Shell
|
mit
| 866 |
#!/bin/sh
# convenience wrapper for the snpEff jar file
java -jar snpEff.jar "$@"
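# Illustrative invocation (database name and input file are hypothetical):
#   snpEff.sh -v GRCh38.86 input.vcf > annotated.vcf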
|
iulian787/spack
|
var/spack/repos/builtin/packages/snpeff/snpEff.sh
|
Shell
|
lgpl-2.1
| 82 |
make
mkdir -p $PREFIX/bin
cp abawaca $PREFIX/bin
|
guowei-he/bioconda-recipes
|
recipes/abawaca/build.sh
|
Shell
|
mit
| 49 |
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
lib_dir=$(dirname $0)/../../../../net/forwarding
NUM_NETIFS=6
source $lib_dir/lib.sh
source $lib_dir/tc_common.sh
source devlink_lib_spectrum.sh
current_test=""
cleanup()
{
pre_cleanup
if [ ! -z $current_test ]; then
${current_test}_cleanup
fi
devlink_sp_size_kvd_to_default
}
devlink_sp_read_kvd_defaults
trap cleanup EXIT
ALL_TESTS="router tc_flower mirror_gre tc_police port"
for current_test in ${TESTS:-$ALL_TESTS}; do
RET_FIN=0
source ${current_test}_scale.sh
num_netifs_var=${current_test^^}_NUM_NETIFS
num_netifs=${!num_netifs_var:-$NUM_NETIFS}
for profile in $KVD_PROFILES; do
RET=0
devlink_sp_resource_kvd_profile_set $profile
if [[ $RET -gt 0 ]]; then
log_test "'$current_test' [$profile] setting"
continue
fi
for should_fail in 0 1; do
RET=0
target=$(${current_test}_get_target "$should_fail")
${current_test}_setup_prepare
setup_wait $num_netifs
${current_test}_test "$target" "$should_fail"
${current_test}_cleanup
if [[ "$should_fail" -eq 0 ]]; then
log_test "'$current_test' [$profile] $target"
else
log_test "'$current_test' [$profile] overflow $target"
fi
done
RET_FIN=$(( RET_FIN || RET ))
done
done
current_test=""
exit "$RET_FIN"
|
Linutronix/linux
|
tools/testing/selftests/drivers/net/mlxsw/spectrum/resource_scale.sh
|
Shell
|
gpl-2.0
| 1,275 |
#!/bin/sh
#
# this script finds unused lp_*() functions
#
# use it like this:
#
# user@host:~/samba/source>./script/find_unused_options.sh
#
LIST_GLOBAL=`grep '^FN_GLOBAL' param/loadparm.c |sed -e's/^FN_GLOBAL.*(\(.*\).*,.*\(&Globals\..*\)).*/\1:\2/'`
LIST_LOCAL=`grep '^FN_LOCAL' param/loadparm.c |sed -e's/^FN_LOCAL.*(\(.*\).*,[ ]*\(.*\)).*/\1:\2/'`
CFILES=`find . -name "*.c"`
for i in $LIST_GLOBAL;do
key=`echo $i|cut -d ':' -f1`
val=`echo $i|cut -d ':' -f2`
found=`grep "$key[ ]*()" $CFILES`
if test -z "$found"; then
echo "Not Used Global: $key() -> $val"
fi
done
for i in $LIST_LOCAL;do
key=`echo $i|cut -d ':' -f1`
val=`echo $i|cut -d ':' -f2`
found=`grep "$key[ ]*(" $CFILES`
if test -z "$found"; then
echo "Not Used LOCAL: $key() -> $val"
fi
done
echo "# do a 'make clean;make everything' before removing anything!"
|
zarboz/XBMC-PVR-mac
|
tools/darwin/depends/samba/samba-3.6.6/source4/script/find_unused_options.sh
|
Shell
|
gpl-2.0
| 857 |
#!/bin/bash
source config.sh
source find-prototype-region.sh
if [[ "$region" != "" ]]; then
source get-tags.sh
source vars.sh
source deploy-app-code.sh
source create-ami.sh
source get-security-group-id.sh
source create-launch-configuration.sh
source use-new-launch-configuration.sh
source scale-up.sh
source wait-scale-up.sh
source scale-down.sh
else
echo "Error: prototype not found"
fi
|
stevenharradine/ansible-aws-deployment
|
deploy.sh
|
Shell
|
isc
| 402 |
pacat --format=s16be --channels=1 --channel-map=mono --rate=44100 --device=alsa_output.usb-Burr-Brown_from_TI_USB_Audio_CODEC-00.analog-stereo
|
strickyak/rxtx
|
qrss/player.sh
|
Shell
|
mit
| 144 |
#!/bin/bash
# Simple call of the start.jar with used port.
SCRIPT_ABS_PATH="$(cd "${0%/*}" 2>/dev/null; echo "$PWD")"
. "$SCRIPT_ABS_PATH/settings.sh"
# Go to the installation directory.
cd "$PM_EXIST_INSTALL_DIR"
if [ ! -f "start.jar" ]; then
echo "No eXist installation found, at:"
echo "$PM_EXIST_INSTALL_DIR"
echo "Please run from the directory from where the initial_install.sh was run."
exit 1
fi
exist_start_foreground
|
nusselder/politicalmashup
|
exist/start_jetty_bound.sh
|
Shell
|
mit
| 439 |
#!/bin/sh
# create public folder if not exist
mkdir -p ./public
# copy fonts to public folder
cp -R ./src/font ./public
# copy JS libraries to public folder
cp -R ./src/libs ./public
# build JS files
yarn build:js
# build SASS files
yarn build:sass
# build mustacher files
yarn build:html
|
sixertoy/mustacher
|
example/.scripts/build.sh
|
Shell
|
mit
| 295 |
#!/bin/bash
ghc orcamento.hs comandos/comandos.dominio.hs && ./orcamento
|
LorhanSohaky/UFSCar
|
2019/workshop-funcional/haskell/run.sh
|
Shell
|
mit
| 73 |
#!/bin/sh
set -e
echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
SWIFT_STDLIB_PATH="${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}"
# This protects against multiple targets copying the same framework dependency at the same time. The solution
# was originally proposed here: https://lists.samba.org/archive/rsync/2008-February/020158.html
RSYNC_PROTECT_TMP_FILES=(--filter "P .*.??????")
install_framework()
{
if [ -r "${BUILT_PRODUCTS_DIR}/$1" ]; then
local source="${BUILT_PRODUCTS_DIR}/$1"
elif [ -r "${BUILT_PRODUCTS_DIR}/$(basename "$1")" ]; then
local source="${BUILT_PRODUCTS_DIR}/$(basename "$1")"
elif [ -r "$1" ]; then
local source="$1"
fi
local destination="${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
if [ -L "${source}" ]; then
echo "Symlinked..."
source="$(readlink "${source}")"
fi
# Use filter instead of exclude so missing patterns don't throw errors.
echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${destination}\""
rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${destination}"
local basename
basename="$(basename -s .framework "$1")"
binary="${destination}/${basename}.framework/${basename}"
if ! [ -r "$binary" ]; then
binary="${destination}/${basename}"
fi
# Strip invalid architectures so "fat" simulator / device frameworks work on device
if [[ "$(file "$binary")" == *"dynamically linked shared library"* ]]; then
strip_invalid_archs "$binary"
fi
# Resign the code if required by the build settings to avoid unstable apps
code_sign_if_enabled "${destination}/$(basename "$1")"
# Embed linked Swift runtime libraries. No longer necessary as of Xcode 7.
if [ "${XCODE_VERSION_MAJOR}" -lt 7 ]; then
local swift_runtime_libs
swift_runtime_libs=$(xcrun otool -LX "$binary" | grep --color=never @rpath/libswift | sed -E s/@rpath\\/\(.+dylib\).*/\\1/g | uniq -u && exit ${PIPESTATUS[0]})
for lib in $swift_runtime_libs; do
echo "rsync -auv \"${SWIFT_STDLIB_PATH}/${lib}\" \"${destination}\""
rsync -auv "${SWIFT_STDLIB_PATH}/${lib}" "${destination}"
code_sign_if_enabled "${destination}/${lib}"
done
fi
}
# Copies the dSYM of a vendored framework
install_dsym() {
local source="$1"
if [ -r "$source" ]; then
echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${DWARF_DSYM_FOLDER_PATH}\""
rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${DWARF_DSYM_FOLDER_PATH}"
fi
}
# Signs a framework with the provided identity
code_sign_if_enabled() {
if [ -n "${EXPANDED_CODE_SIGN_IDENTITY}" -a "${CODE_SIGNING_REQUIRED}" != "NO" -a "${CODE_SIGNING_ALLOWED}" != "NO" ]; then
# Use the current code sign identity
echo "Code Signing $1 with Identity ${EXPANDED_CODE_SIGN_IDENTITY_NAME}"
local code_sign_cmd="/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} ${OTHER_CODE_SIGN_FLAGS} --preserve-metadata=identifier,entitlements '$1'"
if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
code_sign_cmd="$code_sign_cmd &"
fi
echo "$code_sign_cmd"
eval "$code_sign_cmd"
fi
}
# Strip invalid architectures
strip_invalid_archs() {
binary="$1"
# Get architectures for current file
archs="$(lipo -info "$binary" | rev | cut -d ':' -f1 | rev)"
stripped=""
for arch in $archs; do
if ! [[ "${ARCHS}" == *"$arch"* ]]; then
# Strip non-valid architectures in-place
lipo -remove "$arch" -output "$binary" "$binary" || exit 1
stripped="$stripped $arch"
fi
done
if [[ "$stripped" ]]; then
echo "Stripped $binary of architectures:$stripped"
fi
}
if [[ "$CONFIGURATION" == "Debug" ]]; then
install_framework "${BUILT_PRODUCTS_DIR}/AFNetworking/AFNetworking.framework"
install_framework "${BUILT_PRODUCTS_DIR}/JamifiSDK/JamifiSDK.framework"
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
install_framework "${BUILT_PRODUCTS_DIR}/AFNetworking/AFNetworking.framework"
install_framework "${BUILT_PRODUCTS_DIR}/JamifiSDK/JamifiSDK.framework"
fi
if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
wait
fi
|
palneel/JamifiSDK
|
Example/Pods/Target Support Files/Pods-JamifiSDK_Example/Pods-JamifiSDK_Example-frameworks.sh
|
Shell
|
mit
| 4,826 |
#!/usr/bin/env bash
# @author Joschi <[email protected]>
# created 2015-02-17 14:26
base_dir=`pwd`
server_pid_file="${base_dir}/server_pid.txt"
USAGE="Usage: $0 -hqc [--no-open] [--] <url>"
usage() {
echo $USAGE
echo "-h: print help"
echo "-q: be quiet"
echo "-c: check whether the server is running"
echo "--no-open: only prints the URL"
echo "<url>: the URL to open"
exit 1
}
url=
no_open=
quiet=
check_server=
if [ $# -eq 0 ]; then
usage
fi
while [ $# -gt 0 ]; do
if [ ! -z "${url}" ]; then
echo "too many arguments: $@"
usage
fi
case "$1" in
-h)
usage
;;
-q)
quiet=1
;;
-c)
check_server=1
;;
--no-open)
no_open=1
;;
--)
shift
url="$1"
;;
*)
url="$1"
;;
esac
shift
done
if [[ $# -gt 1 ]]; then
echo "illegal option -- $1"
usage
fi
if [[ -z "${url}" ]]; then
echo "require URL"
usage
fi
print() {
if [ -z "${quiet}" ]; then
echo "$@"
fi
}
if [[ ! -z "${check_server}" && ! -s "${server_pid_file}" && ! -f "${server_pid_file}" ]]; then
echo "No server running..."
exit 1
fi
if [ ! -z $no_open ]; then
echo "${url}"
else
mac_chrome="/Applications/Google Chrome.app"
mac_firefox="/Applications/Firefox.app"
if [ `command -v firefox 2>/dev/null 1>&2; echo $?` -eq 0 ]; then
# linux firefox
if [[ -z `ps | grep [f]irefox` ]]; then
print "open window: ${url}"
firefox -new-window "${url}" &
else
print "open url: ${url}"
firefox -remote "openURL(${url})" &
fi
elif [ `command -v google-chrome 2>/dev/null 1>&2; echo $?` -eq 0 ]; then
# linux chrome
print "open window: ${url}"
google-chrome "${url}"
elif [ `ls "${mac_chrome}" 2>/dev/null 1>&2; echo $?` -eq 0 ]; then
# mac chrome
print "open window: ${url}"
open "${mac_chrome}" "${url}"
elif [ `ls "${mac_firefox}" 2>/dev/null 1>&2; echo $?` -eq 0 ]; then
# mac firefox
print "open window: ${url}"
open "${mac_firefox}" "${url}"
else
echo "Could not find chrome or firefox..."
echo "${url}"
exit 1
fi
fi
|
nyuvis/patient-viz
|
open_url.sh
|
Shell
|
mit
| 2,103 |
#!/bin/bash
_user="$(id -u -n)"
if [ "$_user" == "researcher" ]; then
export JULIA_PKGDIR=/opt/julia
fi
|
dit4c/dockerfile-dit4c-container-ijulia
|
etc/profile.d/opt_julia.sh
|
Shell
|
mit
| 108 |
pyrcc5 -o qtrst/ui/resources_rc.py design/resources.qrc
pyuic5 -o qtrst/ui/main.py design/main.ui
|
petrushev/qtrst
|
build.sh
|
Shell
|
mit
| 98 |
#!/usr/bin/env bash
if [ ! -d "bin" ]; then
printf "No compiled classes. Did you run compile.sh?\n"
exit
fi
java -classpath bin/ com/scottlessans/simpledtw/examples/Problem6
|
slessans/simple-dynamic-time-warping-java
|
run_problem6.sh
|
Shell
|
mit
| 178 |
#!/usr/bin/env zsh
#
# Currently DIT4C portal's SSH REPL is provided by Ammonite-SSHD, which uses
# jline2 for terminal interactions. Unfortunately, this means it requires a
# TTY to work correctly.
#
# This script wraps the entire portal server in its own pseudo-terminal as a
# workaround.
#
zmodload zsh/zpty
zpty -b -e dit4c-portal TERM=dumb /opt/dit4c-portal/bin/dit4c-portal -Dcom.datastax.driver.FORCE_NIO=true "$@"
while zpty -t dit4c-portal
do
zpty -r dit4c-portal
sleep 0.001
done
|
dit4c/dit4c
|
scripts/extra/start_portal.zsh
|
Shell
|
mit
| 498 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for DLA-220-1
#
# Security announcement date: 2015-05-15 00:00:00 UTC
# Script generation date: 2017-01-01 21:08:56 UTC
#
# Operating System: Debian 6 (Squeeze)
# Architecture: i386
#
# Vulnerable packages fix on version:
# - dpkg:1.15.12
#
# Last versions recommended by security team:
# - dpkg:1.15.12
#
# CVE List:
# - CVE-2015-0840
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo apt-get install --only-upgrade dpkg=1.15.12 -y
|
Cyberwatch/cbw-security-fixes
|
Debian_6_(Squeeze)/i386/2015/DLA-220-1.sh
|
Shell
|
mit
| 582 |
#!/bin/bash
HOST="localhost"
PORT="1883"
# Array for storing PIDS of future processes
PIDS=()
# Start the two Python processes and store the PIDs
timeout -k 15s 15s python3 test_node.py -host $HOST -port $PORT node_desc_b.json -val -1 &
PIDS+=("$!")
timeout -k 15s 15s python3 test_node.py -host $HOST -port $PORT node_desc_a.json -val 0 &
PIDS+=("$!")
# Wait for each process we started; if it did not exit cleanly, make sure it is killed
for i in "${PIDS[@]}"
do
wait $i || kill -9 $i
done
|
robotarium/vizier
|
examples/example_0/test_nodes.sh
|
Shell
|
mit
| 446 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for DLA-621-1
#
# Security announcement date: 2016-09-15 00:00:00 UTC
# Script generation date: 2017-01-01 21:09:18 UTC
#
# Operating System: Debian 7 (Wheezy)
# Architecture: armv7l
#
# Vulnerable packages fix on version:
# - autotrace:0.31.1-16+deb7u1
#
# Last versions recommended by security team:
# - autotrace:0.31.1-16+deb7u1
#
# CVE List:
# - CVE-2016-7392
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo apt-get install --only-upgrade autotrace=0.31.1-16+deb7u1 -y
|
Cyberwatch/cbw-security-fixes
|
Debian_7_(Wheezy)/armv7l/2016/DLA-621-1.sh
|
Shell
|
mit
| 625 |
#!/bin/bash
#if [ -z "$SHARELATEX_UID" ]; then
# SHARELATEX_UID=1000
#fi
#if [ -z "$SHARELATEX_GID" ]; then
# SHARELATEX_GID=1000
#fi
if [ -z "$MONGOD_SMALLFILES" ]; then
MONGOD_SMALLFILES=""
else
MONGOD_SMALLFILES="--smallfiles"
fi
#addgroup --gid $SHARELATEX_GID sharelatex
#adduser --system --home /data --no-create-home --uid $SHARELATEX_GID --group sharelatex
mkdir -p /data/db
mkdir -p /data/user_files
mkdir -p /data/tmp/uploads
mkdir -p /data/tmp/dumpFolder
rm -r /sharelatex/data
rm -r /sharelatex/tmp
ln -s /data/tmp /sharelatex/tmp
ln -s /data /sharelatex/data
mkdir -p /data/compiles
mkdir -p /data/cache
mkdir -p /data/redis
rm -r /var/lib/redis
ln -s /data/redis /var/lib/redis
chown sharelatex:sharelatex -R /data
#chown sharelatex:sharelatex -R /sharelatex
# HACK:
# start mongod as root, as it wont run fine as user?
cd /data
mongod $MONGOD_SMALLFILES &
# start nginx (will fork to sharelatex user by itself)
nginx &
exec sudo -H -u sharelatex -g sharelatex /usr/bin/sharelatex.sh
|
sb-minimal/docker-ubuntu-sharelatex
|
overlay/sharelatex.sh
|
Shell
|
mit
| 1,022 |
#!/bin/bash
# Place this file in the same folder as a freetype SVN checkout.
VERSION="9.1" # SDK version
MINVERSION="6.1" # Target version.
ARCHES=("arm64" "armv7" "armv7s" "i386")
PLATFORMS=("iPhoneOS" "iPhoneOS" "iPhoneOS" "iPhoneSimulator")
MERGE="lipo -create -output libfreetype-fat.a "
for i in ${!ARCHES[@]}
do
# Create required paths:
ARCH=${ARCHES[${i}]}
PLATFORM=${PLATFORMS[${i}]}
SYSROOT="/Applications/Xcode.app/Contents/Developer/Platforms/${PLATFORM}.platform/Developer/SDKs/${PLATFORM}${VERSION}.sdk"
SDK=${SYSROOT}
OUT="./libFreetype2-${ARCH}.a"
if [ ! -e ${SDK} ]; then
echo "SDK not found at:"
echo ${SDK}
echo ""
echo "Try changing the 'version' inside this bash file. Here are some installed SDKs:"
echo $(ls $(dirname ${SDK}))
echo ""
exit 1
fi
# Clean old remnants:
make clean; rm -rf objs/.libs/*
# Usual configure and make routines:
./configure --prefix=${PWD}/build-${ARCH} --host=arm-apple-darwin --enable-static=yes --enable-shared=no --with-bzip2=no \
CPPFLAGS="-arch ${ARCH} -fpascal-strings -Os -fembed-bitcode-marker -fembed-bitcode -fmessage-length=0 -fvisibility=hidden -miphoneos-version-min=${VERSION} -I${SDK}/usr/include/libxml2 -isysroot ${SYSROOT}" \
CC=`xcrun -sdk iphoneos -find clang` \
CFLAGS="-arch ${ARCH} -fpascal-strings -Os -fembed-bitcode-marker -fembed-bitcode -fmessage-length=0 -fvisibility=hidden -miphoneos-version-min=${VERSION} -isysroot ${SYSROOT}" \
LD=`xcrun -sdk iphoneos -find ld` \
LDFLAGS="-arch ${ARCH} -isysroot ${SYSROOT} -miphoneos-version-min=${MINVERSION}" \
AR=`xcrun -sdk iphoneos -find ar` && make
# Copy the file, and append it to the merge command.
cp objs/.libs/libfreetype.a ${OUT}
MERGE="${MERGE} ${OUT}"
done
echo ""
# Execute the merge command, output some debug info:
echo `${MERGE}` && lipo -info libfreetype-fat.a
echo ""
echo "If there was an error, try running \"xcode-select --install\" (in particular for x86 simulator builds.";
echo ""
exit 0
|
Gerjo/freetype_ios_android
|
freetype_ios.sh
|
Shell
|
mit
| 2,040 |
#!/bin/bash -i
# Bail out on failure
set -e
cd $HOME
timestamp=$(date "+%Y%m%d_%H%M%S")
branch="master"
function print_help {
echo "Usage:
./setup.sh [-r|-u|-h] [branch]
-r Remote setup, grabs your Vim config from GitHub
-u Update Vim config from GitHub and update all Bundles
-h Print this help
branch The branch name to checkout after setup.
Defaults to 'master'. Only works with -r or no options
"
exit 1;
}
if [ $# -gt 2 ]; then
echo "Too many args...
"
print_help
fi
case $1 in
"-r")
echo "Remote setup..."
shift
if [ -e ".vim" ]; then
echo "$HOME/.vim exists, moving to .vim.$timestamp"
mv .vim .vim.$timestamp
fi
git clone https://github.com/moshen/vimconfig.git .vim ||
{ echo "Remote clone failed, bailing out..."; exit 1; }
echo "
"
;;
"-u")
echo "Updating current config..."
shift
cd .vim
# Check for an unclean repo
{ git diff-index --quiet --cached HEAD &&
git diff-files --quiet; } ||
{ echo "Unclean repo, exiting..."; exit 1; }
# Get changes from Git!
git pull origin ||
{ echo "Failed to pull changes, exiting..."; exit 1; }
# Update Bundles
vim +BundleInstall +BundleUpdate +qall 2>/dev/null
echo "Done! Your vim config is up-to-date"
exit 0;
;;
"-h")
print_help
;;
"--help")
print_help
;;
esac
if [ "$1" ]; then
branch=$1
fi
cd .vim
# Grab Vundle
git clone https://github.com/gmarik/vundle.git bundle/vundle ||
{ echo "Failed to clone Vundle.
If you're trying to update, use the -u flag!"; exit 1; }
git checkout $branch ||
echo "Git checkout failed, continuing...
but seriously, check your available branches.
"
# Link up!
cd $HOME
# Check for readlink on Solaris/BSD
readlink=$(type -p greadlink readlink | head -1)
for f in vimrc gvimrc; do
if [ -L ".$f" ]; then
if [ "$readlink" ]; then
if [ "$($readlink -n .$f)" == ".vim/$f" ]; then
echo "$HOME/.$f already links to the right file"
continue
fi
fi
echo "$HOME/.$f exists, moving to .$f.$timestamp"
mv .$f .$f.$timestamp
ln -s .vim/$f .$f
elif [ -e ".$f" ]; then
echo "$HOME/.$f exists, moving to .$f.$timestamp"
mv .$f .$f.$timestamp
ln -s .vim/$f .$f
else
ln -s .vim/$f .$f
fi
done
# Install Bundles
echo "" | vim +BundleInstall +qall - ||
{ echo "Vim exited with $?, you may need to check your config."; exit 1; }
echo "Done! Vim is fully configured."
exit 0
|
ashishraste/.dotfiles
|
vim_osx/setup.sh
|
Shell
|
mit
| 2,455 |
#!/bin/bash
#
# cloc.sh: Compile cl file into an HSA Code object file (.hsaco)
# using the LLVM Lightning Compiler. An hsaco file contains
# the amdgpu isa that can be loaded by the HSA Runtime.
#
# Old options -hsail and -brig use HLC are deprecated
#
# Written by Greg Rodgers [email protected]
#
PROGVERSION="X.Y-Z"
#
# Copyright (c) 2018 ADVANCED MICRO DEVICES, INC.
#
# AMD is granting you permission to use this software and documentation (if any) (collectively, the
# Materials) pursuant to the terms and conditions of the Software License Agreement included with the
# Materials. If you do not have a copy of the Software License Agreement, contact your AMD
# representative for a copy.
#
# You agree that you will not reverse engineer or decompile the Materials, in whole or in part, except for
# example code which is provided in source code form and as allowed by applicable law.
#
# WARRANTY DISCLAIMER: THE SOFTWARE IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY
# KIND. AMD DISCLAIMS ALL WARRANTIES, EXPRESS, IMPLIED, OR STATUTORY, INCLUDING BUT NOT
# LIMITED TO THE IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE, TITLE, NON-INFRINGEMENT, THAT THE SOFTWARE WILL RUN UNINTERRUPTED OR ERROR-
# FREE OR WARRANTIES ARISING FROM CUSTOM OF TRADE OR COURSE OF USAGE. THE ENTIRE RISK
# ASSOCIATED WITH THE USE OF THE SOFTWARE IS ASSUMED BY YOU. Some jurisdictions do not
# allow the exclusion of implied warranties, so the above exclusion may not apply to You.
#
# LIMITATION OF LIABILITY AND INDEMNIFICATION: AMD AND ITS LICENSORS WILL NOT,
# UNDER ANY CIRCUMSTANCES BE LIABLE TO YOU FOR ANY PUNITIVE, DIRECT, INCIDENTAL,
# INDIRECT, SPECIAL OR CONSEQUENTIAL DAMAGES ARISING FROM USE OF THE SOFTWARE OR THIS
# AGREEMENT EVEN IF AMD AND ITS LICENSORS HAVE BEEN ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGES. In no event shall AMD's total liability to You for all damages, losses, and
# causes of action (whether in contract, tort (including negligence) or otherwise)
# exceed the amount of $100 USD. You agree to defend, indemnify and hold harmless
# AMD and its licensors, and any of their directors, officers, employees, affiliates or
# agents from and against any and all loss, damage, liability and other expenses
# (including reasonable attorneys' fees), resulting from Your use of the Software or
# violation of the terms and conditions of this Agreement.
#
# U.S. GOVERNMENT RESTRICTED RIGHTS: The Materials are provided with "RESTRICTED RIGHTS."
# Use, duplication, or disclosure by the Government is subject to the restrictions as set
# forth in FAR 52.227-14 and DFAR252.227-7013, et seq., or its successor. Use of the
# Materials by the Government constitutes acknowledgement of AMD's proprietary rights in them.
#
# EXPORT RESTRICTIONS: The Materials may be subject to export restrictions as stated in the
# Software License Agreement.
#
function usage(){
/bin/cat 2>&1 <<"EOF"
cloc.sh: Compile a cl or cu file into an HSA Code object file (.hsaco)
using the HCC2 compiler. An hsaco file contains the amdgpu
isa that can be loaded by the HSA Runtime.
The creation of hsaco for cuda kernels is experimental.
Usage: cloc.sh [ options ] filename.cl
Options without values:
-ll Generate IR for LLVM steps before generating hsaco
-s Generate disassembled gcn from hsaco
-g Generate debug information
-noqp No quickpath, Use LLVM commands instead of clang driver
-noshared Do not link hsaco as shared object, forces noqp
-version Display version of cloc then exit
-v Verbose messages
-n Dryrun, do nothing, show commands that would execute
-h Print this help message
-k Keep temporary files
Options with values:
-hcc2 <path> $HCC2 or /opt/rocm/hcc2
-libgcn <path> $DEVICELIB or $HCC2/lib/libdevice
-cuda-path <path> $CUDA_PATH or /usr/local/cuda
-atmipath <path> $ATMI_PATH or /opt/rocm/hcc2
-mcpu <cputype> Default= value returned by mygpu
-bclib <bcfile> Add a bc library for llvm-link
   -clopts  <compiler opts> Additional options for cl frontend
   -cuopts  <compiler opts> Additional options for cu frontend
-I <include dir> Provide one directory per -I option
-opt <LLVM opt> LLVM optimization level
-o <outfilename> Default=<filename>.<ft> ft=hsaco
   -t       <tdir>           Temporary directory for intermediate files
Default=/tmp/cloc-tmp-$$
Examples:
cloc.sh my.cl /* creates my.hsaco */
cloc.sh whybother.cu /* creates whybother.hsaco */
Note: Instead of providing these command line options:
     -hcc2, -libgcn, -cuda-path, -atmipath, -mcpu, -clopts, or -cuopts
you may set these environment variables, respectively:
HCC2, DEVICELIB, CUDA_PATH, ATMI_PATH, LC_MCPU, CLOPTS, or CUOPTS
Command line options will take precedence over environment variables.
Copyright (c) 2017 ADVANCED MICRO DEVICES, INC.
EOF
exit 0
}
DEADRC=12
# Utility Functions
function do_err(){
if [ $NEWTMPDIR ] ; then
if [ $KEEPTDIR ] ; then
cp -rp $TMPDIR $OUTDIR
[ $VERBOSE ] && echo "#Info: Temp files copied to $OUTDIR/$TMPNAME"
fi
rm -rf $TMPDIR
else
if [ $KEEPTDIR ] ; then
[ $VERBOSE ] && echo "#Info: Temp files kept in $TMPDIR"
fi
fi
[ $VERBOSE ] && echo "#Info: Done"
exit $1
}
function version(){
echo $PROGVERSION
exit 0
}
function runcmd(){
THISCMD=$1
if [ $DRYRUN ] ; then
echo "$THISCMD"
else
[ $VV ] && echo "$THISCMD"
$THISCMD
rc=$?
if [ $rc != 0 ] ; then
echo "ERROR: The following command failed with return code $rc."
echo " $THISCMD"
do_err $rc
fi
fi
}
function getdname(){
local __DIRN=`dirname "$1"`
if [ "$__DIRN" = "." ] ; then
__DIRN=$PWD;
else
if [ ${__DIRN:0:1} != "/" ] ; then
if [ ${__DIRN:0:2} == ".." ] ; then
__DIRN=`dirname $PWD`/${__DIRN:3}
else
if [ ${__DIRN:0:1} = "." ] ; then
__DIRN=$PWD/${__DIRN:2}
else
__DIRN=$PWD/$__DIRN
fi
fi
fi
fi
echo $__DIRN
}
# -------- The main code starts here -----
INCLUDES=""
# Argument processing
while [ $# -gt 0 ] ; do
case "$1" in
-q) QUIET=true;;
--quiet) QUIET=true;;
-k) KEEPTDIR=true;;
-n) DRYRUN=true;;
-hsail) GEN_IL=true;;
-brig) GEN_BRIG=true;;
-g) GEN_DEBUG=true;;
-ll) GENLL=true;;
-s) GENASM=true;;
-noqp) NOQP=true;;
-noshared) NOSHARED=true;;
-clopts) CLOPTS=$2; shift ;;
-cuopts) CUOPTS=$2; shift ;;
-I) INCLUDES="$INCLUDES -I $2"; shift ;;
-opt) LLVMOPT=$2; shift ;;
-o) OUTFILE=$2; shift ;;
-t) TMPDIR=$2; shift ;;
-bclib) EXTRABCLIB=$2; shift ;;
-mcpu) LC_MCPU=$2; shift ;;
-hcc2) HCC2=$2; shift ;;
-triple) TARGET_TRIPLE=$2; shift ;;
-libgcn) DEVICELIB=$2; shift ;;
-atmipath) ATMI_PATH=$2; shift ;;
-cuda-path) CUDA_PATH=$2; shift ;;
-h) usage ;;
-help) usage ;;
--help) usage ;;
-version) version ;;
--version) version ;;
-v) VERBOSE=true;;
-vv) VV=true;;
--) shift ; break;;
      *) break;;
esac
shift
done
# The while loop above exits once the last recognized "-" option has been processed
LASTARG=$1
shift
# Allow output specifier after the cl file
if [ "$1" == "-o" ]; then
OUTFILE=$2; shift ; shift;
fi
if [ ! -z $1 ]; then
echo " "
echo "WARNING: cloc.sh can only process one .cl or .cu file at a time."
echo " You can call cloc multiple times to get multiple outputs."
echo " Argument $LASTARG will be processed. "
echo " These args are ignored: $@"
echo " "
fi
cdir=$(getdname $0)
[ ! -L "$cdir/cloc.sh" ] || cdir=$(getdname `readlink "$cdir/cloc.sh"`)
HCC2=${HCC2:-/opt/rocm/hcc2}
DEVICELIB=${DEVICELIB:-$HCC2/lib/libdevice}
TARGET_TRIPLE=${TARGET_TRIPLE:-amdgcn-amd-amdhsa}
CUDA_PATH=${CUDA_PATH:-/usr/local/cuda}
ATMI_PATH=${ATMI_PATH:-/opt/rocm/hcc2}
# Determine which gfx processor to use, default to Fiji (gfx803)
if [ ! $LC_MCPU ] ; then
   # Use the mygpu paired with this script, not the pre-installed one.
LC_MCPU=`$cdir/mygpu`
if [ "$LC_MCPU" == "" ] ; then
LC_MCPU="gfx803"
fi
fi
LLVMOPT=${LLVMOPT:-2}
CUOPTS=${CUOPTS:- -fcuda-rdc --cuda-device-only -Wno-unused-value --hip-auto-headers=cuda_open -O$LLVMOPT --cuda-gpu-arch=$LC_MCPU}
if [ $VV ] ; then
VERBOSE=true
fi
BCFILES=""
# Check if user supplied libgcn has libdevice convention
GCNDEVICE=`echo $DEVICELIB | grep libdevice`
if [ -z $GCNDEVICE ]; then
# Here a User supplied libgcn does not have libdevice convention
DEVICELIBDIR=$DEVICELIB
else
# This is the default path. bc files are found with libdevice convention
  # $HCC2/lib/libdevice/$LC_MCPU/
DEVICELIBDIR=$DEVICELIB/$LC_MCPU
fi
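# For example (paths illustrative only, not verified here):
#   user-supplied layout:   DEVICELIBDIR=/opt/my/libgcn
#   libdevice convention:   DEVICELIBDIR=/opt/rocm/hcc2/lib/libdevice/gfx803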
BCFILES="$BCFILES $DEVICELIBDIR/cuda2gcn.amdgcn.bc"
BCFILES="$BCFILES $DEVICELIBDIR/hip.amdgcn.bc"
BCFILES="$BCFILES $DEVICELIBDIR/hc.amdgcn.bc"
BCFILES="$BCFILES $DEVICELIBDIR/opencl.amdgcn.bc"
BCFILES="$BCFILES $DEVICELIBDIR/ocml.amdgcn.bc"
BCFILES="$BCFILES $DEVICELIBDIR/ockl.amdgcn.bc"
BCFILES="$BCFILES $DEVICELIBDIR/oclc_isa_version.amdgcn.bc"
if [ -f $ATMI_PATH/lib/libdevice/$LC_MCPU/libatmi.bc ]; then
BCFILES="$BCFILES $ATMI_PATH/lib/libdevice/$LC_MCPU/libatmi.bc"
else
if [ -f $DEVICELIBDIR/libatmi.bc ]; then
BCFILES="$BCFILES $DEVICELIBDIR/libatmi.bc"
fi
fi
if [ $EXTRABCLIB ] ; then
if [ -f $EXTRABCLIB ] ; then
      # EXTRABCLIB forces the quickpath off, so LINKOPTS is not used.
BCFILES="$EXTRABCLIB $BCFILES"
else
echo "ERROR: Environment variable EXTRABCLIB is set to $EXTRABCLIB"
echo " File $EXTRABCLIB does not exist"
exit $DEADRC
fi
fi
filetype=${LASTARG##*\.}
if [ "$filetype" != "cl" ] ; then
if [ "$filetype" != "cu" ] ; then
echo "ERROR: $0 requires one argument with file type cl or cu"
exit $DEADRC
else
CUDACLANG=true
if [ ! -d $CUDA_PATH ] ; then
echo "ERROR: No CUDA_PATH directory at $CUDA_PATH "
exit $DEADRC
fi
fi
fi
# Define the subcommands
if [ $CUDACLANG ] ; then
INCLUDES="-I $CUDA_PATH/include ${INCLUDES}"
CMD_CLC=${CMD_CLC:-clang++ $CUOPTS $INCLUDES}
else
if [ -z $GCNDEVICE ]; then
INCLUDES="-I ${DEVICELIB}/include ${INCLUDES}"
else
INCLUDES="-I ${DEVICELIB}/$LC_MCPU/include ${INCLUDES}"
fi
CMD_CLC=${CMD_CLC:-clang -x cl -Xclang -cl-std=CL2.0 $CLOPTS $LINKOPTS $INCLUDES -include opencl-c.h -Dcl_clang_storage_class_specifiers -Dcl_khr_fp64 -target ${TARGET_TRIPLE}}
fi
CMD_LLA=${CMD_LLA:-llvm-dis}
CMD_ASM=${CMD_ASM:-llvm-as}
CMD_LLL=${CMD_LLL:-llvm-link}
CMD_OPT=${CMD_OPT:-opt -O$LLVMOPT -mcpu=$LC_MCPU -amdgpu-annotate-kernel-features}
CMD_LLC=${CMD_LLC:-llc -mtriple ${TARGET_TRIPLE} -mcpu=$LC_MCPU -filetype=obj}
RUNDATE=`date`
if [ ! -e "$LASTARG" ] ; then
echo "ERROR: The file $LASTARG does not exist."
exit $DEADRC
fi
# Parse LASTARG for directory, filename, and symbolname
INDIR=$(getdname $LASTARG)
FILENAME=${LASTARG##*/}
# FNAME has the .cl extension removed, used for naming intermediate filenames
FNAME=${FILENAME%.*}
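# For example, LASTARG=kernels/vadd.cl gives FILENAME=vadd.cl and FNAME=vadd, so the
# default output name would be vadd.hsaco (names illustrative only).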
if [ -z $OUTFILE ] ; then
# Output file not specified so use input directory
OUTDIR=$INDIR
# Make up the output file name based on last step
if [ $GEN_BRIG ] || [ $GEN_IL ] ; then
OUTFILE=${FNAME}.brig
else
OUTFILE=${FNAME}.hsaco
fi
else
# Use the specified OUTFILE
OUTDIR=$(getdname $OUTFILE)
OUTFILE=${OUTFILE##*/}
fi
sdir=$(getdname $0)
[ ! -L "$sdir/cloc.sh" ] || sdir=$(getdname `readlink "$sdir/cloc.sh"`)
CLOC_PATH=${CLOC_PATH:-$sdir}
TMPNAME="cloc-tmp-$$"
TMPDIR=${TMPDIR:-/tmp/$TMPNAME}
if [ -d $TMPDIR ] ; then
KEEPTDIR=true
else
if [ $DRYRUN ] ; then
echo "mkdir -p $TMPDIR"
else
mkdir -p $TMPDIR
NEWTMPDIR=true
fi
fi
# Be sure not to delete the output directory
if [ $TMPDIR == $OUTDIR ] ; then
KEEPTDIR=true
fi
if [ ! -d $TMPDIR ] && [ ! $DRYRUN ] ; then
echo "ERROR: Directory $TMPDIR does not exist or could not be created"
exit $DEADRC
fi
if [ ! -d $OUTDIR ] && [ ! $DRYRUN ] ; then
echo "ERROR: The output directory $OUTDIR does not exist"
exit $DEADRC
fi
# Print Header block
if [ $VERBOSE ] ; then
echo "# "
echo "#Info: HCC2 Version: $PROGVERSION"
echo "#Info: HCC2 Path: $HCC2"
echo "#Info: Run date: $RUNDATE"
echo "#Info: Input file: $INDIR/$FILENAME"
echo "#Info: Code object: $OUTDIR/$OUTFILE"
[ $KEEPTDIR ] && echo "#Info: Temp dir: $TMPDIR"
echo "# "
fi
if [ $GEN_IL ] || [ $GEN_BRIG ] ; then
   echo "ERROR: Support for HSAIL and BRIG generation is deprecated"
exit $DEADRC
fi
rc=0
if [ $VV ] ; then
CLOPTS="-v $CLOPTS"
fi
if [ $NOQP ] || [ $GENLL ] || [ $NOSHARED ] || [ $EXTRABCLIB ] ; then
quickpath="false"
else
quickpath="true"
fi
# Fixme : need long path for linking multiple libs
quickpath="false"
if [ "$quickpath" == "true" ] ; then
[ $VV ] && echo
   [ $VERBOSE ] && echo "#Step: Compile(clang) cl --> hsaco ..."
runcmd "$HCC2/bin/$CMD_CLC -o $OUTDIR/$OUTFILE $INDIR/$FILENAME"
else
# Run 4 steps, clang,link,opt,llc
if [ $CUDACLANG ] ; then
[ $VV ] && echo
[ $VERBOSE ] && echo "#Step: cuda-clang cu --> bc ..."
runcmd "$HCC2/bin/$CMD_CLC -o $TMPDIR/$FNAME.bc $INDIR/$FILENAME"
else
[ $VV ] && echo
      [ $VERBOSE ] && echo "#Step: Compile(clang) cl --> bc ..."
runcmd "$HCC2/bin/$CMD_CLC -c -emit-llvm -o $TMPDIR/$FNAME.bc $INDIR/$FILENAME"
fi
if [ $GENLL ] ; then
[ $VV ] && echo
[ $VERBOSE ] && echo "#Step: Disassemble bc --> ll ..."
runcmd "$HCC2/bin/$CMD_LLA -o $TMPDIR/$FNAME.ll $TMPDIR/$FNAME.bc"
if [ "$OUTDIR" != "$TMPDIR" ] ; then
runcmd "cp $TMPDIR/$FNAME.ll $OUTDIR/$FNAME.ll"
fi
fi
[ $VV ] && echo
[ $VERBOSE ] && echo "#Step: Link(llvm-link) bc --> lnkd.bc ..."
runcmd "$HCC2/bin/$CMD_LLL $TMPDIR/$FNAME.bc $BCFILES -o $TMPDIR/$FNAME.lnkd.bc"
if [ $GENLL ] ; then
[ $VV ] && echo
[ $VERBOSE ] && echo "#Step: Disassemble lnkd.bc --> lnkd.ll ..."
runcmd "$HCC2/bin/$CMD_LLA -o $TMPDIR/$FNAME.lnkd.ll $TMPDIR/$FNAME.lnkd.bc"
if [ "$OUTDIR" != "$TMPDIR" ] ; then
runcmd "cp $TMPDIR/$FNAME.lnkd.ll $OUTDIR/$FNAME.lnkd.ll"
fi
fi
if [ $LLVMOPT != 0 ] ; then
[ $VV ] && echo
[ $VERBOSE ] && echo "#Step: Optimize(opt) lnkd.bc --> final.bc -O$LLVMOPT ..."
runcmd "$HCC2/bin/$CMD_OPT -o $TMPDIR/$FNAME.final.bc $TMPDIR/$FNAME.lnkd.bc"
if [ $GENLL ] ; then
[ $VV ] && echo
[ $VERBOSE ] && echo "#Step: Disassemble final.bc --> final.ll ..."
runcmd "$HCC2/bin/$CMD_LLA -o $TMPDIR/$FNAME.final.ll $TMPDIR/$FNAME.final.bc"
if [ "$OUTDIR" != "$TMPDIR" ] ; then
runcmd "cp $TMPDIR/$FNAME.final.ll $OUTDIR/$FNAME.final.ll"
fi
fi
LLC_BC="final"
else
# No optimization so generate object for lnkd bc.
LLC_BC="lnkd"
fi
[ $VV ] && echo
[ $VERBOSE ] && echo "#Step: llc mcpu=$LC_MCPU $LLC_BC.bc --> amdgcn ..."
runcmd "$HCC2/bin/$CMD_LLC -o $TMPDIR/$FNAME.gcn $TMPDIR/$FNAME.$LLC_BC.bc"
[ $VV ] && echo
[ $VERBOSE ] && echo "#Step: ld.lld gcn --> hsaco ..."
if [ $NOSHARED ] ; then
SHAREDARG=""
else
SHAREDARG="-shared"
fi
# FIXME: Why does shared sometimes cause the -fPIC problem ?
runcmd "$HCC2/bin/ld.lld $TMPDIR/$FNAME.gcn --no-undefined $SHAREDARG -o $OUTDIR/$OUTFILE"
fi # end of if quickpath then ... else ...
if [ $GENASM ] ; then
[ $VV ] && echo
[ $VERBOSE ] && echo "#Step: llvm-objdump hsaco --> .s ..."
textstarthex=`readelf -S -W $OUTDIR/$OUTFILE | grep .text | awk '{print $6}'`
textstart=$((0x$textstarthex))
textszhex=`readelf -S -W $OUTDIR/$OUTFILE | grep .text | awk '{print $7}'`
textsz=$((0x$textszhex))
countclause=" count=$textsz skip=$textstart"
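  # The pipeline below carves the raw .text section out of the hsaco with dd and feeds the
  # bytes to llvm-mc for disassembly; the readelf columns used above (6 = offset, 7 = size)
  # are assumed to match this readelf's section-header output format.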
dd if=$OUTDIR/$OUTFILE of=$OUTDIR/$FNAME.raw bs=1 $countclause 2>/dev/null
hexdump -v -e '/1 "0x%02X "' $OUTDIR/$FNAME.raw | $HCC2/bin/llvm-mc -arch=amdgcn -mcpu=$LC_MCPU -disassemble >$OUTDIR/$FNAME.s 2>$OUTDIR/$FNAME.s.err
rm $OUTDIR/$FNAME.raw
if [ "$LC_MCPU" == "kaveri" ] ; then
echo "WARNING: Disassembly not supported for Kaveri. See $FNAME.s.err"
else
rm $OUTDIR/$FNAME.s.err
echo "#INFO File $OUTDIR/$FNAME.s contains amdgcn assembly"
fi
fi
# cleanup
do_err 0
exit 0
|
ROCm-Developer-Tools/hcc2
|
utils/bin/cloc.sh
|
Shell
|
mit
| 17,425 |
#!/usr/bin/env bash
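# Note: docker -v needs an absolute host path; a bare "conf.txt" would be treated as a named
# volume, so the config file is referenced via $(pwd) below (adjust if run from another directory).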
docker run -it --rm=true \
-v /mnt/Seagate/Dropbox:/home/pi/rclone/Dropbox \
    -v "$(pwd)/conf.txt":/root/.rclone.conf \
ianscrivener/rpi-rclone-dropbox \
rclone --transfers=1 -v copy /home/pi/rclone/Dropbox/Photos Dropbox:/Photo
|
ianscrivener/rpi-rclone-dropbox
|
run-interactively.sh
|
Shell
|
mit
| 246 |
#!/bin/bash
bowtie=/shared/workspace/software/bowtie-1.0.1/bowtie
bowtie_index=/shared/workspace/software/bowtie_index/hsapiens_hg19/genome
input_fastq=$1
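# Usage: alignment.sh <reads.fastq>
# Writes <reads.fastq>.sam next to the input; alignment is skipped if that file already exists.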
if [ ! -f "$input_fastq.sam" ]; then
$bowtie $bowtie_index $input_fastq > $input_fastq.sam
fi
exit 0
|
ucsd-ccbb/jupyter-genomics
|
src/awsCluster/server/ChipSeqPipeline/homer_workflow/scripts/alignment.sh
|
Shell
|
mit
| 263 |
#!/usr/bin/env bash
set -ex
set -o pipefail
### Update /etc/hosts
if ! grep -q passenger.test /etc/hosts; then
cat >>/etc/hosts <<-EOF
127.0.0.1 passenger.test
127.0.0.1 mycook.passenger.test
127.0.0.1 zsfa.passenger.test
127.0.0.1 norails.passenger.test
127.0.0.1 1.passenger.test 2.passenger.test 3.passenger.test
127.0.0.1 4.passenger.test 5.passenger.test 6.passenger.test
127.0.0.1 7.passenger.test 8.passenger.test 9.passenger.test
127.0.0.1 rack.test foobar.test
EOF
fi
### Preset dpkg
# None of the packages to install require interaction, but a couple still expect an available
# stdin. With this, they'll know it isn't.
export DEBIAN_FRONTEND=noninteractive
### Update keys/certificates
apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 7F438280EF8D349F # Puppet
apt-get update && apt-get install --reinstall ca-certificates
### Update bashrc and bash profile
if ! grep -q bashrc.mine /etc/bash.bashrc; then
echo ". /etc/bash.bashrc.mine" >> /etc/bash.bashrc
fi
if ! grep -q bashrc.mine /home/vagrant/.bashrc; then
echo ". /etc/bash.bashrc.mine" >> /home/vagrant/.bashrc
fi
if ! grep -q /vagrant /home/vagrant/.profile; then
echo "if tty -s; then cd /vagrant; fi" >> /home/vagrant/.profile
fi
cp /vagrant/dev/vagrant/bashrc /etc/bash.bashrc.mine
cp /vagrant/dev/vagrant/sudoers.conf /etc/sudoers.d/passenger
chmod 440 /etc/sudoers.d/passenger
### Install native dependencies
curl -sS https://dl.yarnpkg.com/debian/pubkey.gpg | apt-key add -
echo "deb https://dl.yarnpkg.com/debian/ stable main" > /etc/apt/sources.list.d/yarn.list
curl -sL https://deb.nodesource.com/setup_6.x | bash -
apt-get update
apt-get install -y build-essential git bash-completion ccache wget \
libxml2-dev libxslt1-dev libsqlite3-dev libcurl4-openssl-dev libpcre3-dev \
ruby ruby-dev nodejs yarn \
apache2-mpm-worker apache2-threaded-dev
### Install basic gems
if [[ ! -e /usr/local/bin/rake ]]; then
gem install rake --no-rdoc --no-ri --version="=12.2.1"
fi
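# Presumably a fallback: if the pinned rake install above did not produce /usr/local/bin/rake,
# install whatever version is current.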
if [[ ! -e /usr/local/bin/rake ]]; then
gem install rake --no-rdoc --no-ri
fi
if [[ ! -e /usr/local/bin/bundler ]]; then
gem install bundler --no-rdoc --no-ri --version="=1.15.4"
fi
### Install Phusion Passenger development dependencies
pushd /vagrant
if [[ ! -e ~/.test_deps_installed ]]; then
rake test:install_deps SUDO=1 DEPS_TARGET=~/bundle
touch ~/.test_deps_installed
else
bundle install --path ~/bundle
fi
popd
### Install Nginx source code
pushd /home/vagrant
if [[ ! -e nginx ]]; then
sudo -u vagrant -H git clone -b branches/stable-1.6 https://github.com/nginx/nginx.git
fi
sudo -u vagrant -H mkdir -p nginx/inst/conf
sudo -u vagrant -H cp /vagrant/dev/vagrant/nginx_start nginx/start
if [[ ! -e nginx/Rakefile ]]; then
sudo -u vagrant -H cp /vagrant/dev/vagrant/nginx_rakefile nginx/Rakefile
fi
if [[ ! -e nginx/inst/conf/nginx.conf ]]; then
sudo -u vagrant -H cp /vagrant/dev/vagrant/nginx.conf nginx/inst/conf/
fi
if [[ ! -e nginx/nginx.conf && ! -h nginx/nginx.conf ]]; then
sudo -u vagrant -H ln -s inst/conf/nginx.conf nginx/nginx.conf
fi
if [[ ! -e nginx/access.log && ! -h nginx/access.log ]]; then
sudo -u vagrant -H ln -s inst/logs/access.log nginx/access.log
fi
if [[ ! -e nginx/error.log && ! -h nginx/error.log ]]; then
sudo -u vagrant -H ln -s inst/logs/error.log nginx/error.log
fi
popd
### Set up Apache
should_restart_apache=false
cp /vagrant/dev/vagrant/apache_ports.conf /etc/apache2/ports.conf
cp /vagrant/dev/vagrant/apache_default_site.conf /etc/apache2/sites-available/000-default.conf
if [[ ! -e /etc/apache2/mods-available/passenger.conf ]]; then
cp /vagrant/dev/vagrant/apache_passenger.conf /etc/apache2/mods-available/passenger.conf
fi
if [[ ! -e /etc/apache2/mods-available/passenger.load ]]; then
cp /vagrant/dev/vagrant/apache_passenger.load /etc/apache2/mods-available/passenger.load
fi
if [[ ! -e /etc/apache2/sites-available/010-rack.test.conf ]]; then
cp /vagrant/dev/vagrant/apache_rack_test.conf /etc/apache2/sites-available/010-rack.test.conf
a2ensite 010-rack.test
should_restart_apache=true
fi
if $should_restart_apache; then
service apache2 restart
fi
|
phusion/passenger
|
dev/vagrant/provision.sh
|
Shell
|
mit
| 4,122 |
#!/usr/bin/env bash
set -e
REPO="$(pwd)"
ROOT="$REPO/.."
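# VSCODE_QUALITY and VERSION are assumed to be provided by the CI environment (e.g. as
# pipeline variables); they are not set anywhere in this script.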
# Publish Web Client
WEB_BUILD_NAME="vscode-web"
WEB_TARBALL_FILENAME="vscode-web.tar.gz"
WEB_TARBALL_PATH="$ROOT/$WEB_TARBALL_FILENAME"
rm -rf $ROOT/vscode-web.tar.*
(cd $ROOT && tar --owner=0 --group=0 -czf $WEB_TARBALL_PATH $WEB_BUILD_NAME)
node build/azure-pipelines/common/publish.js "$VSCODE_QUALITY" "web-standalone" archive-unsigned "$WEB_TARBALL_FILENAME" "$VERSION" true "$WEB_TARBALL_PATH"
|
mjbvz/vscode
|
build/azure-pipelines/web/publish.sh
|
Shell
|
mit
| 462 |
#!/usr/bin/env bash
SCM_THEME_PROMPT_PREFIX="${bold_blue}|${yellow}"
SCM_THEME_PROMPT_SUFFIX=" "
SCM_THEME_PROMPT_CLEAN=" ${bold_green}✓${bold_blue}|"
SCM_THEME_PROMPT_DIRTY=" ${bold_red}✗${bold_blue}|"
SCM_NONE_CHAR=""
function prompt_command() {
local last_stats="$?"
if [ $last_stats -eq 0 ]; then
local status_color="${bold_green}"
else
local status_color="${bold_red}"
fi
user_host=
if [[ $BASH_IT_PROMPT_SHOW_USER == "1" ]]; then
[[ $BASH_IT_PROMPT_HIDE_HOST == "1" ]] && user_host+="${blue}u:"
user_host+="${bold_blue}\u"
fi
if [[ $BASH_IT_PROMPT_HIDE_HOST != "1" ]]; then
[[ $BASH_IT_PROMPT_SHOW_USER == "1" ]] && user_host+="${blue}@"
if [[ $BASH_IT_PROMPT_HOST ]]; then
user_host+="${bold_blue}${BASH_IT_PROMPT_HOST}"
else
user_host+="${bold_blue}\h"
fi
fi
[[ $user_host != "" ]] && user_host="${user_host} "
PS1="${reset_color}${normal}${status_color}➜ ${user_host}${bold_cyan}\w${bold_blue} $(scm_char)$(scm_prompt_info)${reset_color}${normal} "
unset user_host
}
PROMPT_COMMAND=prompt_command;
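# Minimal usage sketch (assuming a standard Bash-it install): keep this file at
# $BASH_IT/custom/themes/leggiero/leggiero.theme.bash and set BASH_IT_THEME='leggiero'
# in ~/.bash_profile or ~/.bashrc before Bash-it is loaded.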
|
EduardoLeggiero/dotfiles
|
bash_it_custom/themes/leggiero/leggiero.theme.bash
|
Shell
|
mit
| 1,155 |
#!/bin/bash
SWARM_CSS_DIR=../../project/war/dougkoellmer_com
DK_CSS_DIR=../../project/src/com/dougkoellmer/shared/css
source ./config_app_version.sh
## mega hack to make sure spinner sprite plate uses app version
SPINNER_CSS=$DK_CSS_DIR/spinner.css
sed -E "s/url\('(.*)'\)/url('\1?v=$CURRENT_VERSION')/g" $SPINNER_CSS > "$SPINNER_CSS.temp"
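# e.g. the sed above rewrites url('spinner_plate.png') into url('spinner_plate.png?v=$CURRENT_VERSION')
# (file name illustrative; CURRENT_VERSION comes from config_app_version.sh).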
chmod 777 "$SPINNER_CSS.temp"
cat "$SPINNER_CSS.temp" > $DK_CSS_DIR/spinner.temp.css
rm "$SPINNER_CSS.temp"
java -jar ../../project/lib/swarm/tools/yuicompressor-2.4.8.jar $DK_CSS_DIR/cell_content.css -o $DK_CSS_DIR/cell_content.temp.css
java -jar ../../project/lib/swarm/tools/yuicompressor-2.4.8.jar $DK_CSS_DIR/spinner.temp.css -o $DK_CSS_DIR/spinner.temp.css
cat $SWARM_CSS_DIR/min.css $DK_CSS_DIR/cell_content.temp.css $DK_CSS_DIR/spinner.temp.css > $SWARM_CSS_DIR/min.temp.css
rm $SWARM_CSS_DIR/min.css
rm $DK_CSS_DIR/cell_content.temp.css
rm $DK_CSS_DIR/spinner.temp.css
cp $SWARM_CSS_DIR/min.temp.css $SWARM_CSS_DIR/min.css
rm $SWARM_CSS_DIR/min.temp.css
|
dougkoellmer/dougkoellmer_com
|
scripts/client/minify_css.sh
|
Shell
|
mit
| 1,013 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for USN-2915-1
#
# Security announcement date: 2016-03-01 00:00:00 UTC
# Script generation date: 2017-01-01 21:05:14 UTC
#
# Operating System: Ubuntu 14.04 LTS
# Architecture: i686
#
# Vulnerable packages fix on version:
# - python-django:1.6.1-2ubuntu0.12
#
# Last versions recommended by security team:
# - python-django:1.6.1-2ubuntu0.16
#
# CVE List:
# - CVE-2016-2512
# - CVE-2016-2513
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo apt-get install --only-upgrade python-django=1.6.1-2ubuntu0.16 -y
|
Cyberwatch/cbw-security-fixes
|
Ubuntu_14.04_LTS/i686/2016/USN-2915-1.sh
|
Shell
|
mit
| 658 |
#!/usr/bin/env bash
MY_DIR="$(dirname "$0")"
JUNK_DIR=${MY_DIR}/../junk
echo
echo "Installing latest bash..."
brew install bash
#TODO: These require sudo, figure out how to handle this
# Get password from encrypted file
I_GOTZ_CRED=$(openssl rsautl -decrypt -inkey $JUNK_DIR/junk_rsa -in $JUNK_DIR/.mi.6)
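# The encrypted file is assumed to have been produced earlier with the matching public key,
# e.g.: echo -n 'password' | openssl rsautl -encrypt -pubin -inkey junk_rsa.pub -out .mi.6
# (key and file names illustrative).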
echo
echo "Adding /usr/local/bin/bash to /etc/shells"
echo $I_GOTZ_CRED | sudo -S sh -c 'echo "/usr/local/bin/bash" >> /etc/shells'
echo
echo "Changing default shell to latest bash..."
echo $I_GOTZ_CRED | sudo -S chsh -s /usr/local/bin/bash $(whoami)
# Immediately unset the I_GOTZ_CRED variable
unset I_GOTZ_CRED
|
rbaumbach/JumperCables
|
installz/bash.sh
|
Shell
|
mit
| 628 |