code (stringlengths 2–1.05M) | repo_name (stringlengths 5–110) | path (stringlengths 3–922) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 2–1.05M)
---|---|---|---|---|---
#!/bin/sh
##
# Copies the directories contained inside the user's Data directory
# into the application bundle.
##
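# Usage: CopyData.sh [link|copy|echo] (defaults to "link", i.e. symlinking).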
srcDir=${SRCROOT}/../../../Data
dstDir=${BUILT_PRODUCTS_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}
# Parse argument.
case "${1}" in
"link" )
CP="ln -sf";;
"copy" )
CP="cp -rf";;
"echo" )
CP="echo";;
* )
# Default used when no argument is given.
CP="ln -sf";;
esac
#echo file=${file}
#echo src=${src}
#echo dst=${dst}
#echo srcroot=${SRCROOT}
for src in ${srcDir}/*
do
case ${src} in
*_museum )
;;
*model )
;;
*skeleton )
;;
* )
${CP} ${src} ${dstDir}
;;
esac
done
|
LudoSapiens/Dev
|
Tools/IDE/Xcode/Script/CopyData.sh
|
Shell
|
mit
| 697 |
#!/usr/bin/env bash
set -e
set -u
set -o pipefail
# Run all the tests
./tests/test-highway.sh
./tests/test-GRU.sh
./tests/test-BatchNorm.sh
./tests/test-dense_dropout_functional.sh
./tests/test-lstm_functional.sh
./tests/test-merge-graph.sh
./tests/test-time-distributed-dense.sh
./tests/test-gru-sequence.sh
./tests/check-version-number.sh
./tests/check-conversion.sh
./tests/test-leaky-relu.sh
./tests/test-elu.sh
./tests/test-unsplit-model.sh
./tests/test-SimpleRNN.sh
|
lwtnn/lwtnn
|
tests/test-runner.sh
|
Shell
|
mit
| 474 |
#!/usr/bin/env bash
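# Tear down the compose stack, then delete ALL local Docker images (destructive; not limited to this project).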
docker-compose down
docker rmi $(docker images -q)
|
onekit/rest-tutorial
|
clean.sh
|
Shell
|
mit
| 70 |
#! /bin/bash
source ./install_helpers.sh ""
PKG_NAME="openssl"
PKG_INSTALLED_FILE="$SWEET_LOCAL_SOFTWARE_DST_DIR/lib/libssl.so"
PKG_URL_SRC="openssl-1.1.1.tar.gz"
config_setup
config_package $@
config_exec ./config --prefix=$SWEET_LOCAL_SOFTWARE_DST_DIR
config_make_default
if [ "${HOSTNAME:0:10}" != "mpp2-login" ]; then
# Skip "make test" on CoolMUC (mpp2-login*), where one test fails :-(
config_exec make test
fi
config_make_install
config_success
|
schreiberx/sweet
|
local_software/install_openssl.sh
|
Shell
|
mit
| 457 |
#!/bin/bash
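# Register the Messenger welcome screen (new-thread call to action) via the Graph API v2.6;
# the page access token is read from ./accessToken. The greeting below is Chinese for
# "Feeling empty, lonely, cold? You can talk to me >///<".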
curl -X POST -H "Content-Type: application/json" -d '{
"setting_type":"call_to_actions",
"thread_state":"new_thread",
"call_to_actions":[
{
"message":{
"text":"空虛、寂寞、覺得冷嗎?你可以跟我說說話喔>///<"
}
}
]
}' "https://graph.facebook.com/v2.6/libGF/thread_settings?access_token=`cat accessToken`"
|
libGF/libGirlfriendFramework
|
welcome.sh
|
Shell
|
mit
| 372 |
#!/bin/bash
# Tally daily git commit volume (lines added/deleted) per repository
for dir in qmzb qmzb-android h5 im-server push-server qmzb-api-docs qmzb-ios web-live web-login web-money web-shake lua qmzb-web-login;do
#yesterday-time
years=`date -d "yesterday" +%Y`
mons=`date -d "yesterday" +%m`
days=`date -d "yesterday" +%d`
hears=`date -d "yesterday" +%H`
#today-time
year=`date +%Y`
mon=`date +%m`
day=`date +%d`
hear=`date +%H`
#
cd /script/gitlab/$dir
git pull &> /dev/null
/script/gitlab/gitstat.sh --since="$years"/"$mons"/"$days" --until="$year"/"$mon"/"$day" -p /script/gitlab/$dir >> /script/gitlab/git/count_`date +%F`.log
done
cp /script/gitlab/git/count_`date +%F`.log /script/gitlab/git/aliyun.log
git_name=("zhoukeke2448" "xiang.ou" "hanlongfei3650" "15333038192" "spd2904" "2759455183" "210813289" "zdq7318" "ouxiang5154" "weijinpeng9928" "zwx1851" "gaojian1131" "zhaoshuguang" "wei815559417" "decheng7747" "cfq0050")
real_name=("周珂珂" "欧翔" "韩龙飞" "王海龙" "武倩辉" "陈文富" "刘香苇" "赵建超" "欧翔" "魏金鹏" "张文祥" "高健" "赵曙光" "魏爱军" "余德成" "马辉")
# Post-process the log: map git usernames to real names
grep -v Author /script/gitlab/git/count_$(date +%F).log |awk -F ' ' '{print $1}' |sort |uniq |while read names;do
#sed -n '1p' count_$(date +%F).log >> /script/gitlab/git/all_count_`date +%F`.log
if [ ${#git_name[@]} == ${#real_name[@]} ];then
for (( s=0;s<${#git_name[@]};s++)) do
if [ "$names" == ${git_name[s]} ] ;then
sed -i "s/$names/${real_name[s]}/g" /script/gitlab/git/count_$(date +%F).log
fi
done
else
echo "Git脚本新增人员添加错误,请重新添加。" > /script/gitlab/git/count_`date +%F`.log
fi
done
#sed -i '/^Author*/d' /script/gitlab/git/count_$(date +%F).log
txt=`grep -v Author /script/gitlab/git/count_$(date +%F).log`
echo -e "Author\tAdd\tDelete\tCommit\tProject" > /script/gitlab/git/count_$(date +%F).log
echo "$txt" >> /script/gitlab/git/count_$(date +%F).log
# Upload to the Aliyun ECS host
txt1=`grep -v "Author" /script/gitlab/git/aliyun.log`
echo "$txt1" > /script/gitlab/git/aliyun.log
scp -P 12138 /script/gitlab/git/aliyun.log [email protected]:/home/qmzb/Git_count
#rm /script/gitlab/git/aliyun.log
|
gong-long/gong-long.github.io
|
chang用脚本/gitlab/count_git/count_git.sh
|
Shell
|
mit
| 2,142 |
#!/bin/sh
SOURCEDIR=~/work/metis_projects/mcnulty_banking/d3
HTMLTARGETDIR=~/work/fdurant_github_io_source/_posts/projects/banking-project
OTHERHTMLTARGETDIR=~/work/fdurant_github_io_source/projects/banking-project
CSSTARGETDIR=~/work/fdurant_github_io_source/projects/banking-project
JSONTARGETDIR=~/work/fdurant_github_io_source/projects/banking-project
JSTARGETDIR=~/work/fdurant_github_io_source/projects/banking-project
cp $SOURCEDIR/*.css $CSSTARGETDIR/.
cp $SOURCEDIR/*.json $JSONTARGETDIR/.
cp $SOURCEDIR/*.js $JSTARGETDIR/.
cp $SOURCEDIR/index.html $HTMLTARGETDIR/.
cp $SOURCEDIR/[RLP]*.html $OTHERHTMLTARGETDIR/.
|
fdurant/fdurant_github_io_source
|
_posts/projects/banking-project/copy.sh
|
Shell
|
mit
| 625 |
#! /bin/bash
source ./install_helpers.sh ""
PKG_NAME="MPICH"
PKG_INSTALLED_FILE="$SWEET_LOCAL_SOFTWARE_DST_DIR/bin/mpicc"
PKG_URL_SRC="mpich-3.3.2.tar.gz"
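# MPICH's configure expects FC/FCFLAGS and rejects the deprecated F90/F90FLAGS, so map them over and unset the old names.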
export FC=$F90
export FCFLAGS=$F90FLAGS
unset F90
unset F90FLAGS
config_setup
config_package $@
config_configure
config_make_default_install
config_success
|
schreiberx/sweet
|
local_software/install_mpich.sh
|
Shell
|
mit
| 321 |
#! /usr/local/bin/zsh
# first import the dataset
mongoimport --drop -d blog -c posts data/posts.f52bca51f2fb.json
|
nmarley/mongo-univ-hw
|
week7/final4-load.sh
|
Shell
|
mit
| 117 |
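# Crude startup ordering: give MySQL a fixed 15 s head start before launching the app jar.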
echo "Waiting for MySQL connection..."
sleep 15
echo "Executing Web Server..."
exec java -jar /home/seriescarpincho.jar
|
erkike/daw
|
docker/web/wait.sh
|
Shell
|
mit
| 120 |
# Source this file to run it in your shell:
#
# source internal/test.sh
#
# Any arguments passed will go directly to the tox command line, e.g.:
#
# source internal/test.sh -e py27
#
# Which would test just Python 2.7.
# We just want the name of the directory to pass to the Python
# script. So rmdir it, then let the Python script re-create. Not the
# prettiest or safest operation, but it should be fine.
PPT_TEMP_DIR=$(mktemp -d /tmp/python-project-template-XXXXXXXXXX)
rmdir "$PPT_TEMP_DIR"
python internal/test.py "$PPT_TEMP_DIR" "$@"
pushd "$PPT_TEMP_DIR"
ppt_finished() {
popd
rm -rf "$PPT_TEMP_DIR"
}
echo
echo "Run \`ppt_finished' when done to return to the template project and delete this directory."
|
xguse/trenderbot
|
internal/test.sh
|
Shell
|
mit
| 726 |
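# Show the Kinect RGB stream in ROS image_view using the "compressed" image transport.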
rosrun image_view image_view image:=/head_mount_kinect/rgb/image_color compressed
|
djbutler/figleaf
|
scripts/image_view.sh
|
Shell
|
mit
| 82 |
#!/bin/sh
qdbus de.naptower.Baulicht / addText "Morse" 3
qdbus de.naptower.Baulicht / addText "Licht" 3
qdbus de.naptower.Baulicht / start
|
elnappo/Baulicht
|
Service/tests/test.sh
|
Shell
|
mit
| 141 |
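# Refresh the Kongregate badges feed; writing to badges.tmp and renaming only on success keeps badges.js intact if the download fails.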
cd www/zalbee/kong
echo "badges = " > badges.tmp
wget -o wget.log -O - http://www.kongregate.com/badges.json >> badges.tmp && mv badges.tmp badges.js
|
zAlbee/kong-graphs
|
updateBadges.sh
|
Shell
|
mit
| 150 |
#!/bin/sh
cron -L15
rsyslogd
/bin/sh -c "/opt/activemq/bin/activemq console"
|
vulhub/vulhub
|
base/activemq/5.11.1/with-cron/entrypoint.sh
|
Shell
|
mit
| 79 |
if [ ! -d ~/.workspaces ]; then
mkdir ~/.workspaces
fi
cp "workspace.sh" ~/.workspaces
cp "version" ~/.workspaces
cp "help" ~/.workspaces
if [ -f ~/.bashrc ]; then
echo "source ~/.workspaces/workspace.sh" >> ~/.bashrc
fi
if [ -f ~/.zshrc ]; then
if [ -d ~/.oh-my-zsh ]; then
cp -r workspace ~/.oh-my-zsh/plugins
fi
echo "source ~/.workspaces/workspace.sh" >> ~/.zshrc
fi
|
Geweldig/Workman
|
install.sh
|
Shell
|
mit
| 378 |
#!/bin/bash
function check_package_installed(){
if dpkg --get-selections | grep -q "^$1[[:space:]]*install$" >/dev/null; then
return 0
else
return 1
fi
}
package_to_check=ipvsadm
if check_package_installed $package_to_check; then
echo "$package_to_check is already installed"
else
echo "Installing $package_to_check"
apt-get update
apt-get install $package_to_check
fi
if [ ! -f ./nsenter ]; then
echo "File ./nsenter does not exists. Installing nsenter"
docker run --rm jpetazzo/nsenter cat /nsenter > ./nsenter && sudo chmod +x ./nsenter
fi
|
nitinmidha/docker-swarm-mode-test
|
swarmtest/Scripts/setup-ipvsadm-nsenter.sh
|
Shell
|
mit
| 598 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for DSA-2899-1
#
# Security announcement date: 2014-04-09 00:00:00 UTC
# Script generation date: 2017-01-01 21:06:53 UTC
#
# Operating System: Debian 7 (Wheezy)
# Architecture: x86_64
#
# Vulnerable packages fixed in version:
# - openafs:1.6.1-3+deb7u2
#
# Latest versions recommended by the security team:
# - openafs:1.6.1-3+deb7u7
#
# CVE List:
# - CVE-2014-0159
# - CVE-2014-2852
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo apt-get install --only-upgrade openafs=1.6.1-3+deb7u7 -y
|
Cyberwatch/cbw-security-fixes
|
Debian_7_(Wheezy)/x86_64/2014/DSA-2899-1.sh
|
Shell
|
mit
| 634 |
#!/bin/bash
#SBATCH --partition=mono
#SBATCH --ntasks=1
#SBATCH --time=4-0:00
#SBATCH --mem-per-cpu=8000
#SBATCH -J Deep-DAE_MLP_7_bot_bin_DAE_tanh
#SBATCH -e Deep-DAE_MLP_7_bot_bin_DAE_tanh.err.txt
#SBATCH -o Deep-DAE_MLP_7_bot_bin_DAE_tanh.out.txt
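# Submit with "sbatch <this file>"; the directives above request one task on the "mono" partition for up to 4 days with 8 GB per CPU.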
source /etc/profile.modules
module load gcc
module load matlab
cd ~/deepLearn && srun ./deepFunction 7 'DAE' 'MLP' '128 500 1500 1000 2000 250 10' '0 1 1 1 1 1 1' '7_bot_bin' 'DAE_tanh' "'iteration.n_epochs', 'learning.lrate', 'use_tanh', 'noise.drop', 'noise.level', 'rica.cost', 'cae.cost'" '200 1e-3 1 0.1 0.1 0 0' "'iteration.n_epochs', 'use_tanh'" '200 1'
|
aciditeam/matlab-ts
|
jobs/deepJobs_DAE_MLP_7_bot_bin_DAE_tanh.sh
|
Shell
|
mit
| 629 |
#! /bin/bash
pushd reveal.js
grunt css-themes
popd
cp -rf reveal.js/css static/reveal.js/
cp -rf reveal.js/js static/reveal.js/
cp -rf reveal.js/lib static/reveal.js/
cp -rf reveal.js/plugin static/reveal.js/
|
asimonet/website
|
update_css.sh
|
Shell
|
cc0-1.0
| 211 |
# --- T2-COPYRIGHT-NOTE-BEGIN ---
# This copyright note is auto-generated by ./scripts/Create-CopyPatch.
#
# T2 SDE: target/pkgdist/build.sh
# Copyright (C) 2006 The T2 SDE Project
#
# More information can be found in the files COPYING and README.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License. A copy of the
# GNU General Public License can be found in the file COPYING.
# --- T2-COPYRIGHT-NOTE-END ---
#
#Description: Distribute binary packages to (remote) location
if [ "$SDECFG_TARGET_PKGDIST_LOCATION" ]; then
echo_header "Package distribution"
case "$SDECFG_TARGET_PKGDIST_LOCATION" in
http:*|ftp:*)
echo_warning "Remote package distribution not supported yet"
;;
*)
echo_status "Copying package files to $SDECFG_TARGET_PKGDIST_LOCATION..."
mkdir -p $SDECFG_TARGET_PKGDIST_LOCATION
cp -a $base/build/$SDECFG_ID/TOOLCHAIN/pkgs/* $SDECFG_TARGET_PKGDIST_LOCATION/
;;
esac
fi
exit
|
arete/t2
|
target/share/pkgdist/build.sh
|
Shell
|
gpl-2.0
| 1,062 |
#!/bin/sh
# Copyright (C) 2012 Glen Pitt-Pladdy
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
# See: http://www.pitt-pladdy.com/blog/_20091114-134615_0000_Apache_stats_on_Cacti_via_SNMP_/
CACHETIME=30
CACHEFILE=/var/local/snmp/cache/apache
# check for cache file newer CACHETIME seconds ago
if [ ! -f $CACHEFILE ]; then
touch -d yesterday $CACHEFILE
fi
if [ $((`date +%s`-`stat --format=%Y $CACHEFILE`)) -ge $CACHETIME ]; then
# update the data
wget --output-document=$CACHEFILE.TMP.$$ --user-agent='SNMP Apache Stats' 'http://localhost/server-status?auto' >/dev/null 2>&1
mv $CACHEFILE.TMP.$$ $CACHEFILE
fi
# output the data in order (this is because some platforms don't have them all)
for field in \
'Total Accesses' \
'Total kBytes' \
'CPULoad' \
'Uptime' \
'ReqPerSec' \
'BytesPerSec' \
'BytesPerReq' \
'BusyWorkers' \
'IdleWorkers'
do
if [ "$field" = 'Total kBytes' ]; then
echo $((`grep "^$field:" <$CACHEFILE | sed "s/^$field: *//"`*1024))
else
grep "^$field:" <$CACHEFILE | sed "s/^$field: *//"
fi
done
# count up the scorecard
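# Scoreboard legend (mod_status): "_" waiting, "S" starting, "R" reading request,
# "W" sending reply, "K" keepalive read, "D" DNS lookup, "C" closing,
# "L" logging, "G" gracefully finishing, "I" idle cleanup, "." open slot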
scorecard=`grep ^Scoreboard: $CACHEFILE | sed 's/^Scoreboard: *//'`
echo -n $scorecard | sed 's/[^_]//g' | wc -c
echo -n $scorecard | sed 's/[^S]//g' | wc -c
echo -n $scorecard | sed 's/[^R]//g' | wc -c
echo -n $scorecard | sed 's/[^W]//g' | wc -c
echo -n $scorecard | sed 's/[^K]//g' | wc -c
echo -n $scorecard | sed 's/[^D]//g' | wc -c
echo -n $scorecard | sed 's/[^C]//g' | wc -c
echo -n $scorecard | sed 's/[^L]//g' | wc -c
echo -n $scorecard | sed 's/[^G]//g' | wc -c
echo -n $scorecard | sed 's/[^I]//g' | wc -c
echo -n $scorecard | sed 's/[^\.]//g' | wc -c
|
casep/isc_coding
|
trakautomation/cacti-apache/apache-stats.sh
|
Shell
|
gpl-2.0
| 2,295 |
#!/bin/bash
# Script to find your public MAC address.
# v1.1 - Modified to use /sbin/ip and nothing else
# David Cantrell 8/31/2008
# v1.0 - Initial script (mymac.sh)
#
# Copyright (C) 2008 James Bair <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
IP=/sbin/ip
${IP} >/dev/null 2>&1
if [ $? -eq 127 ]; then
echo "${IP} command not found, exiting." >&2
exit 1
fi
get_external_interface() {
${IP} route list | while read routename a b c devname remainder ; do
if [ "${routename}" = "default" ]; then
echo ${devname}
break
else
continue
fi
done
}
get_macaddr() {
interface="${1}"
if [ -z "${interface}" ]; then
return
fi
${IP} link show ${interface} | while read desc macaddr remainder ; do
if [ "${desc}" = "link/ether" ]; then
echo "${macaddr}"
break
else
continue
fi
done
}
interface="$(get_external_interface)"
if [ -z "${interface}" ]; then
echo "No public interface found, cannot determine MAC address." >&2
exit 1
fi
get_macaddr ${interface}
|
tsuehpsyde/misc
|
bash/getmac.sh
|
Shell
|
gpl-2.0
| 1,813 |
#!/bin/bash
################################################################################
# Base constants
# Define the time-out unless it is already set as an environment variable
if [ -z "$MAXTIME" ] ; then
MAXTIME="30"
fi
if [ -z "$SSHP" ] ; then
SSHP="2222"
fi
# if [ -z "$PROCESSOR" ] ; then
# MAXTIME="C1"
# fi
################################################################################
# Parse the command line
if [ "$1" = "-help" ] ; then
echo "usage "`basename $0`" [-help]"
echo " "`basename $0`" disk-image command-to-operate [-tool toolname] [-test testname]"
exit
elif [ $# -lt 2 ] ; then
echo "bad invocation, please run '"`basename $0`" -help' for documentation"
exit
fi
VMPATH="$1"
COMMAND="$2"
shift 2
if [ "$1" = "-tool" ] ; then
THE_TOOL_NAME=$2
shift 2
else
THE_TOOL_NAME="no_tool"
fi
if [ "$1" = "-test" ] ; then
THE_TEST_NAME=$2
shift 2
else
THE_TEST_NAME="test_with_no_name"
fi
if [ -z "$RUN_IDENTIFIER" ] ; then
RUN_IDENTIFIER="run-id"
fi
################################################################################
# Miscellaneous settings
if [ -z "$A_F_RADIX" ] ; then
LOG_FILE="$OUTPUT_DIR/node_${NODE_NUMBER}_CSV/run-${THE_TOOL_NAME}-${THE_TEST_NAME}-${RUN_IDENTIFIER}.csv"
else
LOG_FILE="$OUTPUT_DIR/node_${NODE_NUMBER}_CSV/run-${A_F_RADIX}-${THE_TOOL_NAME}-${THE_TEST_NAME}-${RUN_IDENTIFIER}.csv"
fi
if [ -z "$SUM_FILE" ] ; then
SUM_FILE="/data1/CSV/summary-${THE_TOOL_NAME}.csv"
fi # otherwise it is exported by the caller
STDERR_FILE="$OUTPUT_DIR/node_${NODE_NUMBER}_OUTPUTS/log-${THE_TOOL_NAME}-${THE_TEST_NAME}-${RUN_IDENTIFIER}.stderr"
################################################################################
# Get to work!
# start the VM
sh vm.sh $VMPATH 2> /dev/null
# run the command
ssh -i "$BENCHKIT_DIR/bk-private_key" -p "$SSHP" $VM_LOGIN@localhost "$COMMAND" 2>> "$STDERR_FILE"
# stop the VM
#ssh -i "$BENCHKIT_DIR/bk-private_key" -p "$SSHP" root@localhost "halt"
|
cesaro/cunf-mcc2014
|
ToolSubmissionKit/launch_a_command.sh
|
Shell
|
gpl-2.0
| 1,981 |
#!/usr/bin/env bash
#
# Author: Kim Hallén <[email protected]>
#
# iptables.sh generates iptables rules for a standalone server.
# By default only SSH is allowed on port 22. The script can be
# customized by changing the variables under the
# "Dynamic script variables" section. Both IPv4 and IPv6 are
# taken into account.
#
# iptables.sh downloads a list of known spammers from spamhaus.org
# and the netfilter extension ipset is used to handle the large
# amount of networks. Therefore, you need to make sure ipset
# is installed on your system:
#
# sudo apt-get install ipset
#
# Verified on the following systems:
# Debian 7.0 (32-bit / 64-bit)
# Ubuntu 12.04 (32-bit / 64-bit)
#
# The script is not compatible with CentOS 5.x and 6.x, since
# ipset differs when you install through yum. But the functionality
# is implemented for future references.
#
#
#==============================================================================
# Dynamic script variables
#==============================================================================
# If you want to block any specific IP-addresses, specify them here.
mySpammers_v4="./spammers-v4.txt"
mySpammers_v6="./spammers-v6.txt"
# The Spamhaus drop list. If you don't want to download
# the drop list, simply comment it out.
spamhaus_drop="http://www.spamhaus.org/drop/drop.txt"
# Log prefix to show in syslog when a rule is matched.
spamhaus_log_prefix="SPAMHAUS: "
spammers_log_prefix="BLACKLISTED NETS: "
ssh_log_prefix="SSH: "
# SSH port to open. Leave blank to skip.
ssh_port="22"
# TCP and UDP ports to open. Separate with white spaces.
# Leave blank to skip.
tcp_ports=""
udp_ports=""
#==============================================================================
# Variables
#==============================================================================
PATH="/sbin:/usr/sbin:/bin:/usr/bin:$PATH"
SYSLOGGER="/usr/bin/logger"
IPSET="/usr/sbin/ipset"
IPSET_RULES="/etc/ipset.up.rules"
IPT="/sbin/iptables"
IPT_SAVE="/sbin/iptables-save"
IPT_RULES="/etc/iptables.up.rules"
IPT_UP="/etc/network/if-pre-up.d/iptables"
IPT6="/sbin/ip6tables"
IPT6_SAVE="/sbin/ip6tables-save"
IPT6_RULES="/etc/ip6tables.up.rules"
#==============================================================================
# Functions
#==============================================================================
log_it() {
echo "$1"
$SYSLOGGER -p info "$1" -t "iptables"
}
check_path_exist() {
if ! which $1 > /dev/null 2>&1; then
log_it "Couldn't find $1. Exiting..."
exit 1
fi
}
check_path() {
check_path_exist "which"
check_path_exist $IPT
check_path_exist $IPT6
check_path_exist $IPT_SAVE
check_path_exist $IPT6_SAVE
check_path_exist $SYSLOGGER
check_path_exist $IPSET
}
get_username() {
username=$(id -un)
if [[ $username != "root" ]]; then
echo "You need to be root."
exit 1
fi
}
get_os() {
my_os=$(awk 'NR==1{ printf "%s", tolower($1) }' /etc/issue)
if [[ $my_os == "debian" || $my_os == "ubuntu" ]]; then
my_os="deb"
elif [[ $my_os == "redhat" || $my_os == "centos" ]]; then
my_os="rpm"
else
echo "Unknown operating system."
exit 1
fi
}
set_debian() {
# Check if iptables file exist
if [ ! -f $IPT_UP ]; then
touch $IPT_UP
chmod +x $IPT_UP
echo '#!/bin/bash' > $IPT_UP
echo "IPSET=\$(which ipset)" >> $IPT_UP
echo "IPSET_RULES=\"$IPSET_RULES\"" >> $IPT_UP
echo "\$IPSET restore < \$IPSET_RULES" >> $IPT_UP
echo "IPT_RESTORE=\$(which iptables-restore)" >> $IPT_UP
echo "IPT_RULES=\"$IPT_RULES\"" >> $IPT_UP
echo "\$IPT_RESTORE < \$IPT_RULES" >> $IPT_UP
echo "IPT6_RESTORE=\$(which ip6tables-restore)" >> $IPT_UP
echo "IPT6_RULES=\"$IPT6_RULES\"" >> $IPT_UP
echo "\$IPT6_RESTORE < \$IPT6_RULES" >> $IPT_UP
fi
}
flush_rules() {
# IPv4
$IPT -F
$IPT -X
$IPT -t nat -F
$IPT -t nat -X
$IPT -t mangle -F
$IPT -t mangle -X
$IPT -P INPUT $1
$IPT -P OUTPUT $1
$IPT -P FORWARD $1
# IPv6
$IPT6 -F
$IPT6 -X
$IPT6 -t mangle -F
$IPT6 -t mangle -X
$IPT6 -P INPUT $1
$IPT6 -P OUTPUT $1
$IPT6 -P FORWARD $1
# Destroy ipsets
IPSET_SETS=$($IPSET -list -n)
for sets in $IPSET_SETS; do
$IPSET destroy $sets
done
# Accept localhost and established,related if iptables is being activated
if [[ $2 == "start" ]]; then
$IPT -A INPUT -i lo -j ACCEPT
$IPT -A OUTPUT -o lo -j ACCEPT
$IPT -A INPUT -m state --state ESTABLISHED,RELATED -j ACCEPT
$IPT -A OUTPUT -m state --state NEW,ESTABLISHED,RELATED -j ACCEPT
$IPT6 -A INPUT -i lo -j ACCEPT
$IPT6 -A OUTPUT -o lo -j ACCEPT
$IPT6 -A INPUT -m state --state ESTABLISHED,RELATED -j ACCEPT
$IPT6 -A OUTPUT -m state --state NEW,ESTABLISHED,RELATED -j ACCEPT
fi
}
spamhaus() {
# Make sure user wants to download spammers list
if [[ $spamhaus_drop != "" ]]; then
# Inform user on what's going on
echo "Downloading known spammers from spamhaus.org..."
# Create a temporary file
spamhaus_file="/tmp/spammers-$(date +'%Y%m%d%H%M%S').txt"
# Download drop list from spamhaus.org
if wget $spamhaus_drop -O $spamhaus_file > /dev/null 2>&1; then
# Save all IP addresses in $spamhaus
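# (drop.txt entries typically look like "1.10.16.0/20 ; SBL256894"; lines starting with ';' or '#' are comments)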
spamhaus=$(egrep -v '^$|^;|^#' $spamhaus_file | awk '{ print $1 }')
# Create a new chain for iptables and ipset
$IPT -N SPAMHAUS
$IPSET create SPAMHAUS hash:net
# Add spammers to list
for spammer in $spamhaus; do
$IPSET add SPAMHAUS $spammer
done
# Set SPAMHAUS logging and drop matching packages
$IPT -A INPUT -m set --match-set SPAMHAUS src -j SPAMHAUS
$IPT -A SPAMHAUS -j LOG --log-level info --log-prefix "$spamhaus_log_prefix"
$IPT -A SPAMHAUS -j DROP
else
log_it "Spamhaus download failed with ERR_CODE $?. Skipping..."
fi
fi
}
block_spammers() {
# No spoofing / no bad packets
$IPT -A INPUT ! -i lo -s 127.0.0.0/8 -j DROP
$IPT -A INPUT -m state --state INVALID -j DROP
get_interfaces
for interface in $interfaces; do
# NMAP FIN/URG/PSH
$IPT -A INPUT -p tcp --tcp-flags ALL FIN,URG,PSH -j LOG --log-level info --log-prefix "PORTSCAN: "
$IPT -A INPUT -i $interface -p tcp --tcp-flags ALL FIN,URG,PSH -j DROP
# Stop Xmas Tree type scanning
$IPT -A INPUT -i $interface -p tcp --tcp-flags ALL ALL -j DROP
$IPT -A INPUT -i $interface -p tcp --tcp-flags ALL SYN,RST,ACK,FIN,URG -j DROP
# Stop NULL scanning
$IPT -A INPUT -i $interface -p tcp --tcp-flags ALL NONE -j DROP
# SYN/RST
$IPT -A INPUT -i $interface -p tcp --tcp-flags SYN,RST SYN,RST -j DROP
# SYN/FIN
$IPT -A INPUT -i $interface -p tcp --tcp-flags SYN,FIN SYN,FIN -j DROP
done
# Stop SYN-flood
$IPT -N SYNFLOOD
$IPT -A SYNFLOOD -j LOG --log-level info --log-prefix "POSSIBLE SYNFLOOD: "
$IPT -A SYNFLOOD -p tcp --syn -m limit --limit 1/s -j DROP
$IPT -A INPUT -p tcp -m state --state NEW -j SYNFLOOD
# Stop ping-flood
$IPT -N PING
$IPT -A SYNFLOOD -j LOG --log-level info --log-prefix "POSSIBLE PINGFLOOD: "
$IPT -A PING -p icmp --icmp-type echo-request -m limit --limit 1/second -j DROP
$IPT -I INPUT -p icmp --icmp-type echo-request -m state --state NEW -j PING
# Stop user defined IPv4 spammers
if [ -f $mySpammers_v4 ]; then
# Give user some info
echo "Blocking spammers in $mySpammers_v4"
# Save all IP addresses in $spammers_v4
spammers_v4=$(egrep -v '^$|^;|^#' $mySpammers_v4 | awk '{ print $1 }')
# Create a new chain
$IPT -N SPAMMERS_v4
$IPSET create SPAMMERS_v4 hash:net
# Add spammers to list
for spammer in $spammers_v4; do
$IPSET add SPAMMERS_v4 $spammer
done
# Set SPAMMERS_v4 logging and drop matching packages
$IPT -A INPUT -m set --match-set SPAMMERS_v4 src -j SPAMMERS_v4
$IPT -A SPAMMERS_v4 -j LOG --log-level info --log-prefix "$spammers_log_prefix"
$IPT -A SPAMMERS_v4 -j DROP
else
echo "Couldn't find $mySpammers_v4. Skipping..."
fi
# Stop user defined IPv6 spammers
if [ -f $mySpammers_v6 ]; then
# Give user some info
echo "Blocking spammers in $mySpammers_v6"
# Save all IP addresses in $spammers_v6
spammers_v6=$(egrep -v '^$|^;|^#' $mySpammers_v6 | awk '{ print $1 }')
# Create a new chain
$IPT6 -N SPAMMERS_v6
$IPSET create SPAMMERS_v6 hash:net family inet6
# Add spammers to list
for spammer in $spammers_v6; do
$IPSET add SPAMMERS_v6 $spammer
done
# Set SPAMMERS_v6 logging and drop matching packages
$IPT6 -A INPUT -m set --match-set SPAMMERS_v6 src -j SPAMMERS_v6
$IPT6 -A SPAMMERS_v6 -j LOG --log-level info --log-prefix "$spammers_log_prefix"
$IPT6 -A SPAMMERS_v6 -j DROP
else
echo "Couldn't find $mySpammers_v6. Skipping..."
fi
}
get_interfaces() {
interfaces=$(ip link show | grep -w "UP" | egrep -v "lo" | awk -F ":" '{ printf "%s", $2 }')
}
get_ip() {
# Variables
device=$1
version=$2
# Check if IPv4 and IPv6 exists on interfaces before getting it.
if [ $version == 4 ]; then
ip_addr_v4=$(ip -$version addr show dev $device | awk '{ if ($2 ~ /^[0-9]/) print $2 }' | cut -d "/" -f 1)
elif [ $version == 6 ]; then
ip_addr_v6=$(ip -$version addr show dev $device | awk '{ if ($2 ~ /^2001/) print $2 }' | cut -d "/" -f 1)
fi
}
set_rules_v4() {
# Call on functions
get_interfaces
# Allow ports
for interface in $interfaces; do
get_ip "$interface" "4"
if [[ $ip_addr_v4 != "" ]]; then
# Allow SSH and log it
if [[ $ssh_port != "" ]]; then
$IPT -A INPUT -p tcp --dport $ssh_port -j LOG --log-level info --log-prefix "$ssh_log_prefix"
$IPT -A INPUT -p tcp -i $interface -d $ip_addr_v4 --dport $ssh_port -m state --state NEW -j ACCEPT
fi
# Allow TCP ports
if [[ $tcp_ports != "" ]]; then
for port in $tcp_ports; do
$IPT -A INPUT -p tcp -i $interface -d $ip_addr_v4 --dport $port -m state --state NEW -j ACCEPT
done
fi
# Allow UDP ports
if [[ $udp_ports != "" ]]; then
for port in $udp_ports; do
$IPT -A INPUT -p udp -i $interface -d $ip_addr_v4 --dport $port -m state --state NEW -j ACCEPT
done
fi
else
echo "$device doesn't have an IPv4 address. Skipping..."
fi
done
}
set_rules_v6() {
# Call on functions
get_interfaces
# Allow ports
for interface in $interfaces; do
get_ip "$interface" "6"
if [[ $ip_addr_v6 != "" ]]; then
# Allow ICMPv6
$IPT6 -A INPUT -i $interface -p ipv6-icmp -m state --state NEW -j ACCEPT
# Allow SSH and log it
if [[ $ssh_port != "" ]]; then
$IPT6 -A INPUT -p tcp --dport $ssh_port -j LOG --log-level info --log-prefix "$ssh_log_prefix"
$IPT6 -A INPUT -p tcp -i $interface -d $ip_addr_v6 --dport $ssh_port -m state --state NEW -j ACCEPT
fi
# Allow TCP ports
if [[ $tcp_ports != "" ]]; then
for port in $tcp_ports; do
$IPT6 -A INPUT -p tcp -i $interface -d $ip_addr_v6 --dport $port -m state --state NEW -j ACCEPT
done
fi
# Allow UDP ports
if [[ $udp_ports != "" ]]; then
for port in $udp_ports; do
$IPT6 -A INPUT -p udp -i $interface -d $ip_addr_v6 --dport $port -m state --state NEW -j ACCEPT
done
fi
else
echo "$device doesn't have an IPv6 address. Skipping..."
fi
done
}
list_rules() {
if [ $1 == 4 ]; then
$IPT -nvL --line-numbers
elif [ $1 == 6 ]; then
$IPT6 -nvL --line-numbers
fi
}
post_actions() {
# Make sure IPv4 forwarding is disabled
echo "Disabling IPv4 forwarding."
echo "0" > /proc/sys/net/ipv4/ip_forward
# Make sure IPv6 forwarding is disabled
echo "Disabling IPv6 forwarding."
echo "0" > /proc/sys/net/ipv6/conf/all/forwarding
# Cleanup
if [ -f $spamhaus_file ]; then
echo "Cleaning up temporary files."
rm -rf $spamhaus_file
fi
# Save ruleset
echo "Saving iptables and ipsets."
$IPSET save -output save > $IPSET_RULES
if [[ $my_os == "rpm" ]]; then
/sbin/service iptables save
/sbin/service ip6tables save
else
$IPT_SAVE > $IPT_RULES
$IPT6_SAVE > $IPT6_RULES
chmod 0600 $IPT_RULES
chmod 0600 $IPT6_RULES
fi
}
#==============================================================================
# Script arguments
#==============================================================================
case "$1" in
start)
log_it "Activating iptables ruleset."
check_path
get_os
get_username
if [[ $my_os == "deb" ]]; then set_debian; fi
flush_rules "DROP" "start"
spamhaus
block_spammers
set_rules_v4
set_rules_v6
post_actions
;;
stop)
log_it "Flushing iptables ruleset."
flush_rules "ACCEPT"
post_actions
;;
status_v4)
list_rules "4"
;;
status_v6)
list_rules "6"
;;
*)
echo "Usage: ./$(basename $0) {start|stop|status_v4|status_v6}" >&2
exit 3
;;
esac
#==============================================================================
# Exit success
#==============================================================================
exit 0
|
telnetmaster/mixed-space
|
bash/iptables.sh
|
Shell
|
gpl-2.0
| 14,270 |
#!/bin/bash
../../pm3 -c "script run testembedded_grab.py" -i
|
samyk/proxmark3
|
client/experimental_client_with_swig/02b_run_test_py_grabber.sh
|
Shell
|
gpl-2.0
| 63 |
#!/bin/sh
#LD_LIBRARY_PATH=../../../libpisock/.libs gdb --args python pisocktests.py $*
LD_LIBRARY_PATH=../../../libpisock/.libs python pisocktests.py $*
|
unwiredben/pilot-link
|
bindings/Python/test/run.sh
|
Shell
|
gpl-2.0
| 157 |
#!/bin/sh
project=`oraccopt`
if [ "$project" == "" ]; then
echo otf2odt.sh: must be run from a project directory
exit 1
fi
driverpath=$1
if [ "$driverpath" == "" ]; then
if [ -r 00lib/project.otf ]; then
driverpath=00lib/project.otf
else
echo otf2odt.sh: no .otf file given on command line and no 00lib/project.otf
exit 1
fi
fi
keep=$2
driverdir=`dirname $driverpath`
drivername=`basename $driverpath`
driverbase=`basename $drivername .otf`
odtdir=01bld/`basename $drivername .otf`
rm -fr $odtdir ; mkdir -p $odtdir
### ox -1 is tabular serial; -0 is paragraph serial
echo ox -0 -P$project -d$odtdir $driverpath
ox -0 -P$project -d$odtdir $driverpath | xmllint --xinclude - | xsltproc - \
| xsltproc ${ORACC}/lib/scripts/odt-table-width.xsl - \
| xsltproc -stringparam package "$odtdir" ${ORACC}/lib/scripts/doc-split.xsl - \
2>$odtdir/$driverbase.log
cwd=`pwd`; cd $odtdir
mkdir -p META-INF
echo '<?xml version="1.0" encoding="utf-8"?>' >META-INF/manifest.xml
echo '<manifest:manifest xmlns:manifest="urn:oasis:names:tc:opendocument:xmlns:manifest:1.0">' \
>>META-INF/manifest.xml
echo '<manifest:file-entry manifest:media-type="application/vnd.oasis.opendocument.text" manifest:full-path="/"/>' \
>>META-INF/manifest.xml
for a in *.xml ; \
do echo "<manifest:file-entry manifest:media-type=\"text/xml\" manifest:full-path=\"$a\"/>" \
>>META-INF/manifest.xml ; \
done
mkdir -p pictures ; odtpictures.plx >>META-INF/manifest.xml
echo '</manifest:manifest>' >>META-INF/manifest.xml
/bin/echo -n 'application/vnd.oasis.opendocument.text' >mimetype
pwd
zip -q -X ../`basename $odtdir`.odt mimetype
zip -q -X -g -r ../`basename $odtdir`.odt *.xml META-INF
cd $cwd
odtdir=$ORACC/$project/00any/odt
mkdir -p $odtdir
mv 01bld/$driverbase.odt $odtdir
mv 01bld/$driverbase.log $odtdir
|
oracc/oracc
|
misc/otf/otf2odt.sh
|
Shell
|
gpl-2.0
| 1,828 |
#!/bin/bash
if [ ${#4} -lt 1 ]; then
/bin/echo "msa2prf.sh <protnamefile> <fastadir> <msadir> <outdir>"
exit
fi
####### hard coded directories and progs###########
modhmmblastdir=modhmmblast
################################################
workingdir=`/bin/mktemp -d $TMPPATH/run_msa2prf_XXXXXXXXXX` || exit 1
pid=$$
#args
protnamefile=$1
fastadir=$2
msadir=$3
outdir=$4
basedir=$workingdir/BLAST_TEMP_$pid
/bin/mkdir $basedir
/bin/mkdir $basedir/MOD_FILES
/bin/mkdir $basedir/MOD_FILES_QUERY
/bin/mkdir $basedir/CHECK_FILES
/usr/bin/perl ${modhmmblastdir}/msa62mod.pl ${protnamefile} 2 ${msadir} $basedir/MOD_FILES ${fastadir}
/usr/bin/perl ${modhmmblastdir}/mod2modquery.pl ${protnamefile} $basedir/MOD_FILES $basedir/MOD_FILES_QUERY
/usr/bin/perl ${modhmmblastdir}/mod_upper_caser.pl $protnamefile $basedir/MOD_FILES_QUERY
/usr/bin/perl ${modhmmblastdir}/mod2prfmod_nolabelfile.pl $protnamefile $fastadir $basedir/MOD_FILES_QUERY $outdir
/bin/rm -r $workingdir
|
ElofssonLab/TOPCONS2
|
topcons2_webserver/workflow/modhmmblast/msa2prf.sh
|
Shell
|
gpl-2.0
| 985 |
#!/bin/sh
# Include config to find out OpenWRT trunk revision
. ../../config.mk
# Package name, taken as current folder name
PKG_NAME="${PWD##*/}"
# Make sure the package index exists!
[ -f ../.packages_path.mk ] || $(cd .. && ./index_scan.rb > /dev/null)
# Path to package, taken from .packages_path.mk. It will be something like
# PK_NAME_25volt="/home/ryzhovau/Entware/openwrt_trunk/feeds/rtndev/25volt/"
PK_NAME_STR=$(grep ^PK_NAME_${PKG_NAME}= ../.packages_path.mk)
# Take a substring between double quotes, remove trailing slash
FULL_PATH=$(echo $PK_NAME_STR | cut -d'"' -f2 | sed 's/\/$//')
# Find full path to OpenWRT trunk folder to cut it out from $FULL_PATH
ROOT_PATH=$(cd ../../../openwrt_trunk && pwd -P)
# Short path to package folder, relative to the openwrt_trunk folder
SHORT_PATH=${FULL_PATH#${ROOT_PATH}/}
# Basic packages from openwrt_trunk/package are not indexed; try
# to find the package manually
[ -z "$SHORT_PATH" ] && \
SHORT_PATH=$(find ../../../openwrt_trunk/package/ -type d -name $PKG_NAME | \
sed 's/^\.\.\/\.\.\/\.\.\/openwrt_trunk\///' | head -n 1)
# Nothing found? Give up.
if [ -z "$SHORT_PATH" ];
then
echo "No package found. Current folder name is matched with no any package."
exit 1
fi
# Take a recursive diff on package folder if "-r" parameter is given,
# or diff on Makefiles otherwise.
case $1 in
-r)
diff -urx .svn ../../../downloads/openwrt_trunk-q${OPENWRT_REVISION}/${SHORT_PATH} \
../../../openwrt_trunk/${SHORT_PATH}
;;
*)
diff -u ../../../downloads/openwrt_trunk-q${OPENWRT_REVISION}/${SHORT_PATH}/Makefile \
../../../openwrt_trunk/${SHORT_PATH}/Makefile
;;
esac
|
zyxmon/qnapware
|
packages/fix-path.sh
|
Shell
|
gpl-2.0
| 1,648 |
#! /bin/sh
# Copyright (C) 2011-2013 Free Software Foundation, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# TAP support: more unusual forms for valid TAP input.
# See also related test 'tap-fancy.sh'.
. test-init.sh
. tap-setup.sh
#
# From manpage Test::Harness::TAP(3):
#
# Lines written to standard output matching /^(not )?ok\b/ must be
# interpreted as test lines. All other lines must not be considered
# test output.
#
# Unfortunately, the exact format of TODO and SKIP directives is not as
# clearly described in that manpage; but a simple reverse-engineering of
# the prove(1) utility shows that it is probably given by the perl regex
# /#\s*(TODO|SKIP)\b/.
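# For example, "ok 1 # TODO later" and "not ok 2 #TODO" both carry a TODO
# directive under that regex, while "ok 3 # TODOS" does not (no word boundary).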
#
# To avoid problems with backslashes in echo arguments.
xecho () { printf '%s\n' "$*"; }
# There are 34 values for $str ...
for str in \
\' \
'"' \
'`' \
'#' \
'$' \
'!' \
'\' \
'/' \
'&' \
'%' \
'(' \
')' \
'|' \
'^' \
'~' \
'?' \
'*' \
'+' \
'-' \
',' \
':' \
';' \
'=' \
'<' \
'>' \
'@' \
'[' \
']' \
'{' \
'}' \
'\\' \
'...' \
'?[a-zA-Z0-9]*' \
'*.*' \
; do
# ... each of them add 1 pass, 1 fail, ...
xecho "ok${str}"
xecho "not ok${str}"
# ... and (generally) 4 skips, 4 xfails, and 4 xpasses ...
for settings in \
'result="ok" directive=SKIP' \
'result="not ok" directive=TODO' \
'result="ok" directive=TODO' \
; do
eval "$settings"
xecho "${result}# ${directive}${str}"
# ... but 6 skips, 6 xpasses and 6 xfails are to be removed, since
# they might not work with $str = '#' or $str = '\' ...
if test x"$str" != x'#' && test x"$str" != x'\'; then
xecho "${result}${str}#${directive}"
xecho "${result}${str}# ${tab}${tab} ${directive}"
xecho "${result}${str}#${directive}${str}"
fi
done
done > all.test
# Sanity check against a previous use of unportable usages of backslashes
# with the "echo" builtin.
if grep '[^\\]\\#' all.test; then
framework_failure_ "writing backslashes in all.test"
fi
# ... so that we finally have:
pass=34
fail=34
xfail=130 # = 4 * 34 - 6
xpass=130 # = 4 * 34 - 6
skip=130 # = 4 * 34 - 6
error=0
total=$(($pass + $fail + $xfail + $xpass + $skip))
# Even nastier! But according to the spec, it should still work.
for result in 'ok' 'not ok'; do
echo "${result}{[(<#${tab}TODO>)]}" >> all.test
done
echo "ok{[(<#${tab}SKIP>)]}" >> all.test
# We have to update some test counts.
xfail=$(($xfail + 1))
xpass=$(($xpass + 1))
skip=$(($skip + 1))
total=$(($total + 3))
# And add the test plan!
echo 1..$total >> all.test
$MAKE check >stdout && { cat stdout; exit 1; }
cat stdout
$EGREP '^(PASS|FAIL|SKIP).*#.*TODO' stdout && exit 1
$EGREP '^X?(PASS|FAIL).*#.*SKIP' stdout && exit 1
count_test_results total=$total pass=$pass fail=$fail skip=$skip \
xpass=$xpass xfail=$xfail error=$error
:
|
GavinSmith0123/automake-amplain
|
t/tap-fancy2.sh
|
Shell
|
gpl-2.0
| 3,459 |
#!/bin/sh
# Copyright (C) 2008-2013 Red Hat, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
SKIP_WITH_LVMPOLLD=1
. lib/inittest
aux prepare_pvs 5
vgcreate $vg1 "$dev1"
vgcreate $vg2 "$dev3" "$dev4" "$dev5"
aux disable_dev "$dev1"
pvscan
# dev1 is missing
fail pvs $(cat DEVICES)
vgcreate $vg1 "$dev2"
aux enable_dev "$dev1"
pvs "$dev1"
# reappearing device (rhbz 995440)
lvcreate -aey -m2 --type mirror -l4 --alloc anywhere --corelog -n $lv1 $vg2
aux disable_dev "$dev3"
lvconvert --yes --repair $vg2/$lv1
aux enable_dev "$dev3"
# here it should fix any reappeared devices
lvs $vg1 $vg2
lvs -a $vg2 -o+devices 2>&1 | tee out
not grep reappeared out
vgremove -ff $vg1 $vg2
|
shehbazj/DyRe
|
test/shell/lvmcache-exercise.sh
|
Shell
|
gpl-2.0
| 1,057 |
#!/usr/bin/env bash
sudo -v
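# Keep the sudo timestamp alive until this script exits.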
while true; do sudo -n true; sleep 60; kill -0 "$$" || exit; done 2>/dev/null &
# sane standby
sudo pmset -a standbydelay 86400
#sound off
sudo nvram SystemAudioVolume=" "
# green highlight
defaults write NSGlobalDomain AppleHighlightColor -string "0.764700 0.976500 0.568600"
#save dialog
defaults write NSGlobalDomain NSNavPanelExpandedStateForSaveMode -bool true
defaults write NSGlobalDomain NSNavPanelExpandedStateForSaveMode2 -bool true
#piss off, quarantine
defaults write com.apple.LaunchServices LSQuarantine -bool false
#disable auto-quit and standby of apps
defaults write NSGlobalDomain NSDisableAutomaticTermination -bool true
#moreinfo in login
sudo defaults write /Library/Preferences/com.apple.loginwindow AdminHostInfo HostName
#smart quotes off
defaults write NSGlobalDomain NSAutomaticQuoteSubstitutionEnabled -bool false
#smart dashes off
defaults write NSGlobalDomain NSAutomaticDashSubstitutionEnabled -bool false
# Set a custom wallpaper image. `DefaultDesktop.jpg` is already a symlink, and
# all wallpapers are in `/Library/Desktop Pictures/`. The default is `Wave.jpg`.
#rm -rf ~/Library/Application Support/Dock/desktoppicture.db
#sudo rm -rf /System/Library/CoreServices/DefaultDesktop.jpg
#sudo ln -s /path/to/your/image /System/Library/CoreServices/DefaultDesktop.jpg
# Disable hibernation (speeds up entering sleep mode)
sudo pmset -a hibernatemode 0
# Remove the sleep image file to save disk space
#sudo rm /Private/var/vm/sleepimage
# Create a zero-byte file instead…
#sudo touch /Private/var/vm/sleepimage
# …and make sure it can’t be rewritten
#sudo chflags uchg /Private/var/vm/sleepimage
# Enable full keyboard access for all controls
# (e.g. enable Tab in modal dialogs)
defaults write NSGlobalDomain AppleKeyboardUIMode -int 3
# Use scroll gesture with the Ctrl (^) modifier key to zoom
defaults write com.apple.universalaccess closeViewScrollWheelToggle -bool true
defaults write com.apple.universalaccess HIDScrollZoomModifierMask -int 262144
# Follow the keyboard focus while zoomed in
defaults write com.apple.universalaccess closeViewZoomFollowsFocus -bool true
# Disable press-and-hold for keys in favor of key repeat
defaults write NSGlobalDomain ApplePressAndHoldEnabled -bool false
# Set a blazingly fast keyboard repeat rate
defaults write NSGlobalDomain KeyRepeat -int 0
# Show icons for hard drives, servers, and removable media on the desktop
defaults write com.apple.finder ShowExternalHardDrivesOnDesktop -bool true
defaults write com.apple.finder ShowHardDrivesOnDesktop -bool true
defaults write com.apple.finder ShowMountedServersOnDesktop -bool true
defaults write com.apple.finder ShowRemovableMediaOnDesktop -bool true
# Finder: show all filename extensions
defaults write NSGlobalDomain AppleShowAllExtensions -bool true
# Finder: show status bar
defaults write com.apple.finder ShowStatusBar -bool true
# Finder: show path bar
defaults write com.apple.finder ShowPathbar -bool true
# Finder: allow text selection in Quick Look
defaults write com.apple.finder QLEnableTextSelection -bool true
# Display full POSIX path as Finder window title
defaults write com.apple.finder _FXShowPosixPathInTitle -bool true
# Enable spring loading for directories
defaults write NSGlobalDomain com.apple.springing.enabled -bool true
# Remove the spring loading delay for directories
defaults write NSGlobalDomain com.apple.springing.delay -float 0
# Disable disk image verification
defaults write com.apple.frameworks.diskimages skip-verify -bool true
defaults write com.apple.frameworks.diskimages skip-verify-locked -bool true
defaults write com.apple.frameworks.diskimages skip-verify-remote -bool true
# Show item info near icons on the desktop and in other icon views
#/usr/libexec/PlistBuddy -c "Set :DesktopViewSettings:IconViewSettings:showItemInfo true" ~/Library/Preferences/com.apple.finder.plist
#/usr/libexec/PlistBuddy -c "Set :FK_StandardViewSettings:IconViewSettings:showItemInfo true" ~/Library/Preferences/com.apple.finder.plist
#/usr/libexec/PlistBuddy -c "Set :StandardViewSettings:IconViewSettings:showItemInfo true" ~/Library/Preferences/com.apple.finder.plist
# Show item info to the right of the icons on the desktop
#/usr/libexec/PlistBuddy -c "Set DesktopViewSettings:IconViewSettings:labelOnBottom false" ~/Library/Preferences/com.apple.finder.plist
# Enable snap-to-grid for icons on the desktop and in other icon views
#/usr/libexec/PlistBuddy -c "Set :DesktopViewSettings:IconViewSettings:arrangeBy grid" ~/Library/Preferences/com.apple.finder.plist
#/usr/libexec/PlistBuddy -c "Set :FK_StandardViewSettings:IconViewSettings:arrangeBy grid" ~/Library/Preferences/com.apple.finder.plist
#/usr/libexec/PlistBuddy -c "Set :StandardViewSettings:IconViewSettings:arrangeBy grid" ~/Library/Preferences/com.apple.finder.plist
# Increase grid spacing for icons on the desktop and in other icon views
#/usr/libexec/PlistBuddy -c "Set :DesktopViewSettings:IconViewSettings:gridSpacing 100" ~/Library/Preferences/com.apple.finder.plist
#/usr/libexec/PlistBuddy -c "Set :FK_StandardViewSettings:IconViewSettings:gridSpacing 100" ~/Library/Preferences/com.apple.finder.plist
#/usr/libexec/PlistBuddy -c "Set :StandardViewSettings:IconViewSettings:gridSpacing 100" ~/Library/Preferences/com.apple.finder.plist
# Increase the size of icons on the desktop and in other icon views
#/usr/libexec/PlistBuddy -c "Set :DesktopViewSettings:IconViewSettings:iconSize 80" ~/Library/Preferences/com.apple.finder.plist
#/usr/libexec/PlistBuddy -c "Set :FK_StandardViewSettings:IconViewSettings:iconSize 80" ~/Library/Preferences/com.apple.finder.plist
#/usr/libexec/PlistBuddy -c "Set :StandardViewSettings:IconViewSettings:iconSize 80" ~/Library/Preferences/com.apple.finder.plist
chflags nohidden ~/Library
# Enable highlight hover effect for the grid view of a stack (Dock)
defaults write com.apple.dock mouse-over-hilite-stack -bool true
# Minimize windows into their application’s icon
defaults write com.apple.dock minimize-to-application -bool true
# Enable spring loading for all Dock items
defaults write com.apple.dock enable-spring-load-actions-on-all-items -bool true
# Show indicator lights for open applications in the Dock
defaults write com.apple.dock show-process-indicators -bool true
# Speed up Mission Control animations
defaults write com.apple.dock expose-animation-duration -float 0.1
# Don’t group windows by application in Mission Control
# (i.e. use the old Exposé behavior instead)
defaults write com.apple.dock expose-group-by-app -bool false
# Hot corners
# Possible values:
# 0: no-op
# 2: Mission Control
# 3: Show application windows
# 4: Desktop
# 5: Start screen saver
# 6: Disable screen saver
# 7: Dashboard
# 10: Put display to sleep
# 11: Launchpad
# 12: Notification Center
# Top left screen corner → Mission Control
#defaults write com.apple.dock wvous-tl-corner -int 2
#defaults write com.apple.dock wvous-tl-modifier -int 0
# Top right screen corner → Desktop
#defaults write com.apple.dock wvous-tr-corner -int 4
#defaults write com.apple.dock wvous-tr-modifier -int 0
# Bottom left screen corner → Start screen saver
#defaults write com.apple.dock wvous-bl-corner -int 5
#defaults write com.apple.dock wvous-bl-modifier -int 0
|
chrishewlings/Projects
|
bash/fuckoff_osx.sh
|
Shell
|
gpl-2.0
| 7,312 |
#!/bin/sh
targetname=backups.local
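# Look up the IET target id (tid) for $targetname in /proc/net/iet/volume.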
tid=$(cat /proc/net/iet/volume | grep $targetname | awk '{print $1}' | cut -d : -f 2)
sudo blockdev --setrw $1
sudo ietadm --op new --lun=0 --tid=$tid --params Path=$1,Type=fileio,IOMode=wb
|
TimJDFletcher/personal-scripts
|
bin/iscsi-export.sh
|
Shell
|
gpl-2.0
| 225 |
#!/usr/bin/env bash
cd module
string="Button_Plot_Photograph=Attach Photograph"
replacement="Button_Plot_Photograph=Plot Photograph"
perl -0777 -i.original -pe "s/\\Q$string/$replacement/igs" english.0.properties
string="Button_Marker_Photograph=Attach Photograph"
replacement="Button_Marker_Photograph=Marker Photograph"
perl -0777 -i.original -pe "s/\\Q$string/$replacement/igs" english.0.properties
string="Button_Motif_Photograph=Attach Photograph"
replacement="Button_Motif_Photograph=Motif Photograph"
perl -0777 -i.original -pe "s/\\Q$string/$replacement/igs" english.0.properties
rm english.0.properties.original
|
FAIMS/gravestones
|
postproc.sh
|
Shell
|
gpl-2.0
| 626 |
defaults write com.apple.spotlight orderedItems -array \
'{"enabled" = 1;"name" = "APPLICATIONS";}' \
'{"enabled" = 1;"name" = "SYSTEM_PREFS";}' \
'{"enabled" = 1;"name" = "DIRECTORIES";}' \
'{"enabled" = 1;"name" = "PDF";}' \
'{"enabled" = 1;"name" = "FONTS";}' \
'{"enabled" = 0;"name" = "DOCUMENTS";}' \
'{"enabled" = 0;"name" = "MESSAGES";}' \
'{"enabled" = 0;"name" = "CONTACT";}' \
'{"enabled" = 0;"name" = "EVENT_TODO";}' \
'{"enabled" = 0;"name" = "IMAGES";}' \
'{"enabled" = 0;"name" = "BOOKMARKS";}' \
'{"enabled" = 0;"name" = "MUSIC";}' \
'{"enabled" = 0;"name" = "MOVIES";}' \
'{"enabled" = 0;"name" = "PRESENTATIONS";}' \
'{"enabled" = 0;"name" = "SPREADSHEETS";}' \
'{"enabled" = 0;"name" = "SOURCE";}' \
'{"enabled" = 0;"name" = "MENU_DEFINITION";}' \
'{"enabled" = 0;"name" = "MENU_OTHER";}' \
'{"enabled" = 0;"name" = "MENU_CONVERSION";}' \
'{"enabled" = 0;"name" = "MENU_EXPRESSION";}' \
'{"enabled" = 0;"name" = "MENU_WEBSEARCH";}' \
'{"enabled" = 0;"name" = "MENU_SPOTLIGHT_SUGGESTIONS";}'
# Load new settings before rebuilding the index
killall mds > /dev/null 2>&1
# Make sure indexing is enabled for the main volume
sudo mdutil -i on / > /dev/null
# Rebuild the index from scratch
sudo mdutil -E / > /dev/null
echo "Expanding the save panel by default"
defaults write NSGlobalDomain NSNavPanelExpandedStateForSaveMode -bool true
defaults write NSGlobalDomain PMPrintingExpandedStateForPrint -bool true
defaults write NSGlobalDomain PMPrintingExpandedStateForPrint2 -bool true
echo "Automatically quit printer app once the print jobs complete"
defaults write com.apple.print.PrintingPrefs "Quit When Finished" -bool true
echo ""
echo "Save to disk, rather than iCloud, by default? (y/n)"
defaults write NSGlobalDomain NSDocumentSaveNewDocumentsToCloud -bool false
echo "Reveal IP address, hostname, OS version, etc. when clicking the clock in the login window"
sudo defaults write /Library/Preferences/com.apple.loginwindow AdminHostInfo HostName
echo "Disable hibernation? (speeds up entering sleep mode) (y/n)"
sudo pmset -a hibernatemode 0
echo "Remove the sleep image file to save disk space? (y/n)"
echo "(If you're on a <128GB SSD, this helps but can have adverse affects on performance. You've been warned.)"
sudo rm /Private/var/vm/sleepimage
echo "Creating a zero-byte file instead"
sudo touch /Private/var/vm/sleepimage
echo "and make sure it can't be rewritten"
sudo chflags uchg /Private/var/vm/sleepimage
echo "Disable the sudden motion sensor? (it's not useful for SSDs/current MacBooks) (y/n)"
sudo pmset -a sms 0
echo "Disable system-wide resume? (y/n)"
defaults write com.apple.systempreferences NSQuitAlwaysKeepsWindows -bool false
echo "Disable the menubar transparency? (y/n)"
defaults write com.apple.universalaccess reduceTransparency -bool true
echo "Speeding up wake from sleep to 24 hours from an hour"
# http://www.cultofmac.com/221392/quick-hack-speeds-up-retina-macbooks-wake-from-sleep-os-x-tips/
sudo pmset -a standbydelay 86400
echo "Increasing sound quality for Bluetooth headphones/headsets"
defaults write com.apple.BluetoothAudioAgent "Apple Bitpool Min (editable)" -int 40
echo "Enabling full keyboard access for all controls (enable Tab in modal dialogs, menu windows, etc.)"
defaults write NSGlobalDomain AppleKeyboardUIMode -int 3
echo "Disabling press-and-hold for special keys in favor of key repeat"
defaults write NSGlobalDomain ApplePressAndHoldEnabled -bool false
echo "Setting a blazingly fast keyboard repeat rate"
defaults write NSGlobalDomain KeyRepeat -int 0
echo "Disable auto-correct? (y/n)"
defaults write NSGlobalDomain NSAutomaticSpellingCorrectionEnabled -bool false
echo "Setting trackpad & mouse speed to a reasonable number"
defaults write -g com.apple.trackpad.scaling 2
defaults write -g com.apple.mouse.scaling 2.5
echo "Turn off keyboard illumination when computer is not used for 5 minutes"
defaults write com.apple.BezelServices kDimTime -int 300
echo "Disable display from automatically adjusting brightness? (y/n)"
sudo defaults write /Library/Preferences/com.apple.iokit.AmbientLightSensor "Automatic Display Enabled" -bool false
echo "Disable keyboard from automatically adjusting backlight brightness in low light? (y/n)"
sudo defaults write /Library/Preferences/com.apple.iokit.AmbientLightSensor "Automatic Keyboard Enabled" -bool false
|
erikdejonge/devenv
|
osxhacker.sh
|
Shell
|
gpl-2.0
| 4,424 |
#!/bin/bash
#Function: purge the leftover configuration files of removed packages
#Note: run this script as root
date
echo "正在清除......"
sudo dpkg -l|grep "^rc"|awk '{print $2}' |xargs aptitude -y purge
sudo apt-get autoremove
sudo apt-get clean
echo "残留的配置文件已清除!"
|
pengshp/shell_script
|
remove_purge.sh
|
Shell
|
gpl-2.0
| 262 |
#!/bin/bash
EMAIL=$1
PASSWD=$2
URL=http://localhost:1337/soapbox/
SRV_RESPONSE=$(curl -s --compressed $URL"nothing/user/authenticateUser?email="$EMAIL"&password="$PASSWD)
IN=$SRV_RESPONSE
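# Crude parse: split the XML-ish response on '>' and strip non-digits to recover the user id and session token.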
ARRIN=(${IN//>/ })
USERID=${ARRIN[2]}
USERID=${USERID//[^0-9]/}
TOKEN=${ARRIN[4]}
TOKEN=${TOKEN//[^0-9]/}
echo SRV: $SRV_RESPONSE
echo USER: $USERID
echo TOKEN: $TOKEN
echo
wine explorer /desktop="SOAPBOX["$USERID"]"$EMAIL,800x600 soapbox.exe \
US $URL $TOKEN $USERID &> /dev/null
|
nilzao/soapbox-race
|
launcher.sh
|
Shell
|
gpl-2.0
| 490 |
#!/bin/bash
header "Changeset"
# synchronize repo to load the packages
#test_success "repo synchronize" repo synchronize --id="$REPO_ID"
test_success "product synchronize" product synchronize --org="$TEST_ORG" --name="$FEWUPS_PRODUCT"
# testing changesets
CS_NAME="changeset_$RAND"
CS_NAME_2="changeset_2_$RAND"
CS_NAME_3="changeset_3_$RAND"
test_success "changeset create" changeset create --org="$TEST_ORG" --environment="$TEST_ENV" --name="$CS_NAME"
test_success "changeset add product" changeset update --org="$TEST_ORG" --environment="$TEST_ENV" --name="$CS_NAME" --add_product="$FEWUPS_PRODUCT"
check_delayed_jobs_running
test_success "promote changeset with one product" changeset promote --org="$TEST_ORG" --environment="$TEST_ENV" --name="$CS_NAME"
test_success "changeset create" changeset create --org="$TEST_ORG" --environment="$TEST_ENV" --name="$CS_NAME_2"
test_success "changeset add package" changeset update --org="$TEST_ORG" --environment="$TEST_ENV" --name="$CS_NAME_2" --from_product="$FEWUPS_PRODUCT" --add_package="monkey-0.3-0.8.noarch"
test_success "changeset add erratum" changeset update --org="$TEST_ORG" --environment="$TEST_ENV" --name="$CS_NAME_2" --from_product="$FEWUPS_PRODUCT" --add_erratum="RHEA-2010:0001"
test_success "changeset add repo" changeset update --org="$TEST_ORG" --environment="$TEST_ENV" --name="$CS_NAME_2" --from_product="$FEWUPS_PRODUCT" --add_repo="$REPO_NAME"
test_success "changeset promote" changeset promote --org="$TEST_ORG" --environment="$TEST_ENV" --name="$CS_NAME_2"
test_success "changeset list" changeset list --org="$TEST_ORG" --environment="$TEST_ENV"
test_success "changeset info" changeset info --org="$TEST_ORG" --environment="$TEST_ENV" --name="$CS_NAME"
test_success "changeset remove product" changeset update --org="$TEST_ORG" --environment="$TEST_ENV" --name="$CS_NAME" --remove_product="$FEWUPS_PRODUCT"
test_success "changeset remove package" changeset update --org="$TEST_ORG" --environment="$TEST_ENV" --name="$CS_NAME_2" --from_product="$FEWUPS_PRODUCT" --remove_package="monkey-0.3-0.8.noarch"
test_success "changeset remove erratum" changeset update --org="$TEST_ORG" --environment="$TEST_ENV" --name="$CS_NAME_2" --from_product="$FEWUPS_PRODUCT" --remove_erratum="RHEA-2010:0001"
test_success "changeset remove repo" changeset update --org="$TEST_ORG" --environment="$TEST_ENV" --name="$CS_NAME_2" --from_product="$FEWUPS_PRODUCT" --remove_repo="$REPO_NAME"
test_success "changeset update" changeset update --org="$TEST_ORG" --environment="$TEST_ENV" --name="$CS_NAME" --new_name="new_$CS_NAME" --description="updated description"
#promote template with product and package
PROM_TEMPLATE_NAME="promotion_test_tpl_$RAND"
test_success "template create" template create --name="$PROM_TEMPLATE_NAME" --description="template description" --org="$TEST_ORG"
test_success "template update add package" template update --name="$PROM_TEMPLATE_NAME" --org="$TEST_ORG" --add_package="cheetah"
test_success "changeset create" changeset create --org="$TEST_ORG" --environment="$TEST_ENV" --name="$CS_NAME_3" --description "a description of changeset"
test_success "changeset add template" changeset update --org="$TEST_ORG" --environment="$TEST_ENV" --name="$CS_NAME_3" --add_template="$PROM_TEMPLATE_NAME"
test_success "changeset promote" changeset promote --org="$TEST_ORG" --environment="$TEST_ENV" --name="$CS_NAME_3"
test_success "changeset remove template" changeset update --org="$TEST_ORG" --environment="$TEST_ENV" --name="$CS_NAME_3" --remove_template="$PROM_TEMPLATE_NAME"
|
iNecas/katello
|
scripts/system-test/cli_tests/changeset.sh
|
Shell
|
gpl-2.0
| 3,590 |
#!/bin/bash
set -e
cd ../../mdm
make clean
make ubuntu-8.04
cp packages/*.deb ../compiled/ubuntu-8.04/
cd ../extra-modes/xephyr-gdm/
make clean
make ubuntu-8.04
cp packages/*.deb ../../compiled/ubuntu-8.04/
cd ../../dependencies/ubuntu-8.04/
|
freedesktop-unofficial-mirror/xorg__app__mdm
|
compiled/ubuntu-8.04/update.sh
|
Shell
|
gpl-2.0
| 246 |
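# awk bill formatter: expects input columns "recno item qty price" (the product of columns 3 and 4 is the line total).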
BEGIN {
printf "Bill for the 4-March-2001.\n"
printf "By Vivek G Gite.\n"
printf "----------------------------------------\n"
}
{
total = $3 * $4
recno = $1
item = $2
gtotal += total
#printf "%d %s Rs.%f\n", recno, item, total
printf "%2d %-10s Rs.%7.2f\n", recno, item, total
}
END {
printf "----------------------------------------\n"
#printf "Total Rs. %f\n", gtotal
printf " Total Rs.%7.2f\n", gtotal
printf "========================================\n"
}
|
Furzoom/demo-C
|
src/shell/awk_user_defined_variables_5.sh
|
Shell
|
gpl-2.0
| 520 |
#!/bin/bash
ADDRESS=`echo $1 | sed 's/mailto://'`
/opt/google/chrome/google-chrome "https://mail.google.com/mail?view=cm&tf=0&to=$ADDRESS"
|
casep/Molido
|
mailto.sh
|
Shell
|
gpl-2.0
| 140 |
VBoxManage controlvm FTFSTest poweroff
|
oscarlab/betrfs
|
buildbot/masterftfs/poweroff.sh
|
Shell
|
gpl-2.0
| 39 |
#!/bin/bash
source ./test_DATA.sh
source ./test_ffmpeg.sh
source ./test_H264AVCEncoderLibTestStatic.sh
source ./test_wget.sh
VIDEO=coastguard_352x288x30x420x300
PICTURES=33
Y_DIM=288
X_DIM=352
FPS=30
MAX_Q_SCALE=50
GOP_SIZE=32
if [[ ! -e $DATA/$VIDEO.yuv ]]; then
current_dir=$PWD
cd $DATA
wget http://www.hpca.ual.es/~vruiz/videos/$VIDEO.avi
ffmpeg -i $VIDEO.avi $VIDEO.yuv
cd $current_dir
fi
set -x
DATA_DIR=data-${0##*/}
rm -rf $DATA_DIR
mkdir $DATA_DIR
cd $DATA_DIR
Q_SCALE=$MAX_Q_SCALE
while [ $Q_SCALE -ge 1 ]
do
../RD-H264SVC-SQCGS.sh -v $VIDEO -p $PICTURES -y $Y_DIM -x $X_DIM -f $FPS -q $Q_SCALE -g $GOP_SIZE >> RD-H264SVC-SQCGS-$VIDEO.dat 2>> stderr
let Q_SCALE=Q_SCALE-10
done
set +x
|
vicente-gonzalez-ruiz/QSVC
|
trunk/tests/RD-H264SVC-SQCGS-coastguard_352x288x30x420x300.sh
|
Shell
|
gpl-2.0
| 730 |
#!/bin/bash
if [ ! -f /etc/httpd/ssl/server.key ]; then
mkdir -p /etc/httpd/ssl
KEY=/etc/httpd/ssl/server.key
DOMAIN=$(hostname)
export PASSPHRASE=$(cat /dev/urandom | tr -cd 'a-f0-9' | head -c 16)
SUBJ="
C=US
ST=Texas
O=University of Texas
localityName=Austin
commonName=$DOMAIN
organizationalUnitName=TACC
emailAddress=admin@$DOMAIN
"
openssl genrsa -des3 -out /etc/httpd/ssl/server.key -passout env:PASSPHRASE 2048
openssl req -new -batch -subj "$(echo -n "$SUBJ" | tr "\n" "/")" -key $KEY -out /tmp/$DOMAIN.csr -passin env:PASSPHRASE
cp $KEY $KEY.orig
openssl rsa -in $KEY.orig -out $KEY -passin env:PASSPHRASE
openssl x509 -req -days 365 -in /tmp/$DOMAIN.csr -signkey $KEY -out /etc/httpd/ssl/server.crt
fi
HOSTLINE=$(echo $(ip -f inet addr show eth0 | grep 'inet' | awk '{ print $2 }' | cut -d/ -f1) $(hostname) $(hostname -s))
echo $HOSTLINE >> /etc/hosts
# link in the ssl certs at runtime to allow for valid certs to be mounted in a volume
ln -s /etc/httpd/ssl/server.key /etc/pki/tls/private/server.key
ln -s /etc/httpd/ssl/server.crt /etc/pki/tls/certs/server.crt
# if a ca bundle is present, load it and update the ssl.conf file
if [[ -e /etc/httpd/ssl/ca-bundle.crt ]]; then
ln -s /etc/httpd/ssl/ca-bundle.crt /etc/pki/tls/certs/server-ca-chain.crt
  sed -i 's/#SSLCACertificateFile/SSLCACertificateFile/' /etc/httpd/conf.d/ssl.conf
fi
# if a ca cert chain file is present, load it and update the ssl.conf file
if [[ -e /etc/httpd/ssl/ca-chain.crt ]]; then
ln -s /etc/httpd/ssl/ca-chain.crt /etc/pki/tls/certs/server-ca-chain.crt
  sed -i 's/#SSLCertificateChainFile/SSLCertificateChainFile/' /etc/httpd/conf.d/ssl.conf
fi
#Setup URL for Controller
if [[ ! -z $BASE_URL ]]; then
mysql -h db -u${DB_ENV_MYSQL_USER} -p${DB_ENV_MYSQL_PASSWORD} --database=${DB_ENV_MYSQL_DATABASE} \
-e "UPDATE settings SET value = '${BASE_URL}' WHERE setting = 'baseurl';"
fi
# finally, start apache (httpd) in the foreground
/usr/sbin/httpd -DFOREGROUND
|
milhomem/controller
|
backend/docker/docker_entrypoint.sh
|
Shell
|
gpl-2.0
| 2,016 |
#!/usr/bin/sh
CWD=`dirname $0`
source ${CWD}/../conf/galera-tools.conf
for lib in ${CWD}/../lib/*; do source "$lib"; done # "source" takes a single file; loop so every helper is loaded
#write_to_log ${LOGFILE} "the script is working" ${INFO}
exit 0
|
giuse-to/galera-tools
|
bin/run-cluster.sh
|
Shell
|
gpl-3.0
| 162 |
#!/bin/bash -e
. ../env/env
brief="A GTK+ module that bridges ATK to D-Bus at-spi"
intro="
The At-Spi2 Atk package contains a library that bridges ATK to At-Spi2 D-Bus
service.
"
depends="at-spi2-core (>= 2.32.1), atk (>= 2.32.0)"
depends_for_compile=""
version=2.32.0
srcfil=at-spi2-atk-$version.tar.xz
srcdir=at-spi2-atk-$version
srcurl=http://ftp.gnome.org/pub/gnome/sources/at-spi2-atk/2.32/$srcfil
srcmd5=6a4b27bace3b9352721ed462b95f6291
build_src() {
tar -xf $srcfil && cd $srcdir
mkdir -pv build && cd build
meson --prefix=/usr ..
ninja
DESTDIR=$OUTPUTDIR ninja install
cleanup_src
}
postinst_action="glib-compile-schemas /usr/share/glib-2.0/schemas"
postrm_action="glib-compile-schemas /usr/share/glib-2.0/schemas"
build
|
fangxinmiao/projects
|
Architeture/OS/Linux/Distributions/LFS/build-scripts/blfs-9.0-systemd/scripts/a/at-spi2-atk.sh
|
Shell
|
gpl-3.0
| 764 |
#!/bin/bash
# to get rid of MSDOS format do this to this file: sudo sed -i s/\\r//g ./filename
# or, open in nano, control-o and then then alt-M a few times to toggle msdos format off and then save
# updated the list of installed gear so this works if we also use it : https://github.com/DeadSix27/python_cross_compile_script
sudo apt-get install -y build-essential autoconf libtool-bin libtool gettext autopoint gyp gperf autogen bzip2 pandoc
sudo apt-get install -y subversion curl texinfo g++ bison flex cvs yasm automake ed gcc cmake git make pkg-config mercurial unzip pax wget ant
sudo apt-get install -y git-remote-hg libxslt1.1 libxml2 rake docbook-utils docbook-xsl docbook-to-man docbook2x p7zip p7zip-full
sudo apt-get install -y xsltproc docbook-to-man itstool
#sudo apt-get remove -y nasm
sudo apt-get remove -y doxygen
# gendef is installed with mingw
sudo apt-get install -y libmozjs-dev libxmu-dev libgconf2-dev libdbus-1-dev network-manager-dev xserver-xorg-dev # for libproxy
sudo apt-get install -y zlib1g-dev #warning: you may need to install zlib development headers first if you want to build mp4-box on ubuntu
cd ~/Desktop
sudo chmod 777 -R *
#------------------------------------------------------------------------------------------------
# 2017.05.26 x264 has a new dependency on nasm 2.13.1 or greater ...
# before we do anything, build NASM if need be
set -x
if [[ ! -d "nasm-2.13.02" ]]; then
echo "Downloading nasm 2.13.02"
#url="https://github.com/hydra3333/ffmpeg-windows-build-helpers-withOpenCL/blob/master/miscellaneous/nasm-2.13.01.tar.xz?raw=true"
url="https://github.com/hydra3333/ffmpeg-windows-build-helpers-withOpenCL/blob/master/miscellaneous/nasm-2.13.02.tar.xz?raw=true"
rm -f "nasm-2.13.02.tar.xz"
curl -4 -H 'Pragma: no-cache' -H 'Cache-Control: no-cache' -H 'Cache-Control: max-age=0' "$url" --retry 50 -L --output "nasm-2.13.02.tar.xz" --fail # -L means "allow redirection" or some odd :|
tar -xf "nasm-2.13.02.tar.xz" || unzip "nasm-2.13.02.tar.xz"
echo "Configuring nasm 2.13.02"
cd nasm-2.13.02
./autogen.sh || exit 1
./configure --prefix=/usr --exec_prefix=/usr --enable-sections --enable-lto || exit 1
echo "Make nasm 2.13.02"
make || exit 1
echo "Installing nasm 2.13.02"
sudo make install || exit 1 # sudo so it copies into /usr folder tree
cd ..
echo "Done Building and Installing nasm 2.13.02"
fi
set +x
#read -p "After nasm build, press Enter to continue"
#------------------------------------------------------------------------------------------------
cd ~/Desktop
sudo chmod 777 -R *
mkdir -v "ffmpeg-windows-build-helpers-withOpenCL-master"
cd ffmpeg-windows-build-helpers-withOpenCL-master
pwd
#rm -f ./cross_compile_ffmpeg-rdp-withOpenCL-v5-gcc7.1.0.sh
#curl -4 -H 'Pragma: no-cache' -H 'Cache-Control: no-cache' -H 'Cache-Control: max-age=0' https://raw.githubusercontent.com/hydra3333/ffmpeg-windows-build-helpers-withOpenCL/master/cross_compile_ffmpeg-rdp-withOpenCL-v5-gcc7.1.0.sh -O --fail || exit 1
#sudo chmod 777 -R *
rm -f ./cross_compile_ffmpeg.rdp-mod010.sh
curl -4 -H 'Pragma: no-cache' -H 'Cache-Control: no-cache' -H 'Cache-Control: max-age=0' https://raw.githubusercontent.com/hydra3333/ffmpeg-windows-build-helpers-withOpenCL/master/cross_compile_ffmpeg.rdp-mod010.sh -O --fail || exit 1
sudo chmod 777 -R *
sudo ./cross_compile_ffmpeg.rdp-mod010.sh --enable_min_build=y --cflags='-mtune=generic -O3' --gcc-cpu-count=2 --sandbox-ok=y --build-ffmpeg-shared=n --build-ffmpeg-static=y --disable-nonfree=n --build-x264-with-libav=y --build-intel-qsv=y --build-libmxf=n --build-mp4box=y --build-mplayer=n --build-vlc=n --git-get-latest=y --prefer-stable=n --compiler-flavors=multi --enable-gpl=y --build-lsw=y --enable-opencl=y --high-bitdepth=y --build-aom=y # --build-youtube-dl=y --build-flac=y --build-cuetools=y
exit
|
hydra3333/ffmpeg-windows-build-helpers-withOpenCL
|
archive/rm-v10-min.sh
|
Shell
|
gpl-3.0
| 3,875 |
#!/bin/bash
source $HOME/.local/share/AppInstall/components/helpers/colors.sh
echo -e "$green Installing... $close_color" &&
sudo apt install -y ubuntu-restricted-extras vlc unace unrar zip unzip p7zip-full p7zip-rar sharutils rar &&
echo -e "$green Codecs successfully installed $close_color"
|
Wilfison/AppsInstall
|
components/extras.sh
|
Shell
|
gpl-3.0
| 296 |
#!/bin/bash
#
# Schedules a list of wordseg jobs on bash. Have a
# "./wordseg-bash.sh --help" to display help.
#
# author: Mathieu Bernard <[email protected]>
# import functions defined in functions.sh
. $(dirname $0)/functions.sh
# sbatch is the job launcher for SLURM
backend="bash"
# run a job with bash TODO should be optimized to run jobs in parallel
# when possible ($job_slots)
function schedule_job_backend
{
echo -n "running $job_name ..."
bash $job_script
echo " done"
}
main "$@" || exit 1
exit 0
|
bootphon/wordseg
|
tools/wordseg-bash.sh
|
Shell
|
gpl-3.0
| 538 |
#Queue a podcast file and open it using https://github.com/mpv-player/mpv
terminator --new-tab -x "mpv `cat ~/.newsbeuter/queue` --fs"
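# clear the queue so already-played enclosures are not queued again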
cat /dev/null > ~/.newsbeuter/queue
|
diskotechjam/newsbeuter-scripts
|
mpv.pod.sh
|
Shell
|
gpl-3.0
| 169 |
#!/bin/sh
test_perms /etc/shadow 000
|
Taywee/dod-stig-scripts
|
linux/V-38504/test.sh
|
Shell
|
gpl-3.0
| 39 |
#!/usr/bin/env bash
set -euo pipefail
cd "$(dirname "$(readlink -f "$0")")/.."
if [ ! -f "build/compile_commands.json" ]; then
echo "Expected a compile_commands.json in build folder.
Check that it is generated (-DCMAKE_EXPORT_COMPILE_COMMANDS=ON)" >&2
exit 1
fi
NAMES=(run-clang-tidy-8.py run-clang-tidy.py)
for fn in "${NAMES}"; do
if which "${fn}" &> /dev/null; then
CLANG_TIDY_CMD="${fn}"
break
fi
done
if [ "${CLANG_TIDY_CMD:-}" == "" ] || ! which "${fn}" &> /dev/null; then
echo "clang-tidy not found. Tried: ${NAMES[@]}" >&2
exit 1
fi
FILTER="$(pwd)/(extras|libs|tests|\
external/(libendian|liblobby|libsiedler2|\
libutil/(include|src|tests)|\
mygettext|s-c/src|s25edit|s25update))"
${CLANG_TIDY_CMD} -p build \
-j $(nproc) \
-header-filter "${FILTER}" \
-quiet \
"$@" \
"${FILTER}"
|
Flamefire/s25client
|
tools/runClangTidy.sh
|
Shell
|
gpl-3.0
| 857 |
#!/usr/bin/env bash
# This script installs all dependencies needed to build DICOMautomaton starting with a minimal Ubuntu system.
set -eux
mkdir -pv /scratch_base
cd /scratch_base
export DEBIAN_FRONTEND="noninteractive"
apt-get update --yes
apt-get install --yes --no-install-recommends \
bash \
git \
cmake \
make \
g++ \
vim \
ncurses-term \
gdb \
rsync \
wget \
ca-certificates
apt-get install --yes --no-install-recommends \
` # Ygor dependencies ` \
libboost-dev \
libgsl-dev \
libeigen3-dev \
` # DICOMautomaton dependencies ` \
libeigen3-dev \
libboost-dev \
libboost-filesystem-dev \
libboost-iostreams-dev \
libboost-program-options-dev \
libboost-thread-dev \
libz-dev \
libsfml-dev \
libsdl2-dev \
libglew-dev \
libjansson-dev \
libpqxx-dev \
postgresql-client \
libcgal-dev \
libnlopt-dev \
libnlopt-cxx-dev \
libasio-dev \
fonts-freefont-ttf \
fonts-cmu \
freeglut3 \
freeglut3-dev \
libxi-dev \
libxmu-dev \
patchelf \
` # Additional dependencies for headless OpenGL rendering with SFML ` \
x-window-system \
mesa-utils \
x11-apps \
libfreetype6 \
libsdl2-dev \
libice-dev \
libsm-dev \
libopengl0 \
` # Other optional dependencies ` \
libnotify \
dunst \
bash-completion \
gnuplot \
zenity
# Install Wt from source to get around outdated and buggy Debian package.
#
# Note: Add additional dependencies if functionality is needed -- this is a basic install.
#
# Note: Could also install build-deps for the distribution packages, but the dependencies are not
# guaranteed to be stable (e.g., major version bumps).
mkdir -pv /wt
cd /wt
git clone https://github.com/emweb/wt.git .
mkdir -p build && cd build
cmake -DCMAKE_INSTALL_PREFIX=/usr ../
JOBS=$(nproc)
JOBS=$(( $JOBS < 8 ? $JOBS : 8 ))
make -j "$JOBS" VERBOSE=1
make install
make clean
#mkdir -pv /scratch_base
#cd /scratch_base
#apt-get install --yes \
# -f ./libwt-dev_10.0_all.deb ./libwthttp-dev_10.0_all.deb
# Install Ygor.
#
# Option 1: install a binary package.
#mkdir -pv /scratch
#cd /scratch
#apt-get install --yes -f ./Ygor*deb
#
# Option 2: clone the latest upstream commit.
mkdir -pv /ygor
cd /ygor
git clone https://github.com/hdclark/Ygor .
./compile_and_install.sh -b build
git reset --hard
git clean -fxd :/
# Install Explicator.
#
# Option 1: install a binary package.
#mkdir -pv /scratch
#cd /scratch
#apt-get install --yes -f ./Explicator*deb
#
# Option 2: clone the latest upstream commit.
mkdir -pv /explicator
cd /explicator
git clone https://github.com/hdclark/explicator .
./compile_and_install.sh -b build
git reset --hard
git clean -fxd :/
# Install YgorClustering.
mkdir -pv /ygorcluster
cd /ygorcluster
git clone https://github.com/hdclark/YgorClustering .
./compile_and_install.sh -b build
git reset --hard
git clean -fxd :/
|
hdclark/DICOMautomaton
|
docker/build_bases/ubuntu/implementation_ubuntu.sh
|
Shell
|
gpl-3.0
| 2,951 |
#!/bin/bash
# Syncs QR Code images in directory $1 to the remote webserver (via rsync). It
# generates a HTML file with information about the image, and keeps track of
# prevous/next image links.
[ -d "$1" ] || return 1
DIR="$1"
INDEX="$DIR/index.html"
IMAGES=(`ls $DIR/ | grep "[0-9].jpg$" | sort -n`)
FIRST=`basename ${IMAGES[0]} | sed 's/\.jpg$/\.html/'`
LAST=`basename ${IMAGES[$((${#IMAGES[@]} - 1))]} | sed 's/\.jpg$/\.html/'`
for ((i = 0; i < ${#IMAGES[@]}; i++)); do
img="${DIR}/${IMAGES[$i]}"
n="`basename -s '.jpg' ${img}`"
info="$(cd $DIR && file -b ${IMAGES[$i]} | sed 's%,%\n%g')"
h="${n}.html"
t="${n}_t.jpg"
echo $i ${IMAGES[$i]} >&2
! [ -e "${DIR}/${t}" ] && convert $img -resize 10% "${DIR}/${t}"
[ $i -gt 0 ] \
&& p="`basename ${IMAGES[$(($i - 1))]} | sed 's/\.jpg$/\.html/'`" || p=""
[ $i -lt $((${#IMAGES[@]} - 1)) ] \
&& q="`basename ${IMAGES[$(($i + 1))]} | sed 's/\.jpg$/\.html/'`" || q=""
cat <<-EOF > $DIR/$h
<!DOCTYPE html>
<!--
__ ______ __ __ __ __ __
/\\ \\ /\\ ___\\ /\\ \\ /\\ \\_\\ \\ /\\ \\_\\ \\
_\\_\\ \\ \\ \\ __\\ _\\_\\ \\ \\ \\ __ \\ \\ \\ __ \\
/\\_____\\ \\ \\_\\ /\\_____\\ \\ \\_\\ \\_\\ \\ \\_\\ \\_\\
\\/_____/ \\/_/ \\/_____/ \\/_/\\/_/ \\/_/\\/_/
-->
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />
<title>Site J.2 - Scan $n</title>
<link rel="stylesheet" type="text/css" href="/css/scan.css" />
</head>
<body>
<a class="image" href="/s/${n}.jpg">
<h1>Scan ${n}</h1>
<img src="/s/${t}" alt="Scanned Image $n"
title="Updated: `date -I`" />
<h3>Click to Enlarge.</h3>
<h2>
<a href="/s/${FIRST}"><<</a>
<a href="/s/${p}"><= Previous</a>
<br />
<a href="/s/">::</a>
<a href="/s/${q}">Next =></a>
<a href="/s/${LAST}">>></a>
</h2>
</a>
<div class="info"><pre><code>$info</code></pre></div>
</body>
</html>
EOF
done
cat <<-EOF > $INDEX
<!DOCTYPE html>
<!--
__ ______ __ __ __ __ __
/\\ \\ /\\ ___\\ /\\ \\ /\\ \\_\\ \\ /\\ \\_\\ \\
_\\_\\ \\ \\ \\ __\\ _\\_\\ \\ \\ \\ __ \\ \\ \\ __ \\
/\\_____\\ \\ \\_\\ /\\_____\\ \\ \\_\\ \\_\\ \\ \\_\\ \\_\\
\\/_____/ \\/_/ \\/_____/ \\/_/\\/_/ \\/_/\\/_/
-->
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />
<title>Site J.2 - Scan Index</title>
<link rel="stylesheet" type="text/css" href="/css/scan.css" />
</head>
<body>
EOF
for ((i = 0; i < ${#IMAGES[@]}; i++)); do
img="${DIR}/${IMAGES[$i]}"
n="`basename -s '.jpg' ${img}`"
h="${n}.html"
t="${n}_t.jpg"
cat <<-EOF >> $INDEX
<!-- Thumbnail for image ${n}. -->
<div class="thumb">
<a class="image" href="/s/${n}.html">
<img src="/s/${t}" alt="Scanned Image $n"
title="Updated: `date -I`" />
</a>
</div>
EOF
done
cat <<-EOF >> $INDEX
</body>
</html>
EOF
rsync -av --delete --checksum ${DIR}/ [email protected]:/var/www/scans/
|
jfjhh/QRScans
|
qr-sync.sh
|
Shell
|
gpl-3.0
| 3,018 |
# ffactor expands factored files according to an environment.
# Copyright (C) 2015 ia0
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
cst_B=true
get_B()
{
echo build
}
cst_C=true
get_C()
{
local c
for c in clang gcc
do
which $c 2>/dev/null >/dev/null && echo $c
done
}
cst_E=false
get_E()
{
echo y
echo n
}
cst_O=true
get_O()
{
seq 0 3
echo s
}
cst_V=false
get_V()
{
echo n
echo y
}
|
ia0/ffactor
|
script/common.sh
|
Shell
|
gpl-3.0
| 1,004 |
#!/bin/bash
# montecarlo-font
if [ -n "$(pacman -Qs montecarlo-font)" ]; then
yay -s aur/montecarlo-font
else
continue
fi
# FiraCode
for type in Bold Light Medium Regular Retina; do
wget -O ~/.local/share/fonts/FiraCode-${type}.ttf \
"https://github.com/tonsky/FiraCode/blob/master/distr/ttf/FiraCode-${type}.ttf?raw=true";
done
fc-cache -f > /dev/null
mkfontscale ~/.local/share/fonts/misc
mkfontdir ~/.local/share/fonts/misc
|
lemones/dotfiles
|
tools/fonts/install-fonts.sh
|
Shell
|
gpl-3.0
| 446 |
#!/bin/bash
# simulate a bad connection on the loopback interface for testing purposes.
#
# also check out `libfiu` (packages available in Ubuntu) which can simulate
# errors by injecting them into function calls themselves rather than
# interfering with the connection.
#
# examples:
# fiu-run -x -c 'enable name=posix/io/net/connect' python3 my_program.py
# fiu-run -x -c 'enable_random name=posix/io/net/send,probability=0.5' python3 my_program.py
#
# the `name=` is the name of the function to sabotage by causing one of the
# possible errors. See the posix/io/net section of:
# https://github.com/albertito/libfiu/blob/master/preload/posix/modules/posix.io.mod
DEV=lo
#### tc terminology explanation ####
# tc = traffic control
# qdisc = queueing discipline
# root = rules are nested in a tree so root inserts them at the top
# netem = net emulation
shutdown() {
echo "removing simulation"
tc qdisc del dev $DEV root netem
ip link set $DEV up
exit
}
# shutdown gracefully on Ctrl+C
trap shutdown SIGINT
# ensure root
if [ "$(id -u)" != "0" ]; then
echo "This script must be run as root"
exit 1
fi
# add net emulation to the device
tc qdisc add dev $DEV root netem
echo "simulating a bad connection"
# base delay with some variation.
# delay TIME JITTER CORRELATION
# loss a b => probability of a unless previous packet dropped, then probability of b
tc qdisc change dev $DEV root netem \
delay 50ms 50ms distribution normal \
loss 20% 25% \
duplicate 5% \
corrupt 5% \
reorder 10%
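# inspect the impairments currently applied with: tc qdisc show dev $DEV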
# unplug the interface for the given length of time
unplug_interface() {
ip link set $DEV down
echo "interface down"
sleep $1
ip link set $DEV up
echo "interface up"
}
# repeatedly pull the plug on the interface
while true; do
unplug_interface 10
sleep 20
done
|
mbway/Bayesian-Optimisation
|
tools/bad_connection.sh
|
Shell
|
gpl-3.0
| 1,910 |
#!/bin/bash
# SSAFD verification test V (van der Veen) regression test
PISM_PATH=$1
MPIEXEC=$2
MPIEXEC_COMMAND="$MPIEXEC -n 1"
PISM_SOURCE_DIR=$3
EXT=""
if [ $# -ge 4 ] && [ "$4" == "-python" ]
then
PYTHONEXEC=$5
MPIEXEC_COMMAND="$MPIEXEC_COMMAND $PYTHONEXEC"
PYTHONPATH=${PISM_PATH}/site-packages:${PYTHONPATH}
PISM_PATH=${PISM_SOURCE_DIR}/examples/python/ssa_tests
EXT=".py"
fi
output=`mktemp pism-testv-XXXX` || exit 1
set -e
set -x
OPTS="-verbose 1 -o_size none -My 3 -ssa_method fem"
# do stuff
$MPIEXEC_COMMAND $PISM_PATH/ssa_test_cfbc${EXT} -Mx 201 $OPTS > ${output}
$MPIEXEC_COMMAND $PISM_PATH/ssa_test_cfbc${EXT} -Mx 401 $OPTS >> ${output}
set +e
# Check results:
diff ${output} - <<END-OF-OUTPUT
NUMERICAL ERRORS in velocity relative to exact solution:
velocity : maxvector prcntavvec maxu maxv avu avv
1.1792 0.11288 1.1792 0.0000 1.0998 0.0000
NUM ERRORS DONE
NUMERICAL ERRORS in velocity relative to exact solution:
velocity : maxvector prcntavvec maxu maxv avu avv
0.4323 0.03780 0.4323 0.0000 0.3687 0.0000
NUM ERRORS DONE
END-OF-OUTPUT
if [ $? != 0 ];
then
cat ${output}
exit 1
fi
exit 0
|
pism/pism
|
test/regression/ssa/ssa_test_cfbc_fem.sh
|
Shell
|
gpl-3.0
| 1,247 |
#!/bin/sh
#
# tsend_card.sh
# 20131030 -tm
#
# simulate Parallax card proximity transmission
#
#
# 1. open two windows in a current session, one will be Reader
# the other Writer
#
# 2. mknod tdev p # create a pipe device (1 time only)
#
# 3. invoke the tdev_read.sh in the Reader Window
#
# 4. from the Writer window do: echo "testreader" > ./tdev
#
# 5. the echoed contents should be displayed in the Reader window
#
#
#
# usage: tsend_card.sh cardid [ no. of times to xmit ]
#
#
# sends the pattern in the first parm one time to the tdev device unless it's
# supplied a second parm indicating the number of repetitions
#
DV="./tdev" # ASSUME Reader and Writer process in same directory
if [ ! -p $DV ]; then # no pipe file
echo -e "\n\tNo pipe device: $DV found!\n"
exit 1
fi
if [ $# -eq 0 ]; then # no parms passed
echo -e "\n\n"
echo -e "\tUsage: tsend_card.sh CARD_ID [ no. of times to send ]"
echo -e "\n\n"
exit 0
fi
CARDID="${1}"
if [ "$2_" = "_" ]; then
NTS=1
else
NTS=${2}
fi
echo " sending card: $CARDID for: $NTS passes"
while [ $NTS -gt 0 ]
do
#
# \n = 0x0A =NewLIne
# \r = 0x0D =Return
# --simulate wrapping the CARDID the way Parallax RFID transmits it
#
echo -e "\n${CARDID}\r" > $DV
NTS=`expr $NTS - 1`
#
done
|
bubbapizza/doorController
|
drivers/RPi/tsend_card.sh
|
Shell
|
gpl-3.0
| 1,369 |
#!/bin/bash
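# Compile the updater classes and package them as an executable jar with CreateDB as the entry point.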
javac -d ../bin/update -cp ../lib:../bin CreateDB.java
jar cfe ../bin/Updates.jar CreateDB ../bin/update/*.class
|
prasadtalasila/ISPView
|
src/install-update.bash
|
Shell
|
gpl-3.0
| 127 |
#!/usr/bin/env bash
# This file is part of The RetroPie Project
#
# The RetroPie Project is the legal property of its developers, whose names are
# too numerous to list here. Please refer to the COPYRIGHT.md file distributed with this source.
#
# See the LICENSE.md file at the top-level directory of this distribution and
# at https://raw.githubusercontent.com/RetroPie/RetroPie-Setup/master/LICENSE.md
#
rp_module_id="wolf4sdl"
rp_module_desc="Wolf4SDL - port of Wolfenstein 3D / Spear of Destiny engine"
rp_module_menus="4+"
rp_module_flags="dispmanx"
function depends_wolf4sdl() {
getDepends libsdl1.2-dev libsdl-mixer1.2-dev
}
function sources_wolf4sdl() {
gitPullOrClone "$md_build" https://github.com/mozzwald/wolf4sdl.git
}
function get_opts_wolf4sdl() {
echo 'wolf4sdl-3dr-v14 -DCARMACIZED' # 3d realms / apogee v1.4 full
echo 'wolf4sdl-gt-v14 -DCARMACIZED -DGOODTIMES' # gt / id / activision v1.4 full
echo 'wolf4sdl-spear -DCARMACIZED -DGOODTIMES -DSPEAR' # spear of destiny
echo 'wolf4sdl-sw-v14 -DCARMACIZED -DUPLOAD' # shareware v1.4
}
function get_bins_wolf4sdl() {
local opt
while read -r opt; do
echo ${opt%% *}
done < <(get_opts_wolf4sdl)
}
function build_wolf4sdl() {
mkdir "bin"
local opt
while read -r opt; do
local bin="${opt%% *}"
local defs="${opt#* }"
make clean
CFLAGS+=" -DVERSIONALREADYCHOSEN $defs" make DATADIR="$romdir/ports/wolf3d/"
mv wolf3d "bin/$bin"
md_ret_require+=("bin/$bin")
done < <(get_opts_wolf4sdl)
}
function install_wolf4sdl() {
mkdir -p "$md_inst/share/man"
cp -Rv "$md_build/man6" "$md_inst/share/man/"
md_ret_files=('bin')
}
function configure_wolf4sdl() {
mkRomDir "ports"
mkRomDir "ports/wolf3d"
moveConfigDir "$home/.wolf4sdl" "$configdir/wolf3d"
# Get shareware game data
wget -q -O wolf3d14.zip http://maniacsvault.net/ecwolf/files/shareware/wolf3d14.zip
unzip -j -o -LL wolf3d14.zip -d "$romdir/ports/wolf3d"
chown -R $user:$user "$romdir/ports/wolf3d"
rm -f wolf3d14.zip
local bins
while read -r bin; do
bins+=("$bin")
done < <(get_bins_wolf4sdl)
setDispmanx "$md_id" 1
configure_dispmanx_on_wolf4sdl
local bin
# called outside of above loop to avoid problems with addPort and stdin
for bin in "${bins[@]}"; do
addPort "$bin" "wolf4sdl" "Wolfenstein 3D" "$md_inst/bin/$bin"
done
}
function configure_dispmanx_off_wolf4sdl() {
local bin
while read -r bin; do
setDispmanx "$bin" 0
done < <(get_bins_wolf4sdl)
}
function configure_dispmanx_on_wolf4sdl() {
local bin
while read -r bin; do
setDispmanx "$bin" 1
done < <(get_bins_wolf4sdl)
}
|
goofwear/RetroPie-Setup
|
scriptmodules/ports/wolf4sdl.sh
|
Shell
|
gpl-3.0
| 2,755 |
#!/bin/bash
if [ -d "C:/cygwin64/home/streamcreed" ];then
xtreamcodes="ok"
wwwdir="C:/cygwin64/home/streamcreed/wwwdir"
crondir="cronstreamcreed"
user="nouser"
elif [ -d "/home/streamcreed" ];then
xtreamcodes="ok"
wwwdir="/home/streamcreed/wwwdir"
crondir="cronstreamcreed"
user="streamcreed"
fi
if [ -d "/home/xtreamcodes/iptv_xtream_codes" ];then
xtreamcodes="ok"
wwwdir="/home/xtreamcodes/iptv_xtream_codes/wwwdir"
crondir="cronstreamcreed"
user="xtreamcodes"
fi
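# $1 and $2 select the config/<group>/<channel> EPG definition to rebuild in the xmltv tree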
cd $wwwdir/xmltv/$1/$2
rm -rf *
wget --no-check-certificate "https://github.com/andykimpe/euroiptv-epg-fr/raw/master/index.php" -O "index.php"
wget --no-check-certificate https://github.com/andykimpe/euroiptv-epg-fr/raw/master/config/$1/$2/$2.sh -O $wwwdir/xmltv/$1/$2/$2.sh
chmod +x $wwwdir/xmltv/$1/$2/$2.sh
wget `wget -qO- https://raw.githubusercontent.com/andykimpe/euroiptv-epg-fr/master/webgrabplusplusinstallurl`
tar -xvf *.tar.gz
rm -f *.tar.gz
mv .wg++/* ./
rm -f .wg++/
chmod +x install.sh
./install.sh
rm -rf siteini.pack
wget `wget -qO- https://raw.githubusercontent.com/andykimpe/euroiptv-epg-fr/master/webgrabplusplussiteiniurl`
unzip *.zip
rm -f *.zip
rm -f WebGrab++.config.xml
wget https://github.com/andykimpe/euroiptv-epg-fr/raw/master/config/$1/$2/$2.xml -O $wwwdir/xmltv/$1/$2/WebGrab++.config.xml
if [ -f "/usr/bin/mono" ]; then
mono $wwwdir/xmltv/$1/$2/bin/WebGrab+Plus.exe $wwwdir/xmltv/$1/$2
else
$wwwdir/xmltv/$1/$2/bin/WebGrab+Plus.exe $wwwdir/xmltv/$1/$2
fi
cp $wwwdir/xmltv/$1/$2/$2.xml $wwwdir/xmltv/$1/$2/$2.xml.save
gzip $wwwdir/xmltv/$1/$2/$2.xml
mv $wwwdir/xmltv/$1/$2/$2.xml.save $wwwdir/xmltv/$1/$2/$2.xml
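# drop the first six lines and the trailing line of the grabbed XML (header/wrapper markup)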
sed '1,6d' $wwwdir/xmltv/$1/$2/$2.xml > $wwwdir/xmltv/$1/$2/$2.xml.tmp && mv $wwwdir/xmltv/$1/$2/$2.xml.tmp $wwwdir/xmltv/$1/$2/$2.xml
head -n -1 $wwwdir/xmltv/$1/$2/$2.xml > $wwwdir/xmltv/$1/$2/$2.xml.tmp && mv $wwwdir/xmltv/$1/$2/$2.xml.tmp $wwwdir/xmltv/$1/$2/$2.xml
chmod -R 777 $wwwdir/xmltv/$1/$2/*
chown $user:$user $wwwdir/xmltv/$1/$2/*
#if [ -f /home/streamcreed/crons/epg.php ]
#then
#/home/streamcreed/php/bin/php /home/streamcreed/crons/epg.php
#elif [ -f /home/xtreamcodes/iptv_xtream_codes/crons/epg.php ]
#then
#/home/xtreamcodes/iptv_xtream_codes/php/bin/php /home/xtreamcodes/iptv_xtream_codes/crons/epg.php
#elif [ -f /home/streamcreed/crons/epg_checking.php ]
#then
#/home/streamcreed/php/bin/php /home/streamcreed/crons/epg_checking.php
#elif [ -f /home/xtreamcodes/iptv_xtream_codes/crons/epg_checking.php ]
#then
#/home/xtreamcodes/iptv_xtream_codes/php/bin/php /home/xtreamcodes/iptv_xtream_codes/crons/epg_checking.php
#fi
#bash <(wget -qO- https://github.com/andykimpe/euroiptv-epg-fr/raw/master/gen.sh)
|
andykimpe/euroiptv-epg-fr
|
updatechannel.sh
|
Shell
|
gpl-3.0
| 3,304 |
#KALMAN FILTER CONFIGURATION
DOMAINCONF=CORDOBA_2KBIS #Define a domain
LETKFNAMELIST=control #Define a letkf namelist template
MEMBER=60 #Number of ensemble members.
MAX_DOM=1 #Maximum number of WRF domains.
HOMEDIR=${HOME}
DATADIR=/home/jruiz/share
ANALYSIS=0 #Identify this job as an analysis job.
FORECAST=1 #This is not a forecast job.
INTERPANA=0 #This is used in forecast jobs (but we need to define it here too)
RUN_ONLY_MEAN=0 #This is used in forecast jobs (but we need to define it here too)
USE_ANALYSIS_BC=1 #1 - use analysis as BC , 0 - use forecasts as bc (e.g. global gfs)
# if 1 then bc data will be taken from exp_met_em folder in the corresponding INPUT folder.
# if 0 then bc data will be taken from for_met_em folder in the corresponding INPUT folder.
# default is 1
USE_ANALYSIS_IC=1 #1 - use global analysis as IC, 0 use LETKF analysis as IC
#if 0 then profide a LETKF-analysis source (ANALYSIS_SOURC)
#default is 0
ANALYSIS_SOURCE=""
NVERTEXP=27 #Number of vertical levels in initial and boundary conditions input grib data.
NVERTDB=38 #Number of vertical levels in initial and boundary conditions perturbation input grib data.
#AUXILIARY VARIABLE FOR ENSEMBLE SIZE
MM=$MEMBER #Variable for iteration limits.
MEANMEMBER=`expr $MEMBER + 1 ` #This is the member ID corresponding to the ensemble mean.
WINDOW=300 #Forecast initialization frequency (seconds)
GUESFT=16200 #36000 #Forecast length (seconds)
WINDOW_START=0 #Window start (seconds from forecast initialization)
WINDOW_END=$GUESFT #Window end (seconds from forecast initialization)
WINDOW_FREC=300 #Output frequency within window (seconds) should be the same as the maximum observation frequency.
ASSIMILATION_FREC=$WINDOW #
NSLOTS=`expr $WINDOW_END \/ $WINDOW_FREC - $WINDOW_START \/ $WINDOW_FREC + 1 ` #Number of time slots.
NBSLOT=`expr $ASSIMILATION_FREC \/ $WINDOW_FREC - $WINDOW_START \/ $WINDOW_FREC + 1 ` #Time slot corresponding to the analysis.
if [ $NBSLOT -lt 10 ] ; then
NBSLOT=0$NBSLOT
fi
SIGMA_OBS="2.0d3" #NOT USED IN THE FORECAST
SIGMA_OBSV="0.2d0" #NOT USED IN THE FORECAST
SIGMA_OBSZ="2.0d3" #NOT USED IN THE FORECAST
SIGMA_OBST="3.0d0" #NOT USED IN THE FORECAST
GROSS_ERROR="15.0d0" #NOT USED IN THE FORECAST
COV_INFL_MUL="1.1d0" #NOT USED IN THE FORECAST
SP_INFL_ADD="0.d0" #NOT USED IN THE FORECAST
RELAX_ALPHA_SPREAD="0.8d0" #NOT USED IN THE FORECAST
RELAX_ALPHA="0.0d0" #NOT USED IN THE FORECAST
USE_ADAPTIVE_INFLATION=0 #NOT USED IN THE FORECAST
#DOMAIN AND BOUNDARY DATA
BOUNDARY_DATA_FREQ=21600 #Boundary data frequency. (seconds)
BOUNDARY_DATA_PERTURBATION_FREQ=21600 #Frequency of data used to perturb boundary conditions (seconds)
#POSTPROC CONFIGURATION
OUTLEVS="0.1,0.5,1.0,2.0,3.0,4.0,5.0,6.0,7.0,8.0,9.0,10.0,11.0,12.0,13.0,14.0,15.0,16.0,17.0,18.0," #Level list
OUTVARS="'umet,vmet,pressure,W,QVAPOR,QCLOUD,QRAIN,QICE,QSNOW,QGRAUP,RAINNC,tk,u10m,v10m,slp,mcape,dbz,max_dbz'" #Variable list.
ARWPOST_FREC=300 # Post processing frequency (seconds)
INPUT_ROOT_NAME='wrfout'
INTERP_METHOD=1
ENABLE_UPP=0
### LETKF setting
OBS="" # NOT USED IN THE FORECAST
RADAROBS=""
#RADAROBS="/OSSE_20140122_DBZ2.5_VR1.0_SO2KM/" # NOT USED IN THE FORECAST
EXP=FORECAST_${DOMAINCONF}_${CONFIGURATION} # name of experiment
### initial date setting
IDATE=20140122120000
EDATE=20140122120000
#### DATA
OBSDIR=${DATADIR}/DATA/OBS/$OBS/ # NOT USED IN THE FORECAST
NRADARS=1 # NOT USED IN THE FORECAST
RADAROBSDIR=${DATADIR}/DATA/OBS/$RADAROBS/ # NOT USED IN THE FORECAST
TMPDIR=${HOMEDIR}/TMP/$EXP/ # Temporary work directory
OUTPUTDIR=${HOMEDIR}/datosmunin2/LETKF_WRF/exps/$EXP/ # Where results will be stored.
GRIBDIR=${DATADIR}/DATA/GRIB/FNL/HIRES/ARGENTINA/ # Folder where bdy and initial grib files are located.
GRIBTABLE="Vtable.GFS" # Bdy and init data source Vtable name.
PERTGRIBDIR=${DATADIR}/DATA/GRIB/CFSR/HIRES/ARGENTINA/00001/ # Folder where data for perturbing bdy are located.
PERTGRIBTABLE="Vtable.CFSR2_web" # Bdy perturbation source vtable name.
GEOG=/share/GEOG/ # Folder where WPS GEOG dataset is located.
#INITIAL AND BOUNDARY RANDOM PERTURBATIONS
AMP_FACTOR="0.05" #Perturbation scale factor.
RANDOM_AMP_FACTOR="0.0" #Random perturbation scale factor.
PERTURB_BOUNDARY=1 #Whether boundary conditions are going to be perturbed.
PERTURB_ATMOSPHERE=".true." #Whether atmospheric conditions will be perturbed (boundary and first cycle)
PERTURB_SST=".true." #Whether SST will be perturbed.
PERTURB_SOIL=".true." #Whether SOIL conditions will be perturbed (soil moisture and soil temperature)
PERTURB_T=".true." #Whether ATM temperature will be perturbed.
PERTURB_RH=".true." #Whether ATM RH will be perturbed.
PERTURB_WIND=".true." #Whether ATM winds will be perturbed.
PERTURB_T_AMP="0.5d0" #T random perturbation amplitude
PERTURB_RH_AMP="5.0d0" #RH random perturbation amplitude
PERTURB_WIND_AMP="0.5d0" #WIND random perturbation amplitude
PERTURB_T_SCLH="40000d0" #T random perturbation horizontal scale
PERTURB_RH_SCLH="40000d0" #RH random perturbation horizontal scale
PERTURB_WIND_SCLH="40000d0" #WIND random perturbation horizontal scale
PERTURB_T_SCLV="5000d0" #T random perturbation vertical scale
PERTURB_RH_SCLV="5000d0" #RH random perturbation vertical scale
PERTURB_WIND_SCLV="5000d0" #WIND random perturbation vertical scale
#Random dates for boundary perturbations.
INIPERTDATE=20060101000000 #Initial date in grib database (used for perturbing initial and boundary conditions)
ENDPERTDATE=20091231180000 #Final date in grib database (used for perturbing initial and boundary conditions)
PERTREFDATE=20140122000000 #At this date the initial perturbation dates will be taken. This date is used to keep consistency among the perturbations
#used in forecast and analysis experiments. This date must be previous or equal to IDATE.
INPUT_PERT_DATES_FROM_FILE=1 #0 - generate a new set of random dates, 1 - read random dates from a file.
INI_PERT_DATE_FILE=${DATADIR}/DATA/INITIAL_RANDOM_DATES/initial_perturbation_dates_60m #List of initial random dates.
#### EXECUTABLES
RUNTIMELIBS=""
WRF=${HOMEDIR}/datosmunin/LETKF_WRF/wrf/ # WRF folder (for computing nodes)
LETKF=$WRF/letkf/letkf.exe # LETKF module (for computing nodes)
UPDATEBC=$WRF/model/WRFDA/da_update_bc.exe # Update bc tool (WRFVAR) (for computing nodes)
WRFMODEL=$WRF/model/WRFV3.6.1/ # WRF model that run in computing nodes.
WRFMODELPPS=$WRF/model/WRFV3.6.1/ # WRF model that runs in pps server (usually the same as the one for the computing nodes)
WPS=$WRF/model/WPSV3.6.1/ # WRF model pre processing utilities (for pps server)
ARWPOST=$WRF/model/ARWpost/ # WRF model post processing utilities that run in computing nodes.
SPAWN=$WRF/spawn/
MPIBIN=mpiexec
#### SCRIPTS
UTIL=$WRF/run/util.sh # Script containing bash functions that will be used during execution.
#### NAMELIST
NAMELISTWRF=$WRF/run/configuration/domain_conf/$DOMAINCONF/namelist.input #Namelist for WRF model.
NAMELISTWPS=$WRF/run/configuration/domain_conf/$DOMAINCONF/namelist.wps #Namelist for WRF pre processing tools
NAMELISTLETKF=$WRF/run/configuration/letkf_conf/letkf.namelist.$LETKFNAMELIST #Namelist for LETKF
NAMELISTARWPOST=$WRF/run/configuration/domain_conf/$DOMAINCONF/namelist.ARWpost #Namelist for post-processing tools.
NAMELISTOBSOPE=$WRF/run/configuration/letkf_conf/obsope.namelist.$OBSOPENAMELIST #Namelist for observation operator.
NAMELISTPERTMETEM=$WRF/run/configuration/letkf_conf/pertmetem.namelist.control #Namelist for boundary conditions perturbation.
|
gustfrontar/LETKF_WRF
|
wrf/run/configuration/forecast_conf/ensemble_forecast_spinup_grib_Hydra.sh
|
Shell
|
gpl-3.0
| 8,704 |
#!/bin/sh
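# Compile each gettext catalogue in po/ to a binary .mo and merge translations into the .desktop files.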
cd $(dirname $0)
for i in `ls po/*.po`;do
echo "Compiling `echo $i|sed "s|po/||"`"
msgfmt $i -o `echo $i |sed "s/.po//"`.mo
done
intltool-merge po/ -d -u src/persistence-wizard.desktop.in src/persistence-wizard.desktop
intltool-merge po/ -d -u src/persistence-wizard-kde.desktop.in src/persistence-wizard-kde.desktop
|
Salix-OS/persistence-wizard
|
compile.sh
|
Shell
|
gpl-3.0
| 329 |
#!/bin/sh
for page in *.wiki; do
./insert_page.sh $page
done
|
ETegro/OpenSAN
|
wiki/insert_all.sh
|
Shell
|
gpl-3.0
| 63 |
#!/bin/bash
sleep 20
echo "Starting celery ..."
# cd /earkweb && celery multi start ingestqueue -A earkweb.celery --concurrency=4 -Ofair --pidfile=/data/celery_worker.pid --logfile=/data/celery_default_queue.log
# cd /earkweb && celery -A earkweb.celery worker --pool threads -Ofair --pidfile=/data/celery_worker.pid --logfile=/data/celery_default_queue.log &
cd /earkweb && /wait-for-it.sh -t 600 rabbitmq:5672 && celery -A earkweb.celery worker --pool prefork -Ofair
|
eark-project/earkweb
|
run_celery.sh
|
Shell
|
gpl-3.0
| 471 |
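# truncate the shared timer file so no TV timers remain scheduled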
echo "" > ../timer/tv
|
eastmaels/smarthome
|
web/scripts/cleartimers.sh
|
Shell
|
gpl-3.0
| 21 |
#!/bin/bash
########################################################################
# Author : Victor Mendez ( [email protected] )
########################################################################
# alternative to python script (remote run con Cernvm seems to fail on python call)
# contextualization script to be run on the VM, after the init.d process
echo "Starting /root/contextualize-script.bash" > /var/log/contextualize-script.log
if [ $# -ne 15 ]
then
echo "Parameter ERROR: bash contextualize-script.bash <uniqueId> <certfile> <keyfile> <runjobagent> <runvmmonitoragent> <runvmupdateragent> <runlogagent> <cvmfscontextscript> <diraccontextscript> <cvmfshttpproxy> <sitename> <clouddriver> <cpuTime> <vmStopPolicy> <submitPool>" >> /var/log/contextualize-script.log
exit 1
fi
uniqueId=$1
vmCertPath=$2
vmKeyPath=$3
vmRunJobAgent=$4
vmRunVmMonitorAgent=$5
vmRunVmUpdaterAgent=$6
vmRunLogAgent=$7
cvmfsContextPath=$8
diracContextPath=$9
cvmfs_http_proxy=${10}
siteName=${11}
cloudDriver=${12}
cpuTime=${13}
vmStopPolicy=${14}
submitPool=${15}
if [ ${vmRunJobAgent} != 'nouse' ]
then
localVmRunJobAgent=/root/run.job-agent
else
localVmRunJobAgent=nouse
fi
localVmRunVmMonitorAgent=/root/run.vm-monitor-agent
if [ ${vmRunVmUpdaterAgent} != 'nouse' ]
then
localVmRunVmUpdaterAgent=/root/run.vm-updater-agent
else
localVmRunVmUpdaterAgent=nouse
fi
localVmRunLogAgent=/root/run.log.agent
if [ ${cvmfsContextPath} != 'nouse' ]
then
localCvmfsContextPath=/root/cvmfs-context.sh
else
localCvmfsContextPath=nouse
fi
localDiracContextPath=/root/dirac-context.sh
# parameters log:
echo "1 $uniqueId" >> /var/log/contextualize-script.log 2>&1
echo "2 $vmCertPath" >> /var/log/contextualize-script.log 2>&1
echo "3 $vmKeyPath" >> /var/log/contextualize-script.log 2>&1
echo "4 $vmRunJobAgent" >> /var/log/contextualize-script.log 2>&1
echo "5 $vmRunVmMonitorAgent" >> /var/log/contextualize-script.log 2>&1
echo "6 $vmRunVmUpdaterAgent" >> /var/log/contextualize-script.log 2>&1
echo "7 $vmRunLogAgent" >> /var/log/contextualize-script.log 2>&1
echo "8 $cvmfsContextPath" >> /var/log/contextualize-script.log 2>&1
echo "9 $diracContextPath" >> /var/log/contextualize-script.log 2>&1
echo "10 $cvmfs_http_proxy" >> /var/log/contextualize-script.log 2>&1
echo "11 $siteName" >> /var/log/contextualize-script.log 2>&1
echo "12 $cloudDriver" >> /var/log/contextualize-script.log 2>&1
echo "13 $cpuTime" >> /var/log/contextualize-script.log 2>&1
echo "14 $vmStopPolicy" >> /var/log/contextualize-script.log 2>&1
echo "15 $submitPool" >> /var/log/contextualize-script.log 2>&1
#recording the uniqueId of the VM to be used by VM agents:
echo ${uniqueId} > /etc/VMID
# The VM cert and key have previously been copied to the VM, so those paths are local; the rest of the files come from some repo...
# 1) download the necessary files:
if [ ${vmRunJobAgent} != 'nouse' ]
then
wget --no-check-certificate -O ${localVmRunJobAgent} ${vmRunJobAgent} >> /var/log/contextualize-script.log 2>&1
fi
if [ ${vmRunVmMonitorAgent} != 'nouse' ]
then
wget --no-check-certificate -O ${localVmRunVmMonitorAgent} ${vmRunVmMonitorAgent} >> /var/log/contextualize-script.log 2>&1
fi
if [ ${vmRunVmUpdaterAgent} != 'nouse' ]
then
wget --no-check-certificate -O ${localVmRunVmUpdaterAgent} ${vmRunVmUpdaterAgent} >> /var/log/contextualize-script.log 2>&1
fi
if [ ${cvmfsContextPath} != 'nouse' ]
then
wget --no-check-certificate -O ${localCvmfsContextPath} ${cvmfsContextPath} >> /var/log/contextualize-script.log 2>&1
fi
if [ ${vmRunLogAgent} != 'nouse' ]
then
wget --no-check-certificate -O ${localVmRunLogAgent} ${vmRunLogAgent} >> /var/log/contextualize-script.log 2>&1
fi
if [ ${diracContextPath} != 'nouse' ]
then
wget --no-check-certificate -O ${localDiracContextPath} ${diracContextPath} >> /var/log/contextualize-script.log 2>&1
fi
#2) Run the cvmfs contextualization script:
if [ ${cvmfsContextPath} != 'nouse' ]
then
chmod u+x ${localCvmfsContextPath} >> /var/log/contextualize-script.log 2>&1
bash ${localCvmfsContextPath} "${cvmfs_http_proxy}" >> /var/log/contextualize-script.log 2>&1
fi
#3) Run the dirac contextualization script:
if [ ${diracContextPath} != 'nouse' ]
then
echo "Ready for running dirac contextualize script: ${localDiracContextPath}" >> /var/log/contextualize-script.log 2>&1
echo " Parameters: ${siteName} ${vmStopPolicy} ${vmCertPath} ${vmKeyPath} ${localVmRunJobAgent} ${localVmRunVmMonitorAgent} ${localVmRunVmUpdaterAgent} ${localVmRunLogAgent} ${submitPool} ${cpuTime} ${cloudDriver}" >> /var/log/contextualize-script.log 2>&1
chmod u+x ${localDiracContextPath} >> /var/log/contextualize-script.log 2>&1
bash ${localDiracContextPath} ${siteName} ${vmStopPolicy} ${vmCertPath} ${vmKeyPath} ${localVmRunJobAgent} ${localVmRunVmMonitorAgent} ${localVmRunVmUpdaterAgent} ${localVmRunLogAgent} ${submitPool} ${cpuTime} ${cloudDriver}
else
echo "Context configured with 'nouse' of dirac contextualize script" >> /var/log/contextualize-script.log 2>&1
fi
echo "END /root/contextualize-script.bash" >> /var/log/contextualize-script.log
exit 0
|
myco/VMDIRAC
|
WorkloadManagementSystem/private/bootstrap/contextualize-script.bash
|
Shell
|
gpl-3.0
| 5,133 |
#!/bin/bash
tr -d 'x' < draws$1 | awk '
BEGIN {
sum=0
}
{
sum += $1
#print $1
}
END {
print sum
}
'
|
richi235/pid_lotto
|
trivia/sum.sh
|
Shell
|
gpl-3.0
| 110 |
#!/bin/bash
zip="/tmp/rom_sf2.zip"
dir="/tmp/sf2"
rom="${dir}/ssf2t.zip"
mkdir -p "$dir"
if [ ! -f "/usr/games/mame" ]
then
sudo apt-get install mame -y
fi
wget -c "https://dl.dropbox.com/s/d1n98wb8p5gpj1w/sf2.zip?dl=0" -O "$zip"
unzip -o "$zip" -d "$dir"
rm "$zip"
mame "$rom" -rompath "$dir"
|
metalx1000/MyBin
|
games/mame/street_fighter_2/sf2_deb.sh
|
Shell
|
gpl-3.0
| 301 |
#! /bin/bash
# kill the most memory intensive user process
# Check if all parameters are present
# If no, exit
if [ $# -ne 1 ]
then
echo
echo "usage :"
echo "$0 license_to_kill"
echo "This shellscript will kill the most memory intensive user process if license_to_kill!=0."
echo "Otherwise, it just locates it without killing it."
echo "All missions will be logged in: ${HOME}/log/memwatch.log"
echo
exit 0
fi
LOGFILE=${HOME}/log/memwatch.log
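# pick the current user's most memory-hungry process (columns: %mem, pid, user, command)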
INFO=$(ps -eo %mem,pid,user -o comm= | grep $USER | sort -k1 -n -r | head -1)
USAGE=$(echo $INFO | awk '{ print $1 } ')
PID=$(echo $INFO | awk '{print $2 }')
PNAME=$(echo $INFO | awk '{print $4 }')
echo $(date)
echo "Hostile process detected:"
echo "memory used: $USAGE%"
echo "PID: $PID"
echo "Name: $PNAME"
echo "=========================" >>${HOME}/log/memwatch.log
echo $(date) >>${HOME}/log/memwatch.log
echo "Hostile process detected:" >>${HOME}/log/memwatch.log
echo "memory used: $USAGE%" >>${HOME}/log/memwatch.log
echo "PID: $PID" >>${HOME}/log/memwatch.log
echo "Name: $PNAME" >>${HOME}/log/memwatch.log
if [ $1 -ne 0 ]
then
echo "killing process..." >>${HOME}/log/memwatch.log
kill_1=0
kill_2=0
#clean kill
killall $PNAME
if [ $? -eq 0 ]
then
kill_1=1
fi
#messy kill
kill -9 $PID
if [ $? -eq 0 ]
then
kill_2=1
fi
if [ $kill_1 -eq 1 ] || [ $kill_2 -eq 1 ]
then
echo "Target successfully eliminated." >>${HOME}/log/memwatch.log
echo "kill_1=$kill_1" >>${HOME}/log/memwatch.log
echo "kill_2=$kill_2" >>${HOME}/log/memwatch.log
zenity --info --text "$PNAME was killed because of excessive RAM usage.\n kill_1=$kill_1 \n kill_2=$kill_2" ||
kdialog --msgbox "$PNAME was killed because of excessive RAM usage.\n kill_1=$kill_1 \n kill_2=$kill_2" ||
xmessage -buttons okay -default okay "$PNAME was killed because of excessive RAM usage.\n kill_1=$kill_1 \n kill_2=$kill_2" &
else
echo "Mission failed." >>${HOME}/log/memwatch.log
fi
# #messy kill
# kill -9 $PID
# if [ $? -eq 0 ]
# then
# echo "Target successfully eliminated." >>${HOME}/log/memwatch.log
# zenity --info --text "$PNAME was killed because of excessive RAM usage." ||
# kdialog --msgbox "$PNAME was killed because of excessive RAM usage." ||
# xmessage -buttons okay -default okay "$PNAME was killed because of excessive RAM usage."
# else
# echo "kill -9 failed." >>${HOME}/log/memwatch.log
# fi
fi
|
KIAaze/bin_and_dotfiles_public
|
bins/public_bin/killmaxmemprocess.sh
|
Shell
|
gpl-3.0
| 2,421 |
#!/bin/bash
CFENCODINGFILE=~/.CFUserTextEncoding
PROGNAME="$0"
case $# in
0)
echo -n 'Init '
cat ${CFENCODINGFILE}
echo
echo -n 'Env '
echo ${__CF_USER_TEXT_ENCODING}
exit 0
;;
*)
case $1 in
en) ENCODING='0:0' ;;
ja) ENCODING='1:14' ;;
*) echo "$PROGNAME: Unknown encoding" >&2; exit 1;;
esac
;;
esac
case $# in
1)
echo -n ${ENCODING} >${CFENCODINGFILE}
exit 0
;;
*)
shift
XUID=`id -u | awk '{printf "0x%X", $0}'`
__CF_USER_TEXT_ENCODING="${XUID}:${ENCODING}" exec "$@"
;;
esac
|
moriai/macutils
|
cfencoding.sh
|
Shell
|
gpl-3.0
| 501 |
#! /bin/sh
if [ -z "$1" ]; then
for l in "$(lsblk -o NAME,HOTPLUG,MODEL,SIZE -P -n -s)"; do
name="$(echo "$l" | awk '/NAME/ {printf $1}')"
name="${name%\"}"
echo "HHH $name HHH"
done
fi
|
AntoninRousset/i3statusExtra
|
mountDevice.sh
|
Shell
|
gpl-3.0
| 191 |
JDK_VER=jdk1.8.0_40.jdk
rm -rf /Library/Java/JavaVirtualMachines/$JDK_VER
rm -rf /Library/PreferencePanes/JavaControlPanel.prefPane
rm -rf /Library/Internet\ Plug-Ins/JavaAppletPlugin.plugin
|
junjiemars/kit
|
darwin/uninstall-jdk.sh
|
Shell
|
gpl-3.0
| 192 |
#!/bin/bash
num_nodes="${1:-0}"
scriptPATH=`realpath $0`
rootPATH=`dirname $scriptPATH`
echo "rootpath is : $rootPATH"
for i in $(seq 1 1 $num_nodes)
do
foldername="$rootPATH/NODE_$i/query/"
rm -rf $foldername
mkdir -p $foldername
queryfile="$rootPATH/query_$i.file"
mv $queryfile $foldername/
cp $rootPATH/sourcerer-cc.properties "$rootPATH/NODE_"$i/
cp $rootPATH/res/log4j2.xml "$rootPATH/NODE"_$i/
done
|
Mondego/SourcererCC
|
clone-detector/preparequery.sh
|
Shell
|
gpl-3.0
| 420 |
#!/bin/bash
# NOTE: run this script as root; a bare "sudo -i" here would only open an interactive shell and stall the script
dnf install MariaDB-server MariaDB-client
chkconfig mysql on
service mysql start
#'/usr/bin/mysqladmin' -u root password 'new-password'
#'/usr/bin/mysqladmin' -u root -h localhost.localdomain password 'new-password'
#'/usr/bin/mysql_secure_installation'
cp /etc/my.cnf{,.original}
cp /etc/my.cnf.d/server.cnf{,.original}
cp /etc/my.cnf.d/client.cnf{,.original}
sed -i '10iskip-name-resolve' /etc/my.cnf.d/server.cnf
sed -i '11imax_connections=8192' /etc/my.cnf.d/server.cnf
sed -i '12idefault-storage-engine=INNODB' /etc/my.cnf.d/server.cnf
sed -i '13iwait_timeout=30' /etc/my.cnf.d/server.cnf
sed -i '14iinteractive_timeout=30' /etc/my.cnf.d/server.cnf
sed -i '15icharacter-set-server=utf8' /etc/my.cnf.d/server.cnf
sed -i '16icollation_server=utf8_general_ci' /etc/my.cnf.d/server.cnf
sed -i "17iinit_connect='SET NAMES utf8'" /etc/my.cnf.d/server.cnf
sed -i '18iexplicit_defaults_for_timestamp=true' /etc/my.cnf.d/server.cnf
sed -i '5icharacter_set_client=utf8' /etc/my.cnf.d/client.cnf
iptables -A INPUT -m state --state NEW -m tcp -p tcp --dport 3306 -j ACCEPT
service iptables save
|
oscm/shell
|
database/mariadb/server.sh
|
Shell
|
gpl-3.0
| 1,116 |
#!/usr/bin/env bash
set -e
DIR=$APACHE_ROOT
INI=$DIR/openformat.ini
INSTALL=$INI"_INSTALL"
cp $DIR/openformat.wsdl_INSTALL $DIR/openformat.wsdl
cp $DIR/robots.txt_INSTALL $DIR/robots.txt
############# temporary fix #################
# set aaa access to true -
# TODO get the ip adresses for whereever this is supposed to run and
# add them to the AAA_IP_RIGHTS_BLOCK variable in Dockerfile
# sed -i "s/\$this->aaa->has_right('openformat', 500)/true/" $DIR/server.php
# TODO remove it in production !!!!!!
if [ ! -f $INI ] ; then
cp $INSTALL $INI
# handle curl_timeout separately - it is also used elsewhere
sed -i "s/;curl_timeout = 5/curl_timeout = 30/" $INI
while IFS='=' read -r name value ; do
echo "$name $value"
sed -i "s/@${name}@/$(echo $value | sed -e 's/\//\\\//g; s/&/\\\&/g')/g" $INI
done < <(env)
if [ -n "`grep '@[A-Z_]*@' $INI`" ]
then
printf "\nMissed some settings:\n"
echo "------------------------------"
grep '@[A-Z_]*@' $INI
echo "------------------------------"
printf "\nAdd the missing setting(s) and try again\n\n"
exit 1
fi
else
echo "###### ####### # # ####### # ####### ####### ###### ####### ######"
echo "# # # # # # # # # # # # # # #"
echo "# # # # # # # # # # # # # # #"
echo "# # ##### # # ##### # ##### # # ###### ##### ######"
echo "# # # # # # # # # # # # # #"
echo "# # # # # # # # # # # # # #"
echo "###### ####### # ####### ####### ####### ####### # ####### # #"
echo ""
echo "# # ####### ###### #######"
echo "## ## # # # # #"
echo "# # # # # # # # #"
echo "# # # # # # # #####"
echo "# # # # # # #"
echo "# # # # # # #"
echo "# # ####### ###### #######"
fi
#if [ -z "$URL_PATH" ]
#then
# printf "\nMissed PATH configuration :\n"
# echo "------------------------------"
#
# echo "------------------------------"
# printf "\nAdd the missing setting(s) and try again\n\n"
# exit 1
#fi
#cat - > $APACHE_ROOT/index.html <<EOF
#<html>
#<head>
#<title>OpenSearch $URL_PATH</title>
#<meta http-equiv="refresh" content="0; url=${URL_PATH}" />
#</head>
#<body>
#<p><a href="${URL_PATH}/">openformat</a></p>
#</body>
#</html>
#EOF
#ln -sf /dev/stdout /var/log/apache2/access.log
#ln -sf /dev/stderr /var/log/apache2/error.log
#exec apache2ctl -DFOREGROUND
|
DBCDK/OpenFormat-webservice
|
docker/www/config.sh
|
Shell
|
agpl-3.0
| 2,665 |
#!/bin/bash -e
. /etc/os-release
print_usage() {
echo "build_rpm.sh --rebuild-dep --target centos7 --reloc-pkg build/release/scylla-package.tar.gz"
echo " --dist create a public distribution rpm"
echo " --target target distribution in mock cfg name"
echo " --xtrace print command traces before executing command"
echo " --reloc-pkg specify relocatable package path"
echo " --builddir specify rpmbuild directory"
exit 1
}
DIST=0
RELOC_PKG=build/release/scylla-package.tar.gz
BUILDDIR=build/redhat
while [ $# -gt 0 ]; do
case "$1" in
"--dist")
DIST=1
shift 1
;;
"--target") # This is obsolete, but I keep this in order not to break people's scripts.
shift 2
;;
"--xtrace")
set -o xtrace
shift 1
;;
"--reloc-pkg")
RELOC_PKG=$2
shift 2
;;
"--builddir")
BUILDDIR="$2"
shift 2
;;
*)
print_usage
;;
esac
done
if [ ! -e $RELOC_PKG ]; then
echo "$RELOC_PKG does not exist."
echo "Run ./reloc/build_reloc.sh first."
exit 1
fi
RELOC_PKG=$(readlink -f $RELOC_PKG)
RPMBUILD=$(readlink -f $BUILDDIR)
mkdir -p $BUILDDIR/
tar -C $BUILDDIR/ -xpf $RELOC_PKG scylla/SCYLLA-RELOCATABLE-FILE scylla/SCYLLA-RELEASE-FILE scylla/SCYLLA-VERSION-FILE scylla/SCYLLA-PRODUCT-FILE scylla/dist/redhat
cd $BUILDDIR/scylla
RELOC_PKG_BASENAME=$(basename $RELOC_PKG)
SCYLLA_VERSION=$(cat SCYLLA-VERSION-FILE)
SCYLLA_RELEASE=$(cat SCYLLA-RELEASE-FILE)
PRODUCT=$(cat SCYLLA-PRODUCT-FILE)
mkdir -p $RPMBUILD/{BUILD,BUILDROOT,RPMS,SOURCES,SPECS,SRPMS}
xz_thread_param=
if xz --help | grep -q thread; then
# use as many threads as there are CPUs
xz_thread_param="T$(nproc)"
fi
rpm_payload_opts=(--define "_binary_payload w2${xz_thread_param}.xzdio")
ln -fv $RELOC_PKG $RPMBUILD/SOURCES/
parameters=(
-D"version $SCYLLA_VERSION"
-D"release $SCYLLA_RELEASE"
-D"housekeeping $DIST"
-D"product $PRODUCT"
-D"${PRODUCT/-/_} 1"
-D"reloc_pkg $RELOC_PKG_BASENAME"
)
ln -fv dist/redhat/scylla.spec $RPMBUILD/SPECS/
rpmbuild "${parameters[@]}" -ba "${rpm_payload_opts[@]}" --define "_topdir $RPMBUILD" $RPMBUILD/SPECS/scylla.spec
|
avikivity/scylla
|
reloc/build_rpm.sh
|
Shell
|
agpl-3.0
| 2,315 |
#!/bin/bash
echo "Optimize Images"
find ./static/ -type f -name "*.png" -print -exec convert {} -strip {} \;
find ./static/ -type f -name "*.gif" -print -exec convert {} -strip {} \;
find ./static/ -type f -name "*.jpg" -print -exec convert {} -sampling-factor 4:2:0 -strip -quality 85 -interlace JPEG -colorspace sRGB {} \;
|
brickfiestastem/brickfiesta
|
scripts/optimizeimages.sh
|
Shell
|
agpl-3.0
| 324 |
#!/bin/bash -e
# Set up vm
vagrant up kor.base
# Extract base box
vagrant package --base "kor.base"
# Re-import the box
vagrant box remove coneda/debian7.kor
vagrant box add package.box --name coneda/debian7.kor
# Save the box
mv package.box deploy/build.boxes/coneda_debian7.kor.box
# Tear down vm
vagrant destroy kor.base -f
|
anne-cecile/kor_annotorious
|
deploy/build-base.sh
|
Shell
|
agpl-3.0
| 332 |
#!/bin/bash
set -e
if [ "$1" = 'zammad' ]; then
echo -e "\n Starting services... \n"
# starting services
service postgresql start
service elasticsearch start
service postfix start
service memcached start
service redis-server start
service nginx start
# wait for the PostgreSQL process to come up
until su - postgres -c 'psql -c "select version()"' &> /dev/null; do
echo "Waiting for PostgreSQL to be ready..."
sleep 2
done
cd "${ZAMMAD_DIR}"
echo -e "\n Starting Zammad... \n"
su -c "bundle exec script/websocket-server.rb -b 0.0.0.0 start &" zammad
su -c "bundle exec script/scheduler.rb start &" zammad
# show url
echo -e "\nZammad will be ready in some seconds! Visit http://localhost in your browser!"
# start railsserver
if [ "${RAILS_SERVER}" == "puma" ]; then
su -c "bundle exec puma -b tcp://0.0.0.0:3000 -e ${RAILS_ENV}" zammad
elif [ "${RAILS_SERVER}" == "unicorn" ]; then
su -c "bundle exec unicorn -p 3000 -c config/unicorn.rb -E ${RAILS_ENV}" zammad
fi
fi
|
zammad/zammad-docker
|
docker-entrypoint.sh
|
Shell
|
agpl-3.0
| 1,029 |
source ./ci/env.sh
set -eu
export CARGO_HOME='/usr/local/cargo'
RUSTUP_VERSION=1.24.3
RUST_VERSION=$1
RUST_ARCH=$2
RUSTUP_URL=https://static.rust-lang.org/rustup/archive/$RUSTUP_VERSION/$RUST_ARCH/rustup-init
wget $RUSTUP_URL
chmod +x rustup-init;
./rustup-init -y --no-modify-path --profile minimal --default-toolchain $RUST_VERSION;
rm rustup-init;
chmod -R a+w $RUSTUP_HOME $CARGO_HOME
rustup --version
cargo --version
rustc --version
rustup component add clippy-preview
rustup component add rustfmt
cargo install --force cargo-c
cargo install --version ^1.0 gitlab_clippy
# cargo install --force cargo-deny
# cargo install --force cargo-outdated
if [ "$RUST_VERSION" = "nightly" ]; then
# Coverage tools
cargo install grcov
rustup component add llvm-tools-preview
# Documentation tools
cargo install --force rustdoc-stripper
fi
|
GNOME/librsvg
|
ci/install-rust.sh
|
Shell
|
lgpl-2.1
| 850 |
#!/bin/bash
#
echo `date` Running online remove command on hostname `hostname` >> /hostfs/upgradeInstallationRemoval.log
echo Arguments: >> /hostfs/upgradeInstallationRemoval.log
ARGS=$@
for ARG in $ARGS; do
echo $ARG >> /hostfs/upgradeInstallationRemoval.log
done
exit 0
|
kenzaburo/OpenSaf-FrameWork
|
samples/smfsv/online_remove.sh
|
Shell
|
lgpl-2.1
| 279 |
/opt/pandora-sebt/scripts/pnd_make -p testsparrow.pnd -d . -x ./PXML.xml -i ./sparrow-icon.png
|
theZiz/sparrow3d
|
build/pandora/make_package_pandora.sh
|
Shell
|
lgpl-2.1
| 95 |
#!/bin/sh -ex
$CEPH_TOOL mds tell 0 injectargs '--mds-bal-interval 0'
$CEPH_TOOL mds tell 1 injectargs '--mds-bal-interval 0'
$CEPH_TOOL mds tell 2 injectargs '--mds-bal-interval 0'
$CEPH_TOOL mds tell 3 injectargs '--mds-bal-interval 0'
#$CEPH_TOOL mds tell 4 injectargs '--mds-bal-interval 0'
mkdir -p ./a/a
mkdir -p ./b/b
mkdir -p ./c/c
mkdir -p ./d/d
mount_dir=`df . | grep -o " /.*" | grep -o "/.*"`
cur_dir=`pwd`
ceph_dir=${cur_dir##$mount_dir}
$CEPH_TOOL mds tell 0 export_dir $ceph_dir/b 1
$CEPH_TOOL mds tell 0 export_dir $ceph_dir/c 2
$CEPH_TOOL mds tell 0 export_dir $ceph_dir/d 3
sleep 5
|
ajnelson/ceph
|
qa/workunits/rename/prepare.sh
|
Shell
|
lgpl-2.1
| 604 |
#!/bin/bash
#set -e
echo " >> Migrating the database to the recent version"
su www-data -s /bin/bash -c "cd /var/www/html/ && make migrate"
echo " >> Reconfiguring the application to be running in TEST mode, erasing all data"
su www-data -s /bin/bash -c "cd /var/www/html/ && make _configure_ci_environment _erase_all_data"
echo " >> Starting application in the background"
supervisord -c /etc/supervisor/conf.d/supervisord.conf &
sleep 5
cat /var/www/html/.env
echo " >> Checking application status"
curl -vv http://localhost
echo " >> Running API tests"
exec newman run /var/www/html/postman-tests.json --timeout 100000 --insecure -e ./postman.ci-environment.json
|
Wolnosciowiec/file-repository
|
tests_entrypoint.sh
|
Shell
|
lgpl-3.0
| 673 |
#!/bin/bash
if [[ $1 == "on" ]]; then
pkill redshift
redshift -l 55.4:7.8 -t 5700:3000 -g 0.8 -m randr &
elif [[ $1 == "dark" ]]; then
pkill redshift
redshift -l 55.4:7.8 -t 5700:3000 -g 0.8 -b 0.7:0.7 -m randr &
elif [[ $1 == "off" ]]; then
redshift -x
pkill redshift
else
echo "Allowed inputs are 'on' and 'off'."
exit 1
fi
|
artemisclyde/dotfiles
|
scripts/rs.sh
|
Shell
|
lgpl-3.0
| 341 |
#!/usr/bin/env bash
#
# A script to build and run the Swarm development environment using Docker.
set -e
ROOT="$(cd "$(dirname "$0")/../.." && pwd)"
# DEFAULT_NAME is the default name for the Docker image and container
DEFAULT_NAME="swarm-dev"
usage() {
cat >&2 <<USAGE
usage: $0 [options]
Build and run the Swarm development environment.
Depends on Docker being installed locally.
OPTIONS:
-n, --name NAME Docker image and container name [default: ${DEFAULT_NAME}]
-d, --docker-args ARGS Custom args to pass to 'docker run' (e.g. '-p 8000:8000' to expose a port)
-h, --help Show this message
USAGE
}
main() {
local name="${DEFAULT_NAME}"
local docker_args=""
parse_args "$@"
build_image
run_image
}
parse_args() {
while true; do
case "$1" in
-h | --help)
usage
exit 0
;;
-n | --name)
if [[ -z "$2" ]]; then
echo "ERROR: --name flag requires an argument" >&2
exit 1
fi
name="$2"
shift 2
;;
-d | --docker-args)
if [[ -z "$2" ]]; then
echo "ERROR: --docker-args flag requires an argument" >&2
exit 1
fi
docker_args="$2"
shift 2
;;
*)
break
;;
esac
done
if [[ $# -ne 0 ]]; then
usage
echo "ERROR: invalid arguments" >&2
exit 1
fi
}
build_image() {
docker build --tag "${name}" "${ROOT}/swarm/dev"
}
run_image() {
exec docker run \
--privileged \
--interactive \
--tty \
--rm \
--hostname "${name}" \
--name "${name}" \
--volume "${ROOT}:/go/src/github.com/trust-tech/go-trustmachine" \
--volume "/var/run/docker.sock:/var/run/docker.sock" \
${docker_args} \
"${name}" \
/bin/bash
}
main "$@"
|
trust-tech/go-trustmachine
|
swarm/dev/run.sh
|
Shell
|
lgpl-3.0
| 1,813 |
#!/bin/bash
set -e
BRANCH="master"
ARCH=`/bin/uname -m`
if [ `whoami` != 'root' ]; then
echo "ERROR: must be run as root"
exit 1
fi
if [ $ARCH != 'x86_64' ]; then
echo "ERROR: must install on a 64-bit OS"
exit 1
fi
if [ -f /etc/lsb-release ]; then
. /etc/lsb-release
OS=$DISTRIB_ID
VER=$DISTRIB_RELEASE
elif [ -f /etc/debian_version ]; then
OS=Debian # XXX or Ubuntu??
VER=$(cat /etc/debian_version)
elif [ -f /etc/redhat-release ]; then
# TODO refine Red Hat / CentOS detection; this minimal parse keeps the script runnable
OS=$(awk '{print $1}' /etc/redhat-release)
VER=$(grep -oE '[0-9]+(\.[0-9]+)*' /etc/redhat-release | head -1)
else
OS=$(uname -s)
VER=$(uname -r)
fi
case $OS in
"Ubuntu" )
sudo apt-get update && sudo apt-get upgrade -y && sudo apt-get install -y htop build-essential automake autoconf git
git clone https://github.com/csirtgadgets/massive-octo-spice.git -b $BRANCH
cd massive-octo-spice
bash autogen.sh
sudo bash ./hacking/platforms/easybutton.sh
sudo chown `whoami`:`whoami` ~/.cif.yml;;
"Debian" )
echo 'Debian not yet supported...';;
"Darwin" )
echo 'Darwin not yet supported...' ;;
"Redhat" )
echo 'Redhat not yet supported...' ;;
"CentOS" )
echo 'CentOS not yet supported...' ;;
esac
|
aeppert/massive-octo-spice
|
hacking/platforms/easybutton_curl.sh
|
Shell
|
lgpl-3.0
| 1,217 |
udevadm control --reload-rules
udevadm trigger
upsdrvctl start
|
ddemuro/raspberrypi
|
rootDir/etc/nut/restart.sh
|
Shell
|
unlicense
| 62 |
#!/bin/sh
. /build/cleanup.sh
rm -rf /build/*-rethink.sh
|
dajobe/docker
|
rethinkdb/cleanup-rethinkdb.sh
|
Shell
|
unlicense
| 59 |
#!/bin/bash
# grab files from previous node project to start a new one
SRC=../node-dictionaries
mkdir -p doc config test/app app
cp $SRC/package.json $SRC/.npmignore $SRC/.gitignore $SRC/.jshint* \
$SRC/README.md $SRC/Gruntfile.js \
./
cp $SRC/config/pre-commit config/
cp $SRC/test/setup.js $SRC/test/.jshint* \
test/
|
bcowgill/simple-design
|
getpj.sh
|
Shell
|
unlicense
| 337 |
#!/bin/sh
INDIR="$1"
OUTDIR="$2"
mkdir -p $OUTDIR
for typ in "train" "test" "val"
do
MINCNT=10000000
for file in $INDIR/*_$typ.txt
do
count=$(wc -l < $file)
if [ $MINCNT -gt $count ]
then
MINCNT=$count
fi
done
for infile in $INDIR/*_$typ.txt
do
echo "[$MINCNT lines] $infile --> $OUTDIR/$(basename $infile)"
head -n $MINCNT $infile > $OUTDIR/$(basename $infile)
done
done
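# Illustrative usage (editor's sketch): balance the train/test/val splits by
# truncating every *_train/_test/_val file to the smallest line count in its
# group, e.g.
#   ./trimData.sh raw_splits/ balanced_splits/
# (directory names are hypothetical)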
|
abilng/Mtech-proj-scripts
|
Others/trimData.sh
|
Shell
|
apache-2.0
| 425 |
perl $HOME/bin/ensembl-vep/vep \
--species human \
--database \
--host mysql-ensembl-mirror \
--port 4240 \
--user ensro \
--dir /nfs/production/panda/ensembl/variation/data/VEP/ \
--input_file /hps/nobackup2/production/ensembl/anja/vep_data/input/regulatory_variant_id.txt \
--output_file /hps/nobackup2/production/ensembl/anja/vep_data/output/regulatory_variant_mf_plugin.txt \
--force_overwrite \
--regulatory \
--dir_plugins $HOME/bin/VEP_plugins \
--plugin MotifFeature
|
at7/work
|
vep/regulation/vep_regulation_94_mf_plugin.sh
|
Shell
|
apache-2.0
| 479 |
#!/bin/bash
instlog="/home/vagrant/install.log"
## Docker install
tee /etc/yum.repos.d/docker.repo <<-'EOF'
[dockerrepo]
name=Docker Repository
baseurl=https://yum.dockerproject.org/repo/main/centos/7/
enabled=1
gpgcheck=1
gpgkey=https://yum.dockerproject.org/gpg
EOF
echo "`date` yum repo for Docker was added" >> $instlog
yum install -y docker-engine
sudo systemctl enable docker.service
sudo groupadd docker
sudo usermod -aG docker vagrant
sudo systemctl start docker
echo "`date` Docker installed " >> $instlog
echo "Docker installation complete!"
## Set ScaleIO system name
pdomain="pdomain1"
stpool="pool1"
sysname="scaleio"
endpoint="https://192.168.33.12/api"
password="Password123!"
thin="ThinProvisioned"
pmdm=`scli --login --mdm_ip 192.168.33.13 --username admin --password "Password123!" --approve_certificate`
if [ $? = 0 ]; then
mdmip="192.168.33.13"
else
mdmip="192.168.33.12"
fi
echo "========================================"
echo " Primary MDM is $mdmip "
echo "========================================"
scli --login --mdm_ip $mdmip --username admin --password $password --approve_certificate
output=`scli --mdm_ip $mdmip --query_all |grep Name`
set -- $output
snamenow=$2
if [ "$snamenow" = "$sysname" ]; then
echo "The system name is $snamenow ."
else
echo "The system name is $snamenow ."
scli --mdm_ip $mdmip --rename_system --new_name $sysname
echo "The system name has been set as $sysname ."
fi
## Install Rexray
sudo curl -sSL https://dl.bintray.com/emccode/rexray/install | sh -s -- stable
# write via tee so the file is created with root privileges (a plain redirect would run as the invoking user)
sudo tee /etc/rexray/config.yml > /dev/null << EOF
libstorage:
service: scaleio
scaleio:
endpoint: $endpoint
insecure: true
userName: admin
password: $password
systemName: $sysname
protectionDomainName: $pdomain
storagePoolName: $stpool
thinOrThick: $thin
EOF
echo "`date` RexRay was installed and configured " >> $instlog
sudo service rexray restart
sudo service rexray status
sudo service docker restart
echo "==========================================="
echo " Docker and REX-Ray installation complete! "
echo "==========================================="
## Initial setup of Swarm
echo "Docker Swarm initialized..."
echo " "
manager=192.168.33.11
docker swarm init --listen-addr $manager:2377 --advertise-addr $manager
docker swarm join-token worker > /vagrant/token.txt
echo "============================"
echo " This node set as a manager."
echo "============================"
|
naotakeyoshida/scaleio2.0-rexray
|
scripts/rexray-tb.sh
|
Shell
|
apache-2.0
| 2,436 |
testJsonFiles=`find . -name "*.test.json"`
numFiles=`echo $testJsonFiles | wc -w`
echo "module.exports = [" > tests.js
i=1
for testJsonFile in $testJsonFiles;
do
testDir=`dirname $testJsonFile`
scFile=$testDir/`basename $testJsonFile .test.json`.sc.json
#alternatively, it could just be a regular module, rather than a json object literal
if [ ! -e $scFile ]
then scFile=$testDir/`basename $testJsonFile .test.json`.sc.js
fi;
echo -n " { name : '$testJsonFile', test : require('$testJsonFile'), sc : require('$scFile') }" >> tests.js
#trailing comma
if [ $i -lt $numFiles ]
then echo "," >> tests.js
else
echo "" >> tests.js
fi
i=$(( i + 1 ))
done;
echo "];" >> tests.js
|
mattoshry/SCION-CORE
|
test/tests/generate-test-registry.sh
|
Shell
|
apache-2.0
| 752 |
#!/bin/bash
# Copyright 2017 AT&T Intellectual Property, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#. What this is: Functions for testing with rancher.
#. Prerequisites:
#. - Ubuntu server for master and agent nodes
#. Usage:
#. $ git clone https://github.com/blsaws/nancy.git
#. $ cd nancy/rancher
#.
#. Usage:
#. $ bash rancher_cluster.sh all "<agents>"
#. Automate setup and start demo blueprints.
#. <agents>: space-separated list of agent node IPs
#. $ bash rancher_cluster.sh setup "<agents>"
#. Installs and starts master and agent nodes.
#. $ bash rancher_cluster.sh master
#. Setup the Rancher master node.
#. $ bash rancher_cluster.sh agents "<agents>"
#. Installs and starts agent nodes.
#. $ bash rancher_cluster.sh demo
#. Start demo blueprints.
#. $ bash rancher_cluster.sh clean "<agents>"
#. Removes Rancher and installed blueprints from the master and agent nodes.
#.
#. To call the procedures, directly, e.g. public_endpoint nginx/lb
#. $ source rancher-cluster.sh
#. See below for function-specific usage
#.
# Install master
function setup_master() {
docker_installed=$(dpkg-query -W --showformat='${Status}\n' docker-ce | grep -c "install ok")
if [[ $docker_installed == 0 ]]; then
echo "${FUNCNAME[0]}: installing and starting docker"
# Per https://docs.docker.com/engine/installation/linux/docker-ce/ubuntu/
sudo apt-get remove -y docker docker-engine docker.io
sudo apt-get update
sudo apt-get install -y \
linux-image-extra-$(uname -r) \
linux-image-extra-virtual
sudo apt-get install -y \
apt-transport-https \
ca-certificates \
curl \
software-properties-common
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
sudo add-apt-repository \
"deb [arch=amd64] https://download.docker.com/linux/ubuntu \
$(lsb_release -cs) \
stable"
sudo apt-get update
sudo apt-get install -y docker-ce
echo "${FUNCNAME[0]}: installing jq"
sudo apt-get install -y jq
fi
echo "${FUNCNAME[0]}: installing rancher server (master)"
sudo docker run -d --restart=unless-stopped -p 8080:8080 --name rancher rancher/server
echo "${FUNCNAME[0]}: wait until server is up at http://$1:8080"
delay=0
id=$(wget -qO- http://$1:8080/v2-beta/projects/ | jq -r '.data[0].id')
while [[ "$id" == "" ]]; do
echo "${FUNCNAME[0]}: rancher server is not yet up, checking again in 10 seconds"
sleep 10
let delay=$delay+10
id=$(wget -qO- http://$1:8080/v2-beta/projects/ | jq -r '.data[0].id')
done
echo "${FUNCNAME[0]}: rancher server is up after $delay seconds"
rm -rf ~/rancher
mkdir ~/rancher
}
# Install rancher CLI tools
# Usage example: install_cli_tools 172.16.0.2
function install_cli_tools() {
echo "${FUNCNAME[0]}: installing rancher CLI tools for master $1"
cd ~
echo "${FUNCNAME[0]}: install Rancher CLI"
rm -rf rancher-v0.6.3
wget -q https://releases.rancher.com/cli/v0.6.3/rancher-linux-amd64-v0.6.3.tar.gz
gzip -d -f rancher-linux-amd64-v0.6.3.tar.gz
tar -xvf rancher-linux-amd64-v0.6.3.tar
sudo mv rancher-v0.6.3/rancher /usr/bin/rancher
echo "${FUNCNAME[0]}: install Rancher Compose"
rm -rf rancher-compose-v0.12.5
wget -q https://releases.rancher.com/compose/v0.12.5/rancher-compose-linux-amd64-v0.12.5.tar.gz
gzip -d -f rancher-compose-linux-amd64-v0.12.5.tar.gz
tar -xvf rancher-compose-linux-amd64-v0.12.5.tar
sudo mv rancher-compose-v0.12.5/rancher-compose /usr/bin/rancher-compose
echo "${FUNCNAME[0]}: setup Rancher CLI environment"
# CLI setup http://rancher.com/docs/rancher/v1.6/en/cli/
# Under the UI "API" select "Add account API key" and name it. Export the keys:
# The following scripted approach assumes you have 1 project/environment (Default)
# Set the url that Rancher is on
export RANCHER_URL=http://$1:8080/v1
id=$(wget -qO- http://$1:8080/v2-beta/projects/ | jq -r '.data[0].id')
export RANCHER_ENVIRONMENT=$id
curl -s -o /tmp/keys -X POST -H 'Accept: application/json' -H 'Content-Type: application/json' -d '{"accountId":"reference[account]", "description":"string", "name":"string", "publicValue":"string", "secretValue":"password"}' http://$1:8080/v2-beta/projects/$id/apikeys
# curl -s -o /tmp/keys -X POST -H 'Accept: application/json' -H 'Content-Type: application/json' -d {"type":"apikey","accountId":"1a1","name":"admin","description":null,"created":null,"kind":null,"removed":null,"uuid":null} http://$1:8080/v2-beta/projects/$id/apikey
export RANCHER_ACCESS_KEY=$(jq -r '.publicValue' /tmp/keys)
export RANCHER_SECRET_KEY=$(jq -r '.secretValue' /tmp/keys)
# create the env file ~/.rancher/cli.json
rancher config <<EOF
$RANCHER_URL
$RANCHER_ACCESS_KEY
$RANCHER_SECRET_KEY
EOF
master=$(rancher config --print | jq -r '.url' | cut -d '/' -f 3)
echo "${FUNCNAME[0]}: Create registration token"
# added sleep to allow server time to be ready to create registration tokens (otherwise error is returned)
sleep 5
curl -s -o /tmp/token -X POST -u "${RANCHER_ACCESS_KEY}:${RANCHER_SECRET_KEY}" -H 'Accept: application/json' -H 'Content-Type: application/json' -d '{"name":"master"}' http://$master/v1/registrationtokens
while [[ $(jq -r ".type" /tmp/token) != "registrationToken" ]]; do
sleep 5
curl -s -o /tmp/token -X POST -u "${RANCHER_ACCESS_KEY}:${RANCHER_SECRET_KEY}" -H 'Accept: application/json' -H 'Content-Type: application/json' -d '{"name":"master"}' http://$master/v1/registrationtokens
done
id=$(jq -r ".id" /tmp/token)
echo "${FUNCNAME[0]}: registration token id=$id"
echo "${FUNCNAME[0]}: wait until registration command is created"
command=$(curl -s -u "${RANCHER_ACCESS_KEY}:${RANCHER_SECRET_KEY}" -H 'Accept: application/json' http://$master/v1/registrationtokens/$id | jq -r '.command')
while [[ "$command" == "null" ]]; do
echo "${FUNCNAME[0]}: registration command is not yet created, checking again in 10 seconds"
sleep 10
command=$(curl -s -u "${RANCHER_ACCESS_KEY}:${RANCHER_SECRET_KEY}" -H 'Accept: application/json' http://$master/v1/registrationtokens/$id | jq -r '.command')
done
export RANCHER_REGISTER_COMMAND="$command"
# echo "${FUNCNAME[0]}: activate rancher debug"
# export RANCHER_CLIENT_DEBUG=true
echo "${FUNCNAME[0]}: Install docker-compose for syntax checks"
sudo apt install -y docker-compose
cd ~/rancher
}
# Start an agent host
# Usage example: start_host Default 172.16.0.7
function setup_agent() {
echo "${FUNCNAME[0]}: SSH to host $2 in env $1 and execute registration command"
ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ubuntu@$2 "sudo apt-get install -y docker.io; sudo service docker start"
ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ubuntu@$2 $RANCHER_REGISTER_COMMAND
echo "${FUNCNAME[0]}: wait until agent $2 is active"
delay=0
id=$(rancher hosts | awk "/$2/{print \$1}")
while [[ "$id" == "" ]]; do
echo "${FUNCNAME[0]}: agent $2 is not yet created, checking again in 10 seconds"
sleep 10
let delay=$delay+10
id=$(rancher hosts | awk "/$2/{print \$1}")
done
echo "${FUNCNAME[0]}: agent $2 id=$id"
state=$(rancher inspect $id | jq -r '.state')
while [[ "$state" != "active" ]]; do
echo "${FUNCNAME[0]}: host $2 state is $state, checking again in 10 seconds"
sleep 10
let delay=$delay+10
state=$(rancher inspect $id | jq -r '.state')
done
echo "${FUNCNAME[0]}: agent $2 state is $state after $delay seconds"
}
# Delete an agent host
# Usage example: delete_host 172.16.0.7
function stop_agent() {
echo "${FUNCNAME[0]}: deleting host $1"
rancher rm --stop $(rancher hosts | awk "/$1/{print \$1}")
}
# Test service at access points
# Usage example: check_service nginx/nginx http "Welcome to nginx!"
function check_service() {
echo "${FUNCNAME[0]}: checking service state for $1 over $2 with match string $3"
service=$1
scheme=$2
match="$3"
id=$(rancher ps | grep " $service " | awk "{print \$1}")
n=0
while [[ "$(rancher inspect $id | jq -r ".publicEndpoints[$n].ipAddress")" != "null" ]]; do
ip=$(rancher inspect $id | jq -r ".publicEndpoints[$n].ipAddress")
port=$(rancher inspect $id | jq -r ".publicEndpoints[$n].port")
while [[ $(wget -qO- $scheme://$ip:$port | grep -c "$match") == 0 ]]; do
echo "$service service is NOT active at address $scheme://$ip:$port, waiting 10 seconds"
sleep 10
done
echo "$service service is active at address $scheme://$ip:$port"
let n=$n+1
done
}
# Wait n 10-second tries for service to be active
# Usage example: wait_till_healthy nginx/nginx 6
function wait_till_healthy() {
service=$1
tries=$2
let delay=$tries*10
echo "${FUNCNAME[0]}: waiting for service $service to be ready in $delay seconds"
id=$(rancher ps | grep " $service " | awk "{print \$1}")
health=$(rancher inspect $id | jq -r ".healthState")
state=$(rancher inspect $id | jq -r ".state")
while [[ $tries -gt 0 && "$health" != "healthy" ]]; do
health=$(rancher inspect $id | jq -r ".healthState")
echo $service is $health
sleep 10
let tries=$tries-1
done
echo $service state is $(rancher inspect $id | jq -r ".state")
}
# Start service based upon docker image and simple templates
# Usage example: start_simple_service nginx nginx:latest 8081:80 3
# Usage example: start_simple_service dokuwiki ununseptium/dokuwiki-docker 8082:80 2
function start_simple_service() {
echo "${FUNCNAME[0]}: starting service $1 with image $2, ports $3, and scale $4"
service=$1
image=$2
# port is either a single (unexposed) port, or an source:target pair (source
# is the external port)
ports=$3
scale=$4
echo "${FUNCNAME[0]}: creating service folder ~/rancher/$service"
mkdir ~/rancher/$service
cd ~/rancher/$service
echo "${FUNCNAME[0]}: creating docker-compose.yml"
# Define service via docker-compose.yml
cat <<EOF >docker-compose.yml
version: '2'
services:
$service:
image: $image
ports:
- "$ports"
EOF
echo "${FUNCNAME[0]}: syntax checking docker-compose.yml"
docker-compose -f docker-compose.yml config
echo "${FUNCNAME[0]}: creating rancher-compose.yml"
cat <<EOF >rancher-compose.yml
version: '2'
services:
# Reference the service that you want to extend
$service:
scale: $scale
EOF
echo "${FUNCNAME[0]}: starting service $service"
rancher up -s $service -d
wait_till_healthy "$service/$service" 6
cd ~/rancher
}
# Add load balancer to a service
# Usage example: lb_service nginx 8001 80
# Usage example: lb_service dokuwiki 8002 80
function lb_service() {
echo "${FUNCNAME[0]}: adding load balancer port $2 to service $1, port $3"
service=$1
lbport=$2
port=$3
cd ~/rancher/$service
echo "${FUNCNAME[0]}: creating docker-compose-lb.yml"
# Define lb service via docker-compose.yml
cat <<EOF >docker-compose-lb.yml
version: '2'
services:
lb:
ports:
- $lbport
image: rancher/lb-service-haproxy:latest
EOF
echo "${FUNCNAME[0]}: syntax checking docker-compose-lb.yml"
docker-compose -f docker-compose-lb.yml config
echo "${FUNCNAME[0]}: creating rancher-compose-lb.yml"
cat <<EOF >rancher-compose-lb.yml
version: '2'
services:
lb:
scale: 1
lb_config:
port_rules:
- source_port: $lbport
target_port: $port
service: $service/$service
health_check:
port: 42
interval: 2000
unhealthy_threshold: 3
healthy_threshold: 2
response_timeout: 2000
EOF
echo "${FUNCNAME[0]}: starting service lb"
rancher up -s $service -d --file docker-compose-lb.yml --rancher-file rancher-compose-lb.yml
wait_till_healthy "$service/lb" 6
cd ~/rancher
}
# Change scale of a service
# Usage example: scale_service nginx 1
function scale_service() {
echo "${FUNCNAME[0]}: scaling service $1 to $2 instances"
id=$(rancher ps | grep " $1 " | awk '{print $1}')
rancher scale $id=$2
scale=$(rancher inspect $id | jq -r '.currentScale')
health=$(rancher inspect $id | jq -r '.healthState')
while [[ $scale != $2 || "$health" != "healthy" ]]; do
echo $service is scaled at $scale and is $health
scale=$(rancher inspect $id | jq -r '.currentScale')
health=$(rancher inspect $id | jq -r '.healthState')
sleep 10
done
echo $service is scaled at $scale and is $health
}
# Get public endpoint for a service
# Usage example public_endpoint nginx/lb
function public_endpoint() {
id=$(rancher ps | grep " $1 " | awk "{print \$1}")
ip=$(rancher inspect $id | jq -r ".publicEndpoints[0].ipAddress")
port=$(rancher inspect $id | jq -r ".publicEndpoints[0].port")
echo "${FUNCNAME[0]}: $1 is accessible at http://$ip:$port"
}
# Stop a stack
# Usage example: stop_stack nginx
function stop_stack() {
echo "${FUNCNAME[0]}: stopping stack $1"
rancher stop $(rancher stacks | awk "/$1/{print \$1}")
}
# Start a stopped stack
# Usage example: start_stack nginx
function start_stack() {
echo "${FUNCNAME[0]}: starting stack $1"
rancher start $(rancher stacks | awk "/$1/{print \$1}")
wait_till_healthy $1 6
}
# Delete a stack
# Usage example: delete_stack dokuwiki
function delete_stack() {
id=$(rancher stacks | grep "$1" | awk "{print \$1}")
echo "${FUNCNAME[0]}: deleting stack $1 with id $id"
rancher rm --stop $id
}
# Delete a service
# Usage example: delete_service nginx/lb
function delete_service() {
id=$(rancher ps | grep "$1" | awk "{print \$1}")
echo "${FUNCNAME[0]}: deleting service $1 with id $id"
rancher rm --stop $id
}
# Start a complex service, i.e. with yaml file customizations
# Usage example: start_complex_service grafana 3000:3000 1
function start_complex_service() {
echo "${FUNCNAME[0]}: starting service $1 at ports $2, and scale $3"
service=$1
# port is either a single (unexposed) port, or an source:target pair (source
# is the external port)
ports=$2
scale=$3
echo "${FUNCNAME[0]}: creating service folder ~/rancher/$service"
mkdir ~/rancher/$service
cd ~/rancher/$service
echo "${FUNCNAME[0]}: creating docker-compose.yml"
# Define service via docker-compose.yml
case "$service" in
grafana)
cat <<EOF >docker-compose.yml
grafana:
image: grafana/grafana:latest
ports:
- $ports
environment:
GF_SECURITY_ADMIN_USER: "admin"
GF_SECURITY_ADMIN_PASSWORD: "password"
GF_SECURITY_SECRET_KEY: $(uuidgen)
EOF
;;
*)
esac
echo "${FUNCNAME[0]}: starting service $service"
rancher up -s $service -d
wait_till_healthy "$service/$service" 6
cd ~/rancher
}
# Automated demo
# Usage example: rancher_demo start "172.16.0.7 172.16.0.8 172.16.0.9"
# Usage example: rancher_demo clean "172.16.0.7 172.16.0.8 172.16.0.9"
function demo() {
# Deploy apps
# Nginx web server, accessible on each machine port 8081, and via load
# balancer port 8001
start=`date +%s`
setup "$1"
start_simple_service nginx nginx:latest 8081:80 3
check_service nginx/nginx http "Welcome to nginx!"
lb_service nginx 8001 80
check_service nginx/lb http "Welcome to nginx!"
# Dokuwiki server, accessible on each machine port 8082, and via load
# balancer port 8002
start_simple_service dokuwiki ununseptium/dokuwiki-docker 8082:80 2
check_service dokuwiki/dokuwiki http "This topic does not exist yet"
lb_service dokuwiki 8002 80
check_service dokuwiki/lb http "This topic does not exist yet"
# Grafana server, accessible on one machine at port 3000
start_complex_service grafana 3000:3000 1
id=$(rancher ps | grep " grafana/grafana " | awk "{print \$1}")
source ~/nancy/prometheus/prometheus-tools.sh setup "$agents"
grafana_ip=$(rancher inspect $id | jq -r ".publicEndpoints[0].ipAddress")
prometheus_ip=$(ip route get 8.8.8.8 | awk '{print $NF; exit}')
connect_grafana $prometheus_ip $grafana_ip
public_endpoint nginx/lb
public_endpoint dokuwiki/lb
public_endpoint grafana/grafana
end=`date +%s`
runtime=$((end-start))
runtime=$((runtime/60))
echo "${FUNCNAME[0]}: Demo duration = $runtime minutes"
}
# Automate the installation
function setup() {
# Installation: http://rancher.com/docs/rancher/v1.6/en/
# Install rancher server (master) at primary interface of host
# Account control is disabled (open access to API), and Default env created
ip=$(ip route get 1 | awk '{print $NF;exit}')
setup_master $ip
# Install rancher CLI tools (rancher, rancher-compose), register with master
# and setup CLI environment (e.g. API access/secret keys)
install_cli_tools $ip
# Add agent hosts per http://rancher.com/docs/rancher/v1.6/en/hosts/custom/
agents="$1"
for agent in $agents; do
setup_agent Default $agent
done
}
# Clean the installation
function clean() {
delete_service nginx/lb
delete_stack nginx
delete_service dokuwiki/lb
delete_stack dokuwiki
agents="$1"
for agent in $agents; do
stop_agent $agent
done
sudo docker stop rancher
sudo docker rm -v rancher
sudo apt-get remove -y docker-ce
}
export WORK_DIR=$(pwd)
case "$1" in
master)
ip=$(ip route get 1 | awk '{print $NF;exit}')
setup_master $ip
;;
agents)
agents="$2"
for agent in $agents; do
setup_agent Default $agent
done
;;
ceph)
# TODO Ceph support for rancher, e.g. re
# http://rancher.com/docs/rancher/latest/en/rancher-services/storage-service/
# https://github.com/rancher/rancher/issues/8722
# setup_ceph "$2" $3 $4 $5
;;
demo)
demo "$2"
;;
setup)
setup "$2"
;;
all)
setup "$2"
demo "$2"
# match strings repeated from the demo above; the grafana string is an
# assumed substring of its login page
check_service nginx/lb http "Welcome to nginx!"
check_service dokuwiki/lb http "This topic does not exist yet"
check_service grafana/grafana http "Grafana"
;;
clean)
clean "$2"
;;
*)
if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then grep '#. ' $0; fi
esac
|
blsaws/nancy
|
rancher/rancher-cluster.sh
|
Shell
|
apache-2.0
| 18,273 |
#!/bin/sh
# Copyright 2005-2017 ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
#
# In applying this licence, ECMWF does not waive the privileges and immunities granted to it by
# virtue of its status as an intergovernmental organisation nor does it submit to any jurisdiction.
. ./include.sh
${examples_dir}/eccodes_f_grib_precision > /dev/null
rm -f ${data_dir}/regular_latlon_surface_prec.grib1
|
0x1mason/GribApi.XP
|
grib_api/examples/F90/grib_precision.sh
|
Shell
|
apache-2.0
| 510 |
#!/bin/bash
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright (c) 2017 by Delphix. All rights reserved.
#
# Program Name : jetstream_list_bookmarks_jq.sh
# Description : Delphix API to list Self-Service Container Bookmarks
# Author : Alan Bitterman
# Created : 2017-09-25
# Version : v1.0.0
#
# Requirements :
# 1.) curl and jq command line libraries
# 2.) Populate Delphix Engine Connection Information . ./delphix_engine.conf
# 3.) Include ./jqJSON_subroutines.sh
# 4.) Change values below as required
#
# Usage: ./jetstream_list_bookmarks_jq.sh
#
#########################################################
# DELPHIX CORP #
# Please make changes to the parameters below as req'd! #
#########################################################
#
# Required for JetStream Self-Service Container ...
#
DC_NAME="jsdc" # Data Container Name ...
#########################################################
# NO CHANGES REQUIRED BELOW THIS POINT #
#########################################################
#########################################################
## Subroutines ...
source ./jqJSON_subroutines.sh
#########################################################
#Parameter Initialization
. ./delphix_engine.conf
#########################################################
## Session and Login ...
echo "Authenticating on ${BaseURL}"
RESULTS=$( RestSession "${DMUSER}" "${DMPASS}" "${BaseURL}" "${COOKIE}" "${CONTENT_TYPE}" )
#echo "Results: ${RESULTS}"
if [ "${RESULTS}" != "OK" ]
then
echo "Error: Exiting ..."
exit 1;
fi
echo "Session and Login Successful ..."
#########################################################
## Get Container ...
#echo "Getting Container Reference Value ..."
STATUS=`curl -s -X GET -k ${BaseURL}/jetstream/container -b "${COOKIE}" -H "${CONTENT_TYPE}"`
RESULTS=$( jqParse "${STATUS}" "status" )
#echo "${STATUS}" | jq "."
#
# Parse out container reference for name of ${DC_NAME} ...
#
CONTAINER_REFERENCE=`echo ${STATUS} | jq --raw-output '.result[] | select(.name=="'"${DC_NAME}"'") | .reference '`
echo "Container Reference: ${CONTAINER_REFERENCE}"
ACTIVE_BRANCH=`echo ${STATUS} | jq --raw-output '.result[] | select(.name=="'"${DC_NAME}"'") | .activeBranch '`
echo "Active Branch Reference: ${ACTIVE_BRANCH}"
#########################################################
## Get Branches ...
STATUS=`curl -s -X GET -k ${BaseURL}/jetstream/branch -b "${COOKIE}" -H "${CONTENT_TYPE}"`
RESULTS=$( jqParse "${STATUS}" "status" )
#echo "${STATUS}" | jq "."
#
# Parse ...
#
echo " "
BRANCH_NAMES=`echo ${STATUS} | jq --raw-output '.result[] | select(.dataLayout=="'"${CONTAINER_REFERENCE}"'") | .name +","+ .reference '`
#echo "Branch Names for ${DC_NAME}: ${BRANCH_NAMES}"
#
# List Branch References and Names ...
#
printf "%-44s | %s\n" "REFERENCE" "BRANCH_NAME"
echo "---------------------------------------------+-----------------"
while read info
do
IFS=,
arr=($info)
###echo "Writing Results for Table: ${arr[0]} id: ${arr[1]}"
TMP_NAME="${arr[0]}"
TMP_REF="${arr[1]}"
if [[ "${ACTIVE_BRANCH}" == "${TMP_REF}" ]]
then
printf "[Active] %-35s : %s\n" ${TMP_REF} ${TMP_NAME}
else
printf "%-44s : %s\n" ${TMP_REF} ${TMP_NAME}
fi
done <<< "${BRANCH_NAMES}"
IFS=
#
# Get user provided branch name or exit ...
#
echo "Enter Branch Name, Copy-n-Paste Branch Name or return to exit: "
read BRANCH_NAME
#
# Get Branch Name from Reference ...
#
BRANCH_REF=`echo ${STATUS} | jq --raw-output '.result[] | select(.name=="'"${BRANCH_NAME}"'" and .dataLayout=="'"${CONTAINER_REFERENCE}"'") | .reference '`
#
# Validate ...
#
if [[ "${BRANCH_REF}" == "" ]]
then
echo "No Branch Name/Reference ${BRANCH_NAME}/${BRANCH_REF} found, Exiting ..."
exit 1;
fi
echo "Branch Reference: ${BRANCH_REF}"
#########################################################
## Get BookMarks per Branch Option ...
STATUS=`curl -s -X GET -k ${BaseURL}/jetstream/bookmark -b "${COOKIE}" -H "${CONTENT_TYPE}"`
RESULTS=$( jqParse "${STATUS}" "status" )
#echo "${STATUS}" | jq "."
echo " "
BM_NAMES=`echo ${STATUS} | jq --raw-output '.result[] | select(.container=="'"${CONTAINER_REFERENCE}"'" and .branch=="'"${BRANCH_REF}"'") | .name +","+ .reference '`
echo "Bookmark Names:"
#echo "${BM_NAMES}"
#
# List Bookmarks and Object References ...
#
printf "%-44s | %s\n" "REFERENCE" "BOOKMARK_NAME"
echo "---------------------------------------------+-----------------"
while read info
do
IFS=,
arr=($info)
###echo "Writing Results for Table: ${arr[0]} id: ${arr[1]}"
TMP_NAME="${arr[0]}"
TMP_REF="${arr[1]}"
printf "%-44s : %s\n" ${TMP_REF} ${TMP_NAME}
done <<< "${BM_NAMES}"
IFS=
#
# Get user provided branch Reference or exit ...
#
echo "Select Bookmark Name to List, Copy-n-Paste Bookmark Name or return to exit: "
read BOOK_NAME
#
# Get Bookmark Reference ...
#
BOOK_REF=`echo ${STATUS} | jq --raw-output '.result[] | select(.container=="'"${CONTAINER_REFERENCE}"'" and .branch=="'"${BRANCH_REF}"'" and .name=="'"${BOOK_NAME}"'") | .reference '`
#
# Validate ...
#
if [[ "${BOOK_REF}" == "" ]]
then
echo "No Bookmark Name/Reference ${BOOK_NAME}/${BOOK_REF} found, Exiting ..."
exit 1;
fi
echo "Bookmark Reference: ${BOOK_REF}"
#########################################################
## List Bookmark ...
STATUS=`curl -s -X GET -k ${BaseURL}/jetstream/bookmark/${BOOK_REF} -b "${COOKIE}" -H "${CONTENT_TYPE}"`
RESULTS=$( jqParse "${STATUS}" "status" )
echo "${STATUS}" | jq "."
#########################################################
## Delete Bookmark ...
echo "Do you want DELETE this Bookmark? [yes/NO] "
read ANS
if [[ "${ANS}" == "yes" ]]
then
echo "Delete Bookmark ${BOOK_REF} ..."
STATUS=`curl -s -X POST -k --data @- $BaseURL/jetstream/bookmark/${BOOK_REF}/delete -b "${COOKIE}" -H "${CONTENT_TYPE}" <<EOF
${json}
EOF
`
echo "Delete Bookmark Results: ${STATUS}"
RESULTS=$( jqParse "${STATUS}" "status" )
#echo "${STATUS}" | jq "."
#########################################################
#
# Get Job Number ...
#
JOB=$( jqParse "${STATUS}" "job" )
echo "Job: ${JOB}"
#########################################################
#
# Job Information ...
#
JOB_STATUS=`curl -s -X GET -k ${BaseURL}/job/${JOB} -b "${COOKIE}" -H "${CONTENT_TYPE}"`
RESULTS=$( jqParse "${JOB_STATUS}" "status" )
#########################################################
#
# Get Job State from Results, loop until not RUNNING ...
#
JOBSTATE=$( jqParse "${JOB_STATUS}" "result.jobState" )
PERCENTCOMPLETE=$( jqParse "${JOB_STATUS}" "result.percentComplete" )
echo "Current status as of" $(date) ": ${JOBSTATE} ${PERCENTCOMPLETE}% Completed"
while [ "${JOBSTATE}" == "RUNNING" ]
do
echo "Current status as of" $(date) ": ${JOBSTATE} ${PERCENTCOMPLETE}% Completed"
sleep ${DELAYTIMESEC}
JOB_STATUS=`curl -s -X GET -k ${BaseURL}/job/${JOB} -b "${COOKIE}" -H "${CONTENT_TYPE}"`
JOBSTATE=$( jqParse "${JOB_STATUS}" "result.jobState" )
PERCENTCOMPLETE=$( jqParse "${JOB_STATUS}" "result.percentComplete" )
done
#########################################################
## Producing final status
if [ "${JOBSTATE}" != "COMPLETED" ]
then
echo "Error: Delphix Job Did not Complete, please check GUI ${JOB_STATUS}"
# exit 1
else
echo "Job: ${JOB} ${JOBSTATE} ${PERCENTCOMPLETE}% Completed ..."
fi
echo " "
fi # end if yes to delete ...
############## E O F ####################################
echo "Done ..."
echo " "
exit 0
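############## Editor's note ############################
# jqParse comes from jqJSON_subroutines.sh (not shown here). A minimal sketch
# of what it likely does, assuming it pulls one field out of a JSON response
# with jq --raw-output:
#
#   jqParse() {
#     local json="$1" field="$2"
#     echo "$json" | jq --raw-output ".$field"
#   }
#
# (this reconstruction is an assumption; consult the real subroutines file)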
|
duckback00/dxapikit
|
API/jetstream_list_bookmarks.sh
|
Shell
|
apache-2.0
| 8,174 |