code | repo_name | path | language | license | size
---|---|---|---|---|---
GLOG_logtostderr=1 ../../build/tools/extract_image_features.bin prototxt/c3d_suturing_feature_extractor_frm.prototxt conv3d_deepnetA_sport1m_iter_1900000 -1 100 1 prototxt/output_list_prefix_suturing.txt conv5b
|
BerkeleyAutomation/vtsc
|
scripts/bash/c3d_suturing_feature_extraction.sh
|
Shell
|
mit
| 209 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for RHSA-2014:0434
#
# Security announcement date: 2014-04-24 17:49:23 UTC
# Script generation date: 2017-01-01 21:15:15 UTC
#
# Operating System: Red Hat 6
# Architecture: x86_64
#
# Vulnerable packages fix on version:
# - qemu-img-rhev.x86_64:0.12.1.2-2.415.el6_5.8
# - qemu-kvm-rhev.x86_64:0.12.1.2-2.415.el6_5.8
# - qemu-kvm-rhev-debuginfo.x86_64:0.12.1.2-2.415.el6_5.8
# - qemu-kvm-rhev-tools.x86_64:0.12.1.2-2.415.el6_5.8
#
# Last versions recommended by security team:
# - qemu-img-rhev.x86_64:0.12.1.2-2.491.el6_8.3
# - qemu-kvm-rhev.x86_64:0.12.1.2-2.491.el6_8.3
# - qemu-kvm-rhev-debuginfo.x86_64:0.12.1.2-2.491.el6_8.3
# - qemu-kvm-rhev-tools.x86_64:0.12.1.2-2.491.el6_8.3
#
# CVE List:
# - CVE-2014-0142
# - CVE-2014-0143
# - CVE-2014-0144
# - CVE-2014-0145
# - CVE-2014-0146
# - CVE-2014-0147
# - CVE-2014-0148
# - CVE-2014-0150
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo yum install qemu-img-rhev.x86_64-0.12.1.2-2.415.el6_5.8 -y
sudo yum install qemu-kvm-rhev.x86_64-0.12.1.2-2.415.el6_5.8 -y
sudo yum install qemu-kvm-rhev-debuginfo.x86_64-0.12.1.2-2.415.el6_5.8 -y
sudo yum install qemu-kvm-rhev-tools.x86_64-0.12.1.2-2.415.el6_5.8 -y
|
Cyberwatch/cbw-security-fixes
|
Red_Hat_6/x86_64/2014/RHSA-2014:0434.sh
|
Shell
|
mit
| 1,282 |
#!/bin/sh
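# Restart loop: pull the latest code, refresh the submodules, rebuild, and
# relaunch the bot; if it ever exits, wait 5s and start over.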
while true
do
git pull origin master
git submodule init
git submodule update
make -B
./cloogleirc "$@"
sleep 5s
done
|
clean-cloogle/clean-irc
|
run.sh
|
Shell
|
mit
| 133 |
#!/bin/bash
php bin/console doctrine:database:drop --force && php bin/console doctrine:database:create && php bin/console doctrine:schema:create && echo "Y"|php bin/console doctrine:fixtures:load
|
InsaLan/insalan.fr
|
load_fixtures.sh
|
Shell
|
mit
| 196 |
#!/bin/bash
#
# Josh Forester
# 2009/09/09
#
# This script upgrades the production database to be up to speed with development schema
# changes. This should only be used during a site roll.
#
SCHEMA_UPGRADE_PATH=`pwd`
source ../leaderboardAdmCreds.profile
printUsage() {
echo "Usage: $0"
echo " [--dev|--prd] --version <schema upgrade version number>"
return
}
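# Example invocation (hypothetical version number):
#   ./upgradeDB.sh --prd --version 12
# applies schemaUpgrade.12.sql to the production database.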
# Vet arguments
if [ $# -ne 3 ] ; then
printUsage
exit 1
fi
case $2 in
--version) SCHEMA_UPGRADE_VERSION=$3;;
--help) printUsage; exit 1;;
"") ;;
*) printUsage; exit 1;;
esac
case $1 in
--dev) MYSQL_DB=leaderboard_dev;;
--prd) MYSQL_DB=leaderboard;;
--help) printUsage; exit 1;;
"") ;;
*) printUsage; exit 1;;
esac
mysql -u $MYSQL_USER -p -h $MYSQL_HOST $MYSQL_DB < ${SCHEMA_UPGRADE_PATH}/schemaUpgrade.${SCHEMA_UPGRADE_VERSION}.sql
exit 0
|
joshforester/rdboard
|
src/sql/upgrade/upgradeDB.sh
|
Shell
|
mit
| 859 |
## *
## String length.
##
## Params:
## *: {String} Text.
##
## Out: {Integer} String length.
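##
## Example (assuming the framework's @-call syntax):
## @str-len 'Hello' # -> 5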
local str="$(@remove-format "$@")"
echo ${#str}
|
reduardo7/hsabx
|
src/utils/str-len.sh
|
Shell
|
gpl-2.0
| 145 |
#!/bin/sh
. `dirname ${0}`/lcp.sh
java -classpath $LOCALCLASSPATH -Djava.util.logging.config.file=logging.properties org.semanticdesktop.aperture.examples.ExampleImapCrawler $*
|
yorkulibraries/vufind
|
import/aperture/bin/imapcrawler.sh
|
Shell
|
gpl-2.0
| 177 |
#!/bin/bash
aptitude -y install expect
# Not required in actual script
MYSQL_ROOT_PASSWORD=abcd1234
SECURE_MYSQL=$(expect -c "
set timeout 10
spawn mysql_secure_installation
expect \"Enter current password for root (enter for none):\"
send \"$MYSQL\r\"
expect \"Change the root password?\"
send \"n\r\"
expect \"Remove anonymous users?\"
send \"y\r\"
expect \"Disallow root login remotely?\"
send \"y\r\"
expect \"Remove test database and access to it?\"
send \"y\r\"
expect \"Reload privilege tables now?\"
send \"y\r\"
expect eof
")
echo "$SECURE_MYSQL"
aptitude -y purge expect
|
jomijournal/jomi_wp
|
mysql_secure.sh
|
Shell
|
gpl-2.0
| 595 |
#!/bin/bash
# Determine python binary on CLI.
pythonBin=`which python`
# Run python script.
${pythonBin} "$1" "$2" "$3"
|
woutsanders/stendenInnovate
|
server/ajaxWrap.sh
|
Shell
|
gpl-2.0
| 115 |
#!/bin/sh
port=53201
DOMAIN_NAME="project1.app"
FILE_NAME="project1_app"
if [ -z "$1" ]
then
echo "Info: No Environment is given will take prod as <env>"
else
ENV=$1
fi
case $ENV in
dev)
echo "-- DEV"
if [ ! -z "$2" ]
then
DOMAIN_NAME=$2
FILE_NAME=$(echo "$DOMAIN_NAME" | sed 's/\./_/g')
fi
echo "Add $DOMAIN_NAME in hosts file"
echo "127.0.0.1 $DOMAIN_NAME" | sudo tee -a /etc/hosts
;;
prod)
echo "-- PROD"
;;
*)
echo "Error: bad option $ENV!"
echo "option should be prod|dev"
exit 1
;;
esac
storage_dir='/tmp/storage/'$FILE_NAME
image_name=$FILE_NAME'_mysql'
mkdir -p $storage_dir
echo 'Will try to kill the container if it exists'
docker kill $image_name
docker rm $image_name
echo 'Launch container from mysql image'
docker run --name $image_name -v $storage_dir:/var/lib/mysql -e MYSQL_ROOT_PASSWORD=dbpass -p $port:3306 -d mysql
echo '-----------------------------------------------------------'
echo '| update your parameters.yml with these values'
echo '| host : '`docker inspect --format '{{ .NetworkSettings.IPAddress }}' $image_name`
echo '| port : '"$port"
echo '| user : root'
echo '| password : dbpass'
echo '|'
echo '| you should be able to run : '
echo '| mysql -u root -pdbpass -h 0.0.0.0 --port='"$port"
echo '| or'
echo '| mysql -u root -pdbpass -h '`docker inspect --format '{{ .NetworkSettings.IPAddress }}' $image_name`
echo '| or even better'
echo '| mysql -u root -pdbpass -h '`docker inspect --format '{{ .NetworkSettings.IPAddress }}' $image_name` '< your_dump.sql'
echo '-----------------------------------------------------------'
#composer update
php app/console cache:clear --env=$ENV
php app/console assets:install --symlink
php app/console assetic:dump
php app/console doctrine:database:create && php app/console doctrine:schema:create && php app/console doctrine:fixtures:load -n
#php app/console server:run 127.0.0.1:8050
|
jguido/time-tracking
|
run.sh
|
Shell
|
gpl-2.0
| 2,029 |
#! /bin/sh
# Copyright (C) 1996-2017 Free Software Foundation, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# Superficial test to check that dejagnu tests and automake-style
# tests can coexist. See also related deeper test 'check12.sh'.
. test-init.sh
cat > Makefile.am << 'END'
AUTOMAKE_OPTIONS = dejagnu
TESTS = frob.test
END
test x"$am_serial_tests" = x"yes" || : > test-driver
$ACLOCAL
$AUTOMAKE
grep '^check-TESTS' Makefile.in
grep '^check-DEJAGNU' Makefile.in
:
|
pylam/automake
|
t/dejagnu.sh
|
Shell
|
gpl-2.0
| 1,059 |
#!/bin/bash
###############################################################################
#
# DEFAULT CONFIGURATION FILE
#
# Do not edit this, edit config.sh instead.
#
###############################################################################
# Disk geometry
DISK_CYLINDERS=980
DISK_HEADS=5
DISK_SECTORS_PER_TRACK=17
DISK_BYTES_PER_SECTOR=512
# ROM (BIOS) image
ROMIMAGE="file=/usr/share/bochs/BIOS-bochs-latest"
# ROM (VGA) image
VGAIMAGE="file=/usr/share/vgabios/vgabios.bin"
# How much memory to give our test machine
MEMORY=64
# Processor
IPS=2
|
Quetuo/osdev
|
test/config-defaults.sh
|
Shell
|
gpl-2.0
| 565 |
#!/bin/sh
string=filenametags
echo -e "!_TAG_FILE_SORTED\t2\t/2=foldcase/" > $string
find . -regex '.*\.\(c\|C\|h\|H\|cpp\|hpp\|cxx\|hxx\|sh\|xml\|inl\)' ! \( -path "*svn*" -o -path "*repo*" -o -path "*git*" \) -type f -printf "%f\t%p\t1\n" | sort -f >> $string
pwd=`pwd`
sed -i "s+\.\/+$pwd\/+g" $string
|
chengshiding/vim
|
tools/lookfile.sh
|
Shell
|
gpl-2.0
| 306 |
#################
# File with commands that make it easier to work with system mails
#################
#MODULE_PREFIX=m
# TODO: the mail command is an utter load of crap. We'll have to process the mail file directly using:
# grep -E '^Subject: ' /var/mail/kshitiz -n
# Returns list of mails that are unread
m_unread() {
:
}
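# A minimal sketch of the grep-style approach from the TODO above, assuming an
# mbox-format spool at /var/mail/$USER (path and helper name are illustrative):
# a message whose Status: header lacks the "R" flag has not been read yet.
m_unread_sketch() {
    local mbox="${1:-/var/mail/$USER}"
    awk '/^From / { inhdr = 1; read = 0; subj = "" }      # a new message starts
         inhdr && /^Status:.*R/ { read = 1 }              # R flag => already read
         inhdr && /^Subject: /  { subj = substr($0, 10) } # keep the subject line
         inhdr && /^$/ { if (!read && subj != "") print subj; inhdr = 0 }' "$mbox"
}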
# Returns mails received recently. "Recently" is defined by the following variable, in hours
RECENT_LIMIT=1
recent_mails() {
:
}
# Selects all mails that match given conditions
# Some sample conditions: from a particular sender, containing given text in subject line, containing given text in body, received between given dates
select_mails() {
:
}
# Delete given list of mails. Can be passed output of select_mails
delete_mails() {
:
}
|
Kshitiz-Sharma/BashFramework
|
scripts/system/mail.sh
|
Shell
|
gpl-2.0
| 767 |
#!/bin/bash -ex
source config.cfg
apt-get install -y mongodb-server mongodb-clients python-pymongo
sed -i "s/bind_ip = 127.0.0.1/bind_ip = $CON_MGNT_IP/g" /etc/mongodb.conf
service mongodb restart
sleep 40
cat << EOF > mongo.js
db = db.getSiblingDB("ceilometer");
db.addUser({user: "ceilometer",
pwd: "$CEILOMETER_DBPASS",
roles: [ "readWrite", "dbAdmin" ]})
EOF
sleep 20
mongo --host $CON_MGNT_IP ./mongo.js
## Create user and endpoint and assign role for CEILOMETER
openstack user create --password $CEILOMETER_PASS ceilometer
openstack role add --project service --user ceilometer admin
openstack service create --name ceilometer --description "Telemetry" metering
openstack endpoint create \
--publicurl http://$CON_MGNT_IP:8777 \
--internalurl http://$CON_MGNT_IP:8777 \
--adminurl http://$CON_MGNT_IP:8777 \
--region RegionOne \
metering
# Install the CEILOMETER packages
apt-get -y install ceilometer-api ceilometer-collector \
ceilometer-agent-central ceilometer-agent-notification \
ceilometer-alarm-evaluator ceilometer-alarm-notifier \
python-ceilometerclient
mv /etc/ceilometer/ceilometer.conf /etc/ceilometer/ceilometer.conf.bka
cat << EOF > /etc/ceilometer/ceilometer.conf
[DEFAULT]
verbose = True
rpc_backend = rabbit
auth_strategy = keystone
[database]
connection = mongodb://ceilometer:$CEILOMETER_DBPASS@$CON_MGNT_IP:27017/ceilometer
[keystone_authtoken]
auth_uri = http://$CON_MGNT_IP:5000
auth_url = http://$CON_MGNT_IP:35357
auth_plugin = password
project_domain_id = default
user_domain_id = default
project_name = service
username = ceilometer
password = $CEILOMETER_PASS
[service_credentials]
os_auth_url = http://$CON_MGNT_IP:5000/v2.0
os_username = ceilometer
os_tenant_name = service
os_password = $CEILOMETER_PASS
os_endpoint_type = internalURL
os_region_name = RegionOne
# [publisher]
# telemetry_secret = $METERING_SECRET
[matchmaker_redis]
[matchmaker_ring]
[oslo_messaging_amqp]
[oslo_messaging_qpid]
[oslo_messaging_rabbit]
rabbit_host = $CON_MGNT_IP
rabbit_userid = openstack
rabbit_password = $RABBIT_PASS
[oslo_policy]
EOF
echo "Restart service"
sleep 3
service ceilometer-agent-central restart
service ceilometer-agent-notification restart
service ceilometer-api restart
service ceilometer-collector restart
service ceilometer-alarm-evaluator restart
service ceilometer-alarm-notifier restart
echo "Restart service"
sleep 10
service ceilometer-agent-central restart
service ceilometer-agent-notification restart
service ceilometer-api restart
service ceilometer-collector restart
service ceilometer-alarm-evaluator restart
service ceilometer-alarm-notifier restart
|
vietstacker/openstack-liberty-multinode
|
LIBERTY-U14.04-OVS/ctl-10-ceilometer.sh
|
Shell
|
gpl-2.0
| 2,624 |
#!/bin/sh
# spi-config sets the configuration for the PiCAN module:
# 10 MHz SPI clock, GPIO 25 for the MCP2515 /INT, 16 MHz (crystal) clock - 0x2002 -> IRQF_ONESHOT
modprobe spi-config devices=bus=0:cs=0:modalias=mcp2515:speed=10000000:gpioirq=25:pd=20:pds32-0=16000000:pdu32-4=0x2002:force_release
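# Field-by-field, as I read the spi-config module options (assumptions, not
# verified against this exact module version):
#   bus=0:cs=0        - SPI bus 0, chip select 0
#   modalias=mcp2515  - bind the mcp2515 driver to this device
#   speed=10000000    - 10 MHz SPI clock
#   gpioirq=25        - GPIO 25 wired to the MCP2515 /INT line
#   pd=20             - allocate 20 bytes of platform data, into which
#   pds32-0=16000000  - the 16 MHz crystal frequency is written at offset 0 and
#   pdu32-4=0x2002    - the IRQ flags (IRQF_ONESHOT) at offset 4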
modprobe mcp251x
ip link set can0 up type can bitrate 125000 triple-sampling on
|
rdnetto/teapot-buildroot
|
board/rasplayer/rootfs_overlay/root/start-canbus.sh
|
Shell
|
gpl-2.0
| 372 |
cp ./tactile_object_recognition_main.lua $ICUBcontrib_DIR/bin/
cp ./tactile_object_recognition_funcs.lua $ICUBcontrib_DIR/share/ICUBcontrib/contexts/tactile-object-recognition/lua/
cp ./tactile_object_recognition_interact_fsm.lua $ICUBcontrib_DIR/share/ICUBcontrib/contexts/tactile-object-recognition/lua/
cp ./tactile_object_recognition_root_fsm.lua $ICUBcontrib_DIR/share/ICUBcontrib/contexts/tactile-object-recognition/lua/
|
tacman-fp7/tactile-control
|
data/objectRecognition/lua/install_lua.sh
|
Shell
|
gpl-2.0
| 428 |
convert images/OCS-205-A.png -crop 1513x233+0+0 +repage images/OCS-205-A-0.png
convert -append images/OCS-204-B-4.png images/OCS-205-A-0.png images/OCS-204-B-4.png
rm images/OCS-205-A-0.png
convert images/OCS-205-A.png -crop 1513x874+0+234 +repage images/OCS-205-A-1.png
convert images/OCS-205-A.png -crop 1513x805+0+1101 +repage images/OCS-205-A-2.png
convert images/OCS-205-A.png -crop 1513x399+0+1897 +repage images/OCS-205-A-3.png
convert images/OCS-205-A.png -crop 1513x324+0+2291 +repage images/OCS-205-A-4.png
convert images/OCS-205-A.png -crop 1513x633+0+2616 +repage images/OCS-205-A-5.png
convert images/OCS-205-A.png -crop 1513x877+0+3252 +repage images/OCS-205-A-6.png
convert images/OCS-205-A.png -crop 1513x67+0+4136 +repage images/OCS-205-A-7.png
convert images/OCS-205-A.png -crop 1513x69+0+4216 +repage images/OCS-205-A-8.png
convert images/OCS-205-A.png -crop 1513x235+0+4288 +repage images/OCS-205-A-9.png
#
#/OCS-205.png
convert images/OCS-205-B.png -crop 1573x545+0+0 +repage images/OCS-205-B-0.png
convert -append images/OCS-205-A-9.png images/OCS-205-B-0.png images/OCS-205-A-9.png
rm images/OCS-205-B-0.png
convert images/OCS-205-B.png -crop 1573x2618+0+550 +repage images/OCS-205-B-1.png
convert images/OCS-205-B.png -crop 1573x486+0+3163 +repage images/OCS-205-B-2.png
convert images/OCS-205-B.png -crop 1573x309+0+3656 +repage images/OCS-205-B-3.png
convert images/OCS-205-B.png -crop 1573x385+0+3972 +repage images/OCS-205-B-4.png
convert images/OCS-205-B.png -crop 1573x147+0+4370 +repage images/OCS-205-B-5.png
#
#/OCS-205.png
|
jonnymwalker/Staroslavjanskij-Slovar
|
scripts/findindents.OCS-205.sh
|
Shell
|
gpl-2.0
| 1,557 |
#!/bin/sh
TEMPLATE=messages.pot
./utils/update-schema-translations.sh
xgettext -kT_js_decl -kT_sprintf -kT_ngettext:1,2 -k__ -L PHP -o $TEMPLATE *.php help/*.php mobile/*.php classes/*.php include/*.php
xgettext --from-code utf-8 -k__ -L Java -j -o $TEMPLATE js/*.js
update_lang() {
if [ -f $1.po ]; then
TMPFILE=/tmp/update-translations.$$
msgmerge -o $TMPFILE $1.po $TEMPLATE
mv $TMPFILE $1.po
msgfmt --statistics $1.po
msgfmt -o $1.mo $1.po
else
echo "Usage: $0 [-p|<basename>]"
fi
}
LANGS=`find locale -name 'messages.po'`
for lang in $LANGS; do
echo Updating $lang...
PO_BASENAME=`echo $lang | sed s/.po//`
update_lang $PO_BASENAME
done
#./utils/update-js-translations.sh
|
johnkeeping/tt-rss
|
utils/update-translations.sh
|
Shell
|
gpl-2.0
| 704 |
#!/usr/bin/env bash
# Run Drupal tests (@group Thunder)
cd ${TEST_DIR}/docroot
# execute Drupal tests
php ${TEST_DIR}/docroot/core/scripts/run-tests.sh --php `which php` --verbose --color --url http://localhost:8080 Thunder
|
pixelmord/thunder-distribution
|
scripts/travis/run-tests.sh
|
Shell
|
gpl-2.0
| 226 |
#!/bin/bash
#
# Copyright (c) 2017 - Present Jeong Han Lee
# Copyright (c) 2017 - Present European Spallation Source ERIC
#
# The program is free software: you can redistribute
# it and/or modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation, either version 2 of the
# License, or any newer version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see https://www.gnu.org/licenses/gpl-2.0.txt
#
# Author : Jeong Han Lee
# email : [email protected]
# Date : Wednesday, September 13 10:47:26 CEST 2017
# version : 0.0.3
#
declare -gr SC_SCRIPT="$(realpath "$0")"
declare -gr SC_SCRIPTNAME=${0##*/}
declare -gr SC_TOP="$(dirname "$SC_SCRIPT")"
set -a
. ${SC_TOP}/archappl_systemd.conf
set +a
. ${SC_TOP}/../functions
declare -gr SUDO_CMD="sudo";
${SUDO_CMD} -v
pushd ${SC_TOP}
mkdir -p tmp
cat > ./tmp/${AA_SYSTEMD_UNIT_M4} <<EOF
include(\`${AA_SYSTEMD_CONF_M4}')
AA_SYSTEMD_UNIT(\`${SC_TOP}/../')
EOF
m4 ./tmp/${AA_SYSTEMD_UNIT_M4} > ./tmp/${AA_SYSTEMD_UNIT}
${SUDO_CMD} install -m 644 ./tmp/${AA_SYSTEMD_UNIT} ${SD_UNIT_PATH01}
popd
${SUDO_CMD} systemctl daemon-reload;
${SUDO_CMD} systemctl enable ${AA_SYSTEMD_UNIT};
#${SUDO_CMD} systemctl start ${AA_SYSTEMD_UNIT};
exit 0;
|
jeonghanlee/epicsarchiverap-sites
|
systemd_service/archappl_systemd_setup.bash
|
Shell
|
gpl-2.0
| 1,571 |
#!/usr/bin/env bash
PORT=${1:-8888}
INDEX_NAME=${2:-index_getjenny_english_0}
curl -v -H "Authorization: Basic $(echo -n 'test_user:p4ssw0rd' | base64)" \
-H "Content-Type: application/json" \
-X GET http://localhost:${PORT}/${INDEX_NAME}/stream/term
|
GetJenny/starchat
|
scripts/api_test/getTermStream.sh
|
Shell
|
gpl-2.0
| 256 |
#!/bin/bash
SNAPSHOT_MOUNT_POINT='/mnt/snapshots'
SNAPSHOT_MOUNT_OPTIONS='nouuid,ro' # xfs needs nouuid
SNAPSHOT_SIZE='50%ORIGIN'
SNAPSHOT_PREFIX='snap_'
DEFAULT_VOLUME_GROUP='vg00'
REQUIREMENTS="/sbin/lvremove /sbin/lvcreate /sbin/lvs"
# check for waitmax binary
WAITMAX_BINARY="$(which waitmax)"
get_volumes(){
LVS=$(/sbin/lvs --separator / --noheadings -o vg_name,lv_name 2>&- | tr -d ' ') || true
echo $LVS
}
find_volume(){
local target volume_group volumes volume
target=$1
volume_group=$DEFAULT_VOLUME_GROUP
test -z $target && return 1
if [[ $# -eq 2 ]]; then
volume_group=$2
fi
test -z $volume_group && return 1
volumes=$(get_volumes)
test -z "$volumes" && return 1
for volume in $volumes; do
if [[ "${volume}" == "${volume_group}/${target}" ]]; then
echo $volume
return 0
fi
done
return 1
}
make_snapshot(){
local volume name size
volume=$1
name=$2
size=$3
test -z $volume && return 1
test -z $name && return 1
test -z $size && return 1
if [ -x $WAITMAX_BINARY ];then
# Wait 300 seconds to create snapshot or kill create command
WAITMAX_COMMAND="${WAITMAX_BINARY} -s9 300"
fi
echo "INFO: LVM creating $name for $volume"
$WAITMAX_COMMAND /sbin/lvcreate -n $name --extents "${size}" -s $volume <<<y
}
remove_snapshot(){
local volume
volume=$1
test -z $volume && return 1
echo "INFO: LVM removing ${volume}"
/sbin/lvremove -f ${volume}
}
mount_snapshot(){
local device mount_target mount_options mount_args
device=$1
mount_target=$2
mount_options=$3
test -z $device && return 1
test -z $mount_target && return 1
mount_args="${device} ${mount_target}"
test -n $mount_options && mount_args="-o ${mount_options} ${mount_args}"
create_mountpoint $mount_target
echo "INFO: mounting $mount_args"
mount $mount_args
}
unmount_snapshot(){
local mount_target
mount_target=$1
test -z $mount_target && return 1
echo "INFO: unmounting $mount_target"
umount $mount_target
}
is_mounted(){
local mount_target ret
mount_target=$1
test -z $mount_target && return 1
result=$(awk "{ if (\$2 == \"${mount_target}\") { print \$1 } }" /proc/mounts)
if [[ -n $result ]]; then
return 0
else
return 1
fi
}
create_mountpoint(){
local mount_target
mount_target=$1
test -z $mount_target && return 1
test -d $mount_target || mkdir -p $mount_target
}
check_requirements(){
local errorcnt req requirements
requirements=$1
errorcnt=0
for req in $requirements; do
test -x $req && continue
ret=$?
test $ret -eq 0 || let errorcnt=errorcnt+1
test $ret -eq 0 || echo "${req} is missing"
done
test $errorcnt -gt 0 && echo "ERROR: requirements failed" && exit $errorcnt
}
usage(){
echo "USAGE: ${0} mode lvm_target [volume_group]"
echo " mode: (mount|unmount)"
echo " lvm_target: volume"
echo " volume_group: vgroup (default=${DEFAULT_VOLUME_GROUP})"
test -n $1 && exit $1
}
if [[ $# -lt 2 ]]; then
usage 1
fi
if [[ $# -eq 3 ]]; then
volume_group=$3
else
volume_group=$DEFAULT_VOLUME_GROUP
fi
check_requirements $REQUIREMENTS
mode=$1
target=$2
snapshot_name="${SNAPSHOT_PREFIX}${target}"
snapshot_mount_name="${volume_group}-${snapshot_name}"
snapshot_mount_device="/dev/mapper/${snapshot_mount_name}"
snapshot_mount_target="${SNAPSHOT_MOUNT_POINT}/${snapshot_mount_name}"
case $mode in
'mount')
snapshot_vol=$(find_volume $snapshot_name $volume_group)
if [[ -n $snapshot_vol ]]; then
echo "ERROR: found old snapshot volume, please remove it before creating new"
exit 2
fi
is_mounted $snapshot_mount_target && unmount_snapshot $snapshot_mount_target
volume=$(find_volume $target $volume_group)
if [[ -z $volume ]]; then
echo "ERROR: failed to find volume ${volume_group}/${target}"
exit 1
fi
make_snapshot $volume $snapshot_name $SNAPSHOT_SIZE
ret=$?
if [[ $ret != 0 ]]; then
echo "ERROR: failed to create snapshot ${snapshot_name} for volume ${volume}"
exit $ret
fi
mount_snapshot $snapshot_mount_device $snapshot_mount_target $SNAPSHOT_MOUNT_OPTIONS
;;
'unmount')
is_mounted $snapshot_mount_target && unmount_snapshot $snapshot_mount_target
snapshot=$(find_volume $snapshot_name $volume_group)
test -z $snapshot || remove_snapshot $snapshot || exit 0
;;
*)
echo "WARN: unknown mode"
usage 3
;;
esac
|
stevie-/helper-scripts
|
lvm_mount_snapshots.sh
|
Shell
|
gpl-2.0
| 4,829 |
#!/bin/bash
set -e
#set -x
. 00-configs
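# Random password for the keystone database user; consumed by setupsql-keystone
# and by the connection string written in install(), then unset right after.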
KEYSTONEPASS="`openssl rand -base64 16`"
setupsql() {
slapt-get -u
slapt-get -i mariadb
sed -i 's,SKIP="--skip-networking",# SKIP="--skip-networking",' /etc/rc.d/rc.mysqld
echo "[mysqld]
default-storage-engine = innodb
collation-server = utf8_general_ci
init-connect = 'SET NAMES utf8'
character-set-server = utf8
" > /etc/my.cnf.d/utf8.cnf
mysql_install_db --user=mysql
sh /etc/rc.d/rc.mysqld restart
sleep 5
}
setuprabbit() {
( cd /tmp && wget http://packages.nimblex.net/nimblex/rabbitmq-server-3.1.5-x86_64-1.txz && wget http://packages.nimblex.net/nimblex/erlang-otp-16B03-x86_64-1.txz )
installpkg /tmp/rabbitmq-server-*-x86_64-1.txz
installpkg /tmp/erlang-otp-*-x86_64-1.txz
useradd -d /var/lib/rabbitmq/ rabbitmq
chown rabbitmq /var/{lib,log}/rabbitmq/
sed -i 's/127.0.0.1/0.0.0.0/' /etc/rabbitmq/rabbitmq-env.conf
sed -i 's/example/openstack/' /etc/rabbitmq/rabbitmq-env.conf
chmod +x /etc/rc.d/rc.rabbitmq
/etc/rc.d/rc.rabbitmq start
}
setupsql-keystone() {
mysql -e "CREATE DATABASE keystone; GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' IDENTIFIED BY '$KEYSTONEPASS';"
}
install() {
easy_install pip pbr MySQL-python
cd keystone
useradd -s /bin/false -d /var/lib/keystone -m keystone
python setup.py install
mkdir -p /etc/keystone/ssl/{private,certs} /var/log/openstack
touch /var/log/openstack/keystone.log
cp ../openstack-files/openssl.conf /etc/keystone/ssl/certs/
chown -R keystone /etc/keystone/ /var/log/openstack/keystone.log
CONF="/etc/keystone/keystone.conf"
cp etc/keystone.conf.sample $CONF
cp etc/keystone-paste.ini etc/policy.json /etc/keystone/
sed -i "s,#connection = <None>,connection = mysql://keystone:[email protected]/keystone," $CONF
unset KEYSTONEPASS
keystone-manage db_sync
export OS_SERVICE_TOKEN=`openssl rand -hex 10`
sed -i "s,#admin_token = ADMIN,admin_token = $OS_SERVICE_TOKEN," $CONF
sed -i "s,#log_dir = <None>,log_dir = /var/log/openstack," $CONF
sed -i "s,#log_file = <None>,log_file = keystone.log," $CONF
su -s /bin/sh -c 'exec keystone-manage pki_setup' keystone
keystone-all &
sleep 3
}
configure() {
# --- Define users, tenants, and roles ---
# http://docs.openstack.org/icehouse/install-guide/install/apt/content/keystone-users.html
echo "Executing the configure procedure"
export OS_SERVICE_ENDPOINT=http://$CONTROLLER_IP:35357/v2.0
export OS_SERVICE_TOKEN=`awk -F '=' '/admin_token=/ {print $2}' /etc/keystone/keystone.conf`
keystone user-create --name=admin --pass=$ADMINPASS [email protected]
keystone role-create --name=admin
keystone tenant-create --name=admin --description="Admin Tenant"
sleep 3
keystone user-role-add --user=admin --tenant=admin --role=admin
keystone user-role-add --user=admin --role=_member_ --tenant=admin
keystone user-create --name=demo --pass=demo
keystone tenant-create --name=demo --description="Demo Tenant"
keystone user-role-add --user=demo --role=_member_ --tenant=demo
keystone tenant-create --name=service --description="Service Tenant"
keystone service-create --name=keystone --type=identity --description="OpenStack Identity"
keystone endpoint-create --service-id=$(keystone service-list | awk '/ identity / {print $2}') --publicurl=http://$CONTROLLER_IP:5000/v2.0 --internalurl=http://$CONTROLLER_IP:5000/v2.0 --adminurl=http://$CONTROLLER_IP:35357/v2.0
}
validate() {
keystone --os-username=admin --os-password=$ADMINPASS --os-auth-url=http://$CONTROLLER_IP:35357/v2.0 token-get
}
clean() {
mysql -e "DROP DATABASE keystone;"
rm -r /etc/keystone/
userdel -r keystone
}
if [[ ! -d /var/lib/mysql/mysql/ ]]; then
setupsql
fi
if [[ ! -f /usr/bin/rabbitmq-server ]]; then
setuprabbit
fi
if [[ ! -d /var/lib/mysql/keystone/ ]]; then
setupsql-keystone
fi
if [[ ! -f /etc/keystone/keystone.conf ]]; then
install
fi
# This should make it sufficient to run the script again after installing to see if all went fine.
if ! validate; then
configure
fi
echo -e "\n === The password for admin user is $ADMINPASS === \n"
|
bogdanr/openstack-build
|
01-keystone.sh
|
Shell
|
gpl-2.0
| 4,154 |
#!/bin/bash
# Make sure warpspeed environment vars are available before proceeding.
if [ -z "$WARPSPEED_ROOT" ] || [ -z "$WARPSPEED_USER" ]; then
echo "Error: It appears that this server was not provisioned with Warpspeed."
echo "WARPSPEED_ROOT and WARPSPEED_USER env vars were not found."
exit 1
fi
# Import the warpspeed functions.
source $WARPSPEED_ROOT/includes/installer-functions.sh
# Require that the root user be executing this script.
ws_require_root
ws_log_header "Installing nginx."
apt-get install -y nginx
# Add phusion APT repository.
apt-get install -y dirmngr gnupg
apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 561F9B9CAC40B2F7
apt-get install -y apt-transport-https ca-certificates
echo 'deb https://oss-binaries.phusionpassenger.com/apt/passenger focal main' >> /etc/apt/sources.list.d/passenger.list
apt-get update
# Install nginx and passenger.
apt-get install -y libnginx-mod-http-passenger
# Ensure the config files are in-place
if [ ! -f /etc/nginx/modules-enabled/50-mod-http-passenger.conf ]; then
sudo ln -s /usr/share/nginx/modules-available/mod-http-passenger.load /etc/nginx/modules-enabled/50-mod-http-passenger.conf
fi
# Disable the default site and back up the config.
rm -f /etc/nginx/sites-enabled/default
mv /etc/nginx/sites-available/default /etc/nginx/sites-available/default.sample
# Copy the warpspeed default site config and enable it.
cp $WARPSPEED_ROOT/templates/nginx/default /etc/nginx/sites-available/default
ln -fs /etc/nginx/sites-available/default /etc/nginx/sites-enabled/default
# Create a location for site specific log files.
mkdir -p /var/log/nginx
# Backup original nginx config and use template version.
mv -f /etc/nginx/nginx.conf /etc/nginx/nginx.conf.orig
cp $WARPSPEED_ROOT/templates/nginx/nginx.conf /etc/nginx/nginx.conf
sed -i "s/{{user}}/$WARPSPEED_USER/g" /etc/nginx/nginx.conf
service nginx restart
|
warpspeed/warpspeed
|
installers/nginx.sh
|
Shell
|
gpl-2.0
| 1,914 |
#!/bin/bash
. `dirname $0`/functions.sh
rm -f shuffle5 shuffle5lib*.so shuffle5.log shuffle5.lds
rm -f prelink.cache
$CC -shared -O2 -fpic -o shuffle5lib1.so $srcdir/reloc1lib1.c
$CC -shared -O2 -fpic -o shuffle5lib2.so $srcdir/reloc1lib2.c shuffle5lib1.so
BINS="shuffle5"
LIBS="shuffle5lib1.so shuffle5lib2.so"
$CCLINK -o shuffle5 $srcdir/reloc1.c -Wl,--rpath-link,. shuffle5lib2.so \
-Wl,--verbose 2>&1 | sed -e '/^=========/,/^=========/!d;/^=========/d' \
-e 's/0x08048000/0x08000000/;s/SIZEOF_HEADERS.*$/& . += 180;/' > shuffle5.lds
$CCLINK -o shuffle5 $srcdir/reloc1.c -Wl,--rpath-link,. shuffle5lib2.so \
-Wl,-T,shuffle5.lds
savelibs
echo $PRELINK ${PRELINK_OPTS--vm} ./shuffle5 > shuffle5.log
$PRELINK ${PRELINK_OPTS--vm} ./shuffle5 >> shuffle5.log 2>&1 || exit 1
grep -q ^`echo $PRELINK | sed 's/ .*$/: /'` shuffle5.log && exit 2
LD_LIBRARY_PATH=. ./shuffle5 || exit 3
readelf -a ./shuffle5 >> shuffle5.log 2>&1 || exit 4
# So that it is not prelinked again
chmod -x ./shuffle5
comparelibs >> shuffle5.log 2>&1 || exit 5
|
ystk/debian-prelink
|
testsuite/shuffle5.sh
|
Shell
|
gpl-2.0
| 1,036 |
#!/bin/bash
#
# Start the simulation
#
# Be sure to create a Makefile first and compile all Verilog and VHDL files.
#
vsim -t ps -voptargs=+acc wrapreconfmodule_cfg -do "do wave-<app>.do ; do config.do ; run -all"
|
hansiglaser/chll
|
tools/flowcmd/templates/app/sim-reconfmodule/sim.sh
|
Shell
|
gpl-2.0
| 215 |
# Script to restart all OpenStack Nova services
for i in nova-novncproxy nova-api nova-cert nova-conductor nova-consoleauth nova-scheduler nova-network nova-api-metadata nova-compute
do
sudo service "$i" restart
done
|
rm--/CustomizeSetup
|
additional/nrestart.sh
|
Shell
|
gpl-2.0
| 221 |
#! /bin/sh -e
# tup - A file-based build system
#
# Copyright (C) 2010-2020 Mike Shal <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# Try to trigger an issue I had with extra_inputs referencing something in
# vardb.
. ./tup.sh
cat > Tupfile.lua << HERE
CFLAGS_c = '-Dcfile'
CFLAGS_S = '-DSfile'
inputs = '*.c'
inputs += '*.S'
tup.foreach_rule(inputs, 'gcc -c %f \$(CFLAGS_%e) -o %o', '%B.o')
HERE
tup touch foo.c bar.S
parse
tup_object_exist . 'gcc -c foo.c -Dcfile -o foo.o'
tup_object_exist . 'gcc -c bar.S -DSfile -o bar.o'
eotup
|
ppannuto/tup
|
test/t2186-lua-foreach-ext.sh
|
Shell
|
gpl-2.0
| 1,154 |
#!/usr/bin/env bash
set -eo pipefail
# Check for release branch - not using grep, since with set -e a non-match would abort the script
RELEASE_BRANCH=$(echo "$TRAVIS_BRANCH" | sed -n 's/^release\-/&/p')
#Get the release type (dev/master) from the branch name
TYPE="$TRAVIS_BRANCH"
if ([ "$TRAVIS_PULL_REQUEST" == "false" ] && [ "${TRAVIS_REPO_SLUG}" == "ScottLogic/StockFlux" ] && ([ "$TYPE" == "dev" ] || [ "$TYPE" == "master" ] || [ -n "$RELEASE_BRANCH" ]))
then
#Clone the latest gh-pages
git clone https://github.com/ScottLogic/StockFlux.git --branch gh-pages gh-pages
#Get line with version from the file -> get the second word -> remove quotes around the value
VERSION=$(grep "version" package.json | awk -v N=$2 '{print $2}' | cut -d \" -f2)
echo "Type is: $TYPE"
echo "Version is: $VERSION"
if ([ $TYPE == "master" ] || [ $TYPE == "dev" ])
then
echo "Preparing to build version $TYPE"
grunt ci --build-target=$TYPE
rm -rf "./gh-pages/$TYPE"
cp -r "./public" "./gh-pages/$TYPE"
fi
if ([ $TYPE == "master" ] || [ -n "$RELEASE_BRANCH" ])
then
echo "On $TYPE - building versioned build"
if ([ -z "$VERSION" ])
then
echo "Unable to determine version from package.json."
exit 1
fi
if [ -n "$RELEASE_BRANCH" ]
then
#For release branches add rc postfix
VERSION="$VERSION-rc"
echo "Release branch - updating version to $VERSION"
fi
# Rebuild everything to do $VERSION
echo "Cleaning build. Targetting $VERSION"
grunt ci --build-target=$VERSION
rm -rf "./gh-pages/$VERSION"
cp -r "./public" "./gh-pages/$VERSION"
fi
cd gh-pages
#Removing git history
rm -rf .git
git init
# inside this git repo we'll pretend to be a new user
git config user.name "Travis CI"
git config user.email "[email protected]"
# The first and only commit to this new Git repo contains all the
# files present with the commit message "Deploy to GitHub Pages".
git add .
git commit -m "Deploy to GitHub Pages"
# Force push from the current repo's master branch to the remote
# repo's gh-pages branch. (All previous history on the gh-pages branch
# will be lost, since we are overwriting it.) We redirect any output to
# /dev/null to hide any sensitive credential data that might otherwise be exposed.
echo "Pushing to Github..."
git push --force --quiet "https://${GH_TOKEN}@${GH_REF}" master:gh-pages > /dev/null 2>&1
echo "Cleaning residual gh-pages folder"
rm -rf ./gh-pages
else
echo "Nothing needs deploying"
fi
|
mbssantos/StockFlux
|
deploy.sh
|
Shell
|
gpl-3.0
| 2,701 |
#!/bin/bash
svgexport ACI.svg ACI.png 100% 5000:
svgexport ASCI.svg ASCI.png 100% 5000:
svgexport CISD.svg CISD.png 100% 5000:
svgexport FCI.svg FCI.png 100% 5000:
svgexport HBCI.svg HBCI.png 100% 5000:
|
shivupa/pyci
|
plots/convert.sh
|
Shell
|
gpl-3.0
| 204 |
#!/bin/bash
# uncomment the next line for interactive checking of generated output
#PYTHON="ipython2 --pylab -i"
# non-interactive shell; check results afterwards
PYTHON="python2.7"
# Follow a gradient. Attraction depends on the distance from the attractor.
# The random excursions of the neurite become smaller as the neurite approaches
# the attractor
PYTHONPATH=gradient/:$PYTHONPATH
time python ../Admin.py 1 gradient/to_gradient.cfg
python ../scripts/generate_SWC_data.py gradient/to_gradient.cfg gradient/gradient.db
python ../scripts/helper_generate_movie.py gradient/to_gradient.cfg gradient/gradient.db
$PYTHON ../scripts/generate_wire_plot.py gradient/to_gradient.cfg gradient/gradient.db
$PYTHON ../scripts/generate_radii_plot.py gradient/to_gradient.cfg gradient/gradient.db
|
russellijarvis/neuromac
|
examples/run_gradient.sh
|
Shell
|
gpl-3.0
| 785 |
#!/bin/bash
version="$1"
mkdir -p "binaries/""$version"
name="binaries/""$version""/linx-client-v""$version""_"
GOOS=darwin GOARCH=amd64 go build -o "$name"osx-amd64
GOOS=darwin GOARCH=386 go build -o "$name"osx-386
GOOS=freebsd GOARCH=amd64 go build -o "$name"freebsd-amd64
GOOS=freebsd GOARCH=386 go build -o "$name"freebsd-386
GOOS=openbsd GOARCH=amd64 go build -o "$name"openbsd-amd64
GOOS=openbsd GOARCH=386 go build -o "$name"openbsd-386
GOOS=linux GOARCH=arm GOARM=7 go build -o "$name"linux-armv7
GOOS=linux GOARCH=arm64 go build -o "$name"linux-arm64
GOOS=linux GOARCH=amd64 go build -o "$name"linux-amd64
GOOS=linux GOARCH=386 go build -o "$name"linux-386
GOOS=windows GOARCH=amd64 go build -o "$name"windows-amd64.exe
GOOS=windows GOARCH=386 go build -o "$name"windows-386.exe
|
andreimarcu/linx-client
|
build.sh
|
Shell
|
gpl-3.0
| 797 |
# ubuntu-restricted-*
sudo apt install -y ubuntu-restricted-extras ubuntu-restricted-addons
exit $?
|
Yixf-Education/configure_Linux
|
01_essential/01_ubuntu-restricted.sh
|
Shell
|
gpl-3.0
| 101 |
#!/bin/bash
rm *.txt
echo "removing files *.txt"
for i in $(seq 3 1 12)
do
echo "start of simulation with ROV period $i"
ns $1 $i >> my_log.txt
grep -Po 'x = ([-0-9.\n]*[,]*)' $2 | grep -o '[-0-9.]*' >> x$i.txt
grep -Po 'y = ([-0-9.\n]*[,]*)' $2 | grep -o '[-0-9.]*' >> y$i.txt
grep -Po 'z = ([-0-9.\n]*[,]*)' $2 | grep -o '[-0-9.]*' >> z$i.txt
grep -io 'applicationROV Throughput[ ]* : [0-9.]*' my_log.txt | grep -io [0-9.]* >> data$i.txt
grep -io 'applicationROV PER[ ]* : [-0-9.]*' my_log.txt | grep -io [0-9.]* >> data$i.txt
grep -io 'applicationCTR Throughput[ ]* : [0-9.]*' my_log.txt | grep -io [0-9.]* >> data$i.txt
grep -io 'applicationCTR PER[ ]* : [-0-9.]*' my_log.txt | grep -io [0-9.]* >> data$i.txt
grep -io 'Sent Packets CTR --> ROV[ ]*: [0-9]*' my_log.txt | grep -io [0-9]* >> data$i.txt
grep -io 'Sent Packets ROV --> CTR[ ]*: [0-9]*' my_log.txt | grep -io [0-9]* >> data$i.txt
grep -io 'Received Packets ROV --> CTR[ ]*: [0-9]*' my_log.txt | grep -io [0-9]* >> data$i.txt
grep -io 'Received Packets CTR --> ROV[ ]*: [0-9]*' my_log.txt | grep -io [0-9]* >> data$i.txt
grep -io 'ROV period[ ]*: [0-9]*' my_log.txt | grep -io [0-9]* >> data$i.txt
grep -i 'ROV packet delivery delay' my_log.txt | grep -io [0-9.]* >> data$i.txt
grep -i 'ROV std packet delivery delay' my_log.txt | grep -io [0-9.]* >> data$i.txt
grep -i 'CTR packet delivery delay' my_log.txt | grep -io [0-9.]* >> data$i.txt
grep -i 'CTR std packet delivery delay' my_log.txt | grep -io [0-9.]* >> data$i.txt
grep -i 'adaptive' my_log.txt | grep -io [0-9] >> data$i.txt
grep -i 'constant' my_log.txt | grep -io [0-9] >> data$i.txt
grep -i 'opt(send_ack_immediately)' my_log.txt | grep -io [0-9] >> data$i.txt
grep -i 'slot duration[ ]*: [0-9.]*' my_log.txt | grep -io [0-9.]* >> data$i.txt
grep -i 'number of nodes[ ]*: [0-9.]*' my_log.txt | grep -io [0-9.]* >> data$i.txt
grep -i 'CTR round trip time[ ]*: [0-9.]*' my_log.txt | grep -io [0-9.]* >> data$i.txt
cp ./x$i.txt /media/sf_VirtualMachine/Project_Simulation/matlab
cp ./y$i.txt /media/sf_VirtualMachine/Project_Simulation/matlab
cp ./z$i.txt /media/sf_VirtualMachine/Project_Simulation/matlab
cp ./data$i.txt /media/sf_VirtualMachine/Project_Simulation/matlab
rm my_log.txt
echo "end of simulation with ROV period $i"
done
|
AlbertoSig/Project_Simulation
|
dataExtraction_rtt.sh
|
Shell
|
gpl-3.0
| 2,303 |
#!/bin/bash
#
# Creates the jsdoc documentation
#
# Usage:
# jsdoc.sh [outputdir]
#
OUTDIR=$1
: ${OUTDIR:="out"}
# the path where jsdoc is installed
# If not installed, clone it from github:
# $ git clone https://github.com/jsdoc3/jsdoc
JSDOC_HOME=../jsdoc
# the path to the jsdoc templates and project specific js modules
TEMPLATE_PATH=$(pwd)/doc/templates
# add the template path to the node path if not yet present
if [ "$(echo $NODE_PATH | tr ":" "\n" | grep $TEMPLATE_PATH)" == "" ] ; then
export NODE_PATH=$NODE_PATH:$TEMPLATE_PATH
fi
if [ ! -f $JSDOC_HOME/jsdoc.js ] ; then
echo "error: $JSDOC_HOME/jsdoc.js not found"
echo "Make sure jsdoc is installed and configure JSDOC_HOME in this script."
echo "Clone jsdoc from github to install it."
echo "==> $ git clone https://github.com/jsdoc3/jsdoc"
exit 1
fi
echo "Generating documentation to '$OUTDIR' ..."
node $JSDOC_HOME/jsdoc.js -c jsdoc.conf -t doc/templates -d $OUTDIR
|
Gubaer/josm-scripting-plugin
|
jsdoc.sh
|
Shell
|
gpl-3.0
| 953 |
#!/bin/bash
# Description: It reads email messages that contain the names
#
# Author: Bernardo Gomez.
# Creation date: March 2000
#
#modified to run with Alma to notify library staff that a file is available for pickup 04-01-2016-agc
#check to see if the record is marc8
is_marc8 () {
unset tmarc8;
tmarc8=$(egrep '[0-9][0-9][0-9][0-9][0-9][acdnp][acdefgijkmoprt][abcdims][ a] ' $1) ;
if [ "$tmarc8" ]; then echo -n "true"; fi
}
#check to see if the record is utf8
is_utf8 () {
unset tmarc8;
tutf8=$(egrep '[0-9][0-9][0-9][0-9][0-9][acdnp][acdefgijkmoprt][abcdims][ a]a' $1) ;
if [ "$tutf8" ]; then echo -n "true"; fi
}
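# (Leader position 09 holds the character coding scheme: blank = MARC-8,
# "a" = UCS/Unicode - that is the byte the two egrep patterns above test.)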
function parse_outcome_data {
if [ $status -eq 0 ]
then
if [ "$(is_marc8 $archivedir/$data_file)" ]; then
/usr/bin/yaz-marcdump -f MARC-8 -t UTF-8 -i marc -o marc -l 9=97 $archivedir/$data_file > $datadir/${file_prefix}_$target_file 2> $errors ;
#echo "$? error code for yaz $errors ";
elif [ "$(is_utf8 $archivedir/$data_file)" ]; then
cp $archivedir/$data_file $datadir/${file_prefix}_$target_file 2> $errors ;
# echo "$? error code for cp $errors ";
else
#send email because $archivedir/$data_file is neither marc8 nor utf8
#echo "unkown text encoding";
mail -s "$0 on turing. GOBI Marc Record with unkown text encoding $archivedir/$data_file" $mail_list <<EOM
file $archivedir/$data_file has bad text encoding on $(date +"%Y/%m/%d")
EOM
false;
fi
if [ $? -eq 0 ]; then
work_file_agc="/tmp/ybp_sr_univ_work_${today}"
mail_body="/tmp/univ_gobi_mail_body"
mail_body_02="/tmp/univ_gobi_mail_body_02"
work_file_02_agc="/tmp/ybp_sr_univ_work_02.txt"
work_file_03_agc="/tmp/ybp_unique_sr_univ.txt"
duplicates="/tmp/ybp_sr_univ_duplicates.txt"
duplicates_marc="${archivedir}/ybp_sr_univ_duplicates_${today}.mrc"
unique="${archivedir}/promptcatF_univ_${today}.mrc"
cat $datadir/${file_prefix}_$target_file | marc_to_txt > ${work_file_02_agc}
cat ${work_file_02_agc} | check_oclcno_via_sru.py > ${work_file_03_agc}
cat ${work_file_03_agc} | txt_to_marc > ${unique}
cat ${unique} | marc_to_txt -e 020,022,245 | sed 's/^020| |/ISBN\: /' | sed 's/^245|.*|/Title: /' | sed 's/\\pa//' | sed 's/\\pb/ /' |sed 's/\\ph/ /' | sed 's/\\pc//' > $mail_body
cat ${duplicates} | txt_to_marc > ${duplicates_marc}
cat ${duplicates_marc} | marc_to_txt -e 020,022,245 | sed 's/^020| |/ISBN\: /' | sed 's/^245|.*|/Title: /' | sed 's/\\pa//' | sed 's/\\pb/ /' |sed 's/\\ph/ /' | sed 's/\\pc//' > ${mail_body_02}
cat ${unique} | marc_to_txt | /alma/bin/promptcat.py --promptcat PROMPTCATF --library EMU > $work_file_agc
cat $work_file_agc | /alma/bin/marc_splitter.py --directory /tmp --prefix ${file_prefix}_${today}_$$ --size 50
if [ -s ${duplicates_marc} ]; then
mutt -s "shelf-ready ${duplicates_marc}" -a ${duplicates_marc} -- ${mail_list} <<EOM
marc file of duplicates is attached.
With these titles:
$(cat ${mail_body_02})
EOM
else
mutt -s "shelf-ready without duplicates" -- ${mail_list} <<EOM
No duplicates were found in today's files.
EOM
fi
for file in /tmp/$file_prefix*.txt; do
nfile=$(basename ${file} .txt)
cat $file | txt_to_marc > ${dataout}/${nfile}.mrc
mutt -s"shelf-ready ${nfile}.mrc" -a ${dataout}/${nfile}.mrc -- $katie_email <<EOM
marc file is attached.
EOM
rm $file
done
if [ -s ${work_file_agc} ]; then
mail -s "$0 on turing. GOBI $data_file received OK" $mail_list <<EOM
$datadir/${file_prefix}_$target_file has been created on $(date +"%Y/%m/%d")
With these titles:
$(cat ${mail_body})
EOM
fi
touch $tracking
else
mail -s "$0 on turing. Could not deposit GOBI file" $mail_list <<EOM
$datadir/${file_prefix}_$target_file was not created on $(date +"%Y/%m/%d")
EOM
outcome=$?
chmod 664 $datadir/${file_prefix}_$target_file
rm -f $errors
fi
elif [ $status -eq 101 ]
then
mail -s "$0 on turing. GOBI ftp failed - 101" $mail_list <<EOM
File: $data_file . Reason: ftp server didn't respond.
Action recommended: NONE. Will try again later.
EOM
elif [ $status -eq 102 ]
then
mail -s "$0 on turing. GOBI ftp failed - 102" $mail_list <<EOM
File: $data_file. Reason: GOBI didn't accept login name.
Action recommended: Revise gobi.exp
EOM
elif [ $status -eq 103 ]
then
mail -s "$0 on turing. GOBI ftp failed - 103" $mail_list <<EOM
File: $data_file . Reason: GOBI didn't accept password.
Action recommended: Revise gobi.exp
EOM
elif [ $status -eq 104 ]
then
mail -s "$0 on turing. GOBI ftp failed - 104" $mail_list <<EOM
File: $data_file . Reason: Couldn't change to "orders" directory.
Action recommended: Revise gobi.exp
EOM
elif [ $status -eq 105 ]
then
mail -s "$0 on turing. GOBI ftp failed - 105" $mail_list <<EOM
File: $data_file . Reason: Problem with bin command.
Action recommended: Revise gobi.exp
EOM
elif [ $status -eq 106 ]; then
operation=NONE #no file this time.
elif [ $status -eq 107 ]
then
mail -s "$0 on turing. GOBI ftp failed - 107" $mail_list <<EOM
File: $data_file . Reason: File didn't arrive.
Action recommended: NONE. Will try again later.
EOM
elif [ $status -eq 108 ]
then
mail -s "$0 on turing. GOBI ftp failed - 108" $mail_list <<EOM
File: $count_file . Reason: directory or file must have wrong name.
Action recommended: Revise GOBI scripts.
EOM
else
mail -s "$0 on turing. GOBI ftp failed " $mail_list <<EOM
File: $data_file . Reason: Problems with expect script.
Action recommended: Review gobi.exp.
EOM
fi
}
config="/alma/config/"
. ${config}environ # dot in environ variables
# export all of the environ variables to my children
for env_var in $(cat ${config}environ | awk -F'=' '{print $1}')
do
export ${env_var}
done
###### begin global variables ######
exp_file=/alma/bin
expect_err=/tmp/genybpsr_exp1
loginID=[id]
pass=[password]
mode=bin
errors=/tmp/gengobi.mail.error$$
archivedir=/alma/integrations/vendors/ybp/univ/shelf_ready/archive
trackingdir=/alma/integrations/vendors/ybp/univ/shelf_ready/tracking
temp=/alma/integrations/vendors/ybp/univ/shelf_ready/work/
datadir="/alma/integrations/data/ybp/shelf_ready/in"
dataout="/alma/integrations/data/ybp/shelf_ready/out"
mail_list="[emails]"
katie_email="[emails]"
account_number="[account number]"
file_prefix="[file prefix]"
gobi_directory="[gobi directory]"
umask u+rw,g+rw,o+rw
today=$(date +%Y%m%d)
if [ $# -lt 1 ]; then
count=8
else
count=$1
fi
date_file=${temp}date_file_$$
while [ $count -gt 0 ]; do
my_date=$(date +"%y%m%d" -d "${count} days ago")
echo $my_date >> ${date_file}
count=$(expr $count - 1)
done
####### main loop with enchilada follows here #####
if [ -s "${date_file}" ]; then
cat "${date_file}" |\
while read today_date; do
tracking="$trackingdir/${file_prefix}_${account_number}${today_date}.cnt"
data_file=${account_number}${today_date}.mrc
target_file=${account_number}${today_date}.mrc
target_file=$(echo "${target_file}" | tr '[A-Z]' '[a-z]')
if [ ! -f $tracking ]; then
$exp_file/gobi.exp $data_file $archivedir $loginID $pass ${gobi_directory} 2>> $expect_err
status=$?
parse_outcome_data
fi
done
fi
exit 0
|
Emory-LCS/Alma-Public
|
PromptCatFirm/ybp_firm_receive.sh
|
Shell
|
gpl-3.0
| 7,942 |
#!/bin/sh
# Run this to generate all the initial makefiles, etc.
args=$@
srcdir=`dirname $0`
test -z "$srcdir" && srcdir=.
if [ ! -f $srcdir/configure.in ]; then
echo -n "**Error**: Directory '"$srcdir"' does not look like the top-level package directory"
exit 1
fi
autoconf --version > /dev/null 2>&1
if [ $? -ne 0 ]; then
echo
echo "**Error**: You need 'autoconf' version 2.5 or newer installed."
echo "Download the appropriate package for your distribution, or get the source tarball at ftp://ftp.gnu.org/pub/gnu/"
exit 1
fi
automake --version > /dev/null 2>&1
if [ $? -ne 0 ]; then
echo
echo "**Error**: You need 'automake' version 1.9 or newer installed."
echo "Download the appropriate package for your distribution, or get the source tarball at ftp://ftp.gnu.org/pub/gnu/"
exit 1
fi
aclocal --version > /dev/null 2>&1
if [ $? -ne 0 ]; then
echo
echo "**Error**: Missing 'aclocal'. You need 'automake' version 1.9 or newer installed."
echo "Download the appropriate package for your distribution, or get the source tarball at ftp://ftp.gnu.org/pub/gnu/"
exit 1
fi
libtoolize --version > /dev/null 2>&1
if [ $? -ne 0 ]; then
echo
echo "**Error**: Missing 'libtoolize'. You need 'libtoolize' version 1.5.2 or newer installed."
echo "Download the appropriate package for your distribution, or get the source tarball at ftp://ftp.gnu.org/pub/gnu/"
exit 1
fi
echo "Creating aclocal.m4..."
test -r aclocal.m4 || touch aclocal.m4
echo "Making aclocal.m4 writable..."
test -r aclocal.m4 && chmod u+w aclocal.m4
echo "Running aclocal..."
aclocal -I m4 --force
if [ $? -ne 0 ]; then
echo
echo "Error while running aclocal"
exit 1
fi
echo "Running autoheader..."
autoheader --force
if [ $? -ne 0 ]; then
echo
echo "Error while running autoheader"
exit 1
fi
echo "Running autoconf..."
autoconf --force
if [ $? -ne 0 ]; then
echo
echo "Error while running autoconf"
exit 1
fi
echo "Running libtoolize..."
libtoolize --force
if [ $? -ne 0 ]; then
echo
echo "Error while running libtoolize"
exit 1
fi
echo "Running automake..."
automake --gnu --add-missing --copy --force-missing
if [ $? -ne 0 ]; then
echo
echo "Error while running automake"
exit 1
fi
echo Running $srcdir/configure $args ...
$srcdir/configure $args
if [ $? -ne 0 ]; then
echo
echo "Error while running configure"
exit 1
fi
echo "Now type 'make' to compile."
|
brestows/kdeNeur
|
autogen.sh
|
Shell
|
gpl-3.0
| 2,376 |
#!/bin/bash
for DRAFT in draft-kundrat-imap-submit draft-kundrat-incthread; do
~/.local/bin/xml2rfc "${DRAFT}.xml" --html --text \
&& ./convert-txt-rfc-to-pdf.sh "${DRAFT}"
done
|
bobo1993324/qmlTrojita
|
src/trojita/docs/proposed-extensions/build.sh
|
Shell
|
gpl-3.0
| 191 |
#!/bin/sh
go test -v ./...
#go test -v ./... -cover
#go test ./...
|
daviddavis/speedtest
|
test.sh
|
Shell
|
gpl-3.0
| 66 |
current_branch=`git rev-parse --abbrev-ref HEAD`
if [ "$current_branch" == "chaudhary" ]; then
#echo -e 'Checkout to master'
echo #Newline
git checkout master
echo "Merge chaudhary in ^"
git merge chaudhary
echo #Newline
git checkout tanjot
echo "Merge master in ^"
git merge master
echo -e '\nFinal checkout to chaudhary'
git checkout chaudhary
echo -e '\nPushing all to origin'
git push origin --all --verbose
else
echo "Not in branch chaudhary"
fi
|
carts-uiet/cartsbusboarding
|
merge-all.sh
|
Shell
|
gpl-3.0
| 520 |
#!/bin/bash
set -e
dockerImage="bcosorg/bcos:latest"
genConfig()
{
idx=0
miner_path=$PWD/node-0
genesis="false"
minerInfo=""
systemproxyaddress=0x0
if [ $# = 0 ];then
genesis="true"
elif [ $# = 2 ];then
idx=$1
miner_path=$2
if [ ! -f "${miner_path}/config.json" ];then
echo "${miner_path}/config.json doesn't exist."
return -1
fi
minerInfo=`cat ${miner_path}/config.json | tail -n +24|head -n 9`
systemproxyaddress=`cat ${miner_path}/config.json |grep systemproxyaddress| awk -F ':' '{print $2}'|sed 's/\([^0-9a-z]\)//g'`
else
return -1
fi
if [ ${idx} -gt 99 ]; then
echo "Node numbers > 99 isn't supported."
return -1
fi
output=$PWD/node-${idx}
while ([ ${idx} -gt 0 ] && [ -d ${output} ]); do
idx=`expr ${idx} + 1`
output=$PWD/node-${idx}
done
if [ ! -d ${output} ]; then
mkdir ${output}
fi
port=${idx}
if [ ${idx} -lt 10 ]; then
port=`printf "%02d" ${idx}`
fi
echo "------generate node-${idx}------"
if ! id -nG $(whoami)|grep -qw "docker"; then SUDO='sudo'; else SUDO=''; fi
$SUDO docker run -it --rm -v $output:/nodedata ${dockerImage} bcoseth --gennetworkrlp /nodedata/network.rlp
sudo chown -R ${USER} node-${idx}
nodeId=`sudo cat $output/network.rlp.pub`
echo "{
\"sealEngine\": \"PBFT\",
\"systemproxyaddress\":\"${systemproxyaddress}\",
\"listenip\":\"0.0.0.0\",
\"rpcport\":\"355${port}\",
\"p2pport\":\"533${port}\",
\"wallet\":\"/nodedata/keys.info\",
\"keystoredir\":\"/nodedata/keystore/\",
\"datadir\":\"/nodedata/\",
\"vm\":\"interpreter\",
\"networkid\":\"123456\",
\"logverbosity\":\"4\",
\"coverlog\":\"OFF\",
\"eventlog\":\"ON\",
\"logconf\":\"/nodedata/log.conf\",
\"params\": {
\"accountStartNonce\": \"0x0\",
\"maximumExtraDataSize\": \"0x0\",
\"tieBreakingGas\": false,
\"blockReward\": \"0x0\",
\"networkID\" : \"0x0\"
},
\"NodeextraInfo\":[" >$output/config.json
if [ ${genesis} == "false" ];then
cp ${miner_path}/genesis.json node-${idx}
echo " ${minerInfo}," >>$output/config.json
fi
echo " {
\"Nodeid\":\"$nodeId\",
\"Nodedesc\": \"node${idx}\",
\"Agencyinfo\": \"node${idx}\",
\"Peerip\": \"172.17.0.1\",
\"Identitytype\": 1,
\"Port\":533${port},
\"Idx\":${idx}
}
]
}" >>$output/config.json
echo "* GLOBAL:
ENABLED = true
TO_FILE = true
TO_STANDARD_OUTPUT = false
FORMAT = \"%level|%datetime{%Y-%M-%d %H:%m:%s}|%msg\"
FILENAME = \"/nodedata/logs/log_%datetime{%Y%M%d%H}.log\"
MILLISECONDS_WIDTH = 3
PERFORMANCE_TRACKING = false
MAX_LOG_FILE_SIZE = 209715200 ## 200MB - Comment starts with two hashes (##)
LOG_FLUSH_THRESHOLD = 100 ## Flush after every 100 logs
* TRACE:
ENABLED = false
* DEBUG:
ENABLED = false
* FATAL:
ENABLED = false
* ERROR:
FILENAME = \"/nodedata/logs/error_log_%datetime{%Y%M%d%H}.log\"
* WARNING:
ENABLED = false
* INFO:
FILENAME = \"/nodedata/logs/info_log_%datetime{%Y%M%d%H}.log\"
* VERBOSE:
ENABLED = false" > $output/log.conf
echo "{
\"id\":\"$nodeId\",
\"ip\":\"172.17.0.1\",
\"port\":533${port},
\"category\":1,
\"desc\":\"node${idx}\",
\"CAhash\":\"\",
\"agencyinfo\":\"node${idx}\",
\"idx\":${idx}
}" > $output/node.json
if [ ${genesis} == "true" ];then
echo "{
\"nonce\": \"0x0\",
\"difficulty\": \"0x0\",
\"mixhash\": \"0x0\",
\"coinbase\": \"0x0\",
\"timestamp\": \"0x0\",
\"parentHash\": \"0x0\",
\"extraData\": \"0x0\",
\"gasLimit\": \"0x13880000000000\",
\"god\":\"0x4d23de3297034cdd4a58db35f659a9b61fc7577b\",
\"alloc\": {},
\"initMinerNodes\":[\"$nodeId\"]
}" > $output/genesis.json
fi
}
configPath=$PWD
numbers=1
minerPath=$PWD/node-0
if [ $# = 0 ];then
echo "Generate config file to ./node-0"
genConfig
elif [ $# = 2 ];then
numbers=$1
minerPath=$2
for id in $( seq 1 ${numbers} )
do
genConfig ${id} ${minerPath}
done
else
echo "Usage: $0 [numbers of node config files] [node-0 path]"
exit -1
fi
echo "Config files in $configPath"
|
bcosorg/bcos
|
docker/scripts/genConfig.sh
|
Shell
|
gpl-3.0
| 4,551 |
#!/usr/bin/env bash
set -e
set -x
# This script assumes the ASTE binaries and python scripts are in $PATH
# Furthermore, execute in ./contrib or copy precice.xml from there to $PWD
# Download the red blood cell
test -f rbc.vtk || wget "https://people.sc.fsu.edu/~jburkardt/data/vtk/rbc_001.vtk" -O rbc.vtk
# Download the bunny
test -f bunny.vtk || wget "https://www.ece.lsu.edu/xinli/Meshing/Data/bunny.vtk" -O bunny.vtk
# Evaluate the function x+y on the bunny, write to colored.vtk
eval_mesh.py bunny.vtk -o colored.vtk "x + y"
# Decompose both meshes to two processors
partition_mesh.py colored.vtk -n 2
partition_mesh.py rbc.vtk -n 2
rm -rf colored.dt0 && mv colored colored.dt0
rm -rf rbc.dt0 && mv rbc rbc.dt0
# The result directory of preciceMap needs to exist beforehand
mkdir -p mapped
# Map from the bunny to the red blood cell (yeah, that doesn't really make sense)
mpirun -n 2 preciceMap -v -p A --mesh colored &
mpirun -n 2 preciceMap -v -p B --mesh rbc --output mapped
# Join the output files together to result.vtk,
# recovering the connectivity from the rbc mesh
# and using all 2 partitions of each mesh.
join_mesh.py -o result.vtk -r rbc.dt0 -n 2 mapped
# Measure the difference between the original function and the mapped values
eval_mesh.py result.vtk -d "x + y"
|
precice/aste
|
contrib/demo.sh
|
Shell
|
gpl-3.0
| 1,295 |
#!/usr/bin/env bash
clear
echo ""
echo "Loading $1 project file..."
echo ""
while IFS='' read -r LINE || [[ -n $LINE ]];
do
echo $LINE
arrLINE=(${LINE//" "/ })
key=${arrLINE[0]}
value=${arrLINE[1]}
case $key in
-pr)
PRWD=$value
;;
-i)
INDEX=$value
;;
-d)
READS=$value
;;
-g)
GENOMES=$value
;;
-t)
TRANSCRIPTOMES=$value
;;
-r)
REFGENOME=$value
;;
-mo)
MAPOUT=$value
;;
-ao)
ASSEMOUT=$value
;;
-c)
COMMANDS=$value
;;
-tc)
COMMAND_FILE=$value
;;
-mac)
MAPPING_ASSEMBLY_COMMANDS=$value
;;
-dec)
DIFFEXP_COMMANDS=$value
;;
-s)
SRAFILES=$value
;;
-tf)
TRANSCRIPTOME_FILE=$value
;;
-log)
LOCAL_LOG=$value
;;
-vc)
VCF_COMMANDS=$value
;;
-vo)
VCF_OUT=$value
;;
--cm-out)
CUFFMERGE_OUT=$value
;;
--cd-out)
CUFFDIFF_OUT=$value
;;
-ko)
KALL_OUT=$value
;;
-kc)
KALLISTO_CMD=$value
;;
*)
echo "Bad option"
;;
esac
shift
done < $1
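# A project file is plain text with one "key value" pair per line, e.g.
# (hypothetical values):
#   -pr /data/projects/rnaseq
#   -i index
#   -d reads
#   -r genome.fa
#   -c commands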
echo "Exporting variables..."
export PRWD=$PRWD; [ -d $PRWD ] || mkdir $PRWD
export INDEX=$INDEX; [ -d $PRWD/$INDEX ] || mkdir $PRWD/$INDEX
export READS=$READS; [ -d $PRWD/$READS ] || mkdir $PRWD/$READS
export GENOMES=$GENOMES; [ -d $PRWD/$GENOMES ] || mkdir $PRWD/$GENOMES
export TRANSCRIPTOMES=$TRANSCRIPTOMES; [ -d $PRWD/$TRANSCRIPTOMES ] || mkdir $PRWD/$TRANSCRIPTOMES
export REFGENOME=$REFGENOME
export COMMANDS=$COMMANDS; [ -d $PRWD/$COMMANDS ] || mkdir $PRWD/$COMMANDS
export SRAFILES=$SRAFILES; [ -f $PRWD/$READS/$SRAFILES ] || touch $PRWD/$READS/$SRAFILES
export COMMAND_FILE=$COMMAND_FILE; [ -f $PRWD/$COMMANDS/$COMMAND_FILE ] || touch $PRWD/$COMMANDS/$COMMAND_FILE
export MAPPING_ASSEMBLY_COMMANDS=$MAPPING_ASSEMBLY_COMMANDS; [ -f $PRWD/$COMMANDS/$MAPPING_ASSEMBLY_COMMANDS ] || touch $PRWD/$COMMANDS/$MAPPING_ASSEMBLY_COMMANDS
export DIFFEXP_COMMANDS=$DIFFEXP_COMMANDS; [ -f $PRWD/$COMMANDS/$DIFFEXP_COMMANDS ] || touch $PRWD/$COMMANDS/$DIFFEXP_COMMANDS
export TRANSCRIPTOME_FILE=$TRANSCRIPTOME_FILE; [ -f $PRWD/$TRANSCRIPTOMES/$TRANSCRIPTOME_FILE ] || touch $PRWD/$TRANSCRIPTOMES/$TRANSCRIPTOME_FILE
export LOCAL_LOG=$LOCAL_LOG; [ -d $PRWD/$LOCAL_LOG ] || mkdir $PRWD/$LOCAL_LOG
export VCF_COMMANDS=$VCF_COMMANDS; [ -f $PRWD/$COMMANDS/$VCF_COMMANDS ] || touch $PRWD/$COMMANDS/$VCF_COMMANDS
export VCF_OUT=$VCF_OUT; [ -d $PRWD/$VCF_OUT ] || mkdir $PRWD/$VCF_OUT
export MAPOUT=$MAPOUT; [ -d $PRWD/$MAPOUT ] || mkdir $PRWD/$MAPOUT
export ASSEMOUT=$ASSEMOUT; [ -d $PRWD/$ASSEMOUT ] || mkdir $PRWD/$ASSEMOUT
export CUFFMERGE_OUT=$CUFFMERGE_OUT; [ -d $PRWD/$CUFFMERGE_OUT ] || mkdir $PRWD/$CUFFMERGE_OUT
export CUFFDIFF_OUT=$CUFFDIFF_OUT; [ -d $PRWD/$CUFFDIFF_OUT ] || mkdir $PRWD/$CUFFDIFF_OUT
export KALL_OUT=$KALL_OUT; [ -d $PRWD/$KALL_OUT ] || mkdir $PRWD/$KALL_OUT
export KALLISTO_CMD=$KALLISTO_CMD
echo "Variables were exported..."
echo "Redirecting to $PRWD"
cd $PRWD
echo "Success! You have been redirected to $PRWD"
echo ""
#__EOF__//~~CAT
|
exseivier/scripts
|
ban-4.0/load.sh
|
Shell
|
gpl-3.0
| 2,914 |
#!/bin/bash
# This script is meant to be called by the "install" step defined in
# .travis.yml. See http://docs.travis-ci.com/ for more details.
# The behavior of the script is controlled by environment variables defined
# in the .travis.yml in the top level folder of the project.
set -e
# Check if we are running Python 2 or 3. This is needed for the apt-get package names
if [[ $TRAVIS_PYTHON_VERSION == '3.4' ]]; then
export PYTHON_SUFFIX="3"
fi
sudo add-apt-repository -y ppa:ubuntu-toolchain-r/test
sudo apt-get -qq update
sudo apt-get -qq install g++-4.8
sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-4.8 90
sudo update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-4.8 90
sudo apt-get install -qq python${PYTHON_SUFFIX}-pip python${PYTHON_SUFFIX}-numpy python${PYTHON_SUFFIX}-sphinx python${PYTHON_SUFFIX}-nose
# matplotlib and PyTables are not available for Python 3 as packages from the main repo yet.
if [[ $TRAVIS_PYTHON_VERSION == '2.7' ]]; then
time sudo apt-get install -qq python${PYTHON_SUFFIX}-matplotlib python${PYTHON_SUFFIX}-tables
fi
pip install uncertainties
# Install the ROOT binary
ROOT_BUILD=ROOT-${ROOT}_Python-${TRAVIS_PYTHON_VERSION}_GCC-4.8_x86_64
time wget --no-check-certificate https://copy.com/rtIyUdxgjt7h/ci/root_builds/${ROOT_BUILD}.tar.gz
time tar zxf ${ROOT_BUILD}.tar.gz
mv ${ROOT_BUILD} root
source root/bin/thisroot.sh
# Install the master branch of root_numpy
git clone https://github.com/rootpy/root_numpy.git && (cd root_numpy && python setup.py install --user)
|
mverzett/rootpy
|
ci/install.sh
|
Shell
|
gpl-3.0
| 1,549 |
#!/bin/bash
echo "$1" | grep -q "^[1-9]$" || { echo "Usage: `basename $0` <1|2|3|..|9>"; exit 0; }
[ "$2" != "" ] && DURATION_S=$2 || DURATION_S=200
OPT="-i 10 -N64 -B"
OPT="$OPT -u -R1.1M"
BS=32
BS=736
BS=1400
BS=1472
BS=1473
BS=2944
BS=2945
BS=31072 # ?!
BS=64768
BS=64769
BS=65507
BS=200
BS_DD=512k
DIRECT="oflag=direct"
CLIENT_IP=192.168.100.1
date
set -x
#taskset 0x4 nuttcp $OPT -T${DURATION_S} -P5${1}00 -p5${1}01 -r -fparse -l$BS $CLIENT_IP
taskset 0x4 nuttcp $OPT -T${DURATION_S} -P5${1}00 -p5${1}01 -r -fparse -s -l$BS $CLIENT_IP | dd of=d${1}.dat bs=$BS_DD $DIRECT
date
|
joymarquis/mscc
|
projects/sp/utils/perf/nuttcp/tsa_s.sh
|
Shell
|
gpl-3.0
| 589 |
# DEL key
bindkey "${terminfo[kdch1]}" delete-char
# Fish autosuggestions
bindkey '^P' up-line-or-beginning-search
bindkey '^N' down-line-or-beginning-search
# Vi Mode
export KEYTIMEOUT=1
bindkey -v
# Fix backspace after leaving vi mode
bindkey '^?' backward-delete-char
# Fix delete last word after leaving vi mode
fix() {
CURSOR=0
zle .vi-insert
CURSOR=$1
}
vi-insert() { fix $CURSOR }
vi-add-next() { fix $(($CURSOR + 1)) }
vi-add-eol() { fix $#BUFFER }
zle -N vi-insert
zle -N vi-add-next
zle -N vi-add-eol
# Allow replacing inside of parenthesis/quotes
autoload -U select-quoted select-bracketed surround
zle -N select-quoted
zle -N select-bracketed
zle -N delete-surround surround
zle -N change-surround surround
for m in visual viopp; do
for c in {a,i}{\',\",\`}; do
bindkey -M $m $c select-quoted
done
for c in {a,i}${(s..)^:-'()[]{}<>bB'}; do
bindkey -M $m $c select-bracketed
done
done
bindkey -a cs change-surround
bindkey -a ds delete-surround
|
UndeadLeech/dotfiles
|
files/zsh/keys.zsh
|
Shell
|
gpl-3.0
| 974 |
#!/bin/bash
echo "### test global config - must run alone so the global config does not confuse others"
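# The next line uses parallel itself (under sudo) to write the global config:
# the input line becomes the final argument, so the command expands to
# "mkdir -p /etc/parallel; echo --tollef > /etc/parallel/config".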
echo /etc/parallel/config | sudo parallel "mkdir -p /etc/parallel; echo --tollef > "
parallel -k echo -- 1 2 3 ::: a b c
parallel -k --gnu echo ::: 1 2 3 -- a b c
echo --gnu > ~/.parallel/config
parallel -k echo ::: 1 2 3 -- a b c
parallel -k --gnu echo ::: 1 2 3 -- a b c
sudo rm /etc/parallel/config
rm ~/.parallel/config
echo "<<< End test global config - must run alone so the global config does not confuse others"
|
thinrope/GNU_parallel
|
testsuite/tests-to-run/parallel-local152.sh
|
Shell
|
gpl-3.0
| 541 |
#!/bin/sh
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
cd $DIR
cd ../
echo "- Utility Function tests"
./esstoolkit/tests/runtest_macos.sh esstoolkit.tests.test_utility_functions
echo "- Gate Transformer tests"
./esstoolkit/tests/runtest_macos.sh esstoolkit.tests.test_gate_transformer
echo "- Network Segmenter tests"
./esstoolkit/tests/runtest_macos.sh esstoolkit.tests.test_segmenter
echo "- Road Centerline Cleaner tests"
./esstoolkit/tests/runtest_macos.sh esstoolkit.tests.test_rcl_cleaner
|
SpaceGroupUCL/qgisSpaceSyntaxToolkit
|
scripts/runTestsMacOS.sh
|
Shell
|
gpl-3.0
| 521 |
#!/usr/bin/env bash
dpkg-buildpackage -F -I*.userprefs -Iobj -I.git* -Ibin -Idebian -I*.sh -I*.png
|
ce3a/indicator-stocks
|
package-deb.sh
|
Shell
|
gpl-3.0
| 100 |
:
# ----------------------------------
# $APPASERVER_HOME/utility/du_gig.sh
# ----------------------------------
if [ "$#" -eq 1 ]
then
cd $1
fi
find . -xdev -type d -print 2>/dev/null |
xargs.e 'du -hxs "{}" 2>/dev/null' |
grep '^[1-9][0-9]*\.*[0-9]*G' |
sort -nr |
head -20
exit 0
|
timhriley/appaserver
|
utility/du_gig.sh
|
Shell
|
gpl-3.0
| 298 |
#!/bin/bash
#cmp_or_quit expected returned
function cmp_or_quit {
cmp $1 $2
if [[ "$?" == 0 ]]; then
rm $2
else
exit
fi
}
echo "test paired:"
python2.6 ../demultadapt.py -l 1.0 -f indi_A_1.fastq -F indi_A_2.fastq -p returned -v adaptateur.txt
echo "check:"
cmp_or_quit expected-indiv_1_1.fastq returned-indiv_1_1.fastq
cmp_or_quit expected-indiv_1_2.fastq returned-indiv_1_2.fastq
cmp_or_quit expected-indiv_2_1.fastq returned-indiv_2_1.fastq
cmp_or_quit expected-indiv_2_2.fastq returned-indiv_2_2.fastq
cmp_or_quit expected-rebut_1.fastq returned-rebut_1.fastq
cmp_or_quit expected-rebut_2.fastq returned-rebut_2.fastq
echo "TEST PASS"
echo "test single"
python2.6 ../demultadapt.py -l 1.0 -f indi_A_1.fastq -p returned -v adaptateur.txt
echo "check:"
cmp_or_quit expected-indiv_1.fastq returned-indiv_1.fastq
cmp_or_quit expected-indiv_2.fastq returned-indiv_2.fastq
cmp_or_quit expected-rebut.fastq returned-rebut.fastq
echo "TEST PASS"
|
SouthGreenPlatform/arcad-hts
|
tests/demultadapt/test.sh
|
Shell
|
gpl-3.0
| 992 |
#!/bin/bash
# List of langs supported by Paperwork. Langs are separated by spaces.
# For each language, the most common system locale and its short writing
# must be specified (separated by ':')
LANGS="de_DE.UTF-8:de
es_ES.UTF-8:es
fr_FR.UTF-8:fr
uk_UA.UTF-8:uk"
usage()
{
echo "usage:" >&2
echo " $0 (upd-po|gen-mo)" >&2
echo "" >&2
echo " upd-po: Will generate or update .po files" >&2
echo " gen-mo: Will use .po files to regenerate the .mo file" >&2
echo
echo "Usual steps to update translations are:"
echo "1) upd-po"
echo "2) Edit locale/<lang>.po (look for the \"fuzzy\" keyword and empty strings !)"
echo "3) gen-mo"
echo "4) commit"
exit 1
}
if [ -z "${BACKEND_DIRECTORY}" ] ; then
BACKEND_DIRECTORY=$(python3 -c "import paperwork_backend; print(paperwork_backend.__file__)")
BACKEND_DIRECTORY=$(dirname "${BACKEND_DIRECTORY}")
fi
if ! [ -d src ]
then
echo "$0: Must be run from the root of the paperwork source tree" >&2
exit 2
fi
if [ "$1" = "--help" ] || [ "$1" = "-h" ]
then
usage
exit 0
elif [ "$1" = "upd-po" ]
then
tmpdir=""
echo "[paperwork-backend] Will look for backend sources in ${BACKEND_DIRECTORY}"
echo "[paperwork-backend] Please set BACKEND_DIRECTORY if not correct"
if [[ $(dirname "${BACKEND_DIRECTORY}") == *egg ]]; then
echo "[paperwork-backend] Egg file detected. Extracting ..."
tmpdir="$(mktemp -d)"
unzip $(dirname "${BACKEND_DIRECTORY}") -d "${tmpdir}" > /dev/null
BACKEND_DIRECTORY="${tmpdir}/$(basename ${BACKEND_DIRECTORY})"
echo "[paperwork-backend] Extraction done."
fi
#if [ ! -e "${BACKEND_DIRECTORY}/__init__.py" ] ; then
# echo "[paperwork-backend] paperwork backend sources not found !"
# exit 1
#fi
mkdir -p locale
rm -f locale/messages.pot
for glade_file in \
$(find src/paperwork/frontend -name \*.glade) \
$(find src/paperwork/frontend -name \*.xml)
do
echo "${glade_file} --> .(glade|xml).h ..."
if ! intltool-extract --type=gettext/glade ${glade_file} > /dev/null; then
echo "intltool-extract Failed ! Unable to extract strings to translate from .glade files !"
exit 2
fi
done
echo "*.py + *.glade.h --> locale/messages.pot"
xgettext -k_ -kN_ -o locale/messages.pot \
$(find src/paperwork -name \*.py ! -path src/paperwork/frontend/labeleditor/__init__.py) \
$(find "${BACKEND_DIRECTORY}" -name \*.py) \
$(find src/paperwork/frontend -name \*.glade.h) \
$(find src/paperwork/frontend -name \*.xml.h) \
> /dev/null
if [ $? -ne 0 ]; then
echo "xgettext failed ! Unable to extract strings to translate !"
exit 3
fi
rm -f $(find src/paperwork/frontend -name \*.glade.h)
rm -f $(find src/paperwork/frontend -name \*.xml.h)
for lang in ${LANGS}
do
locale=$(echo $lang | cut -d: -f1)
po_file=locale/$(echo $lang | cut -d: -f2).po
if ! [ -f ${po_file} ]
then
echo "locale/messages.pot --> ${po_file} (gen)"
msginit --no-translator -l ${locale} -i locale/messages.pot -o ${po_file} > /dev/null
else
echo "locale/messages.pot --> ${po_file} (upd)"
msgmerge -U ${po_file} locale/messages.pot > /dev/null
fi
if [ $? -ne 0 ] ; then
echo "msginit / msgmerge failed ! Unable to create or update .po file !"
exit 4
fi
done
if [ -n "${tmpdir}" ]; then
echo "[paperwork-backend] Deleting temporary directory ..."
rm -rf "${tmpdir}"
fi
echo "Done"
exit 0
elif [ "$1" = "gen-mo" ]
then
for lang in ${LANGS}
do
long_locale=$(echo $lang | cut -d: -f1)
short_locale=$(echo $lang | cut -d: -f2)
po_file="locale/${short_locale}.po"
locale_dir=locale/${short_locale}/LC_MESSAGES
echo "${po_file} --> ${locale_dir}/paperwork.mo"
        rm -rf locale/${short_locale}
mkdir -p ${locale_dir}
if ! msgfmt ${po_file} -o ${locale_dir}/paperwork.mo ; then
echo "msgfmt failed ! Unable to update .mo file !"
exit 5
fi
done
echo "Done"
exit 0
else
usage
exit 1
fi
|
openpaperwork/paperwork
|
paperwork-gtk/localize.sh
|
Shell
|
gpl-3.0
| 3,847 |
#!/bin/sh
# manuell:
# for D in $(ls -l /var/www/$SUB | grep ^d | while read LINE; do set -- $LINE; echo $9; done); do du -sh "/var/www/$D" ; done
# SUB='networks'; for D in $(ls -l /var/www/$SUB | grep ^d | while read LINE; do set -- $LINE; echo $9; done); do du -sh "/var/www/$SUB/$D"; done
BASEDIR="${1:-/var/www/networks}"
OPTION="$2" # e.g. 'all' or 'whatever'
MAX_SIZE="5M" # find-syntax
[ -z "$OPTION" ] && {
echo "# omitting all *-vds-* files, call with '$0 \"\" all' to show everything"
echo "Usage: $0 <basedir> <option> <size>"
echo " e.g.: $0 /var/www/networks whatever 10M"
exit 1
}
for DIR in $( ls -1 "$BASEDIR" ); do {
find 2>/dev/null "$BASEDIR/$DIR" -type f -size +$MAX_SIZE |
while read -r LINE; do {
case "$LINE" in
*"-vds"*|*".ulog"*)
[ "$OPTION" = "all" ] && ls -lh "$LINE"
;;
*)
ls -lh "$LINE" # h = humanreadable filesize
;;
esac
} done
} done
list_networks()
{
find /var/www/networks/ -type d -name registrator | cut -d'/' -f5 | sort
}
show_megabytes_only()
{
while read -r LINE; do {
set -- $LINE
case "$1" in
*'M'|*'G')
echo $LINE
;;
esac
} done
}
echo "[START] vds"
for NETWORK in $( list_networks ); do {
DIR="/var/www/networks/$NETWORK/vds"
du -sh "$DIR" | show_megabytes_only
} done
echo "[READY] vds"
echo
echo "[START] size network"
for NETWORK in $( list_networks ); do {
DIR="/var/www/networks/$NETWORK"
du -sh "$DIR" | show_megabytes_only
} done
echo "[READY] size network"
echo
echo "[START] size media"
for NETWORK in $( list_networks ); do {
DIR="/var/www/networks/$NETWORK/media"
du -sh "$DIR" | show_megabytes_only
} done
echo "[READY] size media"
echo
echo "[START] size special" # ls -1 | grep -v '01_' | while read LINE; do rm $LINE; done
for DIR in /root/backup/ejbw/pbx foo; do {
[ -d "$DIR" ] && du -sh "$DIR" | show_megabytes_only
} done
echo "[READY] size special"
|
bittorf/kalua
|
openwrt-monitoring/show_bigfiles.sh
|
Shell
|
gpl-3.0
| 1,923 |
#!/bin/sh
# Use in mingw shell
# KEIL 5 ARM MDK must be installed at: PATH_KEILARM
# flash_write_loader D:/Projects/EdiabasLib/EdiabasLib/CanAdapterElm/Other/Bk3231Flash/write_flash.bin
arm_path=$(echo "/$PATH_KEILARM/ARMCC/bin/" | sed 's/\\/\//g' | sed 's/://')
export PATH=$arm_path:$PATH
make -k -B
|
uholeschak/ediabaslib
|
EdiabasLib/CanAdapterElm/Other/Bk3231Flash/bk3231_make.sh
|
Shell
|
gpl-3.0
| 303 |
# Common utility EC2 functions. Meant to be sourced by other scripts
# Depends on:
# - ec2-api-tools
# - awk
# execute OUTPUT_VAR "some command and its opts"
# the output will be flattened to one line
INSTANCE_POSSIBLE_STATUS="pending running shutting-down terminated stopping stopped"
DATE_REGEXP='^20[0-9][0-9]-[0-1][0-9]-[0-3][0-9]T'
function execute
{
echo "Executing: $2" >&2
export ${1}="$(eval $2)"
}
# print SOME_TEXT
function print
{
echo -en "\033[0;36m############################################################\n"
echo -en "##### $1""\033[0m \n"
}
# print_error SOME_TEXT
function print_error
{
echo -e "$1" >&2
}
# usage USAGE_DESCRIPTION
function usage
{
print_error "$1"
exit 1
}
# check_for_runtime_value VARIABLE_TO_CHECK
function check_for_runtime_value {
eval RUNTIME_VALUE="\$$1"
if [ -z "$RUNTIME_VALUE" ] ; then
echo -en "Couldn't find a proper value for \033[1;31m$1\033[0m, exiting now...\n" >&2
exit 1
else
echo -en "Found \033[1;33m$RUNTIME_VALUE\033[0m as the value for $1 \n"
fi
}
# search_by_regexp RESULT_VAR_NAME SOME_TEXT REGEXP_TO_SEARCH_IN_SOMETEXT
function search_by_regexp
{
local result=`echo "$2" | awk -v regexp="$3" '{ for (i=1; i<=NF; i++) if ($i ~ regexp) print $i }'`
export ${1}="$result"
}
# create_or_append_to_var VAR_NAME TEXT_TO_APPEND [ SEPARATOR=' ']
function create_or_append_to_var
{
SEPARATOR="$3"
if [ -z "$SEPARATOR" ] ; then SEPARATOR=' ' ; fi
CURRENT_VAR_VALUE=$(eval echo "\$$1")
if [ -z "$CURRENT_VAR_VALUE" ]; then
local result="$2"
else
local result="$CURRENT_VAR_VALUE""$SEPARATOR""$2"
fi
export ${1}="$result"
}
function missing_param {
echo "Missing mandatory parameter $1" >&2
usage "$USAGE_DESCRIPTION"
exit 1
}
function check_given_mandatory_params {
IFS=$' '
for PARAM in "$@" ; do
eval "PARAM_VALUE=\$$PARAM"
if [ -z "$PARAM_VALUE" ] ; then
missing_param "$PARAM"
fi
done
}
# var_not_empty_or_fail BUCKET_NAME "The bucket can't be empty!"
function var_not_empty_or_fail
{
eval local VAR_VALUE=\$$1
  if [ -z "$VAR_VALUE" ] ; then
print_error "$2"
usage "$USAGE_DESCRIPTION"
fi
}
function print_ec2_vars
{
for i in EC2_PRIVATE_KEY EC2_CERT EC2_URL ; do
VAR_VALUE="$(eval echo \$$i)"
echo "Using $i=$VAR_VALUE" >&2
done
}
EC2_PARAMS_DESC="[ -O aws_access_key ] [ -W aws_secret_key ] [ -U URL ]"
EC2_PARAMS_OPTS="O:W:U:"
# parse_common_ec2_params SOME_PARAM SOME_VALUE
function parse_common_ec2_param
{
case $1 in
O)
export AWS_ACCESS_KEY="$OPTARG"
;;
W)
export AWS_SECRET_KEY="$OPTARG"
;;
U)
export EC2_URL="$OPTARG"
;;
*)
return 1
;;
esac
}
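
# Illustrative usage (not part of the library; the values below are examples):
#   source aws-common.sh
#   execute INSTANCE_INFO "ec2-describe-instances i-12345678"
#   search_by_regexp LAUNCH_DATE "$INSTANCE_INFO" "$DATE_REGEXP"
#   create_or_append_to_var ALL_DATES "$LAUNCH_DATE" ","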
|
ali-shaikh1190/workarounds
|
aws-bash-tools/aws-common.sh
|
Shell
|
gpl-3.0
| 2,769 |
#!/bin/bash
#------------------------------------------------------------------------------------------------------
# SolarPi - Installation script
#------------------------------------------------------------------------------------------------------
# (C) 2017 Dominik Schäfer - [email protected]
# This file is executed after the successful installation of DietPi OS.
# Solarcoind is downloaded, compiled and configured as a service.
# You may select a custom RPC password here.
# If you don't, a strong random password is generated (recommended).
# As a normal user you won't need the RPC password anyway.
# This file will be deleted after the installation is completed.
# Nevertheless, it's never a good idea to store any password in plain text.
# It is more secure to edit ~/.solarcoin/solarcoin.conf after the installation.
rpcpassword=0
# NO CHANGES BELOW THIS LINE UNLESS YOU KNOW WHAT YOU'RE DOING!
# Start setup
echo "Starting installation process..." > ~/solarcoind_setup.log
date >> ~/solarcoind_setup.log
# Update OS
cd
echo "Updating OS..." >> ~/solarcoind_setup.log
date >> ~/solarcoind_setup.log
apt-get update
apt-get upgrade -y
# Increase swap size
echo "Increasing swap size..." >> ~/solarcoind_setup.log
date >> ~/solarcoind_setup.log
sed -i "s/\(CONF_SWAPSIZE *= *\).*/\11024/" /etc/dphys-swapfile
dphys-swapfile setup
dphys-swapfile swapon
# Install dependencies
echo "Installing dependencies..." >> ~/solarcoind_setup.log
date >> ~/solarcoind_setup.log
apt-get install autoconf libevent-dev libtool libssl-dev libboost-all-dev libminiupnpc-dev libdb-dev libdb4.8++ libdb5.3++-dev git hardening-includes rng-tools g++ make -y
# Build BerkeleyDB
echo "Downloading BerkeleyDB..." >> ~/solarcoind_setup.log
date >> ~/solarcoind_setup.log
wget http://download.oracle.com/berkeley-db/db-4.8.30.NC.tar.gz
sudo tar -xzvf db-4.8.30.NC.tar.gz
cd db-4.8.30.NC/build_unix
../dist/configure --enable-cxx --disable-shared
echo "Building BerkeleyDB..." >> ~/solarcoind_setup.log
date >> ~/solarcoind_setup.log
make
echo "Installing BerkeleyDB..." >> ~/solarcoind_setup.log
date >> ~/solarcoind_setup.log
make install
export CPATH="/usr/local/BerkeleyDB.4.8/include"
export LIBRARY_PATH="/usr/local/BerkeleyDB.4.8/lib"
# Build solarcoind
cd
echo "Downloading solarcoind..." >> ~/solarcoind_setup.log
date >> ~/solarcoind_setup.log
git clone https://github.com/onsightit/solarcoin.git
cd db-4.8.30.NC/build_unix
../dist/configure --prefix=/usr/local --enable-cxx --disable-shared
cd
cd solarcoin/src
echo "Building solarcoind..." >> ~/solarcoind_setup.log
date >> ~/solarcoind_setup.log
make -f makefile.unix -e PIE=1
strip solarcoind
hardening-check solarcoind >> ~/solarcoind_setup.log
echo "Installing solarcoind..." >> ~/solarcoind_setup.log
date >> ~/solarcoind_setup.log
install -m 755 solarcoind /usr/local/bin/solarcoind
cd
solarcoind
echo "Installation process complete! =)" >> ~/solarcoind_setup.log
# Create config file
echo "Creating config file..." >> ~/solarcoind_setup.log
date >> ~/solarcoind_setup.log
# Generate random password if no custom password is set.
if [ $rpcpassword = 0 ]; then
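	# take ${1:-64} (the script's first argument, default 64) random
	# characters drawn from the set [A-Za-z0-9_-]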
rpcpassword=$(< /dev/urandom tr -dc _A-Z-a-z-0-9 | head -c${1:-64};echo;)
fi
cd ~/.solarcoin/
# Write config file
/bin/cat <<EOM >solarcoin.conf
addnode=162.243.214.120
server=1
daemon=1
rpcuser=solarcoinrpc
rpcpassword=$rpcpassword
listen=1
EOM
chmod 400 solarcoin.conf
sleep 1
# Set up solarcoind as a service
/bin/cat <<EOM >/etc/systemd/system/solarcoind.service
[Unit]
Description=SolarCoin daemon services
After=tlp-init.service
[Service]
Type=forking
ExecStart=/usr/local/bin/solarcoind
PIDFile=/root/.solarcoin/solarcoind.pid
RemainAfterExit=yes
Restart=on-failure
RestartSec=3
User=root
[Install]
WantedBy=multi-user.target
EOM
systemctl enable solarcoind.service
systemctl daemon-reload
systemctl start solarcoind.service
echo "solarcoind is now ready to operate! =)" >> ~/solarcoind_setup.log
echo "Please encrypt your wallet now!" >> ~/solarcoind_setup.log
date >> ~/solarcoind_setup.log
# Clean up
rm -rf db-4.8.30.NC*
rm solarcoind_setup.sh
rm /boot/AUTO_CustomScript.sh
rm /boot/Automation_Custom_Script.sh
|
Dommsko/SolarPi
|
Automation_Custom_Script.sh
|
Shell
|
gpl-3.0
| 4,199 |
#!/bin/bash
DIR=`dirname $0`/
TRANSLATED=${DIR}temp/
mkdir -p ${TRANSLATED}
# $1 contains the input filename (the name of the TLSF-file).
# we need to call syfco to create the ltl and part files for acacia
INPUT_FILE=$1
BASE_FILE=$(basename $INPUT_FILE)
BASE_FILE_NOEXT="${BASE_FILE%.*}"
COMMAND="${DIR}binary/syfco -f acacia-specs -o ${TRANSLATED}${BASE_FILE}.ltl -pf ${TRANSLATED}${BASE_FILE}.part ${INPUT_FILE}"
$COMMAND
# we can now feed the files to acacia and determine if it is realizable or not
COMMAND="python ${DIR}binary/acacia_plus.py --ltl ${TRANSLATED}${BASE_FILE} --part ${TRANSLATED}${BASE_FILE}.part --player 1 --check BOTH --v 0 --kbound 5000 --kstep 1"
$COMMAND
res=$?
if [[ $res == 10 ]]; then
echo "REALIZABLE"
elif [[ $res == 20 ]]; then
echo "UNREALIZABLE"
elif [[ $res == 15 ]]; then
echo "Acacia does not know :("
else
echo "Strange exit code ${res}"
fi
exit $res
|
gaperez64/acacia4aiger
|
par_real_conf1.sh
|
Shell
|
gpl-3.0
| 905 |
#!/bin/bash
# Script to generate field with an low amplitude traveling pulse
# to test the nonreflecting boundary condition
set -eu
Nx=1
Ny=96
k=6
htdelta=1
Nz=1
bulk_rho=NaN
# Generate a restart file and then modify it with an initial pulse
filename="pulse_perfect_physical.h5"
../perfect_initial --clobber ${filename} \
--Nx=$Nx --Ny=$Ny --k=$k --htdelta=$htdelta --Nz=$Nz \
--restart_physical --bulk_rho=${bulk_rho}
./pulse_perfect.py "$filename"
|
RhysU/suzerain
|
apps/perfect/manual.nrbc/pulse_perfect_init.sh
|
Shell
|
gpl-3.0
| 512 |
#!/bin/sh
# Author: Eray Ozkural <[email protected]>
pwd
PATH=$PATH:.
set -x -e
pisi-cli -Dtmp -E --ignore-build-no build tests/zip/pspec.xml tests/unzip/pspec.xml
pisi-cli -Dtmp --yes-all --ignore-comar install unzip-5.50-1.pisi zip-2.3-1.pisi
mkdir -p myrepo
cd myrepo
mkdir -p tmp
../pisi-cli -Dtmp -E --ignore-build-no build ../tests/zip2/pspec.xml ../tests/unzip2/pspec.xml
cd ..
pisi-cli -Dtmp --absolute-uris index myrepo
pisi-cli -Dtmp remove-repo repo1
pisi-cli -Dtmp add-repo repo1 pisi-index.xml
pisi-cli -Dtmp list-repo
pisi-cli -Dtmp update-repo repo1
pisi-cli -Dtmp list-available
pisi-cli -Dtmp --install-info list-installed
pisi-cli -Dtmp list-upgrades
pisi-cli -Dtmp --ignore-comar upgrade zip
pisi-cli -Dtmp --install-info list-installed
|
examachine/pisi
|
tests/upgrade.sh
|
Shell
|
gpl-3.0
| 761 |
#!/bin/bash
set -euo pipefail
script_dir="$(readlink -f "$(dirname "$0")")"
reformat() {
expand -t 4 "$1" | clang-format > "$1.__reformat-tmp"
mv "$1.__reformat-tmp" "$1"
}
git diff --name-only -z '*.cpp' '*.h' |
while read -r -d '' file; do
"$script_dir/fix-includes.pl" "$file"
reformat "$file"
done
# Note: perl -i on cygwin always creates bak files.
perl -i "$script_dir/fix-cmakelists.pl" "$script_dir/CMakeLists.txt" && rm -f "$script_dir/CMakeLists.txt.bak"
|
MHeasell/rwe
|
format-changed.sh
|
Shell
|
gpl-3.0
| 486 |
#!/bin/bash
######################################################################
#By Tummy a.k.a Vincent C. Passaro #
#Vincent[.]Passaro[@]gmail[.]com #
#www.vincentpassaro.com #
######################################################################
#_____________________________________________________________________
#| Version | Change Information | Author | Date |
#|__________|_______________________|____________________|____________|
#| 1.0 | Initial Script | Vincent C. Passaro | 20-oct-2011|
#| | Creation | | |
#|__________|_______________________|____________________|____________|
#
#
# - Updated by Shannon Mitchell([email protected]) on
# 30-dec-2011. Added a check for existing permissions before making a change.
#######################DISA INFORMATION###############################
#Group ID (Vulid): V-22319
#Group Title: GEN001362
#Rule ID: SV-26395r1_rule
#Severity: CAT II
#Rule Version (STIG-ID): GEN001362
#Rule Title: The /etc/resolv.conf file must be owned by root.
#
#Vulnerability Discussion: The resolv.conf (or equivalent) file configures the system's DNS resolver. DNS is used to resolve host names to IP addresses. If DNS configuration is modified maliciously, host name resolution may fail or return incorrect information. DNS may be used by a variety of system security functions such as time synchronization, centralized authentication, and remote system logging.
#
#
#Responsibility: System Administrator
#IAControls: ECLP-1
#
#Check Content:
#Check that the /etc/resolv.conf file is owned by root.
# ls -l /etc/resolv.conf
#If the file is not owned by root, this is a finding.
#
#Fix Text: Change the owner of the /etc/resolv.conf file to root.
# chown root /etc/resolv.conf
#######################DISA INFORMATION###############################
#Global Variables#
PDI=GEN001362
#Start-Lockdown
if [ -a "/etc/resolv.conf" ]; then
echo '==================================================='
echo '   Patching GEN001362: /etc/resolv.conf Ownership'
echo '==================================================='
CUROWN=`stat -c %U /etc/resolv.conf`;
if [ "$CUROWN" != "root" ]; then
chown root /etc/resolv.conf
fi
fi
|
jason-callaway/stig-fix
|
cat2/gen001362.sh
|
Shell
|
gpl-3.0
| 2,322 |
#!/bin/bash
#Here is a sample custom api script.
#This file name is "dns-myapi.sh"
#So, here must be a method dns-myapi-add()
#Which will be called by le.sh to add the txt record to your api system.
#returns 0 meanst success, otherwise error.
######## Public functions #####################
#Usage: add _acme-challenge.www.domain.com "XKrxpRBosdIKFzxW_CT3KLZNf6q0HG9i01zxXp5CPBs"
dns-myapi-add() {
fulldomain=$1
txtvalue=$2
_err "Not implemented!"
return 1;
}
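#Illustrative sketch of a possible implementation (hypothetical endpoint and
#MYAPI_TOKEN variable -- adapt to your own DNS provider's API):
#
#dns-myapi-add() {
#  fulldomain=$1
#  txtvalue=$2
#  response="$(curl -s -X POST "https://api.example.com/dns/txt" \
#    -H "Authorization: Bearer $MYAPI_TOKEN" \
#    --data-urlencode "name=$fulldomain" \
#    --data-urlencode "value=$txtvalue")"
#  _debug "response" "$response"
#  return 0  # return 0 on success, non-zero on failure
#}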
#################### Private functions below ##################################
_debug() {
if [ -z "$DEBUG" ] ; then
return
fi
if [ -z "$2" ] ; then
    echo "$1"
else
echo "$1"="$2"
fi
}
_info() {
if [ -z "$2" ] ; then
echo "$1"
else
echo "$1"="$2"
fi
}
_err() {
if [ -z "$2" ] ; then
echo "$1" >&2
else
echo "$1"="$2" >&2
fi
}
|
colegatron/le
|
dnsapi/dns-myapi.sh
|
Shell
|
gpl-3.0
| 874 |
#!/bin/bash
#
# This script create a flat list of all content items, then use that list to
# construct a JSON object string for publishing. To use this script:
#
# 1) Copy the work-area folder of a site via WebDAV to the local machine.
#
# 2) While in that local work-area folder, run this script like this:
#
# ~/cstudio-2-2-x/alfresco-svcs/get-pub-list.sh > ~/pub-list.json
#
# 3) Then submit the JSON object string for publishing:
#
# curl -d @$HOME/pub-list.json -u admin:admin -H "Content-Type: application/json" http://localhost:8080/alfresco/service/cstudio/wcm/workflow/go-live?site=test-site
#
# Be sure to specify the matching site name at the very end of the URI.
#
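# Each file found below becomes one item in the "items" list, e.g. for a
# work-area file "site/index.xml" (illustrative):
#   { "assets":[], ..., "inProgress":true, "uri":"/site/index.xml", "user":"" }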
echo '{"items":['
IFS=$'\n'
for f in `find * -type f`; do
echo ' {'
for s in assets children components documents deletedItems renderingTemplates levelDescriptors; do
echo " \"${s}\":[],"
done
echo ' "deleted":false,'
echo ' "now":false,'
echo ' "scheduledDate":"",'
echo ' "submittedForDeletion":false,'
echo ' "submitted":false,'
echo ' "inProgress":true,'
echo ' "reference":false,'
echo " \"uri\":\"/${f}\","
echo ' "user":""'
echo ' },'
done
echo '],'
echo '"submissionComment":"",'
echo '"publishChannel":{"index":"0","name":"Sample Group"},'
echo '"status":{"channels":[],"message":""},'
echo '"now":"true",'
echo '"scheduledDate":""}'
|
hlim/studio
|
alfresco-svcs/get-pub-list.sh
|
Shell
|
gpl-3.0
| 1,381 |
#! /bin/bash
function ngc-compile() {
ghc -Wall -O2 -o ngc Main.hs -prof -v
}
|
R-Morgan/NGramCrackers
|
src/compile.sh
|
Shell
|
agpl-3.0
| 80 |
#!/bin/bash
CONTAINER="golang:1.8-alpine"
SOURCES="$GOPATH/src"
TARGET="$GOPATH/bin/alpine"
ATTRS="`bash version.sh`"
rm -rf $TARGET exchange
mkdir -p $TARGET/bin $TARGET/pkg
/usr/bin/docker run --rm --user `id -u $USER`:`id -g $USER` \
--volume ${SOURCES}:/usr/p/src:ro \
--volume $TARGET/pkg:/usr/p/pkg \
--volume ${TARGET}/bin:/usr/p/bin \
--workdir /usr/p/src/github.com/z0rr0/exchange \
--env GOPATH=/usr/p \
${CONTAINER} go install -v -ldflags "${ATTRS}" github.com/z0rr0/exchange
if [[ $? -gt 0 ]]; then
    echo "ERROR: container build failed"
exit 1
fi
cp -v $TARGET/bin/exchange ./
|
z0rr0/exchange
|
container.sh
|
Shell
|
agpl-3.0
| 593 |
#!/bin/bash
DIR="$(cd -P "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
. $DIR/set_partition_vars.sh
# data & docker - important names, used in write_fstab.sh
btrfs subvolume create ${STORAGE_MOUNTPOINT}/data
btrfs subvolume create ${STORAGE_MOUNTPOINT}/docker
# potentially stop all running containers
docker_pids=$(docker ps -a -q)
if [[ ! -z "${docker_pids}" ]]; then
docker stop $docker_pids
fi
# stop docker if exists
service docker stop
# if paths exist, move their contents to a /tmp folder
rm -rf /tmp/cf-data && mkdir -p /tmp/cf-data/
if [ -d $CLOUDFLEET_DATA_PATH ] ; then
rsync -avz ${CLOUDFLEET_DATA_PATH}/ /tmp/cf-data/
fi
# in case it was on an external drive
umount /var/lib/docker
# create paths if they don't exist
mkdir -p $CLOUDFLEET_DATA_PATH
mkdir -p $DOCKER_DATA_PATH
# These partitions are now marked noauto
sync
mount $DOCKER_DATA_PATH
if [ $? -ne 0 ]; then
echo "There was an error mounting $DOCKER_DATA_PATH. Won't delete anything."
exit 1
fi
sync
mount $CLOUDFLEET_DATA_PATH
if [ $? -ne 0 ]; then
echo "There was an error mounting $CLOUDFLEET_DATA_PATH. Won't delete anything."
exit 1
fi
sync
# move data back
rsync -avz /tmp/cf-data/ ${CLOUDFLEET_DATA_PATH}/
sync
# once again to be sure
mount /var/lib/docker
# write the new docker sysv/upstart/systemd options that use btrfs
$DIR/write_docker_opts.sh
# resume docker
service docker restart
exit
|
cloudfleet/blimp-engineroom
|
bin/cryptpart/create_btrfs_partitions.sh
|
Shell
|
agpl-3.0
| 1,411 |
#!/usr/bin/env bash
#===============================================================================
# FILE: transmission.sh
#
# USAGE: ./transmission.sh
#
# DESCRIPTION: Entrypoint for transmission docker container
#
# OPTIONS: ---
# REQUIREMENTS: ---
# BUGS: ---
# NOTES: ---
# AUTHOR: David Personette ([email protected]),
# ORGANIZATION:
# CREATED: 09/28/2014 12:11
# REVISION: 1.0
#===============================================================================
set -o nounset # Treat unset variables as an error
dir="/var/lib/transmission-daemon"
### timezone: Set the timezone for the container
# Arguments:
# timezone) for example EST5EDT
# Return: the correct zoneinfo file will be symlinked into place
timezone() { local timezone="${1:-EST5EDT}"
[[ -e /usr/share/zoneinfo/$timezone ]] || {
echo "ERROR: invalid timezone specified: $timezone" >&2
        return 1
}
if [[ $(cat /etc/timezone) != $timezone ]]; then
echo "$timezone" > /etc/timezone
ln -sf /usr/share/zoneinfo/$timezone /etc/localtime
dpkg-reconfigure -f noninteractive tzdata >/dev/null 2>&1
fi
}
### usage: Help
# Arguments:
# none)
# Return: Help text
usage() { local RC=${1:-0}
echo "Usage: ${0##*/} [-opt] [command]
Options (fields in '[]' are optional, '<>' are required):
-h This help
-t \"\" Configure timezone
possible arg: \"[timezone]\" - zoneinfo timezone for container
The 'command' (if provided and valid) will be run instead of transmission
" >&2
exit $RC
}
cd /tmp
while getopts ":ht:" opt; do
case "$opt" in
h) usage ;;
t) timezone "$OPTARG" ;;
"?") echo "Unknown option: -$OPTARG"; usage 1 ;;
":") echo "No argument value for option: -$OPTARG"; usage 2 ;;
esac
done
shift $(( OPTIND - 1 ))
[[ "${TZ:-""}" ]] && timezone "$TZ"
[[ "${USERID:-""}" =~ ^[0-9]+$ ]] && usermod -u $USERID debian-transmission
[[ "${GROUPID:-""}" =~ ^[0-9]+$ ]] && usermod -g $GROUPID debian-transmission
[[ -d $dir/downloads ]] || mkdir -p $dir/downloads
[[ -d $dir/incomplete ]] || mkdir -p $dir/incomplete
[[ -d $dir/info/blocklists ]] || mkdir -p $dir/info/blocklists
chown -Rh debian-transmission. $dir 2>&1 | grep -iv 'Read-only' || :
if [[ $# -ge 1 && -x $(which $1 2>&-) ]]; then
exec "$@"
elif [[ $# -ge 1 ]]; then
echo "ERROR: command not found: $1"
exit 13
elif ps -ef | egrep -v 'grep|transmission.sh' | grep -q transmission; then
echo "Service already running, please restart container to apply changes"
else
url='http://list.iblocklist.com'
curl -Ls "$url"'/?list=bt_level1&fileformat=p2p&archiveformat=gz' |
gzip -cd > $dir/info/blocklists/bt_level1
chown debian-transmission. $dir/info/blocklists/bt_level1
grep -q peer-socket-tos $dir/info/settings.json ||
sed -i '/"peer-port"/a \
"peer-socket-tos": "lowcost",' $dir/info/settings.json
sed -i '/"queue-stalled-enabled"/s/:.*/: true,/' $dir/info/settings.json
sed -i '/"speed-limit-up"/s/:.*/: 10,/' $dir/info/settings.json
sed -i '/"speed-limit-up-enabled"/s/:.*/: true,/' $dir/info/settings.json
exec su -l debian-transmission -s /bin/bash -c "exec transmission-daemon \
--config-dir $dir/info --blocklist --encryption-preferred \
--log-error -e /dev/stdout --global-seedratio 2.0 --dht \
--incomplete-dir $dir/incomplete --paused --auth --foreground \
--username '${TRUSER:-admin}' --password '${TRPASSWD:-admin}' \
--download-dir $dir/downloads --no-portmap --allowed \\* 2>&1"
fi
|
romlinch/transmission
|
transmission.sh
|
Shell
|
agpl-3.0
| 3,715 |
#!/bin/bash
usage() {
echo "Usage: ${0##*/} <version>
Downloads and packs the phar file of composer.
"
exit 1
}
[ -z "$1" ] && usage
set -e
version="$1"
# with "set -e" a failed assignment would exit before a later status check,
# so test the command substitution directly in the if condition
if ! self_bin=$(readlink -e "$0"); then
  echo "Error: unable to determine self path" 1>&2
  exit 1
fi
self_dir=${self_bin%/*}
sys_dir=${self_dir%/*/*/*}
temp_dir=$(mktemp -d)
pack_dir="$temp_dir/pack"
path_dir="$pack_dir/bin/.path"
composer_url="https://getcomposer.org/download/$version/composer.phar"
main_pkg_dir="$pack_dir/bin/packages/composer"
target_file="$main_pkg_dir/composer.phar"
umask 022
mkdir -p "$path_dir"
mkdir -p "$main_pkg_dir"
curl -sS -o "$target_file" -L "$composer_url" || \
{ st=$?; echo "Curl returned $st"; exit $st; }
chmod 755 "$target_file"
ln -s "${target_file##*/}" "$main_pkg_dir/compose"
ln -s "../${main_pkg_dir#$pack_dir/bin/}/compose" "$path_dir/compose"
ln -s "../${main_pkg_dir#$pack_dir/bin/}/compose" "$path_dir/composer"
"$sys_dir/libexec/pack-package" -d "$pack_dir" "composer-$version.tar.gz" .
echo "Inspect: $temp_dir"
|
devpanel/serverlink
|
src/packages/composer/pack.sh
|
Shell
|
agpl-3.0
| 1,061 |
#!/bin/sh
#
# Run the DDX Crawler program
#profiler="-javaagent:/Users/jimg/src/jip-src-1.2/profile/profile.jar \
#-Dprofile.properties=/Users/jimg/src/olfs/resources/metacat/profile.properties"
java $profiler -Xms256m -Xmx1024m -jar ../libexec/DDXCrawler.jar $*
|
OPENDAP/olfs
|
retired/resources/metacat/ddx_crawler.sh
|
Shell
|
lgpl-2.1
| 266 |
#!/bin/sh
# Run ipython notebook tests
testfail=0
curl -OLk http://www.dropbox.com/s/1x4ny0c93gvu54n/toy_mstis_1k_OPS1.nc
curl -OLk http://www.dropbox.com/s/qaeczkugwxkrdfy/toy_mistis_1k_OPS1.nc
ls *nc
#python ipynbtest.py "sliced_sequential_ensembles.ipynb" || testfail=1
cd toy_model_mstis/
date
ipynbtest.py "toy_mstis_1_setup.ipynb" || testfail=1
date
ipynbtest.py "toy_mstis_2_run.ipynb" || testfail=1
date
ipynbtest.py "toy_mstis_3_analysis.ipynb" || testfail=1
date
ipynbtest.py "toy_mstis_4_repex_analysis.ipynb" || testfail=1
#date
#ipynbtest.py "toy_mstis_5_srtis.ipynb" || testfail=1
cd ../toy_model_mistis/
date
ipynbtest.py "toy_mistis_1_setup_run.ipynb" || testfail=1
date
# skip toy_mistis_2_flux: not needed
ipynbtest.py "toy_mistis_3_analysis.ipynb" || testfail=1
date
cd ../tests/
cp ../toy_model_mstis/mstis.nc ./
ipynbtest.py --strict --show-diff "test_openmm_integration.ipynb" || testfail=1
date
ipynbtest.py --strict "test_snapshot.ipynb" || testfail=1
date
ipynbtest.py --strict "test_netcdfplus.ipynb" || testfail=1
date
ipynbtest.py --strict "test_cv.ipynb" || testfail=1
date
ipynbtest.py --strict "test_pyemma.ipynb" || testfail=1
date
cd ../misc/
cp ../toy_model_mstis/mstis.nc ./
ipynbtest.py "tutorial_storage.ipynb" || testfail=1
cd ..
rm toy_mstis_1k_OPS1.nc
rm toy_mistis_1k_OPS1.nc
if [ $testfail -eq 1 ]
then
exit 1
fi
|
jhprinz/openpathsampling
|
examples/ipynbtests.sh
|
Shell
|
lgpl-2.1
| 1,361 |
#!/bin/bash
# Bash version of homework client
# Author: Michael Verkhovykh
echo "/home/michael/Projects/ifmo/os/hw1/c/tester.c" > input
nc 127.0.0.1 10800 < input # send request
nc 127.0.0.1 10800 # receive answer
|
mihver1/os4-hw1-bash
|
client.sh
|
Shell
|
lgpl-3.0
| 223 |
rake db:drop
rake db:create
rake db:migrate
rake db:seed --trace
|
tuliglowicz/resource-substitution
|
rake.sh
|
Shell
|
lgpl-3.0
| 65 |
#!/bin/bash
script_dir="$(dirname "$0")"
source "${script_dir}/ubuntu-package-env.sh"
cd ${script_dir}/../..
variant=${variant:-test}
echo "Build variant: $variant"
#ICL_OPTIONS+=" -DBUILD_WITH_IPP=ON -DBUILD_WITH_MKL=ON "
configure_icl
echo $HOSTNAME > docker_container_id.log
if [ "$variant" = "binary" ]
then
mk-build-deps --install debian/control
debuild -b -uc -us
elif [ "$variant" = "source" ]
then
mk-build-deps --install debian/control
gpg --passphrase-file ../packaging_passphrase.txt --batch --import ../packaging.key
debuild -S -i -I -sa -us -uc
debsign \
-p'gpg --batch --pinentry-mode=loopback --passphrase-file ../packaging_passphrase.txt' \
-S ../icl_*.changes
dput ppa:iclcv/icl ../icl_*.changes && rm ../icl_*
elif [ "$variant" = "test" ]
then
cd build
make -j3
make test
elif [ "$variant" = "pages" ]
then
cd build
make -j3
make test
make pages
else
echo "please specify a variant ('source', 'test', 'pages' or 'binary')"
fi
|
iclcv/icl
|
packaging/scripts/build-ubuntu-packages.sh
|
Shell
|
lgpl-3.0
| 987 |
add_package --build generic \
--version 5.3 \
http://webserver2.tecgraf.puc-rio.br/~lhf/ftp/lua/ar/lxml-101.tar.gz
pack_set --module-requirement lua
pack_set --install-query $(pack_get --LD lua)/lua/$lua_V/xml.so
pack_cmd "make LUA_TOPDIR=$(pack_get --prefix lua) CFLAGS='$CFLAGS' LIBDIR=$(pack_get --LD lua)/lua/$lua_V/ so install"
|
zerothi/bash-build
|
lua/xml.bash
|
Shell
|
lgpl-3.0
| 345 |
add_package https://launchpad.net/xmlf90/trunk/1.5/+download/xmlf90-1.5.4.tar.gz
pack_set -s $IS_MODULE -s $BUILD_DIR
pack_set --install-query $(pack_get --LD)/libxmlf90.a
pack_set --lib -lxmlf90
# Install commands that it should run
pack_cmd "../configure" \
"--prefix $(pack_get --prefix)"
# Make commands
pack_cmd "make $(get_make_parallel)"
pack_cmd "make install"
|
zerothi/bash-build
|
libs/xmlf90.bash
|
Shell
|
lgpl-3.0
| 375 |
#!/bin/bash
./bootstrap.sh
source setup.env
cd bundles
for b in $(ls); do
if [ -d $b ];then
cd $b
./build.sh
cd ..
fi
done
|
Colibri-Embedded/colibri-linux
|
build.sh
|
Shell
|
lgpl-3.0
| 134 |
#!/bin/bash
doySTART=2000000
doyEND=2016000
#Merge datasets
#mkdir -p ~/DATA
#DEM PROCESSING
#dump all srtm tiles you downloaded into the srtm directory
#then set your geographical box of interest (tiles have to cover it)
xmin=79.4
ymin=5.9
xmax=82.0
ymax=9.9
WGS84='EPSG:4326'
#Go to Data Directory and fetch the first raster file resolution
cd ~/DATA
PRoot=~/dev/distRS/trunk
#Define number of (virtual) cores
ncores=`grep -c 'processor' /proc/cpuinfo`
echo "ncores=" $ncores
#NDVI PROCESSING
prog_root=$PRoot/prog/prog_NDVI
cd $prog_root
make clean
make
cd ~/DATA
for (( doy = $doySTART ; doy <= $doyEND ; doy ++ ))
do
if [ $(expr $doy % 1000) -lt 366 ]
then
echo "ndvi" $doy
#count tarball identified
c1=$(find -type f | grep MOD13A2 | grep A$doy | grep NDVI | wc -l)
if [ $c1 -eq 1 ]
then
#count tarball identified
c2=$(find -type f | grep MOD13A2 | grep A$doy | grep reliability | wc -l)
if [ $c2 -eq 1 ]
then
#NDVI file
f1=$(find -type f | grep MOD13A2 | grep A$doy | grep NDVI)
#QA file
f2=$(find -type f | grep MOD13A2 | grep A$doy | grep reliability)
#Output filename
out=ndvi_$doy.tif
#does it already exist?
outno=$(find -type f | grep $out | wc -l)
#Define number of gdalwarp running
npid=$(echo "$(ps aux | grep ndvi | wc -l) - 1" | bc)
while [ $npid -ge $ncores ]
do
sleep 1
#Update number of ndvi running
npid=$(echo "$(ps aux | grep ndvi | wc -l) - 1" | bc)
#Update number of (virtual) cores (for heterogeneous systems)
ncores=`grep -c 'processor' /proc/cpuinfo`
done
echo -e "\e[01;36m" "ndvi" $f1 $f2 $out "\e[00m"
#process
$prog_root/ndvi $f1 $f2 $out &
fi
fi
fi
done
file=$(ls ndvi_* | head -1)
xpix=$(gdalinfo $(echo $file) | grep Pixel | sed 's/Pixel\ Size\ =\ (\(.*\),\(.*\))/\1/g')
ypix=$(gdalinfo $(echo $file) | grep Pixel | sed 's/Pixel\ Size\ =\ (\(.*\),-\(.*\))/\2/g')
echo $xpix $ypix
xmin=$(gdalinfo $(echo $file) | grep "Lower\ Left" | sed 's/Lower\ Left\ \ (\ \ \(.*\)\,\ \ \(.*\),\(.*\)/\1/g')
xmax=$(gdalinfo $(echo $file) | grep "Upper\ Right" | sed 's/Upper\ Right\ (\ \ \(.*\)\,\ \ \(.*\),\(.*\)/\1/g')
ymin=$(gdalinfo $(echo $file) | grep "Lower\ Left" | sed 's/Lower\ Left\ \ (\ \ \(.*\)\,\ \ \(.*\))\ (\(.*\),\(.*\)/\2/g')
ymax=$(gdalinfo $(echo $file) | grep "Upper\ Right" | sed 's/Upper\ Right\ (\ \ \(.*\)\,\ \ \(.*\))\ (\(.*\),\(.*\)/\2/g')
echo $xmin $xmax $ymin $ymax
height=$(gdalinfo $(echo $file) | grep 'Size is' | sed 's/Size\ is\ \(.*\),\(.*\)/\2/g')
width=$(gdalinfo $(echo $file) | grep 'Size is' | sed 's/Size\ is\ \(.*\),\(.*\)/\1/g')
#Go to SRTM Data Directory
#mkdir -p ~/DATA/SRTM
cd ~/DATA/SRTM
#cp -f ~/SRTM/srtm_tonlesap.tif ~/DATA/SRTM/srtm.tif
#cp -f ~/Osaka/SRTM/srtm_tonlesap.tif ~/DATA/SRTM/srtm.tif
#merge tiles and reproject
rm -f srtm.tif
#gdalwarp -of GTiff -ot Float32 -t_srs $WGS84 -te $xmin $ymin $xmax $ymax -tr $xpix $ypix ~/Osaka/SRTM/srtm_tonlesap.tif srtm.tif
gdalwarp -of GTiff -ot Float32 -t_srs $WGS84 -te $xmin $ymin $xmax $ymax -ts $width $height ~/DATA/SRTM/srtm_LK.tif ~/DATA/SRTM/srtm.tif
#Go to Data Directory
cd ~/DATA
#ALBEDO PROCESSING
prog_root=$PRoot/prog/prog_ALB
cd $prog_root
make clean
make
cd ~/DATA
dfs=0.25
for (( doy = $doySTART ; doy <= $doyEND ; doy ++ ))
do
if [ $(expr $doy % 1000) -lt 366 ]
then
echo "Alb" $doy
#count tarball identified
c1=$(find -type f | grep MCD43B3 | grep A$doy | grep WSA_shortwave.tif | wc -l)
if [ $c1 -eq 1 ]
then
#count tarball identified
c2=$(find -type f | grep MCD43B3 | grep A$doy | grep BSA_shortwave.tif | wc -l)
if [ $c2 -eq 1 ]
then
#count tarball identified
c3=$(find -type f | grep MCD43B2 | grep A$doy | grep Albedo_Quality | wc -l)
if [ $c3 -eq 1 ]
then
echo "Found all MCDs"
#BSA file
#find tarball
f1=$(find -type f | grep MCD43B3 | grep A$doy | grep WSA_shortwave.tif)
#WSA file
#find tarball
f2=$(find -type f | grep MCD43B3 | grep A$doy | grep BSA_shortwave.tif)
#QA file
#find tarball
f3=$(find -type f | grep MCD43B2 | grep A$doy | grep Albedo_Quality)
#Output filename
out=alb_$doy.tif
#does it already exist?
outno=$(find -type f | grep $out | wc -l)
				#Define number of alb processes running
npid=$(echo "$(ps aux | grep alb | wc -l) - 1" | bc)
while [ $npid -ge $ncores ]
do
sleep 1
#Update number of alb running
npid=$(echo "$(ps aux | grep alb | wc -l) - 1" | bc)
#Update number of (virtual) cores (for heterogeneous systems)
ncores=`grep -c 'processor' /proc/cpuinfo`
done
echo -e "\e[01;36m" "alb" $out $dfs $f1 $f2 $f3 "\e[00m"
#process
$prog_root/alb $out $dfs $f1 $f2 $f3 &
fi
fi
fi
fi
done
#LST PROCESSING
prog_root=$PRoot/prog/prog_LST
cd $prog_root
make clean
make
cd ~/DATA
for (( doy = $doySTART ; doy <= $doyEND ; doy ++ ))
do
if [ $(expr $doy % 1000) -lt 366 ]
then
echo "lst" $doy
#LST Day file
#count tarball identified
c1=$(find -type f | grep MOD11A1 | grep A$doy | grep LST_LST | wc -l)
if [ $c1 -eq 1 ]
then
#count tarball identified
c2=$(find -type f | grep MOD11A1 | grep A$doy | grep LST_QC | wc -l)
if [ $c2 -eq 1 ]
then
#find tarball
f1=$(find -type f | grep MOD11A1 | grep A$doy | grep LST_LST)
#QC file
#find tarball
f2=$(find -type f | grep MOD11A1 | grep A$doy | grep LST_QC)
#Output filename
out=lst_$doy.tif
#does it already exist?
outno=$(find -type f | grep $out | wc -l)
			#Define number of lst processes running
npid=$(echo "$(ps aux | grep lst | wc -l) - 1" | bc)
while [ $npid -ge $ncores ]
do
sleep 1
#Update number of lst running
npid=$(echo "$(ps aux | grep lst | wc -l) - 1" | bc)
#Update number of (virtual) cores (for heterogeneous systems)
ncores=`grep -c 'processor' /proc/cpuinfo`
done
echo -e "\e[01;36m" "lst" $f1 $f2 $out "\e[00m"
#process
$prog_root/lst $f1 $f2 $out &
fi
fi
fi
done
#T0dem PROCESSING
prog_root=$PRoot/prog/prog_T0Dem
cd $prog_root
make clean
make
cd ~/DATA
#DEM file
f0=~/DATA/SRTM/srtm.tif
for (( doy = $doySTART ; doy <= $doyEND ; doy ++ ))
do
if [ $(expr $doy % 1000) -lt 366 ]
then
echo "t0dem" $doy
#count tarball identified
c1=$(find -type f | grep MOD11A1 | grep A$doy | grep LST_LST | wc -l)
if [ $c1 -eq 1 ]
then
#find tarball
c2=$(find -type f | grep MOD11A1 | grep A$doy | grep LST_QC | wc -l)
if [ $c2 -eq 1 ]
then
#LST Day file
f1=$(find -type f | grep MOD11A1 | grep A$doy | grep LST_LST)
#QC file
f2=$(find -type f | grep MOD11A1 | grep A$doy | grep LST_QC)
#Output filename
out=t0dem_$doy.tif
#does it already exist?
outno=$(find -type f | grep $out | wc -l)
			#Define number of t0dem processes running
			npid=$(echo "$(ps aux | grep t0dem | wc -l) - 1" | bc)
			while [ $npid -ge $ncores ]
			do
				sleep 1
				#Update number of t0dem processes running
				npid=$(echo "$(ps aux | grep t0dem | wc -l) - 1" | bc)
#Update number of (virtual) cores (for heterogeneous systems)
ncores=`grep -c 'processor' /proc/cpuinfo`
done
echo -e "\e[01;36m" "t0dem" $f0 $f1 $f2 $out "\e[00m"
#process
$prog_root/t0dem $f0 $f1 $f2 $out &
fi
fi
fi
done
|
YannChemin/distRS
|
country/SriLanka/02_preprocessing.sh
|
Shell
|
unlicense
| 7,309 |
#!/usr/bin/env bash
# MIT licensed template copied from https://github.com/fhd
dir="."
cmd="caffeinate -disu"
# not used in this script
# user="$(whoami)"
name=`basename $0`
pid_file="${HOME}/var/run/$name.pid"
stdout_log="${HOME}/var/log/$name.log"
stderr_log="${HOME}/var/log/$name.err"
get_pid() {
cat "$pid_file"
}
is_running() {
[ -f "$pid_file" ] && ps -p `get_pid` > /dev/null 2>&1
}
case "$1" in
start)
if is_running; then
echo "Already started"
else
echo "Starting $name"
cd "$dir"
echo $pid_file
$cmd >> "$stdout_log" 2>> "$stderr_log" &
echo $! > "$pid_file"
if ! is_running; then
echo "Unable to start, see $stdout_log and $stderr_log"
exit 1
fi
fi
;;
stop)
if is_running; then
echo -n "Stopping $name.."
kill `get_pid`
for i in 1 2 3 4 5 6 7 8 9 10
# for i in `seq 10`
do
if ! is_running; then
break
fi
echo -n "."
sleep 1
done
echo
if is_running; then
echo "Not stopped; may still be shutting down or shutdown may have failed"
exit 1
else
echo "Stopped"
if [ -f "$pid_file" ]; then
rm "$pid_file"
fi
fi
else
echo "Not running"
fi
;;
restart)
$0 stop
if is_running; then
echo "Unable to stop, will not attempt to start"
exit 1
fi
$0 start
;;
status)
if is_running; then
echo "Running"
else
echo "Stopped"
exit 1
fi
;;
*)
echo "Usage: $0 {start|stop|restart|status}"
exit 1
;;
esac
exit 0
|
SkyLeach/poweruser_tools
|
scripts/macaff.sh
|
Shell
|
unlicense
| 1,773 |
./configure --prefix=/usr \
--sysconfdir=/etc \
--localstatedir=/var \
--disable-docs \
--docdir=/usr/share/doc/fontconfig-2.12.1 &&
make
make install
|
pampa/cyberdeck
|
try4/usr/src/gui/fontconfig.sh
|
Shell
|
unlicense
| 215 |
#!/bin/bash
USER='brahman'
PASSWORD=`/usr/bin/awk -F "'" ' /^password=/{print $2}' </root/.my.cnf`
OUTPUT="/d1/backup/mysql/"
databases=`mysql --user=$USER --password=$PASSWORD -e "SHOW DATABASES;" | tr -d "| " | grep -v Database`
for db in $databases; do
if [[ "$db" != "information_schema" ]] && [[ "$db" != "performance_schema" ]] && [[ "$db" != "mysql" ]] && [[ "$db" != _* ]] ; then
echo "Dumping database: $db"
nice mysqldump --force --opt --user=$USER --password=$PASSWORD --databases $db > $OUTPUT/`date +%Y%m%d`.$db.sql
echo $OUTPUT/`date +%Y%m%d`.$db.sql
gzip $OUTPUT/`date +%Y%m%d`.$db.sql
fi
done
# clean up
find $OUTPUT -mtime +30 -exec rm {} \;
|
carthagecollege/archer
|
bin/mysqlbackup.all.sh
|
Shell
|
unlicense
| 704 |
# -----------------------------------------------------------------------------
#
# Package : arr-union
# Version : 3.1.0
# Source repo : https://github.com/jonschlinkert/arr-union
# Tested on : RHEL 8.3
# Script License: Apache License, Version 2 or later
# Maintainer : BulkPackageSearch Automation <[email protected]>
#
# Disclaimer: This script has been tested in root mode on given
# ========== platform using the mentioned version of the package.
# It may not work as expected with newer versions of the
# package and/or distribution. In such case, please
# contact "Maintainer" of this script.
#
# ----------------------------------------------------------------------------
PACKAGE_NAME=arr-union
PACKAGE_VERSION=3.1.0
PACKAGE_URL=https://github.com/jonschlinkert/arr-union
yum -y update && yum install -y yum-utils nodejs nodejs-devel nodejs-packaging npm python38 python38-devel ncurses git gcc gcc-c++ libffi libffi-devel ncurses git jq make cmake
yum-config-manager --add-repo http://rhn.pbm.ihost.com/rhn/latest/8.3Server/ppc64le/appstream/
yum-config-manager --add-repo http://rhn.pbm.ihost.com/rhn/latest/8.3Server/ppc64le/baseos/
yum-config-manager --add-repo http://rhn.pbm.ihost.com/rhn/latest/7Server/ppc64le/optional/
yum install -y firefox liberation-fonts xdg-utils && npm install n -g && n latest && npm install -g npm@latest && export PATH="$PATH" && npm install --global yarn grunt-bump xo testem acorn
OS_NAME=`python3 -c "os_file_data=open('/etc/os-release').readlines();os_info = [i.replace('PRETTY_NAME=','').strip() for i in os_file_data if i.startswith('PRETTY_NAME')];print(os_info[0])"`
HOME_DIR=`pwd`
if ! git clone $PACKAGE_URL $PACKAGE_NAME; then
echo "------------------$PACKAGE_NAME:clone_fails---------------------------------------"
echo "$PACKAGE_URL $PACKAGE_NAME" > /home/tester/output/clone_fails
echo "$PACKAGE_NAME | $PACKAGE_URL | $PACKAGE_VERSION | $OS_NAME | GitHub | Fail | Clone_Fails" > /home/tester/output/version_tracker
exit 0
fi
cd $HOME_DIR/$PACKAGE_NAME
git checkout $PACKAGE_VERSION
PACKAGE_VERSION=$(jq -r ".version" package.json)
# run the test command from test.sh
if ! { npm install && npm audit fix && npm audit fix --force; }; then
echo "------------------$PACKAGE_NAME:install_fails-------------------------------------"
echo "$PACKAGE_URL $PACKAGE_NAME"
echo "$PACKAGE_NAME | $PACKAGE_URL | $PACKAGE_VERSION | $OS_NAME | GitHub | Fail | Install_Fails"
exit 0
fi
cd $HOME_DIR/$PACKAGE_NAME
if ! npm test; then
echo "------------------$PACKAGE_NAME:install_success_but_test_fails---------------------"
echo "$PACKAGE_URL $PACKAGE_NAME"
echo "$PACKAGE_NAME | $PACKAGE_URL | $PACKAGE_VERSION | $OS_NAME | GitHub | Fail | Install_success_but_test_Fails"
exit 0
else
echo "------------------$PACKAGE_NAME:install_&_test_both_success-------------------------"
echo "$PACKAGE_URL $PACKAGE_NAME"
echo "$PACKAGE_NAME | $PACKAGE_URL | $PACKAGE_VERSION | $OS_NAME | GitHub | Pass | Both_Install_and_Test_Success"
exit 0
fi
|
ppc64le/build-scripts
|
a/arr-union/arr-union_rhel_8.3.sh
|
Shell
|
apache-2.0
| 3,066 |
#!/bin/bash
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# This script performs cloud training for a PyTorch model.
echo "Submitting AI Platform PyTorch job"
# BUCKET_NAME: Change to your bucket name.
BUCKET_NAME=<your bucket>
# The PyTorch image provided by AI Platform Training.
IMAGE_URI=gcr.io/cloud-ml-public/training/pytorch-cpu.1-4
# JOB_NAME: the name of your job running on AI Platform.
JOB_NAME=pytorch_job_$(date +%Y%m%d_%H%M%S)
PACKAGE_PATH=./trainer # this can be a GCS location to a zipped and uploaded package
# REGION: select a region from https://cloud.google.com/ml-engine/docs/regions
# or use the default '`us-central1`'. The region is where the job will be run.
REGION=us-central1
# JOB_DIR: Where to store prepared package and upload output model.
JOB_DIR=gs://${BUCKET_NAME}/${JOB_NAME}/models
# Datasets are set by datasets/download-taxi.sh script
TRAIN_FILES=${GCS_TAXI_TRAIN_SMALL}
EVAL_FILES=${GCS_TAXI_EVAL_SMALL}
# Define the HyperparameterSpec used for HPTuning.
cat > config.yaml <<EOF
trainingInput:
hyperparameters:
goal: MINIMIZE
hyperparameterMetricTag: test_loss
maxTrials: 2
maxParallelTrials: 2
enableTrialEarlyStopping: True
params:
- parameterName: learning-rate
type: DOUBLE
minValue: 0.0001
maxValue: 1
scaleType: UNIT_LOG_SCALE
- parameterName: batch-size
type: INTEGER
minValue: 1
maxValue: 256
scaleType: UNIT_LINEAR_SCALE
EOF
gcloud ai-platform jobs submit training ${JOB_NAME} \
--region ${REGION} \
--master-image-uri ${IMAGE_URI} \
--scale-tier BASIC \
--job-dir ${JOB_DIR} \
--module-name trainer.task \
--package-path ${PACKAGE_PATH} \
--config config.yaml \
-- \
--train-files ${TRAIN_FILES} \
--eval-files ${EVAL_FILES} \
--num-epochs 10
# Stream the logs from the job
gcloud ai-platform jobs stream-logs ${JOB_NAME}
# Verify the model was exported
echo "Verify the model was exported:"
gsutil ls -r ${JOB_DIR}
|
GoogleCloudPlatform/ai-platform-samples
|
training/pytorch/structured/python_package/scripts/train-hptuning.sh
|
Shell
|
apache-2.0
| 2,613 |
#!/bin/sh
# $Id: cleanbuilds.sh 95621 2012-03-17 12:12:23Z johnnyw $
if test -z $1; then CURRENTDATE=YYYY_MM_DD; else CURRENTDATE=$1; fi
wget http://teststat.theaceorb.nl/teststat/cleanbuild_testfails-$CURRENTDATE.txt -O cleanbuild.txt
#
grep -h \!FIXED_BUGS_ONLY ${ACE_ROOT}/tests/*.lst ${ACE_ROOT}/bin/*.lst ${TAO_ROOT}/bin/*.lst ${CIAO_ROOT}/bin/*.lst ${DANCE_ROOT}/bin/*.lst | sed -e "s/^\([^\:]*\).*/\1/" | sed -e "s/\(\/run_test.pl\)\?\s*$//" > Ignore.txt
#
cat cleanbuild.txt | grep -v -f Ignore.txt | tee cleanbuildresults.txt
|
batmancn/TinySDNController
|
ACE_wrappers/bin/cleanbuilds.sh
|
Shell
|
apache-2.0
| 538 |
#!/usr/bin/env sh
cp -R /usr/src/demo/authenticatedws /usr/src/
npm install --global /usr/src/demo/authenticatedws
npm install --global /usr/src/demo/broker
npm install --global /usr/src/demo/collector
npm install --global /usr/src/demo/dispatcher
|
patriziobruno/wsqdemo
|
install.sh
|
Shell
|
apache-2.0
| 248 |
#!/bin/sh
# Copyright 2013 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
PLATFORM=android-21
OUT_PATH=../build/javadoc
cd `dirname $0`
source _locals.sh
javadoc -linkoffline http://developer.android.com/reference ${ANDROID_SDK}/docs/reference \
-sourcepath ../src/main/java:../build/source/aidl/debug \
-classpath ${ANDROID_SDK}/platforms/${PLATFORM}/android.jar:${ANDROID_SDK}/tools/support/annotations.jar \
-d ${OUT_PATH} \
    -notree -nonavbar -noindex -nohelp -nodeprecated \
-stylesheetfile javadoc_stylesheet.css \
-windowtitle "DashClock API" \
-doctitle "DashClock API" \
com.google.android.apps.dashclock.api
cp prettify* ${OUT_PATH}/resources/
python tweak_javadoc_html.py ${OUT_PATH}/
|
ifengtech/dashclock
|
api/javadoc-scripts/generate_javadoc.sh
|
Shell
|
apache-2.0
| 1,283 |
#!/usr/bin/env bash
# This code will be executed when the build finishes
# or on an interrupt signal.
# Skip cleanup when SKIP_CLEANUP is set to "true".
[[ "$SKIP_CLEANUP" == "true" ]] && return
echo -n "CLEANUP..."
$SUDO rm -fr "${BUILD_WORSPACE}/*"
echo " [DONE]"
|
weldpua2008/compage
|
ExampleProject/cleanup.sh
|
Shell
|
apache-2.0
| 230 |
#!/bin/bash
#
# Example usage:
# flowchart.sh azure-operator-master --tenant-cluster xyz99 --open-browser
#
# where 'azure-operator-master' is App CR name and 'xyz99' is tenant cluster ID
# and '--open-browser' flag indicates that a default browser will open the
# generated flowchart.
#
# check if required CLI tools are installed
for required in kubectl jq
do
if [ ! -x "$(command -v $required)" ]
then
echo "[err] The required command $required was not found in your system. Aborting."
exit 1
fi
done
args=()
while [[ $# -gt 0 ]]; do
flag="$1"
case $flag in
-b|--open-browser)
flag_open_browser=1
shift
;;
-t|--tenant-cluster)
tenant_cluster_id="$2"
shift
shift
;;
-c|--kube-context)
kubecontext="$2"
shift
shift
;;
-o|--output)
output_file="$2"
shift
shift
;;
*)
args+=("$1")
shift
;;
esac
done
# restore positional arguments
set -- "${args[@]}"
azure_operator_app=$1
if [ -z "$azure_operator_app" ]; then
    echo "[err] azure-operator CR name must be specified"
exit 1
fi
# set default kube context
if [ -z "$kubecontext" ]; then
kubecontext="$(kubectl config current-context)"
fi
# get logs
logfile="/tmp/${azure_operator_app}.logs"
if ! kubectl --context "${kubecontext}" -n giantswarm logs deployment/${azure_operator_app} > "${logfile}"; then
echo "[err] azure-operator app '$azure_operator_app' not found"
exit 1
fi
# filter by event message, get only state change events
query='. | select(.message | test("state changed")) '
# filter by tenant cluster ID
if ! [ -z "$tenant_cluster_id" ]; then
query+="| select(.object | endswith(\"/azureconfigs/$tenant_cluster_id\")) "
generated_flowchart="${azure_operator_app}.${tenant_cluster_id}.flowchart.generated.html"
else
generated_flowchart="${azure_operator_app}.flowchart.generated.html"
fi
if [ -z "$output_file" ]; then
output_file="${generated_flowchart}"
fi
# echo state transition in format 'stateX --> stateY'
query+='| " " +
(if (.oldState | length) > 0 then .oldState else "DeploymentUninitialized" end) +
" --> " +
(if (.newState | length) > 0 then .newState else "DeploymentUninitialized" end)'
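# Illustrative mapping: a log line such as
#   {"message":"state changed","object":".../azureconfigs/xyz99","oldState":"","newState":"DeploymentInitialized"}
# yields the transition line "DeploymentUninitialized --> DeploymentInitialized".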
# indented transition lines: " stateX --> stateY"
transitions=$(cat "${logfile}" \
| jq -r "$query" \
| sort \
| uniq)
mermaid="graph TD
${transitions}"
script_dir="$( cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd )"
template=$(cat "${script_dir}/flowchart.template.html")
# generate flowchart
echo "${template/_FLOWCHART_DATA_/$mermaid}" > "${output_file}"
echo "Generated flowchart in file '${output_file}'."
# open default browser
if [ "$flag_open_browser" == 1 ]; then
if which xdg-open > /dev/null; then
xdg-open "${output_file}"
elif which gnome-open > /dev/null; then
gnome-open "${output_file}"
fi
fi
|
giantswarm/azure-operator
|
scripts/flowchart.sh
|
Shell
|
apache-2.0
| 3,075 |
SOLR_DIR=/home/mlp/apps/solr
SOLR_HOME=`pwd`/solr-conf
DEBUG_OPTS="-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=1044"
cd $SOLR_DIR/bin
# . solr start -s $SOLR_HOME
. solr start -s $SOLR_HOME -a "${DEBUG_OPTS}"
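# The JVM starts with a JDWP socket on port 1044 and does not wait (suspend=n).
# To attach a debugger from the command line, e.g.:
#   jdb -connect com.sun.jdi.SocketAttach:hostname=localhost,port=1044
# or point your IDE's remote-debug configuration at the same port.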
|
flaxsearch/BioSolr
|
spot/spot-ontology/ontology_solr.sh
|
Shell
|
apache-2.0
| 232 |
#!/bin/sh
export PYTHONPATH=$PYTHONPATH:./libs
ls problem_*py | xargs -n1 python
|
mytram/learning-python
|
projecteuler.net/run-all.sh
|
Shell
|
apache-2.0
| 83 |
#!/bin/bash
source ~/.profile
if [ ! -d /usr/local/secor ]; then
cd /usr/local
sudo git clone https://github.com/pinterest/secor.git
sudo mkdir /usr/local/secor/bin
fi
if ! grep -q "export SECOR_HOME" ~/.profile; then
echo -e "\nexport SECOR_HOME=/usr/local/secor\nexport PATH=\$PATH:\$SECOR_HOME/bin" | cat >> ~/.profile
fi
. ~/.profile
sudo chown -R ubuntu $SECOR_HOME
cd $SECOR_HOME
sudo mvn clean package
sudo tar -zxvf ./target/secor-*-SNAPSHOT-bin.tar.gz -C ./bin/
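# Quick sanity check of the unpacked distribution (layout per the steps above):
#   ls $SECOR_HOME/bin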
|
InsightDataScience/pegasus
|
install/kibana/install_kibana.sh
|
Shell
|
apache-2.0
| 488 |
#!/bin/bash
DIR=$(cd "$(dirname "$0")"; pwd)
sh $DIR/maven.sh clean install -q -DskipTests -s $DIR/central-settings.xml
|
jooby-project/jooby
|
etc/travis-install.sh
|
Shell
|
apache-2.0
| 122 |
#!/bin/bash
# Make the build fail on errors.
set -e
# Strip the credentials prefix to avoid leaking them in logs.
echo "repository=$(echo $repository | sed s/^.*@//g)"
echo "package_type=$package_type"
echo "packages=$packages"
echo "upgrade=$upgrade"
# Strip leading/trailing quotes if present.
repository=`echo $repository | sed 's/^"\(.*\)"$/\1/'`
# Strip leading/trailing quotes if present.
# Also convert a comma-separated list to a whitespace-separated one.
packages=`echo $packages | sed 's/^"\(.*\)"$/\1/' | sed 's/,/ /g'`
function provision_deb() {
# https://www.packer.io/docs/builders/amazon-chroot.html look at gotchas at the end.
if [[ "$disable_services" == "true" ]]; then
echo "creating /usr/sbin/policy-rc.d to prevent services from being started"
echo '#!/bin/sh' | sudo tee /usr/sbin/policy-rc.d > /dev/null
echo 'exit 101' | sudo tee -a /usr/sbin/policy-rc.d > /dev/null
sudo chmod a+x /usr/sbin/policy-rc.d
fi
if [[ "$repository" != "" ]]; then
IFS=';' read -ra repo <<< "$repository"
for i in "${repo[@]}"; do
echo "deb $i" | sudo tee -a /etc/apt/sources.list.d/spinnaker.list > /dev/null
done
fi
sudo apt-get update
if [[ "$upgrade" == "true" ]]; then
sudo unattended-upgrade -v
fi
# Enforce the package installation order.
for package in $packages; do sudo apt-get install --force-yes -y $package; done
# https://www.packer.io/docs/builders/amazon-chroot.html look at gotchas at the end.
if [[ "$disable_services" == "true" ]]; then
echo "removing /usr/sbin/policy-rc.d"
sudo rm -f /usr/sbin/policy-rc.d
fi
if [[ "$repository" != "" ]]; then
# Cleanup repository configuration
sudo rm /etc/apt/sources.list.d/spinnaker.list
fi
}
function provision_rpm() {
if [[ "$repository" != "" ]]; then
cat > /tmp/spinnaker.repo <<EOF
[spinnaker]
name=spinnaker
baseurl=$repository
gpgcheck=0
enabled=1
EOF
sudo mv /tmp/spinnaker.repo /etc/yum.repos.d/
fi
if [[ "$upgrade" == "true" ]]; then
sudo yum -y update
fi
# Enforce the package installation order.
for package in $packages; do sudo yum -y install $package; done
if [[ "$repository" != "" ]]; then
# Cleanup repository configuration
sudo rm /etc/yum.repos.d/spinnaker.repo
fi
}
function main() {
if [[ "$package_type" == "deb" ]]; then
provision_deb
elif [[ "$package_type" == "rpm" ]]; then
provision_rpm
fi
}
main
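# Example invocation (hypothetical values; in practice Packer injects these as
# environment variables before running the script):
#   repository='https://example.com/apt trusty spinnaker' package_type=deb \
#   packages='spinnaker-rosco' upgrade=false disable_services=true ./install_packages.sh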
|
cfieber/rosco
|
rosco-web/config/packer/install_packages.sh
|
Shell
|
apache-2.0
| 2,425 |
#!/bin/bash
# The call to setenv.sh can be commented out if necessary.
export XAP_HOME=`dirname $0`/../../
. `dirname $0`/../../bin/setenv.sh
JAVACMD="${JAVA_HOME}/bin/java"
CPS=":"
export CPS
VELOCITY_JARS="${XAP_HOME}"/lib/platform/velocity/*
export VELOCITY_JARS
COMMONS_JARS="${XAP_HOME}"/lib/platform/commons/*
export COMMONS_JARS
COMMAND_LINE="${JAVACMD} ${JAVA_OPTIONS} -Dlb.vmDir=${XAP_HOME}/tools/apache ${RMI_OPTIONS} ${XAP_OPTIONS} -Djava.security.policy=${POLICY} -Dcom.gs.home=${XAP_HOME} -classpath ${PRE_CLASSPATH}:${GS_JARS}:${SPRING_JARS}:${JDBC_JARS}:${VELOCITY_JARS}:${COMMONS_JARS}:${POST_CLASSPATH} org.openspaces.pu.container.jee.lb.apache.ApacheLoadBalancerAgent $*"
echo
echo
echo Starting apache-lb-agent with line:
echo ${COMMAND_LINE}
${COMMAND_LINE}
echo
echo
|
Gigaspaces/xap-openspaces
|
tools/apache/apache-lb-agent.sh
|
Shell
|
apache-2.0
| 800 |
#!/bin/bash
# This is a basic online neural net training.
# After this you can do discriminative training with run_nnet2_discriminative.sh.
set -e
#set -x
train_stage=-10
use_gpu=true
test_sets=
nj=8
num_jobs_nnet=3
gauss=19200
pdf=9000
srcdir=   # default $EXP/tri2b, resolved after the positional args are read
tgtdir=   # default $EXP/nnet2
. utils/parse_options.sh
if [ $# -ne 2 ] ; then
  echo "Usage: $0 <WORK> <EXP>"
exit 1
fi
WORK=$1
EXP=$2
# resolve the defaults now that EXP is known; --srcdir/--tgtdir still override them
srcdir=${srcdir:-$EXP/tri2b}
tgtdir=${tgtdir:-$EXP/nnet2}
if $use_gpu; then
if ! cuda-compiled; then
cat <<EOF && exit 1
This script is intended to be used with GPUs but you have not compiled Kaldi with CUDA
If you want to use GPUs (and have them), go to src/, and configure and make on a machine
where "nvcc" is installed.
EOF
fi
parallel_opts="-l gpu=1"
num_threads=1
minibatch_size=512
else
num_threads=4
parallel_opts="-pe smp $num_threads"
minibatch_size=128
fi
nj=`cat $srcdir/num_jobs` || exit 1;
mkdir -p $tgtdir
# Because we have a lot of data here and we don't want the training to take
# too long, we reduce the number of epochs from the defaults (15 + 5) to
# (8 + 4).
# The number of parameters is a bit smaller than the baseline system we had in mind
# (../nnet2/run_5d.sh), which had pnorm input/output dim 2000/400 and 4 hidden
# layers, versus our 2400/300 and 4 hidden layers, even though we're training
# on more data than the baseline system (we're changing the proportions of
# input/output dim to be closer to 10:1 which is what we believe works well).
# The motivation of having fewer parameters is that we want to demonstrate the
# capability of doing real-time decoding, and if the network were too big we
# wouldn't be able to decode in real-time using a CPU.
#
# I copied the learning rates from wsj/s5/local/nnet2/run_5d.sh
local/check.sh steps/nnet2/train_pnorm_simple2.sh --stage $train_stage \
--num-epochs 8 \
--splice-width 7 --feat-type raw \
--cmvn-opts "--norm-means=false --norm-vars=false" \
--num-threads "$num_threads" \
--minibatch-size "$minibatch_size" \
--parallel-opts "$parallel_opts" \
--num-jobs-nnet $num_jobs_nnet \
--num-hidden-layers 4 \
--mix-up 4000 \
--initial-learning-rate 0.01 --final-learning-rate 0.001 \
--cmd "$gpu_cmd" \
--pnorm-input-dim 2400 \
--pnorm-output-dim 300 \
--combine-num-threads 1 \
--combine-parallel-opts "$parallel_opts" \
$WORK/train $WORK/lang ${srcdir}_ali $tgtdir || exit 1;
exit 0
|
UFAL-DSG/kams
|
kams/local/run_nnet_online.sh
|
Shell
|
apache-2.0
| 2,401 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# shellcheck shell=bash
set -euo pipefail
declare -a packages
MYSQL_VERSION="8.0"
readonly MYSQL_VERSION
COLOR_BLUE=$'\e[34m'
readonly COLOR_BLUE
COLOR_RESET=$'\e[0m'
readonly COLOR_RESET
: "${INSTALL_MYSQL_CLIENT:?Should be true or false}"
install_mysql_client() {
echo
echo "${COLOR_BLUE}Installing mysql client version ${MYSQL_VERSION}${COLOR_RESET}"
echo
if [[ "${1}" == "dev" ]]; then
packages=("libmysqlclient-dev" "mysql-client")
elif [[ "${1}" == "prod" ]]; then
packages=("libmysqlclient21" "mysql-client")
else
echo
echo "Specify either prod or dev"
echo
exit 1
fi
local key="467B942D3A79BD29"
readonly key
GNUPGHOME="$(mktemp -d)"
export GNUPGHOME
set +e
for keyserver in $(shuf -e ha.pool.sks-keyservers.net hkp://p80.pool.sks-keyservers.net:80 \
keyserver.ubuntu.com hkp://keyserver.ubuntu.com:80)
do
gpg --keyserver "${keyserver}" --recv-keys "${key}" 2>&1 && break
done
set -e
gpg --export "${key}" > /etc/apt/trusted.gpg.d/mysql.gpg
gpgconf --kill all
rm -rf "${GNUPGHOME}"
unset GNUPGHOME
echo "deb http://repo.mysql.com/apt/debian/ buster mysql-${MYSQL_VERSION}" | tee -a /etc/apt/sources.list.d/mysql.list
apt-get update
apt-get install --no-install-recommends -y "${packages[@]}"
apt-get autoremove -yqq --purge
apt-get clean && rm -rf /var/lib/apt/lists/*
}
# Install MySQL client from Oracle repositories (Debian installs mariadb)
# But only if it is not disabled
if [[ ${INSTALL_MYSQL_CLIENT:="true"} == "true" ]]; then
install_mysql_client "${@}"
fi
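# Example (assumed to run as root during the image build, since the apt-get and
# tee calls above are not prefixed with sudo):
#   INSTALL_MYSQL_CLIENT="true" bash install_mysql.sh dev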
|
Acehaidrey/incubator-airflow
|
scripts/docker/install_mysql.sh
|
Shell
|
apache-2.0
| 2,458 |
#!/bin/bash
ip=$1
host=$2
port=$3
user=$4
password=$5
dbname=$6
community=$7
filter_os_disk=$8
tags=$9
#echo $tags
if [ -z "$filter_os_disk" ]; then
filter_os_disk="none"
fi
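# Example invocation (hypothetical values; argument order per the assignments above):
#   ./check_os.sh 10.0.0.5 127.0.0.1 3306 monitor secret monitordb public "tmpfs|cdrom" prod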
mysql_client="mysql -h${host} -P${port} -u${user} -p${password}"
hostname=`/usr/bin/snmpwalk -v1 -c ${community} ${ip} SNMPv2-MIB::sysName.0|awk '{print $NF}'`
if [ "$hostname" != "" ];then
kernel=`snmpwalk -v1 -c ${community} ${ip} SNMPv2-MIB::sysDescr.0|awk '{print $4 " " $6 " " $15}'`
system_date=`/usr/bin/snmpwalk -v1 -c ${community} ${ip} HOST-RESOURCES-MIB::hrSystemDate.0|cut -d '=' -f2|cut -d ' ' -f3`
system_uptime=`/usr/bin/snmpwalk -v1 -c ${community} ${ip} HOST-RESOURCES-MIB::hrSystemUptime.0|cut -d ')' -f2`
process=`/usr/bin/snmpwalk -v1 -c ${community} ${ip} HOST-RESOURCES-MIB::hrSystemProcesses.0|cut -d ' ' -f4`
load_1=`/usr/bin/snmpwalk -v1 -c ${community} ${ip} UCD-SNMP-MIB::laLoad.1|awk '{print $NF}'`
load_5=`/usr/bin/snmpwalk -v1 -c ${community} ${ip} UCD-SNMP-MIB::laLoad.2|awk '{print $NF}'`
load_15=`/usr/bin/snmpwalk -v1 -c ${community} ${ip} UCD-SNMP-MIB::laLoad.3|awk '{print $NF}'`
cpu_user_time=`/usr/bin/snmpwalk -v1 -c ${community} ${ip} UCD-SNMP-MIB::ssCpuUser.0 |awk '{print $NF}'`
cpu_system_time=`/usr/bin/snmpwalk -v1 -c ${community} ${ip} UCD-SNMP-MIB::ssCpuSystem.0 |awk '{print $NF}'`
cpu_idle_time=`/usr/bin/snmpwalk -v1 -c ${community} ${ip} UCD-SNMP-MIB::ssCpuIdle.0 |awk '{print $NF}'`
swap_total=`/usr/bin/snmpwalk -v1 -c ${community} ${ip} UCD-SNMP-MIB::memTotalSwap.0 |cut -d= -f2 |awk -F ' ' '{print $2}'`
swap_avail=`/usr/bin/snmpwalk -v1 -c ${community} ${ip} UCD-SNMP-MIB::memAvailSwap.0 |cut -d= -f2 |awk -F ' ' '{print $2}'`
mem_total=`/usr/bin/snmpwalk -v1 -c ${community} ${ip} UCD-SNMP-MIB::memTotalReal.0 |cut -d= -f2 |awk -F ' ' '{print $2}'`
mem_avail=`/usr/bin/snmpwalk -v1 -c ${community} ${ip} UCD-SNMP-MIB::memAvailReal.0 |cut -d= -f2 |awk -F ' ' '{print $2}'`
mem_free=`/usr/bin/snmpwalk -v1 -c ${community} ${ip} UCD-SNMP-MIB::memTotalFree.0 |cut -d= -f2 |awk -F ' ' '{print $2}'`
mem_shared=`/usr/bin/snmpwalk -v1 -c ${community} ${ip} UCD-SNMP-MIB::memShared.0 |cut -d= -f2 |awk -F ' ' '{print $2}'`
mem_buffered=`/usr/bin/snmpwalk -v1 -c ${community} ${ip} UCD-SNMP-MIB::memBuffer.0 |cut -d= -f2 |awk -F ' ' '{print $2}'`
mem_cached=`/usr/bin/snmpwalk -v1 -c ${community} ${ip} UCD-SNMP-MIB::memCached.0 |cut -d= -f2 |awk -F ' ' '{print $2}'`
mem_usage_rate=`/usr/bin/snmpdf -v1 -c ${community} ${ip} |grep "Physical"|awk '{print $6}'`
mem_available=$((mem_avail+mem_buffered+mem_cached))
if [ -z "$mem_shared" ]; then
mem_shared=0
fi
#disk usage
# archive the previous snapshot once before refreshing the per-mount rows,
# so every mount point (not just the last one) survives in os_disk
$mysql_client -N -e "insert into $dbname.os_disk_his select *, DATE_FORMAT(sysdate(),'%Y%m%d%H%i%s') from $dbname.os_disk where ip='${ip}';"
$mysql_client -N -e "delete from $dbname.os_disk where ip='${ip}';"
IFS=$'\n'
disk_all=`/usr/bin/snmpdf -v1 -c ${community} ${ip} |grep -E "/"|grep -vE "/dev|/boot" |grep -vE "$filter_os_disk"`
for line in $disk_all
do
IFS=' '
mounted=`echo $line|awk -F ' ' '{print $1}' `
total_size=`echo $line|awk -F ' ' '{print $2}' `
used_size=`echo $line|awk -F ' ' '{print $3}' `
avail_size=`echo $line|awk -F ' ' '{print $4}' `
used_rate=`echo $line|awk -F ' ' '{print $5}' `
$mysql_client -N -e "insert into $dbname.os_disk(ip,tags,mounted,total_size,used_size,avail_size,used_rate) values('${ip}','${tags}','${mounted}','${total_size}','${used_size}','${avail_size}','${used_rate}')"
done
#disk io
# archive the previous snapshot once before refreshing the per-device rows
$mysql_client -N -e "insert into $dbname.os_diskio_his select *, DATE_FORMAT(sysdate(),'%Y%m%d%H%i%s') from $dbname.os_diskio where ip='${ip}';"
$mysql_client -N -e "delete from $dbname.os_diskio where ip='${ip}';"
IFS=$'\n'
disk_io_reads_total=0
disk_io_writes_total=0
fdisk_io_string=`/usr/bin/snmpwalk -v1 -c ${community} ${ip} UCD-DISKIO-MIB::diskIODevice |grep -ivE "ram|loop|md"`
for line in $fdisk_io_string
do
IFS=' '
fdisk=`echo $line|awk -F ' ' '{print $4}'`
fdisk_id=`echo $line|awk -F ' ' '{print $1}'|awk -F '.' '{print $2}'`
disk_io_reads_1=`/usr/bin/snmpwalk -v1 -c ${community} ${ip} UCD-DISKIO-MIB::diskIOReads.$fdisk_id|awk '{print $NF}'`
disk_io_writes_1=`/usr/bin/snmpwalk -v1 -c ${community} ${ip} UCD-DISKIO-MIB::diskIOWrites.$fdisk_id|awk '{print $NF}'`
sleep 1
disk_io_reads_2=`/usr/bin/snmpwalk -v1 -c ${community} ${ip} UCD-DISKIO-MIB::diskIOReads.$fdisk_id|awk '{print $NF}'`
disk_io_writes_2=`/usr/bin/snmpwalk -v1 -c ${community} ${ip} UCD-DISKIO-MIB::diskIOWrites.$fdisk_id|awk '{print $NF}'`
disk_io_reads=$(($disk_io_reads_2-$disk_io_reads_1))
disk_io_writes=$(($disk_io_writes_2-$disk_io_writes_1))
$mysql_client -N -e "insert into $dbname.os_diskio(ip,tags,fdisk,disk_io_reads,disk_io_writes) values('${ip}','${tags}','${fdisk}','${disk_io_reads}','${disk_io_writes}')"
let disk_io_reads_total=$disk_io_reads_total+$disk_io_reads
let disk_io_writes_total=$disk_io_writes_total+$disk_io_writes
done
#net
# archive the previous snapshot once before refreshing the per-interface rows
$mysql_client -N -e "insert into $dbname.os_net_his select *, DATE_FORMAT(sysdate(),'%Y%m%d%H%i%s') from $dbname.os_net where ip='${ip}';"
$mysql_client -N -e "delete from $dbname.os_net where ip='${ip}';"
IFS=$'\n'
net_in_bytes_total=0
net_out_bytes_total=0
net_descr_string=`/usr/bin/snmpwalk -v1 -c ${community} ${ip} IF-MIB::ifDescr`
for line in $net_descr_string
do
IFS=' '
net_descr=`echo $line|awk -F '=' '{print $2}' |awk -F ': ' '{print $2}'`
net_descr_id=`echo $line|awk -F ' ' '{print $1}'|awk -F '.' '{print $2}'`
in_bytes_1=`/usr/bin/snmpwalk -v1 -c ${community} ${ip} IF-MIB::ifInOctets.$net_descr_id|awk '{print $NF}'`
out_bytes_1=`/usr/bin/snmpwalk -v1 -c ${community} ${ip} IF-MIB::ifOutOctets.$net_descr_id|awk '{print $NF}'`
sleep 1
in_bytes_2=`/usr/bin/snmpwalk -v1 -c ${community} ${ip} IF-MIB::ifInOctets.$net_descr_id|awk '{print $NF}'`
out_bytes_2=`/usr/bin/snmpwalk -v1 -c ${community} ${ip} IF-MIB::ifOutOctets.$net_descr_id|awk '{print $NF}'`
in_bytes=$(($in_bytes_2-$in_bytes_1))
out_bytes=$(($out_bytes_2-$out_bytes_1))
$mysql_client -N -e "insert into $dbname.os_net(ip,tags,if_descr,in_bytes,out_bytes) values('${ip}','${tags}','${net_descr}','${in_bytes}','${out_bytes}')"
let net_in_bytes_total=$net_in_bytes_total+$in_bytes
let net_out_bytes_total=$net_out_bytes_total+$out_bytes
done
$mysql_client -N -e "insert into $dbname.os_status_his select *, DATE_FORMAT(sysdate(),'%Y%m%d%H%i%s') from $dbname.os_status where ip='${ip}';"
$mysql_client -N -e "delete from $dbname.os_status where ip='${ip}';"
$mysql_client -N -e "insert into $dbname.os_status(ip,snmp,tags,hostname,kernel,system_date,system_uptime,process,load_1,load_5,load_15,cpu_user_time,cpu_system_time,cpu_idle_time,swap_total,swap_avail,mem_total,mem_avail,mem_free,mem_shared,mem_buffered,mem_cached,mem_usage_rate,mem_available,disk_io_reads_total,disk_io_writes_total,net_in_bytes_total,net_out_bytes_total) values('${ip}',1,'${tags}','${hostname}','${kernel}','${system_date}','${system_uptime}','${process}','${load_1}','${load_5}','${load_15}','${cpu_user_time}','${cpu_system_time}','${cpu_idle_time}','${swap_total}','${swap_avail}','${mem_total}','${mem_avail}','${mem_free}','${mem_shared}','${mem_buffered}','${mem_cached}','${mem_usage_rate}','${mem_available}','${disk_io_reads_total}','${disk_io_writes_total}','${net_in_bytes_total}','${net_out_bytes_total}')"
else
$mysql_client -N -e "insert into $dbname.os_status_his select *, DATE_FORMAT(sysdate(),'%Y%m%d%H%i%s') from $dbname.os_status where ip='${ip}';"
$mysql_client -N -e "delete from $dbname.os_status where ip='${ip}';"
$mysql_client -N -e "insert into $dbname.os_status(ip,tags,snmp) values('${ip}','${tags}',0)"
fi
|
JK-Warriors/Heimdallr
|
python/check_os.sh
|
Shell
|
apache-2.0
| 8,271 |
#!/bin/bash
ORACLE_JAVA_URL="http://download.oracle.com/otn-pub/java/jdk"
ORACLE_JAVA7_URL="${ORACLE_JAVA7_URL:-$ORACLE_JAVA_URL/7u80-b15/jdk-7u80}"
ORACLE_JAVA7_NAME="jdk1.7.0_80"
ORACLE_JAVA8_URL="${ORACLE_JAVA8_URL:-$ORACLE_JAVA_URL/8u74-b02/jdk-8u74}"
ORACLE_JAVA8_NAME="jdk1.8.0_74"
function setup_java {
    # Default to Java version 8
local VERSION="${1:-8}"
echo "Setup Java version: $VERSION"
if test_java_version "$VERSION" && setup_java_env; then
echo "Current Java version is already $VERSION."
elif select_java "$VERSION"; then
echo "Java version $VERSION has been selected."
elif install_openjdk "$VERSION" && select_java "$VERSION"; then
echo "OpenJDK version $VERSION has been installed and selected."
elif install_other_java "$VERSION" && select_java "$VERSION"; then
echo "Some Java version $VERSION has been installed and selected."
else
echo "ERROR: Unable to setup Java version $VERSION."
return 1
fi
return 0
}
function setup_java_env() {
local JAVA_COMMAND="${1:-${JAVA:-java}}"
JAVA_LINK="$(which $JAVA_COMMAND)"
if [[ "$JAVA_LINK" == "" ]]; then
return 1
fi
export JAVA="$(readlink -f $JAVA_LINK)"
export JAVA_HOME=$(echo $JAVA | sed "s:/bin/java::" | sed "s:/jre::")
if [ "$JAVA" != "$(readlink -f $(which java))" ]; then
export PATH="$(dirname $JAVA):$PATH"
if [ "$JAVA" != "$(readlink -f $(which java))" ]; then
echo "Unable to set $JAVA as current."
return 1
fi
fi
echo "JAVA is: $JAVA"
echo "JAVA_HOME is: $JAVA_HOME"
echo "Java version is:"
$JAVA -version 2>&1
}
function select_java {
local VERSION="$1"
local COMMAND
for COMMAND in $(list_java_commands); do
if test_java_version "$VERSION" "$COMMAND"; then
if setup_java_env "$COMMAND"; then
return 0
fi
fi
done
echo 'Required java version not found.'
return 1
}
function test_java_version {
local EXPECTED_VERSION="'"*' version "1.'$1'.'*'"'"'"
local COMMAND="${2:-${JAVA:-java}}"
local ACTUAL_VERSION="'"$($COMMAND -version 2>&1 | head -n 1)"'"
if [[ $ACTUAL_VERSION == $EXPECTED_VERSION ]]; then
echo "Found matching java version: $ACTUAL_VERSION"
return 0
else
return 1
fi
}
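# Example (hypothetical binary path): returns 0 only if that binary reports
# a version matching "1.8.*":
#   test_java_version 8 /usr/lib/jvm/java-8-openjdk-amd64/jre/bin/java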
if is_ubuntu; then
# --- Ubuntu -------------------------------------------------------------
function list_java_commands {
update-alternatives --list java
}
function install_openjdk {
local REQUIRED_VERSION="$1"
apt_get install "openjdk-$REQUIRED_VERSION-jre-headless"
}
function install_other_java {
local VERSION="$1"
local PPA_REPOSITORY="ppa:webupd8team/java"
local JAVA_INSTALLER="oracle-java${VERSION}-installer"
local JAVA_SET_DEFAULT="oracle-java${VERSION}-set-default"
# Accept installer license
echo "$JAVA_INSTALLER" shared/accepted-oracle-license-v1-1 select true | sudo /usr/bin/debconf-set-selections
# Remove all existing set-default versions
apt_get remove oracle-java*-set-default
if apt_get install $JAVA_INSTALLER ; then
if apt_get install $JAVA_SET_DEFAULT ; then
return 0 # Some PPA was already providing desired packages
fi
fi
# Add PPA only when package is not available
if apt_get install software-properties-common; then
        # I pipe this after echo to emulate a user key-press
if echo | sudo add-apt-repository "$PPA_REPOSITORY"; then
if apt_get update; then
if apt_get install $JAVA_INSTALLER ; then
if apt_get install $JAVA_SET_DEFAULT ; then
return 0
fi
fi
fi
fi
fi
# Something has gone wrong!
return 1
}
else
# --- Red Hat -------------------------------------------------------------
function list_java_commands {
alternatives --display java 2>&1 | grep -v '^[[:space:]]' | awk '/[[:space:]]- priority[[:space:]]/{print $1}'
}
function install_openjdk {
local VERSION="$1"
yum_install java-1.$VERSION.*-openjdk-headless
}
function install_other_java {
local VERSION="$1"
if [[ "$(uname -m)" == "x86_64" ]]; then
local ARCH=linux-x64
else
local ARCH=linux-i586
fi
if [[ "$VERSION" == "7" ]]; then
ORIGIN=$ORACLE_JAVA7_URL
TARGET=$ORACLE_JAVA7_NAME
elif [[ "$VERSION" == "8" ]]; then
ORIGIN=$ORACLE_JAVA8_URL
TARGET=$ORACLE_JAVA8_NAME
else
echo "Unsupported Java version: $VERSION."
return 1
fi
local NEW_JAVA="/usr/java/$TARGET/jre/bin/java"
if test_java_version "$VERSION" "$NEW_JAVA"; then
if sudo alternatives --install /usr/bin/java java "$NEW_JAVA" 200000; then
return 0
fi
fi
local EXT
local WGET_OPTIONS="-c --no-check-certificate --no-cookies"
local HEADER="Cookie: oraclelicense=accept-securebackup-cookie"
for EXT in "rpm" "tar.gz"; do
local URL="$ORIGIN-$ARCH.$EXT"
local PACKAGE="/tmp/$(basename $URL)"
if wget $WGET_OPTIONS --header "$HEADER" "$URL" -O "$PACKAGE"; then
case "$EXT" in
"rpm")
sudo rpm -i "$PACKAGE"
;;
"tar.gz")
sudo mkdir -p /usr/java && sudo tar -C /usr/java -xzf "$PACKAGE"
;;
*)
echo "Unsupported extension: $EXT"
;;
esac
if test_java_version "$VERSION" "$NEW_JAVA"; then
if sudo alternatives --install /usr/bin/java java "$NEW_JAVA" 200000; then
return 0
fi
fi
echo "Unable to register installed java."
else
echo "Unable to download java archive: $URL"
fi
done
return 1
}
fi
|
FedericoRessi/networking-odl
|
devstack/setup_java.sh
|
Shell
|
apache-2.0
| 6,410 |
#!/bin/sh
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
function getVersion {
cat << EOF | xmllint --noent --shell pom.xml | grep content | cut -f2 -d=
setns pom=http://maven.apache.org/POM/4.0.0
xpath /pom:project/pom:version/text()
EOF
}
current_version=$(getVersion)
major_version=$(expr $current_version : '\(.*\)\..*\..*\-SNAPSHOT')
minor_version=$(expr $current_version : '.*\.\(.*\)\..*\-SNAPSHOT')
bugfix_version=$(expr $current_version : '.*\..*\.\(.*\)-SNAPSHOT')
version="$major_version.$minor_version.0"
echo "Revert the current in-progress release for apache-wicket-$version"
echo ""
echo "Press enter to continue or CTRL-C to abort \c"
read
branch="build/wicket-$version"
tag="wicket-$version"
git checkout wicket-6.x
git branch -D $branch
git tag -d $tag
svn rm https://dist.apache.org/repos/dist/dev/wicket/$version -m "Reverting release $version"
find . -name "*.releaseBackup" -exec rm {} \;
rm -f release.properties release.txt
echo ""
echo "Cleaned up the release"
echo ""
echo "Don't forget to drop the Maven staging repository"
echo ""
|
mafulafunk/wicket
|
revert-release.sh
|
Shell
|
apache-2.0
| 1,825 |
#!/usr/bin/env bash
#sudo apt-get update
#sudo apt-get install -y git xvfb nodejs npm nodejs-legacy chromium-browser libexif12 iptraf ntp
#cd
#git clone https://github.com/CodeYellowBV/run-headless-chromium.git
#cd run-headless-chromium
#npm install
#cd
#mkdir legion
#cd
sudo sed -i 's/\.ubuntu\.pool/.amazon.pool/g' /etc/ntp.conf
sleep 2
sudo service ntp stop
sleep 2
sudo service ntp start
sleep 2
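# Verify the daemon is syncing against the Amazon pool (assumed follow-up check):
#   ntpq -p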
|
albertlinde/Legion
|
applications/gdrive_tests/scripts/setup.sh
|
Shell
|
apache-2.0
| 443 |