| code (string, 2-1.05M chars) | repo_name (string, 5-110 chars) | path (string, 3-922 chars) | language (string, 1 class) | license (string, 15 values) | size (int64, 2-1.05M) |
|---|---|---|---|---|---|
#!/bin/bash
# Function sets simulation parameters
function set_simulation_parameters() {
pars=("$@") # the twelve parameter values are passed as separate positional arguments
# number of embedded mu+ from FairBoxGenerator
export LIT_NOF_MUONS_PLUS=${pars[0]}
# number of embedded mu- from FairBoxGenerator
export LIT_NOF_MUONS_MINUS=${pars[1]}
# number of embedded electrons from FairBoxGenerator
export LIT_NOF_ELECTRONS=${pars[2]}
# number of embedded positrons from FairBoxGenerator
export LIT_NOF_POSITRONS=${pars[3]}
# number of embedded pi+ from FairBoxGenerator
export LIT_NOF_PIONS_PLUS=${pars[4]}
# number of embedded pi- from FairBoxGenerator
export LIT_NOF_PIONS_MINUS=${pars[5]}
# number of embedded J/Psi decayed to mu+ and mu-
export LIT_NOF_JPSI_TO_MUONS=${pars[6]}
# number of embedded J/Psi decayed to e+ and e-
export LIT_NOF_JPSI_TO_ELECTRONS=${pars[7]}
# number of generated Au ions
export LIT_NOF_AU_IONS=${pars[8]}
# If "yes" than UrQMD will be used as background
export LIT_URQMD=${pars[9]}
# If "yes" than CbmUnigenGenerator will be used instead of FairUrqmdGenerator
export LIT_UNIGEN=${pars[10]}
# If "yes" than PLUTO will be used
export LIT_PLUTO=${pars[11]}
}
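# Illustrative call (values are hypothetical); the twelve values are passed as
# separate positional arguments, matching pars[0]..pars[11] above:
#   set_simulation_parameters 10 10 0 0 5 5 0 0 0 yes no no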
# Function sets default file names using a specified DIR and file number
function set_default_file_names() {
dir=$1
XXXX=$2
export LIT_MC_FILE=${dir}/mc.${XXXX}.root
export LIT_PAR_FILE=${dir}/param.${XXXX}.root
export LIT_MVD_DELTA_FILE=${dir}/mc.delta.${XXXX}.root
export LIT_GLOBAL_RECO_FILE=${dir}/global.reco.${XXXX}.root
export LIT_GLOBAL_HITS_FILE=${dir}/global.hits.${XXXX}.root
export LIT_GLOBAL_TRACKS_FILE=${dir}/global.tracks.${XXXX}.root
export LIT_GLOBAL_TRACKS_IDEAL_FILE=${dir}/global.tracks.ideal.${XXXX}.root
export LIT_MVD_RECO_FILE=${dir}/mvd.reco.${XXXX}.root
export LIT_PROP_ANA_FILE=${dir}/propagation.ana.${XXXX}.root
export LIT_RICH_FILE=${dir}/rich.reco.${XXXX}.root
export LIT_ELID_FILE=${dir}/elid.qa.${XXXX}.root
export LIT_FIELD_QA_FILE=${dir}/field.qa.${XXXX}.root
export LIT_FIELDAPR_QA_FILE=${dir}/fieldapr.qa.${XXXX}.root
export LIT_ANALYSIS_FILE=${dir}/analysis.${XXXX}.root
export LIT_QA_FILE=${dir}/qa.${XXXX}.root
}
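# Illustrative call (directory and file number are hypothetical):
#   set_default_file_names /data/lit 0001   # -> /data/lit/mc.0001.root, /data/lit/param.0001.root, ...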
# Function sets the default muon geometry
function set_default_muon_geometry() {
export LIT_CAVE_GEOM=cave.geo
export LIT_PIPE_GEOM=pipe/pipe_v13c.geo.root
export LIT_SHIELD_GEOM=much/shield_v13f.geo
export LIT_MVD_GEOM=
export LIT_STS_GEOM=sts/sts_v13d.geo.root
export LIT_STS_DIGI=${VMCWORKDIR}/parameters/sts/sts_v13d_std.digi.par
export LIT_STS_MAT_BUDGET_FILE=${VMCWORKDIR}/parameters/sts/sts_matbudget_v13d.root
export LIT_MUCH_GEOM=much/much_v13f.geo
export LIT_MUCH_DIGI=${VMCWORKDIR}/parameters/much/much_v13f.digi.root
export LIT_RICH_GEOM=
export LIT_TRD_GEOM=
export LIT_TRD_DIGI=
export LIT_TOF_GEOM=tof/tof_v13-5e.geo.root
export LIT_TOF_DIGI=${VMCWORKDIR}/parameters/tof/tof_v13-5e.digi.par
export LIT_ECAL_GEOM=
export LIT_FIELD_MAP=field_v12b
export LIT_MAGNET_GEOM=magnet/magnet_v12b.geo.root
export LIT_CONSEQUTIVE_STS_POINTS=1
export LIT_NORM_STS_POINTS=4
export LIT_NORM_TRD_POINTS=0
export LIT_NORM_MUCH_POINTS=16
export LIT_NORM_TOF_POINTS=1
export LIT_NORM_TRD_HITS=0
export LIT_NORM_MUCH_HITS=15
export LIT_NORM_TOF_HITS=1
}
# Function sets the default electron geometry
function set_default_electron_geometry() {
export LIT_CAVE_GEOM=cave.geo
export LIT_PIPE_GEOM=pipe/pipe_v14y.geo.root
export LIT_SHIELD_GEOM=
export LIT_MVD_GEOM=
export LIT_STS_GEOM=sts/sts_v13d.geo.root
export LIT_STS_DIGI=${VMCWORKDIR}/parameters/sts/sts_v13d_std.digi.par
export LIT_STS_MAT_BUDGET_FILE=${VMCWORKDIR}/parameters/sts/sts_matbudget_v13d.root
export LIT_MUCH_GEOM=
export LIT_MUCH_DIGI=
export LIT_RICH_GEOM=rich/rich_v14a.root
export LIT_TRD_GEOM=trd/trd_v15a_3e.geo.root
export LIT_TRD_DIGI=${VMCWORKDIR}/parameters/trd/trd_v15a_3e.digi.par
export LIT_TOF_GEOM=tof/tof_v13-5d.geo.root
export LIT_TOF_DIGI=${VMCWORKDIR}/parameters/tof/tof_v13-5d.digi.par
export LIT_ECAL_GEOM=
export LIT_FIELD_MAP=field_v12b
export LIT_MAGNET_GEOM=magnet/magnet_v12b.geo.root
export LIT_CONSEQUTIVE_STS_POINTS=1
export LIT_NORM_STS_POINTS=4
export LIT_NORM_TRD_POINTS=6
export LIT_NORM_MUCH_POINTS=0
export LIT_NORM_TOF_POINTS=1
export LIT_NORM_TRD_HITS=6
export LIT_NORM_MUCH_HITS=0
export LIT_NORM_TOF_HITS=1
}
# Function sets the default MVD geometry
function set_default_mvd_geometry() {
export LIT_CAVE_GEOM=cave.geo
export LIT_PIPE_GEOM=pipe/pipe_v14l.root
export LIT_SHIELD_GEOM=
export LIT_MVD_GEOM=mvd/mvd_v14b.geo.root
export LIT_STS_GEOM=sts/sts_v13d.geo.root
export LIT_STS_DIGI=${VMCWORKDIR}/parameters/sts/sts_v13d_std.digi.par
export LIT_STS_MAT_BUDGET_FILE=${VMCWORKDIR}/parameters/sts/sts_matbudget_v13d.root
export LIT_MUCH_GEOM=
export LIT_MUCH_DIGI=
export LIT_RICH_GEOM=
export LIT_TRD_GEOM=
export LIT_TRD_DIGI=
export LIT_TOF_GEOM=
export LIT_TOF_DIGI=
export LIT_ECAL_GEOM=
export LIT_FIELD_MAP=field_v12b
export LIT_MAGNET_GEOM=magnet/magnet_v12b.geo.root
export LIT_CONSEQUTIVE_STS_POINTS=1
export LIT_NORM_STS_POINTS=4
export LIT_NORM_TRD_POINTS=0
export LIT_NORM_MUCH_POINTS=0
export LIT_NORM_TOF_POINTS=0
export LIT_NORM_TRD_HITS=0
export LIT_NORM_MUCH_HITS=0
export LIT_NORM_TOF_HITS=0
}
# Function sets the default STS geometry
function set_default_sts_geometry() {
export LIT_CAVE_GEOM=cave.geo
export LIT_PIPE_GEOM=pipe/pipe_v13a.geo.root
export LIT_SHIELD_GEOM=
export LIT_MVD_GEOM=
export LIT_STS_GEOM=sts/sts_v13d.geo.root
export LIT_STS_DIGI=${VMCWORKDIR}/parameters/sts/sts_v13d_std.digi.par
export LIT_STS_MAT_BUDGET_FILE=${VMCWORKDIR}/parameters/sts/sts_matbudget_v13d.root
export LIT_MUCH_GEOM=
export LIT_MUCH_DIGI=
export LIT_RICH_GEOM=
export LIT_TRD_GEOM=
export LIT_TRD_DIGI=
export LIT_TOF_GEOM=
export LIT_TOF_DIGI=
export LIT_ECAL_GEOM=
export LIT_FIELD_MAP=field_v12b
export LIT_MAGNET_GEOM=magnet/magnet_v12b.geo.root
export LIT_CONSEQUTIVE_STS_POINTS=1
export LIT_NORM_STS_POINTS=4
export LIT_NORM_TRD_POINTS=0
export LIT_NORM_MUCH_POINTS=0
export LIT_NORM_TOF_POINTS=0
export LIT_NORM_TRD_HITS=0
export LIT_NORM_MUCH_HITS=0
export LIT_NORM_TOF_HITS=0
}
# Function sets the default STS+TOF geometry
function set_default_sts_tof_geometry() {
export LIT_CAVE_GEOM=cave.geo
export LIT_PIPE_GEOM=pipe/pipe_v13a.geo.root
export LIT_SHIELD_GEOM=
export LIT_MVD_GEOM=
export LIT_STS_GEOM=sts/sts_v13d.geo.root
export LIT_STS_DIGI=${VMCWORKDIR}/parameters/sts/sts_v13d_std.digi.par
export LIT_STS_MAT_BUDGET_FILE=${VMCWORKDIR}/parameters/sts/sts_matbudget_v13d.root
export LIT_MUCH_GEOM=
export LIT_MUCH_DIGI=
export LIT_RICH_GEOM=
export LIT_TRD_GEOM=
export LIT_TRD_DIGI=
export LIT_TOF_GEOM=tof/tof_v13b.geo.root
export LIT_TOF_DIGI=${VMCWORKDIR}/parameters/tof/tof_v13b.digi.par
export LIT_ECAL_GEOM=
export LIT_FIELD_MAP=field_v12b
export LIT_MAGNET_GEOM=magnet/magnet_v12b.geo.root
export LIT_CONSEQUTIVE_STS_POINTS=1
export LIT_NORM_STS_POINTS=4
export LIT_NORM_TRD_POINTS=0
export LIT_NORM_MUCH_POINTS=0
export LIT_NORM_TOF_POINTS=1
export LIT_NORM_TRD_HITS=0
export LIT_NORM_MUCH_HITS=0
export LIT_NORM_TOF_HITS=1
}
# Function exports and creates output directories for LIT_DIR
function create_output_dir()
{
export LIT_DIR=$1
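# Note: any existing ${LIT_DIR} is removed before being recreated, so previous output is lost.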
rm -r -f ${LIT_DIR}
mkdir -p ${LIT_DIR}
}
# Function exports and creates output directories for LIT_RESULT_DIR
function create_result_dir()
{
export LIT_RESULT_DIR=$1
rm -r -f ${LIT_RESULT_DIR}
mkdir -p ${LIT_RESULT_DIR}
}
| desdemonda/cbmroot | macro/littrack/scripts/common.sh | Shell | gpl-2.0 | 7,995 |
#!/bin/bash
[ -z "${HOST_IP}" ] || exec supervisord
export LC_ALL=en_US.utf8
toilet -f Elite PanteraS | /usr/games/lolcat -f 2>/dev/null || toilet -f Elite PanteraS
cat version
cat << EOF
PanteraS - Platform as a Service
Usage:
$ git clone https://github.com/eBayClassifiedsGroup/PanteraS
$ cd PanteraS
# ------------------------------------------
# Stand Alone (Master & Slave):
$ ./generate_yml.sh
$ docker-compose up -d
# ------------------------------------------
# 3 Masters (without Slaves daemons) mode:
# Repeat on every master docker host, making sure each host gets a unique ZOOKEEPER_ID:
$ mkdir restricted
$ echo 'ZOOKEEPER_ID=1' >> restricted/host
$ echo 'ZOOKEEPER_HOSTS="master-1:2181,master-2:2181,master-3:2181"' >> restricted/host
$ echo 'CONSUL_HOSTS="-join=master-1 -join=master-2 -join=master-3"' >> restricted/host
$ echo 'MESOS_MASTER_QUORUM=2' >> restricted/host
# On the first master host ONLY, set up the Consul bootstrap
$ echo 'CONSUL_PARAMS="-bootstrap-expect 3"' >> restricted/host
$ SLAVE=false ./generate_yml.sh
$ docker-compose up -d
# ------------------------------------------
# Add Slaves (the Masters need to be set up first)
# Repeat on each Slave docker host:
$ mkdir restricted
$ echo 'ZOOKEEPER_HOSTS="master-1:2181,master-2:2181,master-3:2181"' >> restricted/host
$ echo 'CONSUL_HOSTS="-join=master-1 -join=master-2 -join=master-3"' >> restricted/host
$ echo 'MASTER=false' >> restricted/host
$ ./generate_yml.sh
$ docker-compose up -d
More info on GitHub
EOF
| Kosta-Github/PanteraS | infrastructure/supervisord.sh | Shell | gpl-2.0 | 1,493 |
#!/bin/bash
if [ "$(whoami)" != "root" ]
then
echo "Root privileges required"
exit 1
fi
## Password manager
apt-get install -y keepassx
#
##
###OpenSSH
##
#
apt-get install -y openssh-server
cp -fv Config/sshd_config /etc/ssh/sshd_config
#cp -fv Config/issue.net /etc/issue.net
# #
## ##
###Iptable###
## ##
# #
# Copy the firewall config file
cp -fv firewall.sh /etc/init.d/firewall.sh
# Run the script
chmod +x /etc/init.d/firewall.sh
/etc/init.d/firewall.sh
# Set it as the default firewall rule
update-rc.d firewall.sh defaults
# #
## ##
###Portsentry###
## ##
# #
# Protection against port scanning
apt-get install -y portsentry
# Copy the config files
cp -fv Config/portsentry.conf /etc/portsentry/portsentry.conf
# Run portsentry (udp and tcp)
portsentry -audp
portsentry -atcp
# #
## ##
###Fail2ban###
## ##
# #
# Log-based utility that defends against various intrusion methods (brute force, dictionary, denial of service)
apt-get install -y fail2ban
# Copy the config files
cp -fv Config/jail.conf /etc/fail2ban/jail.conf
cp -fv Config/apache-w00tw00t.conf /etc/fail2ban/filter.d/apache-w00tw00t.conf
#On recharge la nouvelle config
/etc/init.d/fail2ban restart
# #
## ##
###Rkhunter###
## ##
# #
# Detects rootkits
apt-get install -y rkhunter
# Copy the config files
cp -fv Config/rkhunter /etc/default/rkhunter
# #
## ##
###Logwatch###
## ##
# #
# Summarizes the logs
apt-get install -y logwatch
# Copy the config files
cp -fv Config/logwatch.conf /usr/share/logwatch/default.conf/logwatch.conf
#
##
###ClamAv
##
#
add-apt-repository -y ppa:ubuntu-clamav/ppa
apt-get install -y clamav
# Update the virus signatures
freshclam
# Install the auto-update script as a daily cron job
cp -fv clamav.sh /etc/cron.daily/clamav.sh
chmod 700 /etc/cron.daily/clamav.sh
service apache2 restart
apt-get update
apt-get upgrade -y
| severus21/Install | Secure/install.sh | Shell | gpl-2.0 | 2,124 |
#!/bin/bash
#
# PM-QA validation test suite for the power management on Linux
#
# Copyright (C) 2011, Linaro Limited.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# Contributors:
# Torez Smith <[email protected]> (IBM Corporation)
# - initial API and implementation
#
###
# Determine which cpufreq P-states (available frequencies) are defined on
# this system by reading the sysfs files for cpufreq.
###
cat /sys/devices/system/cpu/cpu0/cpufreq/scaling_available_frequencies
| wanghao-xznu/vte | testcases/third_party_suite/linaro-pm-qa/testcases/proof_o_concept/PM-list_p_states.sh | Shell | gpl-2.0 | 1,153 |
#!/bin/bash
# Copyright (C) 2011,2012 Freescale Semiconductor, Inc. All Rights Reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @file pmic_bat_charging.sh
#
# @brief shell script for battery charging test.
#
#Revision History:
#Author Date Description of Changes
#Hake 2011/04/25 init pmic bat test
#Spring 2012/06/28 Add support for mx6 max8903
#------------------------- ------------ -----------------------
#
# Function: setup
#
# Description: - Check if required commands exist
# - Export global variables
# - Check if required config files exist
# - Create temporary files and directories
#
# Return - zero on success
# - non zero on failure. return value from commands ($RC)
setup()
{
#TODO Total test case
export TST_TOTAL=3
export TCID="setup"
export TST_COUNT=0
RC=1
trap "cleanup" 0
platfm.sh; platfm_id=$?
if [ $platfm_id -eq 50 ]; then
modprobe mc34708_battery
AC_CHARGER=/sys/class/power_supply/*aux_charger/online
USB_CHARGER=/sys/class/power_supply/*usb_charger/online
BATTERY=/sys/class/power_supply/*_bat/status
else
AC_CHARGER=/sys/class/power_supply/*-ac/online
USB_CHARGER=/sys/class/power_supply/*-usb/online
BATTERY=/sys/class/power_supply/*-charger/status
fi
RC=0
#TODO add setup scripts
return $RC
}
# Function: cleanup
#
# Description - remove temporary files and directories.
#
# Return - zero on success
# - non zero on failure. return value from commands ($RC)
cleanup()
{
RC=0
#TODO add cleanup code here
modprobe -r mc34708_battery
return $RC
}
# Function: test_case_01
# Description - Test if basic AC (aux) charging is OK
#
test_case_01()
{
#TODO give TCID
TCID="test_aux_charging"
#TODO give TST_COUNT
TST_COUNT=1
RC=0
#print test info
tst_resm TINFO "test $TST_COUNT: $TCID "
#test list
clear
echo "please ensure you boot from battery"
read -p "please ensure the dc 5v is switch on, press Enter to continue"
u_online=$(cat $AC_CHARGER)
if [ "$u_online" = "0" ]; then
RC=$(expr $RC + 1)
fi
sleep 2
bat_status=$(cat $BATTERY)
if [ "$bat_status" != "Charging" ] || [ "$bat_status" != "Full" ] ;then
RC=$(expr $RC + 1)
fi
if [ $platfm_id -eq 50 ]; then
bat_charge_current=$(cat /sys/class/power_supply/ripley_bat/charge_now | grep "-")
if [ -z "$bat_charge_current" ]; then
RC=$(expr $RC + 1)
fi
fi
return $RC
}
# Function: test_case_02
# Description - Test if USB charging is OK
#
test_case_02()
{
#TODO give TCID
TCID="test_usb_charging"
#TODO give TST_COUNT
TST_COUNT=2
RC=0
#print test info
tst_resm TINFO "test $TST_COUNT: $TCID "
#test list
clear
echo "please ensure you boot from battery"
read -p "please ensure the usb 5v is switch on, press Enter to continue"
sleep 2
set -x
u_online=$(cat $USB_CHARGER)
if [ "$u_online" = "0" ]; then
RC=$(expr $RC + 1)
fi
bat_status=$(cat $BATTERY)
[ "$bat_status" = "Charging" ] || [ "$bat_status" = "Full" ] || RC=$(expr $RC + 1)
if [ $platfm_id -eq 50 ]; then
bat_charge_current=$(cat /sys/class/power_supply/ripley_bat/charge_now | grep "-")
if [ -z "$bat_charge_current" ]; then
RC=$(expr $RC + 1)
fi
fi
set +x
return $RC
}
test_case_03()
{
#TODO give TCID
TCID="Test_Charging_Standby"
#TODO give TST_COUNT
TST_COUNT=3
RC=0
power_type=$1
#print test info
tst_resm TINFO "test $TST_COUNT: $TCID "
#test list
clear
echo "please ensure you boot from battery"
if [ "$power_type" = "ac" ]; then
read -p "please ensure the AC 5v is in, press Enter to continue"
else
read -p "please ensure the usb 5v is in, press Enter to continue"
fi
sleep 2
set -x
u_online=$(cat $USB_CHARGER)
if [ "$power_type" = "ac" ] ; then
u_online=$(cat $AC_CHARGER)
fi
if [ "$u_online" = "0" ]; then
RC=$(expr $RC + 1)
fi
bat_status=$(cat $BATTERY)
[ "$bat_status" = "Charging" ] || [ "$bat_status" = "Full" ] ||RC=$(expr $RC + 1)
if [ $platfm_id -eq 50 ]; then
bat_charge_current=$(cat /sys/class/power_supply/ripley_bat/charge_now | grep "-")
if [ -z "$bat_charge_current" ]; then
RC=$(expr $RC + 1)
fi
fi
rtc_testapp_6 -T 50
bat_status=$(cat $BATTERY)
[ "$bat_status" = "Charging" ] || [ "$bat_status" = "Full" ] ||RC=$(expr $RC + 1)
if [ $platfm_id -eq 50 ]; then
bat_charge_current=$(cat /sys/class/power_supply/ripley_bat/charge_now | grep "-")
if [ -z "$bat_charge_current" ]; then
RC=$(expr $RC + 1)
fi
fi
set +x
return $RC
}
test_case_04()
{
TCID="BATTERY_PWR_SWITCH"
TST_COUNT=4
RC=0
#print test info
tst_resm TINFO "test $TST_COUNT: $TCID "
#test list
# check AC power plugin
clear
echo "please ensure you boot from battery"
read -p "please ensure the AC 5v is switch on, press Enter to continue"
sleep 2
set -x
tst_resm TINFO "check AC power plugin"
ac_online=$(cat $AC_CHARGER)
if [ "$ac_online" != "1" ]; then
RC=$(expr $RC + 1)
tst_resm TFAIL "AC online status is wrong"
fi
bat_status=$(cat $BATTERY)
[ "$bat_status" = "Charging" ] || [ "$bat_status" = "Full" ] || RC=$(expr $RC + 1)
read -p "please plug out the AC 5v, press Enter to continue"
tst_resm TINFO "check AC power plugout"
ac_online=$(cat $AC_CHARGER)
if [ "$ac_online" != "0" ]; then
RC=$(expr $RC + 1)
tst_resm TFAIL "AC online status is wrong"
fi
bat_status=$(cat $BATTERY)
[ "$bat_status" = "Discharging" ] || RC=$(expr $RC + 1)
read -p "please plug in USB 5v, press Enter to continue"
tst_resm TINFO "check USB power plugin"
u_online=$(cat $USB_CHARGER)
if [ "$u_online" != "1" ]; then
RC=$(expr $RC + 1)
tst_resm TFAIL "USB power online status is wrong"
fi
bat_status=$(cat $BATTERY)
[ "$bat_status" = "Charging" ] || [ "$bat_status" = "Full" ] || RC=$(expr $RC + 1)
read -p "please plug out USB 5v, press Enter to continue"
tst_resm TINFO "check USB power plugout"
u_online=$(cat $USB_CHARGER)
if [ "$u_online" != "0" ]; then
RC=$(expr $RC + 1)
tst_resm TFAIL "USB power online status is wrong"
fi
bat_status=$(cat $BATTERY)
[ "$bat_status" = "Discharging" ] || RC=$(expr $RC + 1)
read -p "please plug in AC 5v and USB 5v, press Enter to continue"
tst_resm TINFO "check both AC and USB power plugin"
ac_online=$(cat $AC_CHARGER)
if [ "$ac_online" != "1" ]; then
RC=$(expr $RC + 1)
tst_resm TFAIL "AC online status is wrong"
fi
u_online=$(cat $USB_CHARGER)
if [ "$u_online" != "1" ]; then
RC=$(expr $RC + 1)
tst_resm TFAIL "USB power online status is wrong"
fi
bat_status=$(cat $BATTERY)
[ "$bat_status" = "Charging" ] || [ "$bat_status" = "Full" ] || RC=$(expr $RC + 1)
read -p "please plug out AC 5v and USB 5v, press Enter to continue"
tst_resm TINFO "check both AC and USB power plugout"
ac_online=$(cat $AC_CHARGER)
if [ "$ac_online" != "0" ]; then
RC=$(expr $RC + 1)
tst_resm TFAIL "AC online status is wrong"
fi
u_online=$(cat $USB_CHARGER)
if [ "$u_online" != "0" ]; then
RC=$(expr $RC + 1)
tst_resm TFAIL "USB power online status is wrong"
fi
bat_status=$(cat $BATTERY)
[ "$bat_status" = "Discharging" ] || RC=$(expr $RC + 1)
set +x
return $RC
}
usage()
{
echo "1: aux charging test"
echo "2: usb charging test"
echo "3: charging suspend test on USB power"
echo "3 ac: charging suspend test on AC power"
echo "4: Different power source charging switch test"
exit 1
}
# main function
RC=0
setup || exit $RC
case "$1" in
1)
test_case_01 || exit $RC
;;
2)
test_case_02 || exit $RC
;;
3)
test_case_03 $2 || exit $RC
;;
4)
test_case_04 || exit $RC
;;
*)
usage
;;
esac
tst_resm TINFO "Test PASS"
| wanghao-xznu/vte | testcases/vte_tests_suite/pmic_tests/scripts/pmic_bat_charging.sh | Shell | gpl-2.0 | 9,010 |
#!/usr/bin/env bash
set -e
externalExecutableExists() {
local executable="$1"
if builtin type "$executable" &>/dev/null;
then
return 0
else
return 1
fi
}
getFirstExecutable() {
debug "Finding first executable in \$PATH out of '$@'"
local path=""
for executable in "$@";
do
if externalExecutableExists "$executable";
then
path="$executable"
break
fi
done
debug "First executable is '${path}'"
echo -nE "$path"
}
# External player detection.
# TODO: confirm and expand list.
externalPlayerExec="$(getFirstExecutable "afplay" "mplayer" "mpg123" "mpg321" "play")"
# External shuffle executable detection.
# TODO: confirm and expand list.
externalShuffleExec="$(getFirstExecutable "shuf" "gshuf")"
# From https://github.com/EtiennePerot/parcimonie.sh/blob/master/parcimonie.sh
# Test for GNU `sed`, or use a `sed` fallback in sedExtRegexp
sedExec=(sed)
if [ "$(echo 'abc' | sed -r 's/abc/def/' 2> /dev/null || true)" == 'def' ]; then
# GNU Linux sed
sedExec+=(-r)
else
# Mac OS X sed
sedExec+=(-E)
fi
sedExtRegexp() {
"${sedExec[@]}" "$@"
}
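# Illustrative usage: an extended-regexp substitution that works with both GNU and BSD sed:
#   echo 'aaa' | sedExtRegexp 's/a{3}/b/'   # prints "b"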
fswatchExec="$(getFirstExecutable "fswatch" "/usr/local/bin/fswatch")"
waitForFileChange() {
# https://github.com/emcrisostomo/fswatch
# Should cover all systems
[[ -z "$fswatchExec" ]] && die "could not find 'fswatch'"
debug "Waiting for change in '$@': '$watched'"
local watched=$("$fswatchExec" --one-event "$@")
debug "Detected change in '$@': '$watched'"
}
# https://stackoverflow.com/questions/17878684/best-way-to-get-file-modified-time-in-seconds
# https://stackoverflow.com/a/17907126/
if stat -c %Y . >/dev/null 2>&1; then
get_modified_time() { stat -c %Y "$1" 2>/dev/null; }
elif stat -f %m . >/dev/null 2>&1; then
get_modified_time() { stat -f %m "$1" 2>/dev/null; }
elif date -r . +%s >/dev/null 2>&1; then
get_modified_time() { date -r "$1" +%s 2>/dev/null; }
else
echo 'get_modified_time() is unsupported' >&2
get_modified_time() { printf '%s' 0; }
fi
getLastFileModifiedTime() {
get_modified_time "$1"
}
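# Illustrative usage (path is arbitrary):
#   getLastFileModifiedTime /etc/hosts   # prints the mtime in seconds since the epoch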
| joelpurra/npshell | src/shared/functions/cross-platform.sh | Shell | gpl-3.0 | 2,030 |
#!/bin/sh
if [ -z "$LDB_SPECIALS" ]; then
LDB_SPECIALS=1
export LDB_SPECIALS
fi
echo "LDB_URL: $LDB_URL"
echo "Adding base elements"
$VALGRIND ldbadd $LDBDIR/tests/test.ldif || exit 1
echo "Adding again - should fail"
$VALGRIND ldbadd $LDBDIR/tests/test.ldif 2> /dev/null && {
echo "Should have failed to add again - gave $?"
exit 1
}
echo "Adding LDIF with one already-existing user again - should fail"
$VALGRIND ldbadd $LDBDIR/tests/test-dup.ldif 2> /dev/null && {
echo "Should have failed to add again - gave $?"
exit 1
}
echo "Adding again - should succeed (as previous failed)"
$VALGRIND ldbadd $LDBDIR/tests/test-dup-2.ldif || exit 1
echo "Modifying elements"
$VALGRIND ldbmodify $LDBDIR/tests/test-modify.ldif || exit 1
echo "Modify LDIF with one un-met constraint - should fail"
$VALGRIND ldbadd $LDBDIR/tests/test-modify-unmet.ldif 2> /dev/null && {
echo "Should have failed to modify - gave $?"
exit 1
}
echo "Modify LDIF with after failure of un-met constraint - should also fail"
$VALGRIND ldbadd $LDBDIR/tests/test-modify-unmet-2.ldif 2> /dev/null && {
echo "Should have failed to modify - gave $?"
exit 1
}
echo "Showing modified record"
$VALGRIND ldbsearch '(uid=uham)' || exit 1
echo "Rename entry with ldbmodify - modrdn"
$VALGRIND ldbmodify $LDBDIR/tests/test-modify-modrdn.ldif || exit 1
echo "Rename entry with ldbrename"
OLDDN="cn=Ursula Hampster,ou=Alumni Association,ou=People,o=University of Michigan,c=TEST"
NEWDN="cn=Hampster Ursula,ou=Alumni Association,ou=People,o=University of Michigan,c=TEST"
$VALGRIND ldbrename "$OLDDN" "$NEWDN" || exit 1
echo "Showing renamed record"
$VALGRIND ldbsearch '(uid=uham)' || exit 1
echo "Starting ldbtest"
$VALGRIND ldbtest --num-records 100 --num-searches 10 || exit 1
if [ $LDB_SPECIALS = 1 ]; then
echo "Adding index"
$VALGRIND ldbadd $LDBDIR/tests/test-index.ldif || exit 1
fi
echo "Adding bad attributes - should fail"
$VALGRIND ldbadd $LDBDIR/tests/test-wrong_attributes.ldif && {
echo "Should fhave failed - gave $?"
exit 1
}
echo "Testing indexed search"
$VALGRIND ldbsearch '(uid=uham)' || exit 1
$VALGRIND ldbsearch '(&(objectclass=person)(objectclass=person)(objectclass=top))' || exit 1
$VALGRIND ldbsearch '(&(uid=uham)(uid=uham))' || exit 1
$VALGRIND ldbsearch '(|(uid=uham)(uid=uham))' || exit 1
$VALGRIND ldbsearch '(|(uid=uham)(uid=uham)(objectclass=OpenLDAPperson))' || exit 1
$VALGRIND ldbsearch '(&(uid=uham)(uid=uham)(!(objectclass=xxx)))' || exit 1
$VALGRIND ldbsearch '(&(objectclass=person)(uid=uham)(!(uid=uhamxx)))' uid \* \+ dn || exit 1
$VALGRIND ldbsearch '(&(uid=uham)(uid=uha*)(title=*))' uid || exit 1
echo "Testing invalid search expression"
$VALGRIND ldbsearch '(&(uid=uham)(title=foo\blah))' uid && exit 1
# note that the "((" is treated as an attribute not an expression
# this matches the openldap ldapsearch behaviour of looking for a '='
# to see if the first argument is an expression or not
$VALGRIND ldbsearch '((' uid || exit 1
$VALGRIND ldbsearch '(objectclass=)' uid || exit 1
$VALGRIND ldbsearch -b 'cn=Hampster Ursula,ou=Alumni Association,ou=People,o=University of Michigan,c=TEST' -s base "" sn || exit 1
echo "Test wildcard match"
$VALGRIND ldbadd $LDBDIR/tests/test-wildcard.ldif || exit 1
$VALGRIND ldbsearch '(cn=test*multi)' || exit 1
$VALGRIND ldbsearch '(cn=*test*multi*)' || exit 1
$VALGRIND ldbsearch '(cn=*test_multi)' || exit 1
$VALGRIND ldbsearch '(cn=test_multi*)' || exit 1
$VALGRIND ldbsearch '(cn=test*multi*test*multi)' || exit 1
$VALGRIND ldbsearch '(cn=test*multi*test*multi*multi_*)' || exit 1
echo "Starting ldbtest indexed"
$VALGRIND ldbtest --num-records 100 --num-searches 500 || exit 1
echo "Testing one level search"
count=`$VALGRIND ldbsearch -b 'ou=Groups,o=University of Michigan,c=TEST' -s one 'objectclass=*' none |grep '^dn' | wc -l`
if [ $count != 3 ]; then
echo returned $count records - expected 3
exit 1
fi
echo "Testing binary file attribute value"
$VALGRIND ldbmodify $LDBDIR/tests/photo.ldif || exit 1
count=`$VALGRIND ldbsearch '(cn=Hampster Ursula)' jpegPhoto | grep '^dn' | wc -l`
if [ $count != 1 ]; then
echo returned $count records - expected 1
exit 1
fi
echo "*TODO* Testing UTF8 upper lower case searches !!"
echo "Testing compare"
count=`$VALGRIND ldbsearch '(cn>=t)' cn | grep '^dn' | wc -l`
if [ $count != 2 ]; then
echo returned $count records - expected 2
echo "this fails on openLdap ..."
fi
count=`$VALGRIND ldbsearch '(cn<=t)' cn | grep '^dn' | wc -l`
if [ $count != 13 ]; then
echo returned $count records - expected 13
echo "this fails on openLdap ..."
fi
checkcount() {
count=$1
scope=$2
basedn=$3
expression="$4"
n=`$VALGRIND ldbsearch -s "$scope" -b "$basedn" "$expression" | grep '^dn' | wc -l`
if [ $n != $count ]; then
echo "Got $n but expected $count for $expression"
bin/ldbsearch "$expression"
exit 1
fi
echo "OK: $count $expression"
}
checkcount 0 'base' '' '(uid=uham)'
checkcount 0 'one' '' '(uid=uham)'
checkcount 1 'base' 'cn=Hampster Ursula,ou=Alumni Association,ou=People,o=University of Michigan,c=TEST' '(uid=uham)'
checkcount 1 'one' 'ou=Alumni Association,ou=People,o=University of Michigan,c=TEST' '(uid=uham)'
checkcount 1 'one' 'ou=People,o=University of Michigan,c=TEST' '(ou=ldb test)'
| amitay/samba | lib/ldb/tests/test-generic.sh | Shell | gpl-3.0 | 5,345 |
#!/bin/bash
set +x
HOSTAPD_CFG="/etc/hostapd/wired.conf"
EAP_USERS_FILE="/etc/hostapd/hostapd.eap_user"
HOSTAPD_KEYS_PATH="/etc/hostapd/ssl"
CLIENT_KEYS_PATH="/tmp/certs"
function start_dnsmasq ()
{
echo "Start DHCP server (dnsmasq)"
local dnsmasq="/usr/sbin/dnsmasq"
echo "Start auth DHCP server (dnsmasq)"
$dnsmasq\
--pid-file=/tmp/dnsmasq_wired.pid\
--conf-file\
--no-hosts\
--bind-interfaces\
--except-interface=lo\
--interface=test8Y\
--clear-on-reload\
--strict-order\
--listen-address=10.0.253.1\
--dhcp-range=10.0.253.10,10.0.253.200,10m\
--dhcp-option=option:router,10.0.253.1\
--dhcp-leasefile=/var/lib/dnsmasq/hostapd.leases \
--dhcp-lease-max=190
echo "Start noauth DHCP server (dnsmasq)"
$dnsmasq\
--pid-file=/tmp/dnsmasq_wired_noauth.pid\
--conf-file\
--no-hosts\
--bind-interfaces\
--except-interface=lo\
--interface=test8Z\
--clear-on-reload\
--strict-order\
--listen-address=10.0.254.1\
--dhcp-range=10.0.254.10,10.0.254.200,2m\
--dhcp-option=option:router,10.0.254.1\
--dhcp-leasefile=/var/lib/dnsmasq/hostapd.leases \
--dhcp-lease-max=190
}
function write_hostapd_cfg ()
{
echo "# Hostapd configuration for 802.1x client testing
interface=test8Y
driver=wired
ieee8021x=1
eap_reauth_period=3600
eap_server=1
use_pae_group_addr=1
eap_user_file=$EAP_USERS_FILE
ca_cert=$HOSTAPD_KEYS_PATH/hostapd.ca.pem
dh_file=$HOSTAPD_KEYS_PATH/hostapd.dh.pem
server_cert=$HOSTAPD_KEYS_PATH/hostapd.cert.pem
private_key=$HOSTAPD_KEYS_PATH/hostapd.key.enc.pem
private_key_passwd=redhat" > $HOSTAPD_CFG
# Create a list of users for network authentication, authentication types, and corresponding credentials.
echo "# Create hostapd peap user file
# Phase 1 authentication
\"user\" MD5 \"password\"
\"test\" TLS,TTLS,PEAP
# this is for doc_procedures, not to require anonymous identity to be set
\"TESTERS\\test_mschapv2\" TLS,TTLS,PEAP
# Phase 2 authentication (tunnelled within EAP-PEAP or EAP-TTLS)
\"TESTERS\\test_mschapv2\" MSCHAPV2 \"password\" [2]
\"test_md5\" MD5 \"password\" [2]
\"test_gtc\" GTC \"password\" [2]
# Tunneled TLS and non-EAP authentication inside the tunnel.
\"test_ttls\" TTLS-PAP,TTLS-CHAP,TTLS-MSCHAP,TTLS-MSCHAPV2 \"password\" [2]" > $EAP_USERS_FILE
}
function copy_certificates ()
{
# Copy certificates to correct places
[ -d $HOSTAPD_KEYS_PATH ] || mkdir -p $HOSTAPD_KEYS_PATH
/bin/cp -rf $CERTS_PATH/server/hostapd* $HOSTAPD_KEYS_PATH
[ -d $CLIENT_KEYS_PATH ] || mkdir -p $CLIENT_KEYS_PATH
/bin/cp -rf $CERTS_PATH/client/test_user.* $CLIENT_KEYS_PATH
/bin/cp -rf $CERTS_PATH/client/test_user.ca.pem /etc/pki/ca-trust/source/anchors
chown -R test:test $CLIENT_KEYS_PATH
update-ca-trust extract
}
function release_between () {
rel_min="release $1"
rel_max="release $2"
rel="$(grep -o 'release [0-9.]*' /etc/redhat-release)"
vers="$(echo -e "$rel_min\n$rel_max\n$rel" | sort -V)"
[ "$rel_min" == "$(echo "$vers" | head -n1)" ] || return 1
[ "$rel_max" == "$(echo "$vers" | tail -n1)" ] || return 1
return 0
}
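# Illustrative: on a system whose /etc/redhat-release contains "release 8.6",
# 'release_between 8.4 8.999' returns 0, while 'release_between 9.0 9.999' returns 1.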
function start_nm_hostapd ()
{
if grep -q 'Stream release 8' /etc/redhat-release || release_between 8.4 8.999; then
local policy_file="tmp/selinux-policy/hostapd_wired_8.pp"
if ! [ -f "/tmp/hostapd_wired_selinux" ] ; then
touch "/tmp/hostapd_wired_selinux"
semodule -i $policy_file || echo "ERROR: unable to load selinux policy !!!"
fi
fi
if release_between 9.0 9.999; then
local policy_file="tmp/selinux-policy/hostapd_wired_9.pp"
if ! [ -f "/tmp/hostapd_wired_selinux" ] ; then
touch "/tmp/hostapd_wired_selinux"
semodule -i $policy_file || echo "ERROR: unable to load selinux policy !!!"
fi
fi
local hostapd="hostapd -ddd $HOSTAPD_CFG"
systemd-run --unit nm-hostapd $hostapd
sleep 5
}
function wired_hostapd_check ()
{
need_setup=0
echo "* Checking hostapd"
if [ ! -e /tmp/nm_8021x_configured ]; then
echo "Not OK!!"
need_setup=1
fi
echo "* Checking auth dnsmasqs"
pid=$(cat /tmp/dnsmasq_wired.pid)
if ! pidof dnsmasq |grep -q $pid; then
echo "Not OK!!"
need_setup=1
fi
echo "* Checking noauth dnsmasqs"
pid=$(cat /tmp/dnsmasq_wired_noauth.pid)
if ! pidof dnsmasq |grep -q $pid; then
echo "Not OK!!"
need_setup=1
fi
echo "* Checking hostapd-wired"
#pid=$(cat /tmp/hostapd_wired.pid)
if ! systemctl is-active nm-hostapd; then
echo "Not OK!!"
need_setup=1
fi
echo "* Checking test8Y"
if ! nmcli device show test8Y | grep -qw connected; then
echo "Not OK!!"
need_setup=1
fi
echo "* Checking test8Z"
if ! nmcli device show test8Z | grep -qw connected; then
echo "Not OK!!"
need_setup=1
fi
if [ $need_setup -eq 1 ]; then
rm -rf /tmp/nm_8021x_configured
wired_hostapd_teardown
return 1
fi
return 0
}
function prepare_test_bed ()
{
# Create 2 Veth interface pairs and a bridge between their peers.
ip link add test8Y type veth peer name test8Yp
ip link add test8X type veth peer name test8Xp
ip link add name test8X_bridge type bridge
ip link set dev test8X_bridge up
ip link set test8Xp master test8X_bridge
ip link set test8Yp master test8X_bridge
# Up everything
ip link set dev test8X up
ip link set dev test8Xp up
ip link set dev test8Y up
ip link set dev test8Yp up
# Create additional default down non protected network
ip link add test8Z type veth peer name test8Zp
ip link set test8Zp master test8X_bridge
ip link set dev test8Z up
# Create connections which (in cooperation with dnsmasq) provide DHCP functionality
# Auth one
nmcli connection add type ethernet con-name DHCP_test8Y ifname test8Y ip4 10.0.253.1/24
sleep 1
nmcli connection up id DHCP_test8Y
# Non auth one
nmcli connection add type ethernet con-name DHCP_test8Z ifname test8Z ip4 10.0.254.1/24
sleep 1
nmcli connection up id DHCP_test8Z
# Note: Adding an interface to a bridge will cause the interface to lose its existing IP address.
# If you're connected remotely via the interface you intend to add to the bridge,
# you will lose your connection. That's why eth0 is never used in a bridge.
# Allow 802.1x packets to be forwarded through the bridge
# Enable forwarding of EAP 802.1x messages through software bridge "test8X_bridge".
# Note: without this capability the testing scenario fails.
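# group_fwd_mask is a bitmask over the reserved 01-80-C2-00-00-0X multicast addresses;
# bit 3 (value 8) enables forwarding of 01-80-C2-00-00-03, the EAPOL PAE group address.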
echo 8 > /sys/class/net/test8X_bridge/bridge/group_fwd_mask
}
function wired_hostapd_setup ()
{
set +x
echo "Configuring hostapd 802.1x server..."
if wired_hostapd_check; then
echo "OK. Configuration has already been done."
return 0
fi
prepare_test_bed
write_hostapd_cfg
copy_certificates
set -e
# Start 802.1x authentication and built-in RADIUS server.
# Start hostapd as a service via systemd-run using 802.1x configuration
start_dnsmasq
start_nm_hostapd
pid=$(cat /tmp/dnsmasq_wired.pid)
if ! pidof dnsmasq | grep -q $pid; then
echo "Error. Cannot start auth dnsmasq as DHCP server." >&2
return 1
fi
pid=$(cat /tmp/dnsmasq_wired_noauth.pid)
if ! pidof dnsmasq | grep -q $pid; then
echo "Error. Cannot start noauth dnsmasq as DHCP server." >&2
return 1
fi
#pid=$(cat /tmp/hostapd_wired.pid)
if ! systemctl is-active nm-hostapd; then
echo "Error. Cannot start hostapd." >&2
return 1
fi
touch /tmp/nm_8021x_configured
}
function wired_hostapd_teardown ()
{
set -x
if systemctl --quiet is-failed nm-hostapd; then
systemctl reset-failed nm-hostapd
fi
systemctl stop nm-hostapd
kill $(cat /tmp/dnsmasq_wired.pid)
kill $(cat /tmp/dnsmasq_wired_noauth.pid)
#kill $(cat /tmp/hostapd_wired.pid)
ip netns del 8021x_ns
ip link del test8Yp
ip link del test8Xp
ip link del test8Zp
ip link del test8X_bridge
nmcli con del DHCP_test8Y DHCP_test8Z test8X_bridge
rm -rf /tmp/nm_8021x_configured
}
if [ "$1" != "teardown" ]; then
# If hostapd's config fails then restore initial state.
echo "Configure and start hostapd..."
CERTS_PATH=${1:?"Error. Path to certificates is not specified."}
wired_hostapd_setup $1; RC=$?
if [ $RC -eq 0 ]; then
echo "hostapd started successfully."
else
echo "Error. Failed to start hostapd." >&2
exit 1
fi
else
wired_hostapd_teardown
echo "System's state returned prior to hostapd's config."
fi
| NetworkManager/NetworkManager-ci | prepare/hostapd_wired.sh | Shell | gpl-3.0 | 8,848 |
#!/bin/bash
echo "installing vim-judy..."
if which apt-get > /dev/null
then
sudo apt-get install -y ctags build-essential cmake
go get -u github.com/jstemmer/gotags
elif which brew > /dev/null
then
brew install ctags gotags
fi
mv -f ~/.vim ~/.vim_old > /dev/null 2>&1
mv -f ~/.vimrc ~/.vimrc_old > /dev/null 2>&1
git clone https://github.com/fkysly/vim-judy.git vim-judy
cd vim-judy
cp .vimrc ~
mkdir ~/.vim
cp -R ./colors ~/.vim
curl -fLo ~/.vim/autoload/plug.vim --create-dirs https://raw.githubusercontent.com/junegunn/vim-plug/master/plug.vim
vim -c "PlugInstall" -c "q" -c "q"
cd ..
rm -rf ./vim-judy
echo "Done!"
| fkysly/vim-judy | install.sh | Shell | gpl-3.0 | 636 |
dataDir="data/measurements"
plotDir="data/plots"
concatDir="data/concat"
concatFitDir="data/concatFits"
aggrDir="data/aggr"
aggrFitDir="data/aggrFits"
plotsAllDir="all"
# # # # # # # # # # # # # #
# .aggr
# # # # # # # # # # # # # #
# 1: SIZE
# 2: INIT
# 3: ADD_SUCCESS
# 4: ADD_FAILURE
# 5: RANDOM_ELEMENT
# 6: SIZE
# 7: ITERATE
# 8: CONTAINS_SUCCESS
# 9: CONTAINS_FAILURE
# 10: GET_SUCCESS
# 11: GET_FAILURE
# 12: REMOVE_SUCCESS
# 13: REMOVE_FAILURE
# # # # # # # # # # # # # #
# # # # # # # # # # # # # #
# .dat
# # # # # # # # # # # # # #
# 1: size
# 2: avg
# 3: min
# 4: max
# 5: med
# 6: var
# 7: varLow
# 8: varUp
# # # # # # # # # # # # # #
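# Illustrative (file name/location hypothetical): pull (size, avg) pairs out of a .dat file:
#   awk '{ print $1, $2 }' "${aggrDir}/DArray.INIT.dat"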
operations=(INIT ADD_SUCCESS ADD_FAILURE RANDOM_ELEMENT SIZE ITERATE CONTAINS_SUCCESS CONTAINS_FAILURE GET_SUCCESS GET_FAILURE REMOVE_SUCCESS REMOVE_FAILURE)
# dataStructures=(DArray DArrayList DHashSet DHashMap DHashTable)
# DArray
# DArrayList
# DHashMap
# DHashSet
# DHashTable
# dataStructures=(DLinkedList DHashArrayList)
# DLinkedList
# DHashArrayList
# dataStructures=(DArrayDeque DHashMultimap DLinkedHashMultimap DEmpty)
# DArrayDeque
# DHashMultimap
# DLinkedHashMultimap
# DEmpty
dataStructures=(DArray DArrayList DHashSet DHashMap DHashTable DLinkedList DHashArrayList)
dataTypes=(Node Edge)
| BenjaminSchiller/DNA.gdsMeasurements | analysis/config.sh | Shell | gpl-3.0 | 1,273 |
#!/bin/bash
# replace this with your own username
USERNAME="villtann"
# replace this with the folder name you want
PROJECT_FOLDER="muistilista"
# your application will be located at USERNAME.users.cs.helsinki.fi/PROJECT_FOLDER
| Yskinator/Tsoha-Bootstrap | config/environment.sh | Shell | gpl-3.0 | 247 |
#!/bin/bash -f
# Vivado (TM) v2016.4 (64-bit)
#
# Filename : blk_mem_gen_0.sh
# Simulator : Synopsys Verilog Compiler Simulator
# Description : Simulation script for compiling, elaborating and verifying the project source files.
# The script will automatically create the design libraries sub-directories in the run
# directory, add the library logical mappings in the simulator setup file, create default
# 'do/prj' file, execute compilation, elaboration and simulation steps.
#
# Generated by Vivado on Thu Dec 22 18:04:56 +0800 2016
# IP Build 1731160 on Wed Dec 14 23:47:21 MST 2016
#
# usage: blk_mem_gen_0.sh [-help]
# usage: blk_mem_gen_0.sh [-lib_map_path]
# usage: blk_mem_gen_0.sh [-noclean_files]
# usage: blk_mem_gen_0.sh [-reset_run]
#
# Prerequisite:- To compile and run simulation, you must compile the Xilinx simulation libraries using the
# 'compile_simlib' TCL command. For more information about this command, run 'compile_simlib -help' in the
# Vivado Tcl Shell. Once the libraries have been compiled successfully, specify the -lib_map_path switch
# that points to these libraries and rerun export_simulation. For more information about this switch please
# type 'export_simulation -help' in the Tcl shell.
#
# You can also point to the simulation libraries by either replacing the <SPECIFY_COMPILED_LIB_PATH> in this
# script with the compiled library directory path or specify this path with the '-lib_map_path' switch when
# executing this script. Please type 'blk_mem_gen_0.sh -help' for more information.
#
# Additional references - 'Xilinx Vivado Design Suite User Guide:Logic simulation (UG900)'
#
# ********************************************************************************************************
# Directory path for design sources and include directories (if any) wrt this path
ref_dir="."
# Override directory with 'export_sim_ref_dir' env path value if set in the shell
if [[ (! -z "$export_sim_ref_dir") && ($export_sim_ref_dir != "") ]]; then
ref_dir="$export_sim_ref_dir"
fi
# Command line options
vlogan_opts="-full64"
vhdlan_opts="-full64"
vcs_elab_opts="-full64 -debug_pp -t ps -licqueue -l elaborate.log"
vcs_sim_opts="-ucli -licqueue -l simulate.log"
# Design libraries
design_libs=(xil_defaultlib xpm blk_mem_gen_v8_3_5)
# Simulation root library directory
sim_lib_dir="vcs"
# Script info
echo -e "blk_mem_gen_0.sh - Script generated by export_simulation (Vivado v2016.4 (64-bit)-id)\n"
# Main steps
run()
{
check_args $# $1
setup $1 $2
compile
elaborate
simulate
}
# RUN_STEP: <compile>
compile()
{
# Compile design files
vlogan -work xil_defaultlib $vlogan_opts -sverilog \
"D:/Xilinx/Vivado/2016.4/data/ip/xpm/xpm_memory/hdl/xpm_memory.sv" \
2>&1 | tee -a vlogan.log
vhdlan -work xpm $vhdlan_opts \
"D:/Xilinx/Vivado/2016.4/data/ip/xpm/xpm_VCOMP.vhd" \
2>&1 | tee -a vhdlan.log
vlogan -work blk_mem_gen_v8_3_5 $vlogan_opts +v2k \
"$ref_dir/../../../ipstatic/simulation/blk_mem_gen_v8_3.v" \
2>&1 | tee -a vlogan.log
vlogan -work xil_defaultlib $vlogan_opts +v2k \
"$ref_dir/../../../../VGA.srcs/sources_1/ip/blk_mem_gen_0_1/sim/blk_mem_gen_0.v" \
2>&1 | tee -a vlogan.log
vlogan -work xil_defaultlib $vlogan_opts +v2k \
glbl.v \
2>&1 | tee -a vlogan.log
}
# RUN_STEP: <elaborate>
elaborate()
{
vcs $vcs_elab_opts xil_defaultlib.blk_mem_gen_0 xil_defaultlib.glbl -o blk_mem_gen_0_simv
}
# RUN_STEP: <simulate>
simulate()
{
./blk_mem_gen_0_simv $vcs_sim_opts -do simulate.do
}
# STEP: setup
setup()
{
case $1 in
"-lib_map_path" )
if [[ ($2 == "") ]]; then
echo -e "ERROR: Simulation library directory path not specified (type \"./blk_mem_gen_0.sh -help\" for more information)\n"
exit 1
fi
create_lib_mappings $2
;;
"-reset_run" )
reset_run
echo -e "INFO: Simulation run files deleted.\n"
exit 0
;;
"-noclean_files" )
# do not remove previous data
;;
* )
create_lib_mappings $2
esac
create_lib_dir
# Add any setup/initialization commands here:-
# <user specific commands>
}
# Define design library mappings
create_lib_mappings()
{
file="synopsys_sim.setup"
if [[ -e $file ]]; then
if [[ ($1 == "") ]]; then
return
else
rm -rf $file
fi
fi
touch $file
lib_map_path=""
if [[ ($1 != "") ]]; then
lib_map_path="$1"
fi
for (( i=0; i<${#design_libs[*]}; i++ )); do
lib="${design_libs[i]}"
mapping="$lib:$sim_lib_dir/$lib"
echo $mapping >> $file
done
if [[ ($lib_map_path != "") ]]; then
incl_ref="OTHERS=$lib_map_path/synopsys_sim.setup"
echo $incl_ref >> $file
fi
}
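# Each mapping written above has the form "<library>:<path>", e.g. "xil_defaultlib:vcs/xil_defaultlib".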
# Create design library directory paths
create_lib_dir()
{
if [[ -e $sim_lib_dir ]]; then
rm -rf $sim_lib_dir
fi
for (( i=0; i<${#design_libs[*]}; i++ )); do
lib="${design_libs[i]}"
lib_dir="$sim_lib_dir/$lib"
if [[ ! -e $lib_dir ]]; then
mkdir -p $lib_dir
fi
done
}
# Delete generated data from the previous run
reset_run()
{
files_to_remove=(ucli.key blk_mem_gen_0_simv vlogan.log vhdlan.log compile.log elaborate.log simulate.log .vlogansetup.env .vlogansetup.args .vcs_lib_lock scirocco_command.log 64 AN.DB csrc blk_mem_gen_0_simv.daidir)
for (( i=0; i<${#files_to_remove[*]}; i++ )); do
file="${files_to_remove[i]}"
if [[ -e $file ]]; then
rm -rf $file
fi
done
create_lib_dir
}
# Check command line arguments
check_args()
{
if [[ ($1 == 1 ) && ($2 != "-lib_map_path" && $2 != "-noclean_files" && $2 != "-reset_run" && $2 != "-help" && $2 != "-h") ]]; then
echo -e "ERROR: Unknown option specified '$2' (type \"./blk_mem_gen_0.sh -help\" for more information)\n"
exit 1
fi
if [[ ($2 == "-help" || $2 == "-h") ]]; then
usage
fi
}
# Script usage
usage()
{
msg="Usage: blk_mem_gen_0.sh [-help]\n\
Usage: blk_mem_gen_0.sh [-lib_map_path]\n\
Usage: blk_mem_gen_0.sh [-reset_run]\n\
Usage: blk_mem_gen_0.sh [-noclean_files]\n\n\
[-help] -- Print help information for this script\n\n\
[-lib_map_path <path>] -- Compiled simulation library directory path. The simulation library is compiled\n\
using the compile_simlib tcl command. Please see 'compile_simlib -help' for more information.\n\n\
[-reset_run] -- Recreate simulator setup files and library mappings for a clean run. The generated files\n\
from the previous run will be removed. If you don't want to remove the simulator generated files, use the\n\
-noclean_files switch.\n\n\
[-noclean_files] -- Reset previous run, but do not remove simulator generated files from the previous run.\n\n"
echo -e $msg
exit 1
}
# Launch script
run $1 $2
| hsnuonly/PikachuVolleyFPGA | VGA.ip_user_files/sim_scripts/blk_mem_gen_0_1/vcs/blk_mem_gen_0.sh | Shell | gpl-3.0 | 6,702 |
#!/bin/bash
# 1.1.1.2 Ensure mounting of freevxfs filesystems is disabled (Scored)
MODULE=freevxfs
RESULT=$(/sbin/lsmod | /bin/grep $MODULE)
if [[ -z $RESULT ]]
then
# module is not loaded in kernel, check if this was remediated
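# (a remediated system typically carries a line such as "install freevxfs /bin/true" in /etc/modprobe.d/)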
RESULT=$(/bin/grep "install[[:space:]+]$MODULE[[:space:]+]/bin/true" /etc/modprobe.d/*)
if [[ $RESULT ]]
then
echo 'cis_benchmark_1_1_1_2=passed'
else
echo 'cis_benchmark_1_1_1_2=failed'
fi
else
echo 'cis_benchmark_1_1_1_2=failed'
fi
| proletaryo/puppet-ciscentos6 | files/scripts/benchmark-1.1.1.2.sh | Shell | gpl-3.0 | 491 |
#!/usr/bin/env bash
SDIR="$HOME/.config/polybar/shades/scripts"
# Launch Rofi
MENU="$(rofi -no-config -no-lazy-grab -sep "|" -dmenu -i -p '' \
-theme $SDIR/rofi/styles.rasi \
<<< "♥ amber|♥ blue|♥ blue-gray|♥ brown|♥ cyan|♥ deep-orange|\
♥ deep-purple|♥ green|♥ gray|♥ indigo|♥ blue-light|♥ green-light|\
♥ lime|♥ orange|♥ pink|♥ purple|♥ red|♥ teal|♥ yellow|♥ amber-dark|\
♥ blue-dark|♥ blue-gray-dark|♥ brown-dark|♥ cyan-dark|♥ deep-orange-dark|\
♥ deep-purple-dark|♥ green-dark|♥ gray-dark|♥ indigo-dark|♥ blue-light-dark|\
♥ green-light-dark|♥ lime-dark|♥ orange-dark|♥ pink-dark|♥ purple-dark|♥ red-dark|♥ teal-dark|♥ yellow-dark|")"
case "$MENU" in
## Light Colors
*amber) "$SDIR"/colors-light.sh --amber ;;
*blue) "$SDIR"/colors-light.sh --blue ;;
*blue-gray) "$SDIR"/colors-light.sh --blue-gray ;;
*brown) "$SDIR"/colors-light.sh --brown ;;
*cyan) "$SDIR"/colors-light.sh --cyan ;;
*deep-orange) "$SDIR"/colors-light.sh --deep-orange ;;
*deep-purple) "$SDIR"/colors-light.sh --deep-purple ;;
*green) "$SDIR"/colors-light.sh --green ;;
*gray) "$SDIR"/colors-light.sh --gray ;;
*indigo) "$SDIR"/colors-light.sh --indigo ;;
*blue-light) "$SDIR"/colors-light.sh --light-blue ;;
*green-light) "$SDIR"/colors-light.sh --light-green ;;
*lime) "$SDIR"/colors-light.sh --lime ;;
*orange) "$SDIR"/colors-light.sh --orange ;;
*pink) "$SDIR"/colors-light.sh --pink ;;
*purple) "$SDIR"/colors-light.sh --purple ;;
*red) "$SDIR"/colors-light.sh --red ;;
*teal) "$SDIR"/colors-light.sh --teal ;;
*yellow) "$SDIR"/colors-light.sh --yellow ;;
## Dark Colors
*amber-dark) "$SDIR"/colors-dark.sh --amber ;;
*blue-dark) "$SDIR"/colors-dark.sh --blue ;;
*blue-gray-dark) "$SDIR"/colors-dark.sh --blue-gray ;;
*brown-dark) "$SDIR"/colors-dark.sh --brown ;;
*cyan-dark) "$SDIR"/colors-dark.sh --cyan ;;
*deep-orange-dark) "$SDIR"/colors-dark.sh --deep-orange ;;
*deep-purple-dark) "$SDIR"/colors-dark.sh --deep-purple ;;
*green-dark) "$SDIR"/colors-dark.sh --green ;;
*gray-dark) "$SDIR"/colors-dark.sh --gray ;;
*indigo-dark) "$SDIR"/colors-dark.sh --indigo ;;
*blue-light-dark) "$SDIR"/colors-dark.sh --light-blue ;;
*green-light-dark) "$SDIR"/colors-dark.sh --light-green ;;
*lime-dark) "$SDIR"/colors-dark.sh --lime ;;
*orange-dark) "$SDIR"/colors-dark.sh --orange ;;
*pink-dark) "$SDIR"/colors-dark.sh --pink ;;
*purple-dark) "$SDIR"/colors-dark.sh --purple ;;
*red-dark) "$SDIR"/colors-dark.sh --red ;;
*teal-dark) "$SDIR"/colors-dark.sh --teal ;;
*yellow-dark) "$SDIR"/colors-dark.sh --yellow ;;
esac
| SubhrajitPrusty/dotfiles | config/polybar/shades/scripts/color-switch.sh | Shell | gpl-3.0 | 2,759 |
#!/bin/sh
set -eu
curl -sL https://deb.nodesource.com/setup_6.x | sudo -E bash -
sudo apt-get install -y nodejs node-gyp
sudo npm install -g tty.js pm2
pm2 start tty.js
| jerrywardlow/devops-playground | ttyjs/ttyjs.sh | Shell | gpl-3.0 | 174 |
#!/usr/bin/env bash
#== Import script args ==
timezone=$(echo "$1")
#== Bash helpers ==
function info {
echo " "
echo "--> $1"
echo " "
}
#== Provision script ==
info "Provision-script user: `whoami`"
info "Allocate swap for MySQL 5.6"
fallocate -l 2048M /swapfile
chmod 600 /swapfile
mkswap /swapfile
swapon /swapfile
echo '/swapfile none swap defaults 0 0' >> /etc/fstab
info "Configure locales"
update-locale LC_ALL="C"
dpkg-reconfigure locales
info "Configure timezone"
echo ${timezone} | tee /etc/timezone
dpkg-reconfigure --frontend noninteractive tzdata
info "Prepare root password for MySQL"
debconf-set-selections <<< "mysql-server-5.6 mysql-server/root_password password \"''\""
debconf-set-selections <<< "mysql-server-5.6 mysql-server/root_password_again password \"''\""
echo "Done!"
info "Update OS software"
apt-get update
apt-get upgrade -y
info "Install additional software"
apt-get install -y git php5-curl php5-cli php5-intl php5-mysqlnd php5-gd php5-fpm nginx mysql-server-5.6
info "Configure MySQL"
sed -i "s/.*bind-address.*/bind-address = 0.0.0.0/" /etc/mysql/my.cnf
echo "Done!"
info "Configure PHP-FPM"
sed -i 's/user = www-data/user = vagrant/g' /etc/php5/fpm/pool.d/www.conf
sed -i 's/group = www-data/group = vagrant/g' /etc/php5/fpm/pool.d/www.conf
sed -i 's/owner = www-data/owner = vagrant/g' /etc/php5/fpm/pool.d/www.conf
echo "Done!"
info "Configure NGINX"
sed -i 's/user www-data/user vagrant/g' /etc/nginx/nginx.conf
echo "Done!"
info "Enabling site configuration"
ln -s /app/vagrant/nginx/app.conf /etc/nginx/sites-enabled/app.conf
echo "Done!"
info "Initailize databases for MySQL"
mysql -uroot <<< "CREATE DATABASE yii2advanced"
mysql -uroot <<< "CREATE DATABASE yii2_advanced_tests"
echo "Done!"
info "Install composer"
curl -sS https://getcomposer.org/installer | php -- --install-dir=/usr/local/bin --filename=composer
info "Prepare root password for phpmyadmin"
debconf-set-selections <<< "phpmyadmin phpmyadmin/dbconfig-install boolean true"
debconf-set-selections <<< "phpmyadmin phpmyadmin/mysql/admin-pass password \"''\""
debconf-set-selections <<< "phpmyadmin phpmyadmin/mysql/app-pass password \"''\""
echo "Done!"
info "install phpmyadmin"
apt-get install -q -y phpmyadmin
sed -i "s/^\s*\/\/\s*\(\$cfg\['Servers'\]\[\$i\]\['AllowNoPassword'\] = TRUE;\).*$/ \\1/" /etc/phpmyadmin/config.inc.php
| DieTransparente/sk-vote-php | vagrant/provision/once-as-root.sh | Shell | gpl-3.0 | 2,372 |
#!/bin/bash
set -e
# Description: Multiple conditions in loop
# WHILE loop
W_IDX=1
W_BOOL="true"
while [[ "${W_IDX}" -lt 5 && "${W_BOOL}" = "true" ]]; do
echo "While loop: ${W_IDX}"
W_IDX=$((W_IDX + 1))
# Get out before index is exhausted.
if [ "${W_IDX}" -gt 2 ]; then
W_BOOL="false"
fi
done
| limelime/Scripts | bash/loop-multiple-conditions.sh | Shell | gpl-3.0 | 313 |
#!/bin/bash
INTEL_OPENMP_LATEST_BUILD_LINK=https://www.openmprtl.org/sites/default/files/libomp_20131209_oss.tgz
CLANG_INCLUDE=~/code/llvm/include
CLANG_BIN=~/code/llvm/build/Debug+Asserts/bin
CLANG_LIB=~/code/llvm/build/Debug+Asserts/lib
OPENMP_INCLUDE=~/code/libomp_oss/exports/common/include
OPENMP_LIB=~/code/libomp_oss/exports/mac_32e/lib.thin
cd ~/
mkdir code
cd ~/code
git clone https://github.com/clang-omp/llvm
git clone https://github.com/clang-omp/compiler-rt llvm/projects/compiler-rt
git clone -b clang-omp https://github.com/clang-omp/clang llvm/tools/clang
cd llvm
mkdir build
cd build
../configure
make
cd Debug+Asserts/bin
mv clang clang2
rm -rf clang++
ln -s clang2 clang2++
echo "LLVM+Clang+OpenMP Include Path : " ${CLANG_INCLUDE}
echo "LLVM+Clang+OpenMP Bin Path : " ${CLANG_BIN}
echo "LLVM+Clang+OpenMP Lib Path : " ${CLANG_LIB}
cd ~/code
curl ${INTEL_OPENMP_LATEST_BUILD_LINK} -o libomp_oss_temp.tgz
gunzip -c libomp_oss_temp.tgz | tar xopf -
rm -rf libomp_oss_temp.tgz
cd libomp_oss
echo "If you do not have GCC installed (not normal on vanilla Mavericks), you must comment out lines 450-451 in libomp_oss/tools/check-tools.pl. Have you done this or want to compile anyway?"
select yn in "Yes" "No"; do
case $yn in
Yes ) make compiler=clang; break;;
No ) exit;;
esac
done
echo "OpenMP Runtime Include Path : " ${OPENMP_INCLUDE}
echo "OpenMP Runtime Lib Path : " ${OPENMP_LIB}
(echo 'export PATH='${CLANG_BIN}':$PATH';
echo 'export C_INCLUDE_PATH='${CLANG_INCLUDE}':'${OPENMP_INCLUDE}':$C_INCLUDE_PATH';
echo 'export CPLUS_INCLUDE_PATH='${CLANG_INCLUDE}':'${OPENMP_INCLUDE}':$CPLUS_INCLUDE_PATH';
echo 'export LIBRARY_PATH='${CLANG_LIB}':'${OPENMP_LIB}':$LIBRARY_PATH';
echo 'export DYLD_LIBRARY_PATH='${CLANG_LIB}':'${OPENMP_LIB}':$DYLD_LIBRARY_PATH') >> ~/.profile
echo "LLVM+Clang+OpenMP is now accessible through [ clang2 ] via terminal and does not conflict with Apple's clang"
| Erotemic/local | macosx/setup_clang2.sh | Shell | gpl-3.0 | 1,954 |
# Copyright 2015 Yvette Graham
#
# This file is part of Crowd-Alone.
#
# Crowd-Alone is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Crowd-Alone is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Crowd-Alone. If not, see <http://www.gnu.org/licenses/>
rdir=batched-hits
wdir=analysis
mkdir -p $wdir
perl hits2r.pl ad $rdir $wdir > $wdir/ad-latest.csv
wc -l $wdir/ad-latest.csv
R --no-save --args $wdir ad < concurrent-hits.R
R --no-save --args $wdir ad < wrkr-times.R
perl filter-rejected.pl $wdir/ad-wrkr-stats.csv < $wdir/ad-latest.csv > $wdir/ad-approved.csv
perl repeats.pl < $wdir/ad-approved.csv > $wdir/ad-repeats.csv
R --no-save --args $wdir < quality-control.R
perl raw-bad-ref-pval-2-csv.pl < $wdir/ad-trk-stats.txt > $wdir/ad-trk-stats.csv
perl filter-pval-paired.pl < $wdir/ad-trk-stats.csv > $wdir/ad-trk-stats.class
perl filter-latest.pl ad $wdir/ad-trk-stats.class < $wdir/ad-approved.csv > $wdir/ad-good-raw.csv
perl repeats.pl < $wdir/ad-good-raw.csv > $wdir/ad-good-raw-repeats.csv
| ygraham/crowd-alone | proc-hits/proc-hits-step1.sh | Shell | gpl-3.0 | 1,456 |
#!/bin/bash
ZK_DIR=`extract_name ${zookeeper}`
tar zxf ${zookeeper} -C ${INSTALL_HOME}
ZK_HOME=${INSTALL_HOME}/zookeeper
mv ${INSTALL_HOME}/${ZK_DIR} ${ZK_HOME}
echo "
# zookeeper settings
export ZK_HOME=${INSTALL_HOME}/zookeeper
export PATH=\$PATH:\$ZK_HOME/bin
# settings for spark on yarn
" >> ${CUSTOM_PROFILE}
# config zookeeper
ZK_CONF_DIR=${ZK_HOME}/conf
mv ${ZK_CONF_DIR}/zoo-simple.cfg ${ZK_CONF_DIR}/zoo.cfg
ZK_DATA_DIR=${DATA_HOME}/zookeeper/data
mkdir -p ${ZK_DATA_DIR}
echo "1" > ${ZK_DATA_DIR}/myid # TODO fix me, each slave has a myid file with different value
ZK_DATA_LOG_DIR=${LOG_HOME}/zookeeper/datalog
# Generate zookeeper-env.sh in the conf directory with content like:
# #!/bin/bash
# export ZOO_LOG_DIR=....
sed -i "s/^dataDir=*/dataDir=${ZK_DATA_DIR}" ${ZK_CONF_DIR}/zoo.cfg
sed -i "/^dataDir=/a\\dataLogDir=${ZK_DATA_LOG_DIR}" ${ZK_CONF_DIR}/zoo.cfg
host_id=1
for host in `echo ${zookeeper_hosts} | sed 's/,/ /g'`; do
echo "server.${host_id}=${host}:2888:38888" >> ${ZK_CONF_DIR}/zoo.cfg
((host_id++))
done
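# The loop above appends one line per host to zoo.cfg, e.g. "server.1=master-1:2888:3888".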
sed -i "s/^zookeeper.log.dir=*/zookeeper.log.dir=${LOG_HOME}/zookeeper" ${ZK_CONF_DIR}/log4j.properties
sed -i "s/^zookeeper.tracelog.dir=*/zookeeper.tracelog.dir=${LOG_HOME}/zookeeper/trace" ${ZK_CONF_DIR}/log4j.properties
| taideli/study-parent | productization/install/zk/install-zk.sh | Shell | gpl-3.0 | 1,256 |
java -cp wpa2.jar WPA2
| JarrettR/FPGA-Cryptoparty | FPGA/host/run.sh | Shell | gpl-3.0 | 23 |
#!/bin/bash
set -e
pushd ../java/argon2
ndk-build
popd
| PhilippC/keepass2android | src/build-scripts/build-native.sh | Shell | gpl-3.0 | 56 |
#!/bin/bash
## test if guestfish command is present
which guestfish > /dev/null 2>&1
if [ "$?" != 0 ]; then
echo "[!!!] Command 'guestfish' not found (Install it!). Making changes to VM FAILED."
exit 1
fi
## using direct backend to avoid selinux issues on fedora for now
export LIBGUESTFS_BACKEND=direct
## hostname
VM_HOSTNAME=$(echo $VM_NAME|sed -e 's/\./-/g; s/_/-/g')
if [ ! -f /var/lib/fast-vm/appliance/capability_xfs_el8 ] && [ ! -d /var/tmp/fedora29/appliance ]; then
echo "[!!!] XFS_EL8 capable appliance or Fedora 29+ appliance is required for properrun"
exit 1
fi
# compatibility: blindly try exporting path to fedora29 appliance
if [ -z "$LIBGUESTFS_PATH" ]; then export LIBGUESTFS_PATH=/var/tmp/fedora29/appliance; fi
# detect the slot where network card is as system uses hardware-based names for network interfaces
net_card_slot=$(virsh --connect qemu:///system dumpxml $VM_NAME|xmllint --xpath "//interface[.//source[@network='$LIBVIRT_NETWORK']]/address/@slot" - 2>/dev/null|cut -d\" -f 2|head -1|cut -dx -f 2)
net_card=$(($net_card_slot+0))
## timezone of hypervisor
timezone=$(readlink /etc/localtime | sed 's/^.*zoneinfo\/\(.*\)$/\1/')
guestfish -a "/dev/$THINPOOL_VG/$VM_NAME" -m /dev/f32/root_lv -m /dev/sda1:/boot --selinux <<EOF
sh 'sed -i "s/ONBOOT=no/ONBOOT=yes/" /etc/sysconfig/network-scripts/ifcfg-ens${net_card}'
sh 'sed -i "s/.*/$VM_HOSTNAME/" /etc/hostname'
# change timezone of machine to match hypervisor
sh 'rm -f /etc/localtime'
sh 'ln -s /usr/share/zoneinfo/$timezone /etc/localtime'
sh 'sed -i "s/#PermitRootLogin.*/PermitRootLogin yes/" /etc/ssh/sshd_config'
selinux-relabel /etc/selinux/targeted/contexts/files/file_contexts /etc/ssh/sshd_config
selinux-relabel /etc/selinux/targeted/contexts/files/file_contexts /etc/localtime
selinux-relabel /etc/selinux/targeted/contexts/files/file_contexts /etc/sysconfig/network-scripts/ifcfg-ens${net_card}
selinux-relabel /etc/selinux/targeted/contexts/files/file_contexts /etc/hostname
EOF
|
OndrejHome/fast-vm-public-images
|
fedora/hacks/6g_fedora-32-hacks.sh
|
Shell
|
gpl-3.0
| 1,993 |
#!/bin/bash
cd `dirname "${BASH_SOURCE[0]}"`
source ../utilities/findMatlab.sh
if [[ $matexe == *matlab ]]; then args=-nodesktop; fi
cat <<EOF | $matexe $args
run ../matlab/utilities/initPaths.m;
buffhost='localhost';buffport=1972;
eventViewer;
quit;
EOF
|
jadref/buffer_bci
|
dataAcq/startMatlabEventViewer.sh
|
Shell
|
gpl-3.0
| 256 |
########################################################
# Lauf.Delete #
# (c) 2010 joshua.redfield(AT)gmail.com #
########################################################
_rm () {
rm -f "$arg"
lauf_notify "Trashed:" "$arg"
}
########################################################
# Arguments for skipping GUI                           #
########################################################
if [ ! ${lauf_exec2:=unset} = "unset" ]; then
for arg in $lauf_exec
do
if [ ! $arg = $lauf_exec1 ]; then
_rm
fi
done
return
fi
########################################################
# GUI Code #
########################################################
cd $HOME
arg=$(zenity $lauf_app_options --file-selection --title=${lauf_app_name}" - $1")
case $? in
0)
_rm
return
;;
1)
lauf_cancel
esac
return
|
joshua-redfield/lauf
|
core/delete.sh
|
Shell
|
gpl-3.0
| 984 |
#!/usr/bin/env bash
set -e # Exit on errors
echo "-> Starting JIRA ..."
echo " - JIRA_VERSION: ${JIRA_VERSION}"
echo " - JIRA_HOME: ${JIRA_HOME}"
mkdir -p $JIRA_HOME
JIRA_DIR=/opt/atlassian-jira-software-${JIRA_VERSION}-standalone
if [ -d ${JIRA_DIR} ]; then
echo "-> JIRA ${JIRA_VERSION} already found at ${JIRA_DIR}. Skipping download."
else
JIRA_TARBALL_URL=https://downloads.atlassian.com/software/jira/downloads/atlassian-jira-software-${JIRA_VERSION}.tar.gz
echo "-> Downloading JIRA ${JIRA_VERSION} from ${JIRA_TARBALL_URL} ..."
wget --progress=dot:mega ${JIRA_TARBALL_URL} -O /tmp/atlassian-jira.tar.gz
echo "-> Extracting to ${JIRA_DIR} ..."
tar xzf /tmp/atlassian-jira.tar.gz -C /opt
rm -f /tmp/atlassian-jira.tar.gz
echo "-> Installation completed"
fi
# Set up the Postgres JDBC driver
rm -f ${JIRA_DIR}/lib/postgresql-9.1-903.jdbc4-atlassian-hosted.jar
wget --progress=dot:mega https://jdbc.postgresql.org/download/postgresql-9.4.1212.jar -O ${JIRA_DIR}/lib/postgresql-9.4.1212.jar
# Uncomment to increase Tomcat's maximum heap allocation
# export JAVA_OPTS=-Xmx512M $JAVA_OPTS
echo "-> Running JIRA server ..."
${JIRA_DIR}/bin/catalina.sh run &
# Kill JIRA process on signals from supervisor
trap 'kill $(jobs -p)' SIGINT SIGTERM EXIT
# Wait for JIRA process to terminate
wait $(jobs -p)
|
matisku/jira-docker
|
jira-server/jira-server.sh
|
Shell
|
gpl-3.0
| 1,325 |
#!/bin/bash
# use a frontend that expects no interactive input at all.
export DEBIAN_FRONTEND=noninteractive
# Stop the execution of a script if a command or pipeline has an error.
set -e
# Print all executed commands to the terminal
set -x
# Drupal server parameters.
CONTAINER_NAME=$1
MYSQL_CONTAINER=$2
# Check if mysql container is already installed.
if [ -z "$(docker ps -a | grep ${MYSQL_CONTAINER})" ]; then
echo "Drupal has a dependency of MySQL container."
exit 0
else
if [ -z "$(docker ps | grep ${MYSQL_CONTAINER})" ]; then
docker start ${MYSQL_CONTAINER}
fi
fi
# Check if drupal container is already installed.
if [ -z "$(docker ps -a | grep ${CONTAINER_NAME})" ]; then
  # Download the 'drupal' docker image.
docker pull drupal:latest
  # Run the 'drupal' docker image.
docker run --name $CONTAINER_NAME \
--link $MYSQL_CONTAINER:mysql \
-p 80:80 \
-d drupal:latest
else
if [ -z "$(docker ps | grep ${CONTAINER_NAME})" ]; then
docker start ${CONTAINER_NAME}
fi
fi
echo "Drupal server ip address: $(docker inspect \
--format '{{.NetworkSettings.IPAddress}}' ${CONTAINER_NAME})"
|
ksaifullah/vagrant-docker
|
scripts/docker-drupal-default.sh
|
Shell
|
gpl-3.0
| 1,130 |
#!/bin/bash
##########################################################3
# This script will install Agent Smith on a gnu/linux system.
# If you're using BSD or Mac OS, I believe it should also work fine.
# If you are using Windows, move the file agentsmith.conf to
# C:/Users/$YOURNAME/.agentsmith.conf
# then just run the tcl script, agentsmith.tcl
# make sure, of course, that you have Tcl/Tk installed.
# see readme
##########################################################3
name=$(whoami)
if [ ! -d $HOME/bin/ ]; then
mkdir $HOME/bin/
	PATH=$PATH:$HOME/bin/
	export PATH
fi
echo "Wake up, $name ..."
echo "The Matrix has you!"
echo "Follow the White Rabbit ..."
echo "Installing Agent Smith ..."
echo "Creating config files ..."
cp agentsmith.conf $HOME/.agentsmith.conf
echo "Moving files"
cp agentsmith.tcl $HOME/bin/agentsmith
chmod +x $HOME/bin/agentsmith
echo "Installation complete!"
echo "Thank you, $name, for using Agent Smith"
echo "To run Agent Smith from terminal type agentsmith, or make an icon/menu item/short cut to /home/$name/bin/agentsmith"
exit
|
tonybaldwin/agentsmith
|
install.sh
|
Shell
|
gpl-3.0
| 1,086 |
# webReaction setup for bash
# The top directory is the top directory of eclipse workspace
# The rest of the environment variables should follow from this.
export REACTWORKSPACE=$1
# ================================================================
# Most likely you do not need to modify below this line.
# But if the location of the subsystems have been moved, then these can be modified
# ================================================================
# REACT subsystem
export CCROOT=$REACTWORKSPACE/REACT
export REACTROOT=$REACTWORKSPACE/REACT
# ================================================================
# These locations are inherent to the system and should never be touched.
# ================================================================
# --------------------------------------------------------------
# REACT subsystem
# --------------------------------------------------------------
REACTBIN=$REACTROOT/bin
REACTSCRIPTS=$REACTROOT/programs/scripts
export PATH=$PATH:$REACTBIN:$REACTSCRIPTS
# ================================================================
# Environment Variables for AnalysisDevelop
# ================================================================
export CodeBaseRoot=$REACTWORKSPACE/AnalysisStable
export ANALYSIS_BASE=$CodeBaseRoot
export ANALYSIS_DEVBASE=$REACTWORKSPACE/AnalysisDevelop
export ANALYSIS_BINARY=$REACTWORKSPACE/AnalysisBinaries
export PATH=$PATH:$ANALYSIS_BINARY
|
blurock/REACT
|
bin/webReaction.bash
|
Shell
|
gpl-3.0
| 1,434 |
# Convert a binary string to its decimal value using bash base#number arithmetic.
bh_bin2dec() {
(( $# < 1 )) && return 1
echo $((2#$1))
}
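# Example usage (illustrative):
#   bh_bin2dec 1010      # prints 10
#   bh_bin2dec 11111111  # prints 255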
|
merces/bashacks
|
src/math/bh_bin2dec.sh
|
Shell
|
gpl-3.0
| 69 |
#!/bin/bash
############################################################################
### preconditions
############################################################################
# check parameter count
if [ "$2" == "" ]; then
echo "usage: $0 <tag> <message>"
exit 1
fi
# check correct working dir
#if [ ! -d .git -a ! -f setup.py ]; then
# echo "please run this script from project base directory"
# exit 2
#fi
# check if rpmbuild is installed
which rpmbuild >/dev/null 2>&1
if [ $? -ne 0 ]; then
yum -y install rpmdevtools rpmlint
fi
# check if build environment is created
if [ ! -d ~/rpmbuild ]; then
rpmdev-setuptree
fi
############################################################################
############################################################################
### prepare
############################################################################
# predefine variables
TAG=$1
MESSAGE="$2"
GITREPO=https://github.com/sshkm/django-sshkm.git
TEMPDIR=/tmp/sshkm-build
SPEC=rpmbuild/SPECS/sshkm.spec
# cleanup temp dir
rm -rf $TEMPDIR
mkdir -p $TEMPDIR
# clone git repo
cd $TEMPDIR
git clone $GITREPO
# get RPM release
#cd $TEMPDIR/django-sshkm
#RELEASE=$((($(grep "Release:" $SPEC | awk '{print $2}' | awk -F '%' '{print $1}')+1)))
RELEASE=1
############################################################################
############################################################################
### verify
############################################################################
# verify settings
echo "--------------------------------------------------------------"
echo "TAG: $TAG"
echo "MESSAGE: $MESSAGE"
echo "RPM RELEASE: $RELEASE"
echo ""
echo "---- please press enter to continue"
echo "--------------------------------------------------------------"
read
echo ""
############################################################################
############################################################################
### make changes for new version
############################################################################
# change to temporary directory
cd $TEMPDIR/django-sshkm
# set version in setup.py
sed -i "s/version = .*/version = '$TAG'/g" setup.py
# set version and release in SPEC file
sed -i "s/Version:\t.*/Version:\t$TAG/g" $SPEC
sed -i "s/Release:\t.*/Release:\t$RELEASE%{?dist}/g" $SPEC
############################################################################
############################################################################
### commit changes and create tag
############################################################################
# change to temporary directory
cd $TEMPDIR/django-sshkm
# commit and push last modifications to git repo
git commit -a -m "$MESSAGE"
git push
# create tag and push it to git repo
git tag -a $TAG -m "$MESSAGE"
git push origin $TAG
############################################################################
############################################################################
### prepare files for pypi and upload them
############################################################################
# change to temporary directory
cd $TEMPDIR/django-sshkm
# prepare
python setup.py sdist
PYPIDIR=~/rpmbuild/pypi
rm -rf $PYPIDIR
mkdir -p $PYPIDIR
cp dist/* $PYPIDIR/
cp django_sshkm.egg-info/PKG-INFO $PYPIDIR/
############################################################################
############################################################################
### create SRPM
############################################################################
# change to temporary directory
cd $TEMPDIR/django-sshkm
# create tarball for rpmbuild
RPMSRC=$TEMPDIR/rpmbuild/SOURCES
mkdir -p $RPMSRC
cp -a rpmbuild/SOURCES/sshkm-master $RPMSRC/
mv $RPMSRC/sshkm-master $RPMSRC/sshkm-$TAG
cd $RPMSRC
tar czf ~/rpmbuild/SOURCES/sshkm-${TAG}.tar.gz sshkm-$TAG/
# build SRPM
cd $TEMPDIR/django-sshkm
rpmbuild -bs $TEMPDIR/django-sshkm/$SPEC
############################################################################
############################################################################
### cleanups
############################################################################
# cleanup temp dir
rm -rf $TEMPDIR
############################################################################
############################################################################
### final info
############################################################################
echo "--------------------------------------------------------------"
echo "manual steps:"
echo "- register pypi (files in $PYPIDIR)"
echo "- upload SRPM file to copr (file in ~/rpmbuild/SRPMS)"
echo "--------------------------------------------------------------"
############################################################################
|
sshkm/django-sshkm
|
release.sh
|
Shell
|
gpl-3.0
| 4,864 |
#!/bin/bash
#
#
# Installs a link to the current directory.
# Thus, changes in any py files do not require
# reinstallation.
#
# However, the numjuggler script is generated
# automatically and contains the current version number.
# If this number is changed in setup.py, the
# package should be reinstalled in order
# to update the numjuggler script.
# Uninstall previous version
pip uninstall numjuggler;
# install the package anew (this generates new script)
pip install -v -e .
# prepare new distribution files. These are not needed
# for local installation, but might be useful for users
python setup.py clean
rm dist/*
python setup.py sdist
|
inr-kit/numjuggler
|
install.sh
|
Shell
|
gpl-3.0
| 648 |
/bin/sed '/^X400/ D' /var/www/tasks/CUSTOM-TRANS_temp4 > /var/www/tasks/CUSTOM-TRANS_temp5
/bin/sed '/^x400/ D' /var/www/tasks/CUSTOM-TRANS_temp5 > /var/www/tasks/CUSTOM-TRANS_temp6
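# Note: if the temp5 intermediate is not needed elsewhere, both passes could be
# collapsed into one case-insensitive expression, e.g.:
#   /bin/sed '/^[Xx]400/ D' /var/www/tasks/CUSTOM-TRANS_temp4 > /var/www/tasks/CUSTOM-TRANS_temp6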
|
deeztek/Hermes-Secure-Email-Gateway
|
dirstructure/opt/hermes/scripts/sed.sh
|
Shell
|
gpl-3.0
| 182 |
#!/bin/bash -e
. ../../blfs.comm
build_src() {
srcfil=phonon-backend-vlc-0.8.0.tar.xz
srcdir=phonon-backend-vlc-0.8.0
tar -xf $BLFSSRC/$PKGLETTER/$CURDIR/$srcfil
cd $srcdir
mkdir -pv build && cd build
cmake -DCMAKE_INSTALL_PREFIX=$KDE_PREFIX \
-DCMAKE_INSTALL_LIBDIR=lib \
-DCMAKE_BUILD_TYPE=Release \
-Wno-dev \
..
make
make DESTDIR=$BUILDDIR install
cleanup_src ../.. $srcdir
}
gen_control() {
cat > $DEBIANDIR/control << EOF
$PKGHDR
Depends: phonon (>= 4.8.0), VLC (>= 2.1.5)
Description: VLC backend for Phonon
EOF
}
build
|
fangxinmiao/projects
|
Architeture/OS/Linux/Distributions/LFS/build-scripts/blfs-7.6-systemv/p/Phonon-backend-vlc-0.8.0/build.sh
|
Shell
|
gpl-3.0
| 537 |
#!/bin/bash
trap ctrl_c INT
function ctrl_c() {
echo "Ha pulsado ctrl-c, saliendo..."
exit
}
###ATTENTION###
###To build the deb package we need fakeroot, python-stdeb, python-all, build-essential and python2.7 installed
###############
echo ""
echo "Building grx-asistencia"
echo "Checking dependencies:"
echo ""
for paquete in fakeroot python-stdeb python-all build-essential python2.7
do
echo "Comprobando si tenemos el paquete $paquete"
pkg=$(dpkg -s $paquete 2>&1|grep -m 1 "Version:"|awk "{print $2}")
if [ -z "$pkg" ]
then
echo -e "\e[31mFalta $paquete\e[0m"
echo "###ATENCION###"
echo "Alguno de los paquetes necesarios falta, ¿desea instalarlos?"
select sn in "Si" "No"; do
case $sn in
Si ) sudo apt-get install fakeroot python-stdeb python-all build-essential python2.7; break;;
No ) echo "Pruebe con:"
echo -e "\e[1msudo apt-get install fakeroot python-stdeb python-all build-essential python2.7\e[0m"
echo "##############"
echo "Saliendo....."
exit;;
esac
done
else
echo -e "\e[32m$paquete $pkg\e[0m"
fi
done
# MAIN MENU
OPTION=$(whiptail --title "GrX - Asistencia configurator/installer" --menu "Select an option" 14 78 3 \
"CREAR" "Build the DEB package." \
"INSTALAR" "Install the package." \
"EJECUTAR" "Run the program." 3>&1 1>&2 2>&3 )
case $OPTION in
CREAR)
dependencias="python-apt,zenmap,linphone,fabric,nmap,python-vte,gir1.2-webkit-3.0,python-webkit,python-nmap,python-stdeb,rdesktop,clamav,proxychains,ldap-utils,winbind,python-configobj,sshfs"
version=$(grep version setup.py |cut -d'=' -f2 |sed -e 's/"//g'|sed -e 's/,//g')
if (python setup.py --command-packages=stdeb.command sdist_dsc --depends $dependencias bdist_deb); then
echo "Compilado el paquete version $version"
echo -e "\e[1m¿Quieres instalar el programa?\e[0m"
select sn in "Si" "No"; do
case $sn in
				Yes ) if (sudo dpkg -i deb_dist/python-grx-asistencia_$version-1_all.deb); then
					echo "Deb installed, adjusting permissions"
if (sudo chmod 755 /usr/share/grx/ldap/*)&&(sudo chmod 777 /usr/share/grx/auxiliar/Packages)&&(sudo chmod 777 /usr/share/grx/auxiliar/sedes.txt.csv)
then
if (sudo grep "ALL ALL= NOPASSWD: /usr/bin/sudo-asistencia-monta.sh" /etc/sudoers)
then
echo "ya estaba"
else
echo "ALL ALL= NOPASSWD: /usr/bin/sudo-asistencia-monta.sh" | sudo tee --append /etc/sudoers
fi
if (sudo grep "ALL ALL= NOPASSWD: /usr/bin/sudo-asistencia-limpiar.sh" /etc/sudoers)
then
echo "ya estaba"
else
echo "ALL ALL= NOPASSWD: /usr/bin/sudo-asistencia-limpiar.sh" | sudo tee --append /etc/sudoers
fi
if (sudo grep "ALL ALL= NOPASSWD: /usr/bin/sudo-asistencia-desmonta.sh" /etc/sudoers)
then
echo "ya estaba"
else
echo "ALL ALL= NOPASSWD: /usr/bin/sudo-asistencia-desmonta.sh" | sudo tee --append /etc/sudoers
fi
fi
else
echo "No se ha podido cambiar los permisos"
fi
break;;
No ) exit;;
esac
done
else
echo "No se ha podido instalar el deb"
exit
fi
;;
	INSTALAR)
	# $version is only set in the CREAR branch, so compute it here as well
	version=$(grep version setup.py |cut -d'=' -f2 |sed -e 's/"//g'|sed -e 's/,//g')
	if (sudo dpkg -i deb_dist/python-grx-asistencia_$version-1_all.deb); then
	echo "Deb installed, adjusting permissions"
if (sudo chmod 755 /usr/share/grx/ldap/*)&&(sudo chmod 777 /usr/share/grx/auxiliar/Packages)
then
if (sudo grep "ALL ALL= NOPASSWD: /usr/bin/sudo-asistencia-monta.sh" /etc/sudoers)
then
echo "ya estaba"
else
echo "ALL ALL= NOPASSWD: /usr/bin/sudo-asistencia-monta.sh" | sudo tee --append /etc/sudoers
fi
if (sudo grep "ALL ALL= NOPASSWD: /usr/bin/sudo-asistencia-desmonta.sh" /etc/sudoers)
then
echo "ya estaba"
else
echo "ALL ALL= NOPASSWD: /usr/bin/sudo-asistencia-desmonta.sh" | sudo tee --append /etc/sudoers
fi
fi
else
echo "No se ha podido cambiar los permisos"
fi
;;
EJECUTAR)
grx-asistencia.sh
;;
esac
|
aavidad/grx-asistencia
|
config.sh
|
Shell
|
gpl-3.0
| 5,709 |
#!/bin/sh
pkgname=physlock
SKIP_ARCH_CHECK=1
pkgver=0.5
vcs=git
gittag=v${pkgver}
kiin_make() {
make PREFIX=/usr
}
kiin_install() {
make DESTDIR=${pkgdir} PREFIX=/usr install
}
|
alekseyrybalkin/kiin-repo
|
essentials/physlock/package.sh
|
Shell
|
gpl-3.0
| 188 |
#!/bin/bash
# keytool -genkey -keyalg RSA -alias root -keystore root.jks -storepass qx4zE7na -validity 360
storepass="qx4zE7na"
keypass=$storepass
echo
echo "Generating two clients and a server for a demo."
echo "-----------------------------------------------"
echo
rm -f alice* bob* server*
# -----------------------
# Generate client certificate
echo "-> Creating Alice's keystore..."
keytool -genkey -keyalg RSA -alias alice -validity 360 \
-keystore alice.jks -storepass $storepass \
-keypass $keypass \
-dname "CN=Alice Demo, O=SGR"
# Generate client certificate
echo "-> Creating Bobs's keystore..."
keytool -genkey -keyalg RSA -alias bob -validity 360 \
-keystore bob.jks -storepass $storepass \
-keypass $keypass \
-dname "CN=Bob Demo, O=SGR"
# Generate server certificate
echo "-> Creating the Server's keystore..."
keytool -genkey -keyalg RSA -alias server -validity 3600 \
-keystore server.jks -storepass $storepass \
-keypass $keypass \
-dname "CN=Server Demo, O=SGR"
# -------------------
echo "-> Importing alice into server..."
keytool -export -keystore alice.jks -alias alice \
-storepass $storepass -file alice.cert
keytool -import -keystore server.jks -alias alice \
-storepass $storepass -file alice.cert
# --------------------
echo "-> Importing bob into server..."
keytool -export -keystore bob.jks -alias bob \
-storepass $storepass -file bob.cert
keytool -import -keystore server.jks -alias bob \
-storepass $storepass -file bob.cert
# --------------------
echo "-> Importing server into clients..."
keytool -export -keystore server.jks -alias server \
-storepass $storepass -file server.cert
keytool -import -keystore alice.jks -alias server \
-storepass $storepass -file server.cert
keytool -import -keystore bob.jks -alias server \
-storepass $storepass -file server.cert
# --------------------
echo "-> Importing alice into bob...."
keytool -import -keystore bob.jks -alias alice \
-storepass $storepass -file alice.cert
echo "-> Importing bob into alice..."
keytool -import -keystore alice.jks -alias bob \
-storepass $storepass -file bob.cert
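# To verify the resulting trust relationships (illustrative check):
#   keytool -list -keystore server.jks -storepass $storepass
# should list a key entry for "server" plus trusted certs for "alice" and "bob".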
|
helderco/univ-secure-chat
|
demo.sh
|
Shell
|
gpl-3.0
| 2,276 |
#!/bin/bash
gsl project.xml
chmod +x autogen.sh version.sh
|
zeromq/zlabs
|
generate.sh
|
Shell
|
mpl-2.0
| 60 |
#!/bin/bash
INTERFACE=tun-vpn-01
INTERFACE2=eth0
STOP_GW=0
shopt -s nullglob
ping -q -I $INTERFACE 8.8.8.8 -c 4 -i 1 -W 5 >/dev/null 2>&1
if test $? -eq 0; then
NEW_STATE=server
else
NEW_STATE=off
fi
if [ -f /tmp/stop_gateway ]; then
logger "Stop-Gateway-marker shutting down dhcpd and batman-server-mode..."
NEW_STATE=off
STOP_GW=1
mv /tmp/stop_gateway /tmp/gateway_stopped
else
if [ -f /tmp/gateway_stopped ]; then
if [ "$NEW_STATE" == "off" ]; then
logger "Gateway is stopped, uplink is dead, remove /tmp/gateway_stopped to reactivate automatic..."
else
logger "Gateway is stopped, uplink is working, remove /tmp/gateway_stopped to reactivate automatic..."
fi
NEW_STATE=off
STOP_GW=2
fi
fi
#try to restart tun-01 automatically
if [ "$NEW_STATE" == "off" -a "$STOP_GW" -eq 0 ]; then
logger "try a restart of openvpn via systemctl"
systemctl restart openvpn@tun-01
echo "1" >> /tmp/tun-vpn-01_check.restart
chmod 777 /tmp/tun-vpn-01_check.restart
chown munin:users /tmp/tun-vpn-01_check.restart
fi
#get current traffic on interfaces
rxold=`cat /tmp/tun-vpn-01_check.rx_bytes`
rxold=${rxold:-0}
txold=`cat /tmp/tun-vpn-01_check.tx_bytes`
txold=${txold:-0}
rxnew=`cat /sys/class/net/$INTERFACE2/statistics/rx_bytes`
txnew=`cat /sys/class/net/$INTERFACE2/statistics/tx_bytes`
rx=$(expr $rxnew - $rxold)
tx=$(expr $txnew - $txold)
#fix wrong values after reboot
if [ $rx -lt 0 ]; then
	rx=0
fi
if [ $tx -lt 0 ]; then
	tx=0
fi
rx=$(expr $rx \* 8) #byte to bit
rx=$(expr $rx / 1000) #k
rx=$(expr $rx / 1000) #m
rx=$(expr $rx / 60) #sec
tx=$(expr $tx \* 8) #byte to bit
tx=$(expr $tx / 1000) #k
tx=$(expr $tx / 1000) #m
tx=$(expr $tx / 60) #sec
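# Worked example: 450 MB moved through eth0 since the last run (one minute):
# 450000000 byte * 8 = 3600000000 bit, /1000 /1000 = 3600 Mbit, /60 s = 60 Mbit/s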
logger "Detected network load tx: $tx Mbit/s rx: $rx Mbit/s"
#cap detected values that are too high
if [ $rx -gt 99 ]; then
rx=99
fi
if [ $tx -gt 99 ]; then
tx=99
fi
#get remaining bandwidth
rx=$(expr 85 - $rx)
tx=$(expr 85 - $tx)
#if lower than 1 Mbit set to 1 Mbit
# else we would set batman to default values
if [ $rx -lt 1 ]; then
rx=1
fi
if [ $tx -lt 1 ]; then
tx=1
fi
#use highest value
if [ $rx -gt $tx ]; then
bw=$rx
else
bw=$tx
fi
#save new values
echo "$rxnew" > /tmp/tun-vpn-01_check.rx_bytes
echo "$txnew" > /tmp/tun-vpn-01_check.tx_bytes
MESH=mesh-wk
if [ -f /sys/class/net/$MESH/mesh/gw_mode ]; then
OLD_STATE="$(cat /sys/class/net/$MESH/mesh/gw_mode)"
if [ ! "$OLD_STATE" == "$NEW_STATE" ]; then
if [ "$NEW_STATE" == "off" ]; then
logger "shutting down dhcpd..."
systemctl stop dhcpd4
elif [ "$NEW_STATE" == "server" ]; then
logger "restarting dhcpd..."
systemctl start dhcpd4
fi
echo $NEW_STATE > /sys/class/net/$MESH/mesh/gw_mode
logger "$MESH: batman gateway mode changed to $NEW_STATE"
fi
echo ${bw}MBit/${bw}MBit > /sys/class/net/$MESH/mesh/gw_bandwidth
### save values for graph ###
if [ "$NEW_STATE" == "server" ]; then
cat /sys/class/net/$MESH/mesh/gw_bandwidth >> /tmp/tun-vpn-01_check.gw_bandwidth
else
echo "0MBit/0MBit" >> /tmp/tun-vpn-01_check.gw_bandwidth
fi
chmod 777 /tmp/tun-vpn-01_check.gw_bandwidth
chown munin:users /tmp/tun-vpn-01_check.gw_bandwidth
### ###
fi
# All remaining mesh domains share exactly the same logic, so handle them in
# one loop instead of one copy-pasted block per domain (behavior unchanged).
for MESH in mesh-lev mesh-ob mesh-rs mesh-ge mesh-gro mesh-lf mesh-kle \
	mesh-froe mesh-fb mesh-saer mesh-re mesh-stra mesh-ems mesh-hw \
	mesh-bot mesh-dus mesh-hstl mesh-bo mesh-len mesh-ha; do
	if [ -f /sys/class/net/$MESH/mesh/gw_mode ]; then
		OLD_STATE="$(cat /sys/class/net/$MESH/mesh/gw_mode)"
		if [ ! "$OLD_STATE" == "$NEW_STATE" ]; then
			echo $NEW_STATE > /sys/class/net/$MESH/mesh/gw_mode
			logger "$MESH: batman gateway mode changed to $NEW_STATE"
		fi
		echo ${bw}MBit/${bw}MBit > /sys/class/net/$MESH/mesh/gw_bandwidth
	fi
done
# mesh-bur is intentionally not handled (its block was commented out).
# mesh-st and mesh-wipp announce a fixed 96 MBit instead of the measured
# bandwidth and are only updated on a state change.
for MESH in mesh-st mesh-wipp; do
	OLD_STATE="$(cat /sys/class/net/$MESH/mesh/gw_mode)"
	if [ "$OLD_STATE" != "$NEW_STATE" ]; then
		echo $NEW_STATE > /sys/class/net/$MESH/mesh/gw_mode
		echo 96MBit/96MBit > /sys/class/net/$MESH/mesh/gw_bandwidth
	fi
done
|
VfN-NRW/ServerScripts
|
tun-vpn-01_check.sh
|
Shell
|
agpl-3.0
| 11,394 |
#!/usr/bin/env bash
# Copyright 2017 Marko Dimjašević
#
# This file is part of jdoop-wrapper.
#
# jdoop-wrapper is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# jdoop-wrapper is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with jdoop-wrapper. If not, see <http://www.gnu.org/licenses/>.
# Starts an Apache Spark master and all worker/slave nodes specified
# in conf/slaves. Make sure to prepare each worker node by running the
# prepare-worker-node.sh script on it first.
WRAPPER_HOME="$(cd "`dirname "$0"`"/..; pwd)"
. ${WRAPPER_HOME}/spark/my-env.sh
cp ${WRAPPER_HOME}/spark/conf/slaves ${SPARK_HOME}/conf/
. ${SPARK_HOME}/sbin/start-all.sh
|
soarlab/jdoop-wrapper
|
spark/start-spark-all.sh
|
Shell
|
agpl-3.0
| 1,117 |
<% if (config.nvidia rescue false) -%>
sed -i "s/GRUB_CMDLINE_LINUX=\"\(.*\)\"/GRUB_CMDLINE_LINUX=\"\1 rdblacklist=nouveau blacklist=nouveau\"/" /etc/default/grub
grub2-mkconfig > /etc/grub2.cfg
mkinitrd --force /boot/initramfs-`uname -r`.img `uname -r`
echo "blacklist nouveau" >> /etc/modprobe.d/blacklist.conf
rmmod -v nouveau
yum -y groupinstall "Development Tools"
mkdir -p /var/lib/firstrun/scripts/
cat << EOF > /var/lib/firstrun/scripts/nvidia.bash
URL=http://<%= domain.hostip %>/installers/
curl \$URL/nvidia.run > /tmp/nvidia.run
sh /tmp/nvidia.run -a -q -s --kernel-source-path /usr/src/kernels/*
EOF
<% end -%>
|
alces-software/knowledgebase
|
epel/7/nvidia/nvidia.sh
|
Shell
|
agpl-3.0
| 627 |
#! /bin/bash
#
# Clone simpletest from SVN/SF. I've been happy enough with git to never want
# to go back to SVN/CVS/RCS. Besides, GitHub graphs are a boon by themselves;
# reason enough for me to spend this extra effort!
#
git svn clone https://simpletest.svn.sourceforge.net/svnroot/simpletest/simpletest -T trunk -b branches -t tags
|
GerHobbelt/simpletest
|
utils/git-svn-clone-simpletest.sh
|
Shell
|
lgpl-2.1
| 342 |
#!/bin/sh
# changed by Oliver Cordes: 2016-10-24
INFILE=$HOME/git/arctic/demo/06u15042j_drkA.fits
OUTFILE=$HOME/git/arctic/demo/testA.fits
export PATH=$HOME/git/arctic/build:$HOME/git/arctic:$PATH
args=""
while [ -n "$1" ]; do
echo $1
case $1 in
-i) shift; INFILE=$1;shift;;
-o) shift; OUTFILE=$1;shift;;
*) args="$args $1";shift;;
esac
done
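# Example invocation (illustrative paths); any unrecognized flags are
# collected in $args and passed through to arctic unchanged:
#   ./arctic_test_express.sh -i /path/to/input.fits -o /path/to/output.fits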
arctic -d 10 --out_float -c default_express5_niter6.cte ${INFILE} ${OUTFILE} -m ACS $args
|
ocordes/arctic
|
aifa/arctic_test_express.sh
|
Shell
|
lgpl-3.0
| 455 |
rm -rf build
rm -rf ndsi.egg-info
rm -rf ndsi/ndsi.egg-info
rm -f ndsi/*.cpp
rm -f ndsi/*.so
pip3 install -e . --user --force
|
pupil-labs/pyndsi
|
clean-install.sh
|
Shell
|
lgpl-3.0
| 121 |
#!/bin/sh
# base16-shell (https://github.com/chriskempson/base16-shell)
# Base16 Shell template by Chris Kempson (http://chriskempson.com)
# Equilibrium Light scheme by Carlo Abelli
color00="f5/f0/e7" # Base 00 - Black
color01="d0/20/23" # Base 08 - Red
color02="63/72/00" # Base 0B - Green
color03="9d/6f/00" # Base 0A - Yellow
color04="00/73/b5" # Base 0D - Blue
color05="4e/66/b6" # Base 0E - Magenta
color06="00/7a/72" # Base 0C - Cyan
color07="43/47/4e" # Base 05 - White
color08="73/77/7f" # Base 03 - Bright Black
color09=$color01 # Base 08 - Bright Red
color10=$color02 # Base 0B - Bright Green
color11=$color03 # Base 0A - Bright Yellow
color12=$color04 # Base 0D - Bright Blue
color13=$color05 # Base 0E - Bright Magenta
color14=$color06 # Base 0C - Bright Cyan
color15="18/1c/22" # Base 07 - Bright White
color16="bf/3e/05" # Base 09
color17="c4/27/75" # Base 0F
color18="e7/e2/d9" # Base 01
color19="d8/d4/cb" # Base 02
color20="5a/5f/66" # Base 04
color21="2c/31/38" # Base 06
color_foreground="43/47/4e" # Base 05
color_background="f5/f0/e7" # Base 00
if [ -n "$TMUX" ]; then
# Tell tmux to pass the escape sequences through
# (Source: http://permalink.gmane.org/gmane.comp.terminal-emulators.tmux.user/1324)
put_template() { printf '\033Ptmux;\033\033]4;%d;rgb:%s\033\033\\\033\\' $@; }
put_template_var() { printf '\033Ptmux;\033\033]%d;rgb:%s\033\033\\\033\\' $@; }
put_template_custom() { printf '\033Ptmux;\033\033]%s%s\033\033\\\033\\' $@; }
elif [ "${TERM%%[-.]*}" = "screen" ]; then
# GNU screen (screen, screen-256color, screen-256color-bce)
put_template() { printf '\033P\033]4;%d;rgb:%s\007\033\\' $@; }
put_template_var() { printf '\033P\033]%d;rgb:%s\007\033\\' $@; }
put_template_custom() { printf '\033P\033]%s%s\007\033\\' $@; }
elif [ "${TERM%%-*}" = "linux" ]; then
put_template() { [ $1 -lt 16 ] && printf "\e]P%x%s" $1 $(echo $2 | sed 's/\///g'); }
put_template_var() { true; }
put_template_custom() { true; }
else
put_template() { printf '\033]4;%d;rgb:%s\033\\' $@; }
put_template_var() { printf '\033]%d;rgb:%s\033\\' $@; }
put_template_custom() { printf '\033]%s%s\033\\' $@; }
fi
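# e.g. outside tmux/screen, `put_template 1 $color01` emits the OSC 4 sequence
# ESC ] 4 ; 1 ; rgb:d0/20/23 ESC \ to remap palette slot 1 (red).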
# 16 color space
put_template 0 $color00
put_template 1 $color01
put_template 2 $color02
put_template 3 $color03
put_template 4 $color04
put_template 5 $color05
put_template 6 $color06
put_template 7 $color07
put_template 8 $color08
put_template 9 $color09
put_template 10 $color10
put_template 11 $color11
put_template 12 $color12
put_template 13 $color13
put_template 14 $color14
put_template 15 $color15
# 256 color space
put_template 16 $color16
put_template 17 $color17
put_template 18 $color18
put_template 19 $color19
put_template 20 $color20
put_template 21 $color21
# foreground / background / cursor color
if [ -n "$ITERM_SESSION_ID" ]; then
# iTerm2 proprietary escape codes
put_template_custom Pg 43474e # foreground
put_template_custom Ph f5f0e7 # background
put_template_custom Pi 43474e # bold color
put_template_custom Pj d8d4cb # selection color
put_template_custom Pk 43474e # selected text color
put_template_custom Pl 43474e # cursor
put_template_custom Pm f5f0e7 # cursor text
else
put_template_var 10 $color_foreground
if [ "$BASE16_SHELL_SET_BACKGROUND" != false ]; then
put_template_var 11 $color_background
if [ "${TERM%%-*}" = "rxvt" ]; then
put_template_var 708 $color_background # internal border (rxvt)
fi
fi
put_template_custom 12 ";7" # cursor (reverse video)
fi
# clean up
unset -f put_template
unset -f put_template_var
unset -f put_template_custom
unset color00
unset color01
unset color02
unset color03
unset color04
unset color05
unset color06
unset color07
unset color08
unset color09
unset color10
unset color11
unset color12
unset color13
unset color14
unset color15
unset color16
unset color17
unset color18
unset color19
unset color20
unset color21
unset color_foreground
unset color_background
|
wincent/wincent
|
aspects/dotfiles/files/.zsh/colors/base16-equilibrium-light.sh
|
Shell
|
unlicense
| 3,943 |
./img2py -i icons/logo.jpg iwLogo.py
|
YannChemin/wxGIPE
|
makeLogo.sh
|
Shell
|
unlicense
| 37 |
#!/bin/sh
cd ..
jar -cvfm MinecraftMOTD.jar manifest.txt org/minecraft/server/motd/*.class
|
nwolfe/minecraft-motd
|
bin/build-jar.sh
|
Shell
|
unlicense
| 91 |
#!/bin/bash
# As of Dec 2014, when i2s LIMB software is set to deliver grayscale output,
# it creates TIF files that are 8 bits per sample and 2 samples per pixel.
# For submission to HathiTrust, we need to convert the native LIMB output to
# a grayscale TIF that is 8 bits per sample and 1 sample per pixel.
basedir="/mnt/lsdi2/ftp"
volume=$1
bps=`exiftool ${basedir}/${volume}/TIFF/00000001.tif | grep "Bits Per Sample" | awk -F': ' '{ print $2 }'`
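# e.g. native LIMB grayscale output makes exiftool report "Bits Per Sample : 8 8",
# in which case bps="8 8" and the volume path is printed below.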
if [ "$bps" == "8 8" ]; then
echo "${basedir}/${volume}"
echo $bps
fi
|
jkylefenton/digitization_scripts
|
book_digitization/tiff_2chan2grayscale.sh
|
Shell
|
unlicense
| 527 |
#!/bin/bash
./configure --prefix=$PREFIX \
--disable-debug \
--disable-dependency-tracking \
--with-jasper=$PREFIX \
--with-grib_api=$PREFIX \
--with-hdf5=$PREFIX \
--with-netcdf=$PREFIX \
--with-proj=$PREFIX
make
make install
|
bird-house/conda-recipes
|
_unused/cdo/build.sh
|
Shell
|
apache-2.0
| 257 |
#!/bin/bash
# This script assumes a clean install of Ubuntu 14.04,
# it installs all stuff necessary to develop and deploy
# a Juju charm containing the 7 Days to Die server.
# This script assumes root.
set -u
echo "Install updates, Git and Juju..."
apt-get update > /dev/null
apt-get upgrade > /dev/null
add-apt-repository ppa:juju/stable
apt-get install -y git juju-core juju-local > /dev/null
echo "Download and install Go."
wget -q https://storage.googleapis.com/golang/go1.5.3.linux-amd64.tar.gz
tar -C /usr/local -xzf go1.5.3.linux-amd64.tar.gz
export PATH=$PATH:/usr/local/go/bin
echo -e '\n\nexport PATH=$PATH:/usr/local/go/bin' >> ~/.bashrc
# Setup workspace in home directory, choose the vagrant user
# as this is the default user Vagrant uses to log in with.
echo "Setup workspace..."
homeDir=/home/vagrant
mkdir $homeDir/go $homeDir/charms
chown vagrant:vagrant $homeDir/go $homeDir/charms
cat >> $homeDir/.bashrc <<'EOF'
export GOPATH=$HOME/go
export GOROOT=/usr/local/go
export JUJU_REPOSITORY=$HOME/charms
export PATH=$PATH:/usr/local/go/bin:$HOME/go/bin
EOF
# Juju refuses to run as root (which we are now). Therefore
# I continue as the vagrant user. It also allows us to go on
# directly after logging in with 'vagrant ssh'
sudo -u vagrant -i <<'JUJU_SH'
# configure Juju
juju generate-config
juju switch local
juju bootstrap
echo -n "Download and compile charm..."
# setup environment
export GOPATH=$HOME/go
export GOROOT=/usr/local/go
export JUJU_REPOSITORY=$HOME/charms
export PATH=$PATH:$GOROOT/bin:$GOPATH/bin
# build and deploy Juju Charm
go get github.com/juju/gocharm/cmd/gocharm
go get github.com/mever/sevendays/charms/sevendays
gocharm github.com/mever/sevendays/charms/sevendays
juju deploy local:trusty/sevendays
# Wait until we get the ip from the sevendays unit.
echo -n "Deploying the sevendays service... (this takes a while)"
while [ -z "$(juju status sevendays/0 | grep public-address | grep -Po '[0-9]{1,3}')" ]; do sleep 10; done
JUJU_SH
# Vagrant uses eth0 as it's primary interface and
# eth1 as bridged interface. This may be changed
# if that assumption is false.
BRIDGE_IF=eth1
# Let's configure the Linux firewall to allow all
# traffic on the bridge to flow right into our Juju unit
echo -e "#!/bin/bash\n\nBRIDGE_IF=${BRIDGE_IF}\n\n" > /etc/network/if-up.d/iptables
cat >> /etc/network/if-up.d/iptables <<'IPCONF'
PATH=/sbin:/bin:/usr/sbin:/usr/bin
if [ "$IFACE" == "$BRIDGE_IF" ]; then
export HOME=/home/vagrant
IP=$(ifconfig "$IFACE" | /usr/bin/awk '/inet addr/{print substr($2,6)}')
UNIT_IP=$(juju status sevendays/0 | grep public-address | grep -Po '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}')
if [ ! -z "$UNIT_IP" ]; then
iptables -t nat -C PREROUTING -d "$IP"/32 -j DNAT --to-destination "$UNIT_IP"
if [ "$?" == 1 ]; then
iptables -t nat -A PREROUTING -d "$IP"/32 -j DNAT --to-destination "$UNIT_IP"
fi
fi
fi
IPCONF
chmod +x /etc/network/if-up.d/iptables
echo -n "Restarting the network with new firewall configuration..."
ifdown -a 2> /dev/null; ifup -a 2> /dev/null
BRIDGE_IP=$(ifconfig "$BRIDGE_IF" | /usr/bin/awk '/inet addr/{print substr($2,6)}')
echo "|"
echo "| All done. Enter '${BRIDGE_IP}' in your web browser!"
echo "|"
|
mever/sevendays
|
vagrant-inst.sh
|
Shell
|
apache-2.0
| 3,264 |
#!/bin/sh
set -e
echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
SWIFT_STDLIB_PATH="${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}"
install_framework()
{
if [ -r "${BUILT_PRODUCTS_DIR}/$1" ]; then
local source="${BUILT_PRODUCTS_DIR}/$1"
elif [ -r "${BUILT_PRODUCTS_DIR}/$(basename "$1")" ]; then
local source="${BUILT_PRODUCTS_DIR}/$(basename "$1")"
elif [ -r "$1" ]; then
local source="$1"
fi
local destination="${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
if [ -L "${source}" ]; then
echo "Symlinked..."
source="$(readlink "${source}")"
fi
  # use filter instead of exclude so missing patterns don't throw errors
echo "rsync -av --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${destination}\""
rsync -av --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${destination}"
local basename
basename="$(basename -s .framework "$1")"
binary="${destination}/${basename}.framework/${basename}"
if ! [ -r "$binary" ]; then
binary="${destination}/${basename}"
fi
# Strip invalid architectures so "fat" simulator / device frameworks work on device
if [[ "$(file "$binary")" == *"dynamically linked shared library"* ]]; then
strip_invalid_archs "$binary"
fi
# Resign the code if required by the build settings to avoid unstable apps
code_sign_if_enabled "${destination}/$(basename "$1")"
# Embed linked Swift runtime libraries. No longer necessary as of Xcode 7.
if [ "${XCODE_VERSION_MAJOR}" -lt 7 ]; then
local swift_runtime_libs
swift_runtime_libs=$(xcrun otool -LX "$binary" | grep --color=never @rpath/libswift | sed -E s/@rpath\\/\(.+dylib\).*/\\1/g | uniq -u && exit ${PIPESTATUS[0]})
for lib in $swift_runtime_libs; do
echo "rsync -auv \"${SWIFT_STDLIB_PATH}/${lib}\" \"${destination}\""
rsync -auv "${SWIFT_STDLIB_PATH}/${lib}" "${destination}"
code_sign_if_enabled "${destination}/${lib}"
done
fi
}
# Signs a framework with the provided identity
code_sign_if_enabled() {
if [ -n "${EXPANDED_CODE_SIGN_IDENTITY}" -a "${CODE_SIGNING_REQUIRED}" != "NO" -a "${CODE_SIGNING_ALLOWED}" != "NO" ]; then
    # Use the current code_sign_identity
echo "Code Signing $1 with Identity ${EXPANDED_CODE_SIGN_IDENTITY_NAME}"
echo "/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} ${OTHER_CODE_SIGN_FLAGS} --preserve-metadata=identifier,entitlements \"$1\""
/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} ${OTHER_CODE_SIGN_FLAGS} --preserve-metadata=identifier,entitlements "$1"
fi
}
# Strip invalid architectures
strip_invalid_archs() {
binary="$1"
# Get architectures for current file
archs="$(lipo -info "$binary" | rev | cut -d ':' -f1 | rev)"
stripped=""
for arch in $archs; do
if ! [[ "${VALID_ARCHS}" == *"$arch"* ]]; then
# Strip non-valid architectures in-place
lipo -remove "$arch" -output "$binary" "$binary" || exit 1
stripped="$stripped $arch"
fi
done
if [[ "$stripped" ]]; then
echo "Stripped $binary of architectures:$stripped"
fi
}
if [[ "$CONFIGURATION" == "Debug" ]]; then
install_framework "$BUILT_PRODUCTS_DIR/Charts/Charts.framework"
install_framework "$BUILT_PRODUCTS_DIR/KeychainSwift/KeychainSwift.framework"
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
install_framework "$BUILT_PRODUCTS_DIR/Charts/Charts.framework"
install_framework "$BUILT_PRODUCTS_DIR/KeychainSwift/KeychainSwift.framework"
fi
|
jfosterdavis/FlashcardHero
|
Pods/Target Support Files/Pods-FlashcardHero/Pods-FlashcardHero-frameworks.sh
|
Shell
|
apache-2.0
| 3,761 |
#!/bin/bash
set -o errexit -o nounset
TOP_DIR=$(cd "$(dirname "$0")/.." && pwd)
source "$TOP_DIR/config/localrc"
source "$TOP_DIR/config/paths"
source "$CONFIG_DIR/deploy.osbash"
source "$OSBASH_LIB_DIR/functions-host.sh"
source "$OSBASH_LIB_DIR/$PROVIDER-functions.sh"
OSBASH=exec_cmd
function usage {
# Setting to empty string selects latest (current snapshot)
echo "Usage: $0 {-l|-c|-t <SNAP>} [-s]"
echo ""
echo "-h Help"
echo "-l List snapshots for node VMs"
echo "-c Restore cluster node VMs to current snapshot"
echo "-t SNAP Restore cluster to target snapshot"
echo "-s Start each node VMs after restoring it"
exit
}
function list_snapshots {
for vm_name in $(script_cfg_get_nodenames); do
if ! vm_exists "$vm_name"; then
echo "VM $vm_name does not exist. Skipping..."
continue
fi
echo -e "Snapshot list for $vm_name node:"
vm_snapshot_list_tree "$vm_name"
echo
done
exit 0
}
while getopts :chlst: opt; do
case $opt in
c)
CURRENT=yes
;;
h)
usage
;;
l)
list_snapshots
;;
s)
START=yes
;;
t)
TARGET_SNAPSHOT=$OPTARG
;;
:)
echo "Error: -$OPTARG needs argument"
;;
?)
echo "Error: invalid option -$OPTARG"
echo
usage
;;
esac
done
# Remove processed options from arguments
shift $(( OPTIND - 1 ));
if [ $# -ne 0 ]; then
usage
elif [ -z "${TARGET_SNAPSHOT:-}" -a -z "${CURRENT:-""}" ]; then
echo
echo "Error: no target snapshot given."
echo
usage
elif [ -n "${TARGET_SNAPSHOT:-}" -a -n "${CURRENT:-""}" ]; then
echo
echo "Error: conflicting options: target snapshot name and -c."
echo
usage
fi
# Find target_snapshot in scripts_cfg and set global *_SNAPSHOT variables
# to the correct snapshot name for each node (to allow building from there)
function set_snapshot_vars {
local target_snapshot=$1
local found=0
local config_name=$(get_distro_name "$DISTRO")_cluster
local scripts_cfg="$CONFIG_DIR/scripts.$config_name"
while read -r line; do
if [[ $line =~ ^cmd\ snapshot.*-n\ ([^ ]*)\ (.*) ]]; then
# Node name (e.g. controller)
node=${BASH_REMATCH[1]}
# Snapshot name (e.g. keystone_installed)
snapshot=${BASH_REMATCH[2]}
# Global variable name (e.g. CONTROLLER_SNAPSHOT)
# Can't use ${node,,} (OS X bash version is only 3.2)
var_name=$(echo "$node"|tr "a-z" "A-Z")_SNAPSHOT
if [ "$snapshot" = "$target_snapshot" ]; then
# Can't use associative arrays (OS X bash version is only 3.2)
eval "${var_name}=$snapshot"
found=1
elif [ $found -eq 0 ]; then
eval "${var_name}=$snapshot"
fi
fi
done < "$scripts_cfg"
if [ $found -eq 0 ]; then
echo "ERROR: snapshot '$target_snapshot' not found"
exit 1
fi
}
if [ -n "${TARGET_SNAPSHOT:-}" ]; then
set_snapshot_vars "$TARGET_SNAPSHOT"
fi
for vm_name in $(script_cfg_get_nodenames); do
if ! vm_exists "$vm_name"; then
echo "VM $vm_name does not exist. Skipping..."
continue
fi
vm_power_off "$vm_name"
vm_wait_for_shutdown "$vm_name"
if [ "${CURRENT:-""}" = "yes" ]; then
vm_snapshot_restore_current "$vm_name"
if [ "${START:-""}" = "yes" ]; then
vm_boot "$vm_name"
fi
else
# Global variable name (e.g. CONTROLLER_SNAPSHOT)
# (use tr due to OS X bash limitation)
var_name=$(echo "$vm_name"|tr "a-z" "A-Z")_SNAPSHOT
if [ -z "${!var_name:=""}" ]; then
vm_delete "$vm_name"
else
vm_snapshot_restore "$vm_name" "${!var_name}"
if [ "${START:-""}" = "yes" ]; then
vm_boot "$vm_name"
fi
fi
fi
done
|
openstack/training-labs
|
labs/osbash/tools/restore-cluster.sh
|
Shell
|
apache-2.0
| 4,129 |
#!/bin/bash
set -x
set -e
GO_PATH=$(go env GOPATH)
GO_BIN=$GO_PATH/bin
GO=$GO_PATH/bin/go
PATH=$PATH:$GO_BIN:$(npm bin):/usr/local/bin/:$HOME/.cargo/bin
### UBUNTU BIONIC ###
echo "deb https://packages.le-vert.net/tensorflow/ubuntu bionic main" | sudo tee -a /etc/apt/sources.list
wget -O - https://packages.le-vert.net/packages.le-vert.net.gpg.key | sudo apt-key add -
# install all the deps
sudo apt update
sudo apt install -y protobuf-compiler
sudo apt install -y --no-install-recommends python3 python3-pip python3-setuptools python3-dev python3-grpcio python3-protobuf
sudo apt install -y --no-install-recommends nodejs npm
sudo apt install -y --no-install-recommends ruby ruby-dev
sudo apt install -y --no-install-recommends git-all
sudo gem update --system
sudo gem install grpc grpc-tools
pip3 install --no-cache-dir grpcio-tools
npm i grpc-tools
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
cargo install protobuf-codegen grpc-compiler
# build proto related code
pushd cmd/protoc-gen-client && go get ./... && popd
pushd cmd/protoc-gen-micro && go get ./... && popd
go get github.com/golang/protobuf/[email protected]
# delete the existing sdk directory
rm -rf client/sdk
# generate the clients
PATH=$PATH:$GO_BIN:$(npm bin):/usr/local/bin/:$HOME/.cargo/bin protoc-gen-client -srcdir proto/ -dstdir client/sdk/ -langs go,python,java,ruby,node,rust
# remove node garbage
rm -rf node_modules/ package-lock.json
|
micro/micro
|
scripts/generate-clients.sh
|
Shell
|
apache-2.0
| 1,453 |
#!/bin/bash
# script to test modules
NGINX_HOME=/home/carbyn/test/nginx
NGINX_SRC=/home/carbyn/test/nginx-source/nginx-1.9.14
MODULES_SRC=/home/carbyn/dev/nginx/modules
# the module to be tested
# module=ngx_http_shithole_module
# module=ngx_http_fuckyou_module
module=ngx_http_yak_module
if [ -f $NGINX_HOME/logs/nginx.pid ]; then
echo "gonna to stop nginx"
$NGINX_HOME/sbin/nginx -s quit
fi
cd $NGINX_SRC \
&& ./configure --prefix=$NGINX_HOME --add-module=$MODULES_SRC/$module \
&& make && make install \
&& cp $MODULES_SRC/${module}_nginx.conf $NGINX_HOME/conf/nginx.conf \
&& $NGINX_HOME/sbin/nginx
|
Carbyn/nginx
|
build.sh
|
Shell
|
apache-2.0
| 631 |
# create tex
pandoc --template technical_specification_working_draft.latex -H header.tex --number-sections --table-of-contents parallelism.*.md -o parallelism.tex
# annotate headings with labels
perl -i -p00e 's/\\section{(.*)}\\label{(.*)}/\\Sec0[\2]{\1}/s' parallelism.tex
perl -i -p00e 's/\\subsection{(.*)}\\label{(.*)}/\\Sec1[\2]{\1}/s' parallelism.tex
perl -i -p00e 's/\\subsubsection{(.*)}\\label{(.*)}/\\Sec2[\2]{\1}/s' parallelism.tex
perl -i -p00e 's/\\paragraph{(.*)}\\label{(.*)}/\\Sec3[\2]{\1}/s' parallelism.tex
perl -i -p00e 's/\\subparagraph{(.*)}\\label{(.*)}/\\Sec4[\2]{\1}/s' parallelism.tex
# create pdf
pdflatex parallelism.tex
|
n3554/n3554
|
technical_specification/build.sh
|
Shell
|
apache-2.0
| 652 |
#!/bin/bash
# This script checks out a PR branch and then starts a management server with that code.
# Next, you can run Marvin to setup whatever you need to verify the PR.
function usage {
printf "Usage: %s: -m marvinCfg -p <pr id> [ -b <branch: default to master> -s <skip compile> -t <run tests> -f <test file to run> -T <mvn -T flag> ]\n" $(basename $0) >&2
}
# Options
skip=
run_tests=
test_file=
compile_threads=
while getopts 'm:p:T:b:f:st' OPTION
do
case $OPTION in
m) marvinCfg="$OPTARG"
;;
p) prId="$OPTARG"
;;
s) skip="-s"
;;
t) run_tests="-t"
;;
f) test_file="-f $OPTARG"
;;
T) compile_threads="-T $OPTARG"
;;
b) branch_name="$OPTARG"
;;
esac
done
echo "Received arguments:"
echo "skip = ${skip}"
echo "run_tests = ${run_tests}"
echo "test_file = ${test_file}"
echo "marvinCfg = ${marvinCfg}"
echo "prId = ${prId}"
echo "compile_threads = ${compile_threads}"
echo "branch_name = ${branch_name}"
# Check if a marvin dc file was specified
if [ -z ${marvinCfg} ]; then
echo "No Marvin config specified. Quiting."
usage
exit 1
else
echo "Using Marvin config '${marvinCfg}'."
fi
if [ ! -f "${marvinCfg}" ]; then
echo "Supplied Marvin config not found!"
exit 1
fi
# Default to master branch
if [ -z "${branch_name}" ]; then
branch_name="master"
echo "branch_name = ${branch_name}"
fi
echo "Started!"
date
# Check if a pull request id was specified
if [ -z ${prId} ]; then
echo "No PR number specified. Quiting."
usage
exit 1
fi
# Prepare the environment and check out the source
/data/shared/helper_scripts/cloudstack/prepare_cloudstack_compile.sh
# Go to the source
cd /data/git/${HOSTNAME}/cloudstack
git pull
git reset --hard
git checkout ${branch_name}
git branch --set-upstream-to=origin/${branch_name} ${branch_name}
git pull
git branch -D try/${prId}
git branch try/${prId}
git checkout try/${prId}
# Get the PR
tools/git/git-pr ${prId} --force
if [ $? -gt 0 ]; then
echo "ERROR: Merge failed!"
exit 1
fi
# Build, run and test it
/data/shared/helper_scripts/cloudstack/build_run_deploy_test.sh -m ${marvinCfg} ${run_tests} ${test_file} ${skip} ${compile_threads}
|
DaanHoogland/MCT-shared
|
helper_scripts/cloudstack/check-pr.sh
|
Shell
|
apache-2.0
| 2,340 |
#!/bin/bash
hostname=`hostname -f`
cur_time=`date`
echo "Hostname $hostname"
echo "Time: $cur_time"
echo "OSG Site: $OSG_SITE_NAME"
echo "GWMS Entry Name: $GLIDEIN_Entry_Name"
echo "GWMS Resource Name: $GLIDEIN_ResourceName"
source /cvmfs/oasis.opensciencegrid.org/osg/modules/lmod/5.6.2/init/bash
module load curl/7.37.1
curl -O http://stash.osgconnect.net/keys/cern.ch.pub -L --retry 5
output=`sha1sum cern.ch.pub | cut -f 1 -d' '`
if [ "$output" != "5b83bedef4c7ba38520d7e1b764f0cbc28527fb9" ];
then
echo "Error! Difference in outputs:"
echo $output
exit 1
fi
|
OSGConnect/modulefiles
|
tests/curl/check_curl.sh
|
Shell
|
apache-2.0
| 574 |
#!/bin/bash
# Author: yeho <lj2007331 AT gmail.com>
# BLOG: https://linuxeye.com
#
# Notes: OneinStack for CentOS/RedHat 7+ Debian 8+ and Ubuntu 16+
#
# Project home page:
# https://oneinstack.com
# https://github.com/oneinstack/oneinstack
# Check if user is root
[ $(id -u) != "0" ] && { echo "${CFAILURE}Error: You must be root to run this script${CEND}"; exit 1; }
oneinstack_dir=$(dirname "`readlink -f $0`")
pushd ${oneinstack_dir}/tools > /dev/null
. ../options.conf
[ ! -e "${backup_dir}" ] && mkdir -p ${backup_dir}
DB_Local_BK() {
for D in `echo ${db_name} | tr ',' ' '`
do
./db_bk.sh ${D}
done
}
DB_Remote_BK() {
for D in `echo ${db_name} | tr ',' ' '`
do
./db_bk.sh ${D}
DB_GREP="DB_${D}_`date +%Y%m%d`"
DB_FILE=`ls -lrt ${backup_dir} | grep ${DB_GREP} | tail -1 | awk '{print $NF}'`
echo "file:::${backup_dir}/${DB_FILE} ${backup_dir} push" >> config_backup.txt
echo "com:::[ -e "${backup_dir}/${DB_FILE}" ] && rm -rf ${backup_dir}/DB_${D}_$(date +%Y%m%d --date="${expired_days} days ago")_*.tgz" >> config_backup.txt
done
}
DB_OSS_BK() {
for D in `echo ${db_name} | tr ',' ' '`
do
./db_bk.sh ${D}
DB_GREP="DB_${D}_`date +%Y%m%d`"
DB_FILE=`ls -lrt ${backup_dir} | grep ${DB_GREP} | tail -1 | awk '{print $NF}'`
/usr/local/bin/ossutil cp -f ${backup_dir}/${DB_FILE} oss://${oss_bucket}/`date +%F`/${DB_FILE}
if [ $? -eq 0 ]; then
/usr/local/bin/ossutil rm -rf oss://${oss_bucket}/`date +%F --date="${expired_days} days ago"`/
[ -z "`echo ${backup_destination} | grep -ow 'local'`" ] && rm -f ${backup_dir}/${DB_FILE}
fi
done
}
DB_COS_BK() {
for D in `echo ${db_name} | tr ',' ' '`
do
./db_bk.sh ${D}
DB_GREP="DB_${D}_`date +%Y%m%d`"
DB_FILE=`ls -lrt ${backup_dir} | grep ${DB_GREP} | tail -1 | awk '{print $NF}'`
${python_install_dir}/bin/coscmd upload ${backup_dir}/${DB_FILE} /`date +%F`/${DB_FILE}
if [ $? -eq 0 ]; then
${python_install_dir}/bin/coscmd delete -r -f `date +%F --date="${expired_days} days ago"` > /dev/null 2>&1
[ -z "`echo ${backup_destination} | grep -ow 'local'`" ] && rm -f ${backup_dir}/${DB_FILE}
fi
done
}
DB_UPYUN_BK() {
for D in `echo ${db_name} | tr ',' ' '`
do
./db_bk.sh ${D}
DB_GREP="DB_${D}_`date +%Y%m%d`"
DB_FILE=`ls -lrt ${backup_dir} | grep ${DB_GREP} | tail -1 | awk '{print $NF}'`
/usr/local/bin/upx put ${backup_dir}/${DB_FILE} /`date +%F`/${DB_FILE}
if [ $? -eq 0 ]; then
/usr/local/bin/upx rm -a `date +%F --date="${expired_days} days ago"` > /dev/null 2>&1
[ -z "`echo ${backup_destination} | grep -ow 'local'`" ] && rm -f ${backup_dir}/${DB_FILE}
fi
done
}
DB_QINIU_BK() {
for D in `echo ${db_name} | tr ',' ' '`
do
./db_bk.sh ${D}
DB_GREP="DB_${D}_`date +%Y%m%d`"
DB_FILE=`ls -lrt ${backup_dir} | grep ${DB_GREP} | tail -1 | awk '{print $NF}'`
/usr/local/bin/qshell rput ${qiniu_bucket} /`date +%F`/${DB_FILE} ${backup_dir}/${DB_FILE}
if [ $? -eq 0 ]; then
/usr/local/bin/qshell listbucket ${qiniu_bucket} /`date +%F --date="${expired_days} days ago"` /tmp/qiniu.txt > /dev/null 2>&1
/usr/local/bin/qshell batchdelete -force ${qiniu_bucket} /tmp/qiniu.txt > /dev/null 2>&1
[ -z "`echo ${backup_destination} | grep -ow 'local'`" ] && rm -f ${backup_dir}/${DB_FILE}
rm -f /tmp/qiniu.txt
fi
done
}
DB_S3_BK() {
for D in `echo ${db_name} | tr ',' ' '`
do
./db_bk.sh ${D}
DB_GREP="DB_${D}_`date +%Y%m%d`"
DB_FILE=`ls -lrt ${backup_dir} | grep ${DB_GREP} | tail -1 | awk '{print $NF}'`
${python_install_dir}/bin/s3cmd put ${backup_dir}/${DB_FILE} s3://${s3_bucket}/`date +%F`/${DB_FILE}
if [ $? -eq 0 ]; then
${python_install_dir}/bin/s3cmd rm -r s3://${s3_bucket}/`date +%F --date="${expired_days} days ago"` > /dev/null 2>&1
[ -z "`echo ${backup_destination} | grep -ow 'local'`" ] && rm -f ${backup_dir}/${DB_FILE}
fi
done
}
DB_DROPBOX_BK() {
for D in `echo ${db_name} | tr ',' ' '`
do
./db_bk.sh ${D}
DB_GREP="DB_${D}_`date +%Y%m%d`"
DB_FILE=`ls -lrt ${backup_dir} | grep ${DB_GREP} | tail -1 | awk '{print $NF}'`
/usr/local/bin/dbxcli put ${backup_dir}/${DB_FILE} `date +%F`/${DB_FILE}
if [ $? -eq 0 ]; then
/usr/local/bin/dbxcli rm -f `date +%F --date="${expired_days} days ago"` > /dev/null 2>&1
[ -z "`echo ${backup_destination} | grep -ow 'local'`" ] && rm -f ${backup_dir}/${DB_FILE}
fi
done
}
WEB_LOCAL_BK() {
for W in `echo ${website_name} | tr ',' ' '`
do
./website_bk.sh $W
done
}
WEB_Remote_BK() {
for W in `echo ${website_name} | tr ',' ' '`
do
if [ `du -sm "${wwwroot_dir}/${W}" | awk '{print $1}'` -lt 2048 ]; then
./website_bk.sh $W
Web_GREP="Web_${W}_`date +%Y%m%d`"
Web_FILE=`ls -lrt ${backup_dir} | grep ${Web_GREP} | tail -1 | awk '{print $NF}'`
echo "file:::${backup_dir}/${Web_FILE} ${backup_dir} push" >> config_backup.txt
echo "com:::[ -e "${backup_dir}/${Web_FILE}" ] && rm -rf ${backup_dir}/Web_${W}_$(date +%Y%m%d --date="${expired_days} days ago")_*.tgz" >> config_backup.txt
else
echo "file:::${wwwroot_dir}/$W ${backup_dir} push" >> config_backup.txt
fi
done
}
WEB_OSS_BK() {
for W in `echo $website_name | tr ',' ' '`
do
[ ! -e "${wwwroot_dir}/${WebSite}" ] && { echo "[${wwwroot_dir}/${WebSite}] not exist"; break; }
PUSH_FILE="${backup_dir}/Web_${W}_$(date +%Y%m%d_%H).tgz"
if [ ! -e "${PUSH_FILE}" ]; then
pushd ${wwwroot_dir} > /dev/null
tar czf ${PUSH_FILE} ./$W
popd > /dev/null
fi
/usr/local/bin/ossutil cp -f ${PUSH_FILE} oss://${oss_bucket}/`date +%F`/${PUSH_FILE##*/}
if [ $? -eq 0 ]; then
/usr/local/bin/ossutil rm -rf oss://${oss_bucket}/`date +%F --date="${expired_days} days ago"`/
[ -z "`echo ${backup_destination} | grep -ow 'local'`" ] && rm -f ${PUSH_FILE}
fi
done
}
WEB_COS_BK() {
for W in `echo ${website_name} | tr ',' ' '`
do
[ ! -e "${wwwroot_dir}/${WebSite}" ] && { echo "[${wwwroot_dir}/${WebSite}] not exist"; break; }
PUSH_FILE="${backup_dir}/Web_${W}_$(date +%Y%m%d_%H).tgz"
if [ ! -e "${PUSH_FILE}" ]; then
pushd ${wwwroot_dir} > /dev/null
tar czf ${PUSH_FILE} ./$W
popd > /dev/null
fi
${python_install_dir}/bin/coscmd upload ${PUSH_FILE} /`date +%F`/${PUSH_FILE##*/}
if [ $? -eq 0 ]; then
${python_install_dir}/bin/coscmd delete -r -f `date +%F --date="${expired_days} days ago"` > /dev/null 2>&1
[ -z "`echo ${backup_destination} | grep -ow 'local'`" ] && rm -f ${PUSH_FILE}
fi
done
}
WEB_UPYUN_BK() {
for W in `echo ${website_name} | tr ',' ' '`
do
[ ! -e "${wwwroot_dir}/${WebSite}" ] && { echo "[${wwwroot_dir}/${WebSite}] not exist"; break; }
[ ! -e "${backup_dir}" ] && mkdir -p ${backup_dir}
PUSH_FILE="${backup_dir}/Web_${W}_$(date +%Y%m%d_%H).tgz"
if [ ! -e "${PUSH_FILE}" ]; then
pushd ${wwwroot_dir} > /dev/null
tar czf ${PUSH_FILE} ./$W
popd > /dev/null
fi
/usr/local/bin/upx put ${PUSH_FILE} /`date +%F`/${PUSH_FILE##*/}
if [ $? -eq 0 ]; then
/usr/local/bin/upx rm -a `date +%F --date="${expired_days} days ago"` > /dev/null 2>&1
[ -z "`echo ${backup_destination} | grep -ow 'local'`" ] && rm -f ${PUSH_FILE}
fi
done
}
WEB_QINIU_BK() {
for W in `echo ${website_name} | tr ',' ' '`
do
[ ! -e "${wwwroot_dir}/${WebSite}" ] && { echo "[${wwwroot_dir}/${WebSite}] not exist"; break; }
[ ! -e "${backup_dir}" ] && mkdir -p ${backup_dir}
PUSH_FILE="${backup_dir}/Web_${W}_$(date +%Y%m%d_%H).tgz"
if [ ! -e "${PUSH_FILE}" ]; then
pushd ${wwwroot_dir} > /dev/null
tar czf ${PUSH_FILE} ./$W
popd > /dev/null
fi
/usr/local/bin/qshell rput ${qiniu_bucket} /`date +%F`/${PUSH_FILE##*/} ${PUSH_FILE}
if [ $? -eq 0 ]; then
/usr/local/bin/qshell listbucket ${qiniu_bucket} /`date +%F --date="${expired_days} days ago"` /tmp/qiniu.txt > /dev/null 2>&1
/usr/local/bin/qshell batchdelete -force ${qiniu_bucket} /tmp/qiniu.txt > /dev/null 2>&1
[ -z "`echo ${backup_destination} | grep -ow 'local'`" ] && rm -f ${PUSH_FILE}
rm -f /tmp/qiniu.txt
fi
done
}
WEB_S3_BK() {
for W in `echo ${website_name} | tr ',' ' '`
do
[ ! -e "${wwwroot_dir}/${WebSite}" ] && { echo "[${wwwroot_dir}/${WebSite}] not exist"; break; }
[ ! -e "${backup_dir}" ] && mkdir -p ${backup_dir}
PUSH_FILE="${backup_dir}/Web_${W}_$(date +%Y%m%d_%H).tgz"
if [ ! -e "${PUSH_FILE}" ]; then
pushd ${wwwroot_dir} > /dev/null
tar czf ${PUSH_FILE} ./$W
popd > /dev/null
fi
${python_install_dir}/bin/s3cmd put ${PUSH_FILE} s3://${s3_bucket}/`date +%F`/${PUSH_FILE##*/}
if [ $? -eq 0 ]; then
${python_install_dir}/bin/s3cmd rm -r s3://${s3_bucket}/`date +%F --date="${expired_days} days ago"` > /dev/null 2>&1
[ -z "`echo ${backup_destination} | grep -ow 'local'`" ] && rm -f ${PUSH_FILE}
fi
done
}
WEB_DROPBOX_BK() {
for W in `echo ${website_name} | tr ',' ' '`
do
[ ! -e "${wwwroot_dir}/${WebSite}" ] && { echo "[${wwwroot_dir}/${WebSite}] not exist"; break; }
[ ! -e "${backup_dir}" ] && mkdir -p ${backup_dir}
PUSH_FILE="${backup_dir}/Web_${W}_$(date +%Y%m%d_%H).tgz"
if [ ! -e "${PUSH_FILE}" ]; then
pushd ${wwwroot_dir} > /dev/null
tar czf ${PUSH_FILE} ./$W
popd > /dev/null
fi
/usr/local/bin/dbxcli put ${PUSH_FILE} `date +%F`/${PUSH_FILE##*/}
if [ $? -eq 0 ]; then
/usr/local/bin/dbxcli rm -f `date +%F --date="${expired_days} days ago"` > /dev/null 2>&1
[ -z "`echo ${backup_destination} | grep -ow 'local'`" ] && rm -f ${PUSH_FILE}
fi
done
}
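# Dispatch: for each destination in the comma-separated ${backup_destination},
# run the DB and/or web helpers selected by ${backup_content}.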
for DEST in `echo ${backup_destination} | tr ',' ' '`
do
if [ "${DEST}" == 'local' ]; then
[ -n "`echo ${backup_content} | grep -ow db`" ] && DB_Local_BK
[ -n "`echo ${backup_content} | grep -ow web`" ] && WEB_LOCAL_BK
fi
if [ "${DEST}" == 'remote' ]; then
echo "com:::[ ! -e "${backup_dir}" ] && mkdir -p ${backup_dir}" > config_backup.txt
[ -n "`echo ${backup_content} | grep -ow db`" ] && DB_Remote_BK
[ -n "`echo ${backup_content} | grep -ow web`" ] && WEB_Remote_BK
./mabs.sh -c config_backup.txt -T -1 | tee -a mabs.log
fi
if [ "${DEST}" == 'oss' ]; then
[ -n "`echo ${backup_content} | grep -ow db`" ] && DB_OSS_BK
[ -n "`echo ${backup_content} | grep -ow web`" ] && WEB_OSS_BK
fi
if [ "${DEST}" == 'cos' ]; then
[ -n "`echo ${backup_content} | grep -ow db`" ] && DB_COS_BK
[ -n "`echo ${backup_content} | grep -ow web`" ] && WEB_COS_BK
fi
if [ "${DEST}" == 'upyun' ]; then
[ -n "`echo ${backup_content} | grep -ow db`" ] && DB_UPYUN_BK
[ -n "`echo ${backup_content} | grep -ow web`" ] && WEB_UPYUN_BK
fi
if [ "${DEST}" == 'qiniu' ]; then
[ -n "`echo ${backup_content} | grep -ow db`" ] && DB_QINIU_BK
[ -n "`echo ${backup_content} | grep -ow web`" ] && WEB_QINIU_BK
fi
if [ "${DEST}" == 's3' ]; then
[ -n "`echo ${backup_content} | grep -ow db`" ] && DB_S3_BK
[ -n "`echo ${backup_content} | grep -ow web`" ] && WEB_S3_BK
fi
if [ "${DEST}" == 'dropbox' ]; then
[ -n "`echo ${backup_content} | grep -ow db`" ] && DB_DROPBOX_BK
[ -n "`echo ${backup_content} | grep -ow web`" ] && WEB_DROPBOX_BK
fi
done
|
kaneawk/oneinstack
|
backup.sh
|
Shell
|
apache-2.0
| 11,347 |
#!/bin/bash
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# (c) barthel <[email protected]> https://github.com/barthel
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Replaces the version in Eclipse RCP feature.xml file
# where the entries following the pattern:
# <plugin
# id="my.artifactId"
# download-size="0"
# install-size="0"
# version="1.0.0-SNAPSHOT"
# unpack="false"/>
#
# Recursively locates all feature.xml files below the current directory.
# In each feature.xml file found, the entry matching the pattern:
# <plugin
# id="[artifactId]"
# download-size="0"
# install-size="0"
# version="[old_version]"
# unpack="false"/>
# is modified, replacing [old_version] with the given new version argument.
#
# feature.xml file example:
# -----------------
# 1. feature.xml file before modification:
# [...]
# <plugin
# id="my.artifactId"
# download-size="0"
# install-size="0"
# version="1.0.0-SNAPSHOT"
# unpack="false"/>
# [...]
#
# 2a. feature.xml file after executing this script with parameter "my.artifactId" "47.11.0-SNAPSHOT"
# [...]
# <plugin
# id="my.artifactId"
# download-size="0"
# install-size="0"
# version="47.11.0.qualifier"
# unpack="false"/>
# [...]
#
# 2b. feature.xml file after executing this script with parameter "my.artifactId" "47.11.0"
# [...]
# <plugin
# id="my.artifactId"
# download-size="0"
# install-size="0"
# version="47.11.0"
# unpack="false"/>
# [...]
#
# Usage:
# ------
# > set-dependency-version-in-all-feature.sh "my.artifactId" "47.11.0"
# > set-dependency-version-in-all-feature.sh "my.artifactId" "0.8.15-SNAPSHOT"
#
# set -x
# Include global functions
# @see: http://wiki.bash-hackers.org/syntax/shellvars
[ -z "${SCRIPT_DIRECTORY}" ] && SCRIPT_DIRECTORY="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )" && export SCRIPT_DIRECTORY
# @see: https://github.com/koalaman/shellcheck/wiki/SC1090
# shellcheck source=./lib/_set_dependencies_functions.sh
. "${SCRIPT_DIRECTORY}/lib/_set_dependencies_functions.sh"
# check the presence of required tools/commands/executables
_check_required_helper 'grep' 'xargs' 'sed' 'awk'
# default: use the version as given
_version="${VERSION}"
# SNAPSHOT version: use the stripped version with the Eclipse ".qualifier" suffix
if ${IS_SNAPSHOT_VERSION}
then
_version="${STRIPPED_VERSION}\.qualifier"
fi
_find_filter="feature.xml"
# id="${ORIGINAL_ARTIFACT_ID}"
_grep_filter="id=\\\"${ORIGINAL_ARTIFACT_ID}\\\""
# id="${ORIGINAL_ARTIFACT_ID}"
_awk_filter="${_grep_filter}"
# version="${VERSION}"
# @see: https://stackoverflow.com/questions/2854655/command-to-escape-a-string-in-bash
_local_quoted_version="$( printf "%q" "${_version}")"
_sed_filter="s|\\\\\\(version=\\\\\\\"\\\\\\).*\\\\\\(\\\\\\\".*\\\\\\)|\\\\1${_local_quoted_version}\\\\2|g"
_local_quoted_sed_cmd="sed -e\\\"%d,%d ${_sed_filter}\\\" -i.sed-backup %s"
# 1. '_build_find_cmd ...' - build the find command for relative path only of files
# with name pattern
_cmd="( $(_build_find_cmd "${_find_filter}") "
_cmd+=" | $(_build_xargs_cmd) "
# 2. '_build_grep_cmd ...' - select file names containing the bundle version string
_cmd+="$(_build_grep_cmd "${_grep_filter}") || exit 0 ) "
# 3. 'awk ...' - identify file name and range; returns replacement sed script 'sed ... -e"[START],[END] ... [FILENAME]'
# awk '/\<plugin/{s=x; start=NR}{s=s$0"\n"}/id=\"taco.contentstore.encrypted\"/{p=1}/\/>/ && p{printf "sed -e\"%d,%d s|\\\(version=\\\"\\\).*\\\(\\\".*\\\)|\\10\\\.8\\\.15\\\.qualifier\\2|g\" -i.sed-backup %s\n",start,NR,FILENAME; p=0}'
_cmd+=" | $(_build_xargs_cmd -I '{}') "
_cmd+=" awk '/\\<plugin/{s=x; start=NR}{s=s\$0\"\\n\"}/${_awk_filter}/{p=1}/\\/>/ && p{printf \"${_local_quoted_sed_cmd}\\n\",start,NR,FILENAME; p=0}' {}"
# 4. - exec command in bash
# bash -c '{}'
# @see: https://www.cloudsavvyit.com/7984/using-xargs-in-combination-with-bash-c-to-create-complex-commands/
_cmd+=" | $(_build_xargs_cmd -0 ) bash -c "
[ 0 -lt "${VERBOSE}" ] && echo "Execute: ${_cmd}"
# 4. '_exec_cmd ...' - execute the assembled command line
_exec_cmd "${_cmd}"
# clean up temp. work file if verbose level is lower than '2'
# @see: http://www.linuxjournal.com/content/use-bash-trap-statement-cleanup-temporary-files
[[ ${VERBOSE} -lt 2 ]] && trap "$(_exec_cmd _build_delete_sed_backup_files)" EXIT
|
barthel/maven-utils
|
set-dependency-version-in-all-feature.sh
|
Shell
|
apache-2.0
| 4,867 |
echo "crouton aliases installed, use crouton-help for more info."
alias startchroot='sudo enter-chroot -n precise'
alias startxfce4='sudo enter-chroot -n xfce startxfce4'
#alias startunity='sudo initctl stop powerd;sudo enter-chroot -n unity startunity'
# alias startunity='sudo enter-chroot -n unity startunity'
alias startunity='sudo enter-chroot -n trusty exec env XMETHOD=xorg startunity'
alias startunityw='sudo enter-chroot -n trusty exec env XMETHOD=xiwi startunity'
alias startunitycmd='sudo enter-chroot -n trusty'
function crouton-help {
cat <<HELP_TXT
Summary:
Helper for interacting with a crouton chroot setup on ChromeOS. Set a
config in .bashrc for the chroot name; otherwise we default to trusty as
the name of the chroot. XMETHOD targets supported at the moment are xiwi and xorg.
export CHROOT_NAME=trusty
Available Commands:
crouton-help : get the help text
crouton-start [METHOD] : start the last XMETHOD we used, if method is set
then start one of these types of methods:
cmd - command prompt only
xorg - full video, switch with ctl-alt-shift <arrows>
xiwi - use the chromeapp to connect and switch
crouton-run [command] : run a xiwi window command. example: crouton-run terminator
crouton-update : update the crouton image
crouton-clean : clean up the crouton scripts and alias
HELP_TXT
}
function crouton-start {
_METHOD=$1
# fix for tun0 error
sudo stop shill
sudo start shill BLACKLISTED_DEVICES=tun0
[[ -z "$CHROOT_NAME" ]] && CHROOT_NAME=trusty
if [ -z "$_METHOD" ] ; then
if [ -f ~/.config/crouton_method.config ]; then
_METHOD=$(cat ~/.config/crouton_method.config)
else
_METHOD=cmd
fi
fi
case "$_METHOD" in
cmd)
echo "start using cmd"
echo $_METHOD> ~/.config/crouton_method.config
sudo enter-chroot -n $CHROOT_NAME
;;
xorg)
echo "start using xorg"
echo $_METHOD> ~/.config/crouton_method.config
sudo enter-chroot -n $CHROOT_NAME exec env XMETHOD=xorg startunity
;;
xiwi)
echo "start using xiwi"
echo $_METHOD> ~/.config/crouton_method.config
sudo enter-chroot -n $CHROOT_NAME exec env XMETHOD=xiwi startunity
;;
*)
echo "ERROR start method not supported"
crouton-help
;;
esac
}
function crouton-run {
COMMAND=$1
if [ -z "$COMMAND" ]; then
echo "crouton-run <command> : run a xiwi command in chrome window"
else
sudo enter-chroot -n $CHROOT_NAME -b xiwi $COMMAND
fi
}
function crouton-update {
[[ -z "$CHROOT_NAME" ]] && CHROOT_NAME=trusty
cd ~/Downloads
[ ! -f ./crouton ] && curl -L https://goo.gl/fd3zc > ./crouton
[ ! -f ./crouton-alias.sh ] && curl -L https://raw.githubusercontent.com/wenlock/myhome/master/opt/bin/crouton-alias.sh > ./crouton-alias.sh
sudo sh ./crouton -u -n $CHROOT_NAME
}
function crouton-clean {
cd ~/Downloads
[ -f ./crouton ] && rm -f ./crouton
[ -f ./crouton-alias.sh ] && rm -f ./crouton-alias.sh
echo "clean done"
}
|
wenlock/myhome
|
opt/bin/crouton-alias.sh
|
Shell
|
apache-2.0
| 3,273 |
#!/bin/sh
#
# project setup script
# limingwei
# 2015-04-09 16:32:27
#
SVN_URL="svn://121.41.62.44/***-v5-source-code-root/"
# SVN_URL="http://192.168.138.202:8082/svn/code/helpdesk/"
# paths
BIN_PATH=/home/***/bin
APP_PATH=/home/***/app
DOWN_PATH=/home/***/down
CONF_PATH=/home/***/conf
SRC_PATH=/home/***/source-code
#
# Entry function
#
function _main_()
{
if [ $# -eq 0 ]
then
_***_project_setup_code_
_***_project_setup_server_
_***_project_setup_maven_
elif [ $1 = "code" ]
then
_***_project_setup_code_
elif [ $1 = "server" ]
then
_***_project_setup_server_
elif [ $1 = "maven" ]
then
_***_project_setup_maven_
else
echo "code|server|maven"
fi
}
function _***_project_setup_code_()
{
rm -rf $SRC_PATH
mkdir -p $SRC_PATH
cd $SRC_PATH
svn checkout ${SVN_URL}***-support
svn checkout ${SVN_URL}***-api
svn checkout ${SVN_URL}***-admin-api
svn checkout ${SVN_URL}***-admin-sdk
svn checkout ${SVN_URL}***-admin-service
svn checkout ${SVN_URL}***-notify
svn checkout ${SVN_URL}***-account-service
svn checkout ${SVN_URL}***-ticket-service
svn checkout ${SVN_URL}***-helpcenter-service
svn checkout ${SVN_URL}***-account-logic
svn checkout ${SVN_URL}***-ticket-logic
svn checkout ${SVN_URL}***-helpcenter-logic
svn checkout ${SVN_URL}***-mail
svn checkout ${SVN_URL}***-push
svn checkout ${SVN_URL}***-admin
svn checkout ${SVN_URL}***-open
svn checkout ${SVN_URL}***-web
mkdir -p $SRC_PATH/***-support/bin/shell-online/project-status/
echo 'no value' >> $SRC_PATH/***-support/bin/shell-online/project-status/nginx_route_status
# init ***-shell
mkdir -p $BIN_PATH
rm -rf $BIN_PATH/***_project
echo '#!/bin/sh
sh /home/***/source-code/***-support/bin/shell-online/***.sh $@' >> $BIN_PATH/***_project && chmod 777 $BIN_PATH/***_project
}
function _***_project_setup_server_()
{
cd $DOWN_PATH
if [ ! -e "$DOWN_PATH/tomcat-7.0.59.tar.gz" ];then
wget http://7u2toi.com2.z0.glb.qiniucdn.com/tomcat-7.0.59.tar.gz
fi
if [ ! -e "$DOWN_PATH/jetty-9.2.10.tar.gz" ];then
wget http://7u2toi.com2.z0.glb.qiniucdn.com/jetty-9.2.10.tar.gz
fi
# jetty-***-push
rm -rf $APP_PATH/jetty-***-push
cd $DOWN_PATH
tar zxvf jetty-9.2.10.tar.gz -C $APP_PATH
mv $APP_PATH/jetty-distribution-9.2.10.v20150310/ $APP_PATH/jetty-***-push
# tomcat-***-admin
rm -rf $APP_PATH/tomcat-***-admin
cd $DOWN_PATH
tar zxvf tomcat-7.0.59.tar.gz -C $APP_PATH
mv $APP_PATH/apache-tomcat-7.0.59/ $APP_PATH/tomcat-***-admin
rm -rf $APP_PATH/tomcat-***-admin/webapps
# tomcat-***-open
rm -rf $APP_PATH/tomcat-***-open
cd $DOWN_PATH
tar zxvf tomcat-7.0.59.tar.gz -C $APP_PATH
mv $APP_PATH/apache-tomcat-7.0.59/ $APP_PATH/tomcat-***-open
rm -rf $APP_PATH/tomcat-***-open/webapps
# tomcat-***-web
rm -rf $APP_PATH/tomcat-***-web
cd $DOWN_PATH
tar zxvf tomcat-7.0.59.tar.gz -C $APP_PATH
mv $APP_PATH/apache-tomcat-7.0.59/ $APP_PATH/tomcat-***-web
rm -rf $APP_PATH/tomcat-***-web/webapps
}
function _***_project_setup_maven_()
{
# maven-jar-files
cd $DOWN_PATH
if [ ! -e "$DOWN_PATH/maven-local-repository.tar.gz" ];then
wget http://7u2toi.com2.z0.glb.qiniucdn.com/maven-local-repository.tar.gz
fi
tar zxvf maven-local-repository.tar.gz -C /
}
#
# Invoke the entry function
#
_main_ "$@"
|
zhangzuoqiang/summer
|
bin/_setup_shell/ewei_project_setup.sh
|
Shell
|
apache-2.0
| 3,233 |
#!/usr/bin/env bash
set -e
source bosh-src/ci/tasks/utils.sh
source /etc/profile.d/chruby.sh
chruby 2.4.2
check_param google_project
check_param google_json_key_data
function clean_up_bucket {
local bucket=$1
gsutil rm -rf gs://${bucket}/
}
function gcs_login {
gcloud config set project $google_project
echo $google_json_key_data > key.json
gcloud auth activate-service-account --key-file=key.json
}
function gcs_logout {
gcloud auth revoke
}
function setup_bucket {
bucket_name="bosh-blobstore-bucket-$RANDOM"
echo -n "foobar" > public
gcs_login
gsutil mb -c MULTI_REGIONAL -l us "gs://${bucket_name}"
trap 'clean_up_bucket ${bucket_name}' EXIT
gsutil acl set public-read "gs://${bucket_name}"
gsutil cp -a public-read public "gs://${bucket_name}/"
export GCS_BUCKET_NAME=${bucket_name}
}
setup_bucket
pushd bosh-src
bosh sync blobs
chmod +x ./blobs/bosh-gcscli/bosh-gcscli-*-amd64
popd
pushd bosh-src/src
bundle install
popd
pushd bosh-src/src/bosh-director
export GCS_SERVICE_ACCOUNT_KEY="${google_json_key_data}"
bundle exec rspec spec/functional/gcs_spec.rb --tag general_gcs
popd
|
barthy1/bosh
|
ci/tasks/test-gcs-blobstore-client-integration.sh
|
Shell
|
apache-2.0
| 1,162 |
#! /bin/sh
# cross-compile magnacarto for various os/cpus and zip/tar.gz the output
# requires Go 1.5, set GO15 environment var to use a different Go installation
set -e
BUILD_DATE=`date +%Y%m%d`
BUILD_REF=`git rev-parse --short HEAD`
BUILD_VERSION=dev-$BUILD_DATE-$BUILD_REF
VERSION_LDFLAGS="-X github.com/omniscale/magnacarto.buildVersion=${BUILD_VERSION}"
# build os arch
function build() {
os=$1
arch=$2
build_name=magnacarto-$BUILD_VERSION-$os-$arch
mkdir -p $build_name
echo building $build_name
cd $build_name
env GOOS=$os GOARCH=$arch go build -ldflags "$VERSION_LDFLAGS" github.com/omniscale/magnacarto/cmd/magnacarto
env GOOS=$os GOARCH=$arch go build -ldflags "$VERSION_LDFLAGS" github.com/omniscale/magnacarto/cmd/magnaserv
# use git archive to only include checked-in files
(cd ../../../ && git archive --format tar HEAD app README.md LICENSE) | tar -x -
(cd ../../../docs && git archive --format tar HEAD examples/) | tar -x -
cd ..
if [ $os = windows ]; then
rm -f $build_name.zip
zip -q -r $build_name.zip $build_name
else
tar -czf $build_name.tar.gz $build_name
fi
rm -r $build_name
}
mkdir -p dist/$BUILD_VERSION
cd dist/$BUILD_VERSION
# build for these os/arch combinations
build windows 386
build windows amd64
build linux 386
build linux amd64
build darwin amd64
cd ../../
|
omniscale/magnacarto
|
crosscompile.sh
|
Shell
|
apache-2.0
| 1,389 |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This code sends a TYPING_STARTED event to the user.
# Read more: https://developers.google.com/business-communications/business-messages/guides/how-to/message/events#send
# Replace the __CONVERSATION_ID__ with a conversation id that you can send messages to
# Make sure a service account key file exists at ./service_account_key.json
curl -X POST "https://businessmessages.googleapis.com/v1/conversations/__CONVERSATION_ID__/events?eventId=6a0af2c6-787d-4097-870d-93fe20351747" \
-H "Content-Type: application/json" \
-H "User-Agent: curl/business-messages" \
-H "$(oauth2l header --json ./service_account_key.json businessmessages)" \
-d "{
'eventType': 'TYPING_STARTED',
'representative': {
'avatarImage': 'https://developers.google.com/identity/images/g-logo.png',
'displayName': 'Chatbot',
'representativeType': 'BOT'
}
}"
|
google-business-communications/bm-snippets-curl
|
send-event-typing-started.sh
|
Shell
|
apache-2.0
| 1,423 |
#!/bin/sh
cat <<_END_ > /srv/zookeeper/conf/zoo.cfg
# The number of milliseconds of each tick
tickTime=2000
# The number of ticks that the initial
# synchronization phase can take
initLimit=10
# The number of ticks that can pass between
# sending a request and getting an acknowledgement
syncLimit=5
# the directory where the snapshot is stored.
# do not use /tmp for storage, /tmp here is just
# example sakes.
dataDir=$DATADIR
# the port at which the clients will connect
clientPort=2181
# the maximum number of client connections.
# increase this if you need to handle more clients
#maxClientCnxns=60
# Force sync to media after each update
forceSync=yes
_END_
echo ${MYID:-1} > $DATADIR/myid
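# SERVERS is a comma-separated host list; the Python 2 one-liner below turns,
# e.g. (hypothetical) SERVERS=zk1,zk2,zk3 into:
#   server.1=zk1:2888:3888
#   server.2=zk2:2888:3888
#   server.3=zk3:2888:3888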
if [ -n "$SERVERS" ]; then
python -c "print '\n'.join(['server.%i=%s:2888:3888' % (i + 1, x) for i, x in enumerate('$SERVERS'.split(','))])" >> /srv/zookeeper/conf/zoo.cfg
fi
exec "$@"
|
gtonello/docker-zookeeper
|
zkp_config.sh
|
Shell
|
apache-2.0
| 887 |
#!/bin/sh
set -e
while true; do nc -l 80 < index.html; done
|
stackdocker/container-ops
|
alpine-netcat/entrypoint.sh
|
Shell
|
apache-2.0
| 61 |
#!/bin/sh
#
# More details at
# http://www.osehra.org/wiki/obtaining-testing-code
#
export DashboardsDir=$HOME/OSEHRA/Dashboards
mkdir -p $DashboardsDir
cd $DashboardsDir
git clone git://github.com/OSEHR/OSEHRA-Automated-Testing.git
cd OSEHRA-Automated-Testing
git checkout --track origin/UseCaseTesting
ln -s $DashboardsDir/OSEHRA-Automated-Testing/DashboardScripts/vista_common.cmake $HOME/OSEHRA/VistA-installation-scripts/Scripts
|
luisibanez/VistA-installation-scripts
|
Scripts/installOSEHRATesting.sh
|
Shell
|
apache-2.0
| 435 |
#!/bin/sh
sim-seriationct-networkmodel.py --dbhost localhost --dbport 27017 --experiment seriationct-21 --popsize 100 --maxinittraits 5 --numloci 3 --innovrate 0.000596679679724 --simlength 12000 --debug 0 --seed 1863375858 --reps 1 --samplefraction 0.5 --migrationfraction 0.0567437253281 --devel 0 --networkmodel networks/seriationct-21-network-gml.zip
sim-seriationct-networkmodel.py --dbhost localhost --dbport 27017 --experiment seriationct-21 --popsize 100 --maxinittraits 5 --numloci 3 --innovrate 0.000964648768499 --simlength 12000 --debug 0 --seed 199616279 --reps 1 --samplefraction 0.5 --migrationfraction 0.0705025150399 --devel 0 --networkmodel networks/seriationct-21-network-gml.zip
sim-seriationct-networkmodel.py --dbhost localhost --dbport 27017 --experiment seriationct-21 --popsize 100 --maxinittraits 5 --numloci 3 --innovrate 0.000324086489152 --simlength 12000 --debug 0 --seed 431936105 --reps 1 --samplefraction 0.5 --migrationfraction 0.0916973627116 --devel 0 --networkmodel networks/seriationct-21-network-gml.zip
sim-seriationct-networkmodel.py --dbhost localhost --dbport 27017 --experiment seriationct-21 --popsize 100 --maxinittraits 5 --numloci 3 --innovrate 0.000753397563755 --simlength 12000 --debug 0 --seed 1918328733 --reps 1 --samplefraction 0.5 --migrationfraction 0.0618824390013 --devel 0 --networkmodel networks/seriationct-21-network-gml.zip
|
mmadsen/experiment-seriationct
|
experiments/seriationct-21/jobs/job-seriationct-21-4024293e-3656-4425-9f56-1ee94bb00971.sh
|
Shell
|
apache-2.0
| 1,393 |
#!/bin/bash
# Prints usage.
function print_usage {
cat <<EOF
Usage: `basename $0` <DIR> <CMD>
with
* <DIR> the test directory
* <CMD> the Kind 2 command to test
(Passing "-h" or "--help" as argument prints this message.)
Tests Kind 2 on the Lustre files in a directory. The directory should be
structured as follows. Directory
* "success" contains the files with only valid properties
* "falsifiable" contains the files with at least one falsifiable property
* "error" contains the files on which Kind 2 fails with error code 2.
EOF
}
# Print usage if asked.
for arg in "$@"; do
if [[ "$arg" = "-h" || "$arg" = "--help" ]]; then
print_usage
exit 0
fi
done
test_dir=`echo "$1" | sed 's:/$::'`
# Make sure the folder exists.
# (print_error is defined further down, so we cannot call it yet.)
if [ ! -d "$test_dir" ]; then
echo -e "\033[31mError\033[0m: directory \"$test_dir\" does not exist"
exit 2
fi
contract_dir="${test_dir}/contracts"
shift
k2_args="$@"
basic_k2_cmd="$k2_args --color false --check_subproperties true"
contract_k2_cmd="$basic_k2_cmd --modular true --compositional true"
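# Kind 2 exit codes, as this test layout assumes them:
# 20 = all properties proved, 10 = some property falsified,
# 0 = timeout, 2 = error.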
success_code="20"
falsifiable_code="10"
timeout_code="0"
error_code="2"
success_dir="success"
falsifiable_dir="falsifiable"
error_dir="error"
tests_ok="true"
# Prints an error taking the lines as argument.
# Exit with exit code 2.
function print_error {
print_usage
echo
echo -e "\033[31mError\033[0m:"
for line in "$@"; do
echo " $line"
done
echo
exit 2
}
# Returns the log file corresponding to a file.
# Simply appends ".log" at the end of the path.
function log_file_of {
file="$1"
echo "$1.log"
}
# Returns a string version of an exit code.
function str_of_code {
if [ "$1" -eq "$success_code" ] ; then
echo "success ($success_code)"
elif [ "$1" -eq "$falsifiable_code" ]; then
echo "falsifiable ($falsifiable_code)"
elif [ "$1" -eq "$timeout_code" ]; then
echo "timeout ($timeout_code)"
else
echo "error ($1)"
fi
}
function name_of_path {
echo $1 | sed -e 's:.*/::g'
}
# Runs a test on a file, takes the file path, the expected exit code and the
# Kind 2 command as arguments.
function run_one {
file_path="$1"
shift
expected_code="$1"
shift
full_kind2_cmd="$@ $file_path"
expected_code_str=`str_of_code "$expected_code"`
printf "| %-40s ... " "`name_of_path $file_path`"
log_file_path=`log_file_of "$file_path"`
$full_kind2_cmd &> $log_file_path
exit_code="$?"
if [ "$exit_code" -ne "$expected_code" ]; then
tests_ok="false"
exit_code_str=`str_of_code "$exit_code"`
echo -e "\033[31merror\033[0m"
echo -e "\033[31m!\033[0m expected $expected_code_str"
echo -e "\033[31m!\033[0m but got $exit_code_str"
echo -e "\033[31m!\033[0m See log in \"$log_file_path\"."
else
echo -e "\033[32mok\033[0m"
rm $log_file_path
fi
}
# Constructs the find command from a directory and a subdirectory.
function find_tests {
echo "find ${1}/${2} -iname '*.lus'"
}
# Runs the tests in some directory, takes the working directory and the Kind 2
# command as arguments.
function run_in {
work_dir="$1"
shift
kind2_cmd="$@"
kind2_cmd_bv="${kind2_cmd} --smt_solver Z3 --smt_logic none"
# Falsifiable
find_cmd=`find_tests $work_dir $falsifiable_dir`
file_count=`eval $find_cmd | wc -l | tr -d ' '`
echo "| Running \"falsifiable\" ($file_count files)"
for file in `eval $find_cmd`; do
# If the regression file is a bitvector test, append the options
# "--smt_solver Z3 --smt_logic none" to the Kind 2 call
if [[ $file == *"bv-lia-ex.lus" ]] || [[ $file == *"bv-conversions.lus" ]]; then
run_one "$file" "$falsifiable_code" "$kind2_cmd_bv"
else
run_one "$file" "$falsifiable_code" "$kind2_cmd"
fi
done
# Success
find_cmd=`find_tests "$work_dir" "$success_dir"`
file_count=`eval $find_cmd | wc -l | tr -d ' '`
echo "| Running \"success\" ($file_count files)"
for file in `eval $find_cmd`; do
# If the regression file is a bitvector test, append the options
# "--smt_solver Z3 --smt_logic none" to the Kind 2 call
if [[ $file == *"bv-logical.lus" ]]; then
run_one "$file" "$success_code" "$kind2_cmd_bv"
else
run_one "$file" "$success_code" "$kind2_cmd"
fi
done
# Error
find_cmd=`find_tests $work_dir $error_dir`
file_count=`eval $find_cmd | wc -l | tr -d ' '`
echo "| Running \"error\" ($file_count files)"
for file in `eval $find_cmd`; do
# If the regression file is a bitvector test, append the options
# "--smt_solver Z3 --smt_logic none" to the Kind 2 call
if [[ $file == *"bv-sh-exception.lus" ]]; then
run_one "$file" "$error_code" "$kind2_cmd_bv --lus_strict true"
else
run_one "$file" "$error_code" "$kind2_cmd --lus_strict true"
fi
done
}
# Runs the tests on normal and contract.
function run_all {
echo
echo "|===| Running normal tests."
echo -e "| > \033[1m$basic_k2_cmd\033[0m"
echo "|"
run_in "$test_dir" "$basic_k2_cmd"
echo "|===| Done."
echo
# echo "|===| Running contract tests."
# echo -e "| > \033[1m$contract_k2_cmd\033[0m"
# echo "|"
# run_in "$contract_dir" "$contract_k2_cmd"
# echo "|===| Done."
# echo
}
# Running tests.
run_all
# Shouting if there was an error.
if [ "$tests_ok" = "false" ]; then
echo -e "\033[31mError\033[0m: some test failed."
echo ""
exit 2
else
exit 0
fi
|
tinelli/kind2
|
tests/run.sh
|
Shell
|
apache-2.0
| 5,388 |
#!/bin/bash
if [ $1 = "start" ]; then
echo 'Starting name service'
docker-compose -f /data/ala-name-service.yml up -d
else
echo 'Shutting down name service'
docker-compose -f /data/ala-name-service.yml kill
fi
|
AtlasOfLivingAustralia/ala-install
|
ansible/roles/pipelines/files/ala-name-service.sh
|
Shell
|
apache-2.0
| 217 |
#!/usr/bin/env bash
apt-get install -y python-software-properties
apt-get update
|
zalando-stups/yourturn
|
vagrant.sh
|
Shell
|
apache-2.0
| 82 |
#!/bin/bash
#
# Configuring network services for InterSystems Caché.
#
# You MUST use "sudo" to execute this script.
#
#
# Open the port needed for Web interface.
#
sudo iptables -A INPUT -p tcp --dport 57772 -j ACCEPT
#
# Open the port needed for TCP connection.
#
sudo iptables -A INPUT -p tcp --dport 1972 -j ACCEPT
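#
# Note: rules added with iptables are not persistent across reboots;
# save them (e.g. with "iptables-save") if they must survive a restart.
#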
|
luisibanez/VistA-installation-scripts
|
Scripts/Cache/configureCache.sh
|
Shell
|
apache-2.0
| 324 |
#!/bin/bash
# Check a testbot or test environment to make sure it's likely to be sane.
# We should add to this script whenever a testbot fails and we can figure out why.
set -o errexit
set -o pipefail
set -o nounset
# Check that required commands are available.
for command in git go make; do
command -v $command >/dev/null || { echo "Did not find command installed '$command'"; exit 2; }
done
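# Smoke-test that the docker daemon is reachable and can run a container.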
docker run -t busybox ls >/dev/null
if [ "$(go env GOOS)" = "windows" -a "$(git config core.autocrlf)" != "false" ] ; then
echo "git config core.autocrlf is not set to false on windows"
exit 3
fi
echo "--- testbot $HOSTNAME seems to be set up OK"
|
drud/build-tools
|
.autotests/sanetestbot.sh
|
Shell
|
apache-2.0
| 655 |
#!/bin/bash
if [ "$#" -ne 2 ]; then
>&2 echo "This script requires 2 arguments -- verNum, buildId"
exit 1
fi
verNum=$1
buildId=$2
echo "verNum = $verNum"
echo "buildId = $buildId"
buildNumber=$(expr $buildId - 102500) # shrink shared build number appropriately
echo "buildNumber = ${buildNumber}"
versionString="$verNum.$buildNumber"
echo "VersionString = ${versionString}"
echo "##vso[task.setvariable variable=VersionString;]$versionString"
|
OneIdentity/safeguard-ps
|
versionnumber.sh
|
Shell
|
apache-2.0
| 455 |
#!/usr/bin/env bash
export SPARK_LOCAL_DIRS="{{spark_local_dirs}}"
# Standalone cluster options
export SPARK_MASTER_OPTS="{{spark_master_opts}}"
if [ -n "{{spark_worker_instances}}" ]; then
export SPARK_WORKER_INSTANCES={{spark_worker_instances}}
fi
export SPARK_WORKER_CORES={{spark_worker_cores}}
export HADOOP_HOME="/root/ephemeral-hdfs"
export SPARK_MASTER_IP={{active_master}}
export MASTER=`cat /root/spark-ec2/cluster-url`
export SPARK_SUBMIT_LIBRARY_PATH="$SPARK_SUBMIT_LIBRARY_PATH:/root/ephemeral-hdfs/lib/native/"
export SPARK_SUBMIT_CLASSPATH="$SPARK_CLASSPATH:$SPARK_SUBMIT_CLASSPATH:/root/ephemeral-hdfs/conf"
# Bind Spark's web UIs to this machine's public EC2 hostname otherwise fallback to private IP:
export SPARK_PUBLIC_DNS=`
wget -q -O - http://169.254.169.254/latest/meta-data/public-hostname ||\
wget -q -O - http://169.254.169.254/latest/meta-data/local-ipv4`
# Used for YARN model
export YARN_CONF_DIR="/root/ephemeral-hdfs/conf"
export PYSPARK_PYTHON=python3
export PYSPARK_DRIVER_PYTHON=python3
# Set a high ulimit for large shuffles, only root can do this
if [ $(id -u) == "0" ]
then
ulimit -n 1000000
fi
|
paulomagalhaes/spark-ec2
|
templates/root/spark/conf/spark-env.sh
|
Shell
|
apache-2.0
| 1,146 |
echo "plz go to /etc/pacman.conf"
echo "and add Multilib"
echo "then remote the sleep line from this script"
sleep 10000
#
#https://aur.archlinux.org/packages/android-sdk-build-tools/
#download ide in https://developer.android.com/studio/index.html
#
#aur install
#
git clone https://aur.archlinux.org/android-sdk.git
cd android-sdk/
makepkg -si
cd ..
#
git clone https://aur.archlinux.org/android-sdk-platform-tools.git
cd android-sdk-platform-tools/
makepkg -si
cd ..
#
#add Multilib in /etc/pacman.conf
#
# [multilib]
# Include = /etc/pacman.d/mirrorlist
#
#then run
sudo pacman -S lib32-zlib
#
git clone https://aur.archlinux.org/android-sdk-build-tools.git
cd android-sdk-build-tools/
makepkg -si
cd ..
#
git clone https://aur.archlinux.org/android-platform.git
cd android-platform/
makepkg -si
cd ..
#if error in platform-27_r01.zip
# nano PKGBUILD
#
#and
#
# #sha384sums=('269d9373d55f0f93994401b9d733e98b599d42c680d6d4436a91232024c298bc1e3d717288f94f85c303b2c2c93e8dc5')
# sha384sums=('da778b2688355151e55fc1fe7763b67c087470c2afd1122007c04d58153c27bdcd5bb4ee0ca423e4e84bad243a87b95b')
#
git clone https://aur.archlinux.org/android-support-repository.git
cd android-support-repository/
makepkg -si
cd ..
#
git clone https://aur.archlinux.org/android-support.git
cd android-support/
makepkg -si
cd ..
#
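# NOTE: 'sudo su' below starts an interactive root shell; the commands
# that follow are meant to be typed inside that shell, not run
# automatically by this script.
#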
sudo su
ls /opt/android-sdk/
groupadd sdkusers
#
#change <user> to your user
#gpasswd -a <user> sdkusers
#
chown -R :sdkusers /opt/android-sdk/
chmod -R g+w /opt/android-sdk/
#
#restart your machine
#
reboot
#
#after restart
#download ide in https://developer.android.com/studio/index.html
#
cd android-studio/bin/
#change the sdk folder location to /opt/android-sdk/
./studio.sh
|
h31nr1ch/Linux
|
pacman/android-studio.sh
|
Shell
|
apache-2.0
| 1,699 |
#!/bin/sh
$((123))
|
consulo/consulo-bash
|
src/test/resources/psi/inspection/evaluateArithmeticExpressionInspection/ok/src/A.bash
|
Shell
|
apache-2.0
| 20 |
#!/usr/bin/env bash
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# A script that runs all Python unit tests in tfjs-layers.
set -e
SCRIPTS_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
TEST_FILES="$(find "${SCRIPTS_DIR}" -name '*_test.py')"
pip install virtualenv
TMP_VENV_DIR="$(mktemp -d --suffix=_venv)"
virtualenv -p "python" "${TMP_VENV_DIR}"
source "${TMP_VENV_DIR}/bin/activate"
pip install -r "${SCRIPTS_DIR}/requirements-dev.txt"
cd "${SCRIPTS_DIR}"
pylint --rcfile=.pylintrc tensorflowjs
export PYTHONPATH=".:${PYTHONPATH}"
for TEST_FILE in ${TEST_FILES}; do
echo
echo "====== Running test: ${TEST_FILE} ======"
echo
python "${TEST_FILE}"
done
echo
echo "All tests passed."
echo
deactivate
rm -rf "${TMP_VENV_DIR}"
|
tensorflow/tfjs-converter
|
tfjs-converter/python/run-python-tests.sh
|
Shell
|
apache-2.0
| 1,360 |
#!/usr/bin/env bash
set -e -x
source bosh-cpi-release/ci/tasks/utils.sh
# Creates an integer version number from the semantic version format
# May be changed when we decide to fully use semantic versions for releases
integer_version=`cut -d "." -f1 release-version-semver/number`
echo $integer_version > integer_version
cd bosh-cpi-release
source /etc/profile.d/chruby.sh
chruby 2.1.2
set +x
echo creating config/private.yml with blobstore secrets
cat > config/private.yml << EOF
---
blobstore:
s3:
access_key_id: $S3_ACCESS_KEY_ID
secret_access_key: $S3_SECRET_ACCESS_KEY
EOF
set -x
echo "using bosh CLI version..."
bosh version
echo "finalizing CPI release..."
bosh finalize release ../bosh-cpi-dev-artifacts/*.tgz --version $integer_version
rm config/private.yml
git diff | cat
git add .
git config --global user.email [email protected]
git config --global user.name CI
git commit -m "New final release v $integer_version"
|
ritazh/bosh-azure-cpi-release
|
ci/tasks/promote-candidate.sh
|
Shell
|
apache-2.0
| 952 |
#!/bin/bash
#This job performs spark submits via cron jobs.
#Spark history server helps get detailed information of jobs that have finished
if [ `hostname` == 'node0' ]
then
set -x
#Create spark events directory in dsefs
dse hadoop fs -mkdir -p /spark/events/
#dse hadoop fs -mkdir /spark
#dse hadoop fs -mkdir /spark/events
#Start Spark Job Server with custom config
#sudo dse spark-history-server start --properties-file /tmp/datastax-sketch-examples/dse-sketching-demo/history-server-config/spark-defaults.conf
echo "Finished Setup Spark History Server"
fi
|
michaelraney/datastax-sketch-examples
|
.startup/sparkHistory.sh
|
Shell
|
apache-2.0
| 582 |
#!/bin/sh
java -cp "target/lib/*:target/classes" RaceFreeMember
|
SoCe/SoCe
|
Server/thirdparty/hazelcast/hazelcast-3.3.3/code-samples/distributed-primitives/lock/start-racefree.sh
|
Shell
|
apache-2.0
| 65 |
#!/bin/bash
#find ./ -type l -exec rm {} \;
# resolve to an absolute path so the cd calls below keep working
DIR=$(cd "$(dirname "$0")" && pwd)
for image in tahr-consul-mongodb tahr-consul-nodejs tahr-consul-golang; do
cd "$DIR/docker/$image"
cp -f "$DIR/utilities/consul/start-consul.sh" start-consul.sh
cp -f "$DIR/utilities/consul/install.sh" install-consul.sh
cp -f "$DIR/utilities/consul/supervisord.conf" supervisord-consul.conf
done
#find ./ -type l -exec ls -lad {} \;
find ./ -name "*.sh" -exec chmod +x {} \;
|
nalindak/devops-docker-ansible
|
build-dependecies.sh
|
Shell
|
apache-2.0
| 830 |
#!/bin/sh
if [ $JAVA_HOME ]
then
echo "JAVA_HOME found at $JAVA_HOME"
RUN_JAVA=$JAVA_HOME/bin/java
else
echo "JAVA_HOME environment variable not available."
RUN_JAVA=`which java 2>/dev/null`
fi
if [ -z "$RUN_JAVA" ]
then
echo "JAVA could not be found in your system."
echo "please install Java 1.7 or higher!!!"
exit 1
fi
appname="`basename $0`"
appname=${appname/\.sh/}
apphome="`cd \`dirname $0\`/.. && pwd && cd - >/dev/null`"
CLASSPATH="${apphome}/config/*"
CLASSPATH="${CLASSPATH}:${apphome}/lib/bagri-test-tpox-1.2.1.jar"
CLASSPATH="${CLASSPATH}:${apphome}/lib/*"
export CLASSPATH
TPOX_HOME="${apphome}/../../TPoX"
export TPOX_HOME
. "set-tpox-env.conf"
# insert orders into the cache
$RUN_JAVA -server $JAVA_OPTS net.sf.tpox.workload.core.WorkloadDriver -w queries/XQJ/insOrder.xml -tr 5000 -u 50
# perform queries, looping over the user count
a=5
while [ $a -le 200 ]
do
echo $a
$RUN_JAVA -server $JAVA_OPTS net.sf.tpox.workload.core.WorkloadDriver -w queries/XQJ/orders.xml -u $a -r 10 -pc 95 -cl 99
a=`expr $a + 15`
done
|
dsukhoroslov/bagri
|
bagri-distr/src/main/tpox/tpox-xqj-orders.sh
|
Shell
|
apache-2.0
| 1,156 |
#!/bin/bash
source ~/.bash_profile
export JAVA_HOME=jdk
export JRE_HOME=$JAVA_HOME/jre
export CLASSPATH=$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar:$JRE_HOME/lib:.:$CLASSPATH
JAVA=$JAVA_HOME/bin/java
JAVA_OPTS="$JAVA_OPTS -server -Xms256m -Xmx512m -Xmn150m -XX:MaxPermSize=128m"
# Performance options
JAVA_OPTS="$JAVA_OPTS -XX:+AggressiveOpts"
JAVA_OPTS="$JAVA_OPTS -XX:+UseBiasedLocking"
JAVA_OPTS="$JAVA_OPTS -XX:+UseFastAccessorMethods"
JAVA_OPTS="$JAVA_OPTS -XX:+DisableExplicitGC"
JAVA_OPTS="$JAVA_OPTS -XX:+UseParNewGC"
JAVA_OPTS="$JAVA_OPTS -XX:+UseConcMarkSweepGC"
JAVA_OPTS="$JAVA_OPTS -XX:+CMSParallelRemarkEnabled"
JAVA_OPTS="$JAVA_OPTS -XX:+UseCMSCompactAtFullCollection"
JAVA_OPTS="$JAVA_OPTS -XX:+UseCMSInitiatingOccupancyOnly"
JAVA_OPTS="$JAVA_OPTS -XX:CMSInitiatingOccupancyFraction=75"
JAVA_OPTS="$JAVE_OPTS -XX:+HeapDumpOnOutOfMemoryError"
JAVA_OPTS="$JAVA_OPTS -XX:HeapDumpPath=../dump_files/"
APP_DIR="."
CONF_DIR="$APP_DIR/conf"
CFG_NAME="$CONF_DIR/global.properties"
TIMEZONE="-Dfile.encoding=UTF8 -Duser.timezone=GMT+08"
LIB_DIR=$APP_DIR/lib
LIB_JARS=`ls $LIB_DIR|grep .jar|awk '{print "'$LIB_DIR'/"$0}'|tr "\n" ":"`
nohup $JAVA $JAVA_OPTS $TIMEZONE -cp "$CONF_DIR:$APP_DIR/lib/webant-xpath-0.0.1.jar:$LIB_JARS" cn.inveno.worken.WriteHotNewsServer $CFG_NAME > /dev/null 2>&1 &
#$JAVA $JAVA_OPTS -cp "$CONF_DIR:$APP_DIR/lib/webant-xpath-0.0.1.jar:$LIB_JARS" cn.inveno.worken.WriteHotNewsServer $CFG_NAME
|
tanyjan/km
|
doc/bin/write_news.sh
|
Shell
|
apache-2.0
| 1,436 |
#!/bin/bash
# Combine all the arguments.
printf -v var "'%s', " "$@"
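# Strip the trailing ", " left by the printf loop above.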
var=${var%??}
# Invoke the application.
./gradlew run -PappArgs="[$var]"
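# Example (hypothetical): ./run.sh foo bar
# expands to: ./gradlew run -PappArgs="['foo', 'bar']"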
|
googleapis/api-compiler
|
run.sh
|
Shell
|
apache-2.0
| 145 |
#!/bin/bash
###
### Sanity check
###
if [ "$DEST_ROOT" == "" ]; then
echo "Hint: export DEST_ROOT='/mnt/newroot'"
echo "ERROR: Destination root path not specified."
exit 1
fi
###
### Fix home to symlink
###
mkdir -p $DEST_ROOT/var/home &&
rm -rf $DEST_ROOT/home &&
ln -sf var/home $DEST_ROOT/home
echo
echo "OK: /home has been moved to /var/home (and symlink created)."
echo
|
a2o/puppet-modules-a2o-essential
|
bootstrap/os/slackware64/scripts/fix-home.sh
|
Shell
|
apache-2.0
| 397 |
#! /bin/sh
createenduser() {
TEMP=`getopt -o a:z:i:c:s:t:e:k:d:p:h --long id:,ca:,subject:,keytype:,ecurve:,keysize:,days:,startdate:,enddate:,profile:,passphrase:,rekey:,help -n 'createuser.sh' -- "$@"`
KEYSIZE=2048
KEYTYPE=rsa
ECURVE=prime256v1
DAYS=30
PROFILE=v3_user
PASSPHRASE=69866640
eval set -- "$TEMP"
while true; do
case "$1" in
-i|--id) ID=$2; shift 2;;
-c|--ca) CA=$2; shift 2;;
-s|--subject) SUBJECTDN="$2"; shift 2;;
-t|--keytype) KEYTYPE=$2; shift 2;;
-e|--ecurve) ECURVE=$2; shift 2;;
-k|--keysize) KEYSIZE=$2; shift 2;;
-d|--days) DAYS=$2; shift 2;;
-a|--startdate) SD=$2; shift 2;;
-z|--enddate) ED=$2; shift 2;;
-p|--profile) PROFILE=$2; shift 2;;
--passphrase) PASSPHRASE="$2"; shift 2;;
--rekey) REKEY="$2"; shift 2;;
-h|--help) echo "Options:"
echo " -i|--id <id>"
echo " -c|--ca <ca>"
echo " -s|--subject <subject>"
echo " (-t|--keytype <algo>) # default rsa (dsa,ec)"
echo " (-e|--ecurve <curvename>) # default prime256v1"
echo " (-k|--key <keysize>) # default 2048"
echo " (-d|--days <days>) # default 30"
echo " (-a|--startdate <date>) # default now"
echo " (-z|--enddate <date>) # default 1 year"
echo " (-p|--profile <profile>) # default v3_user"
echo " (--passphrase <pwd>) # default 69866640"
echo " (--rekey) # default false"
shift
exit 1
;;
--) shift; break;;
*) echo "internal error"; exit 1;;
esac
done
if [ -z "$CA" ]; then
echo "CA identifier is missing."
exit 1
fi
if [ -z "$ID" ]; then
echo "User identifier is missing."
exit 1
fi
if [ -z "$SUBJECTDN" ]; then
echo "User subject name is missing."
exit 1
fi
case $KEYTYPE in
rsa|dsa|ec) ;;
*) echo "Wrong key type."; exit 1;;
esac
if [ ! -d "users" ]; then
mkdir users
fi
case $REKEY in
true) rm -rf users/$CA-$ID.key;;
false|*) ;;
esac
if [ ! -f users/$CA-$ID.key ]; then
echo users/$CA-$ID.key
echo "====="
echo "Creating end-user $ID, named $SUBJECTDN, issued by CA $CA"
echo "Generating user private key"
case $KEYTYPE in
rsa) openssl genrsa -out users/$CA-$ID.key $KEYSIZE
;;
dsa) openssl dsaparam -genkey -out users/$CA-$ID.key $KEYSIZE
;;
ec) openssl ecparam -genkey -name $ECURVE -out users/$CA-$ID.key
;;
esac
echo "Generating user certificate request" && openssl req -utf8 -new -config conf/$CA.cnf -key users/$CA-$ID.key -batch -out users/$CA-$ID.req -subj "$SUBJECTDN"
else
echo "Renew without rekey, generates certificate request" && openssl req -new -utf8 -config conf/$CA.cnf -key users/$CA-$ID.key -batch -out users/$CA-$ID.req -subj "$SUBJECTDN"
fi
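# Derive an unpredictable serial number: AES-encrypt the monotonically
# increasing counter with the CA's secret key and a random IV.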
SECRETKEY=`od -t x1 -A n database/$CA/private/secretkey | sed 's/ //g' | tr 'a-f' 'A-F'`
COUNTER=`cat database/$CA/counter`
echo `expr $COUNTER + 1` > database/$CA/counter
IV=`hexdump -n 16 -e '4/4 "%08X" 1 "\n"' /dev/urandom`
SERIAL=`echo -n $COUNTER | openssl enc -e -K $SECRETKEY -iv $IV -aes-128-cbc | od -t x1 -A n | sed 's/ //g' | tr 'a-f' 'A-F'`
echo $SERIAL > database/$CA/serial
echo "Creating user certificate" && openssl ca -utf8 -config conf/$CA.cnf -in users/$CA-$ID.req -startdate $SD -enddate $ED -out users/$CA-$ID.crt -extensions $PROFILE -batch
echo "Creating PKCS#12 object" && openssl pkcs12 -export -in users/$CA-$ID.crt -inkey users/$CA-$ID.key -password "pass:$PASSPHRASE" -out users/$CA-$ID.p12 -CApath store -chain
echo "Deleting certificate request" && rm users/$CA-$ID.req
echo "====="
}
createenduser "$@"
|
saoullabit/pkidugoret
|
createuser.sh
|
Shell
|
apache-2.0
| 3,880 |
#!/bin/bash
find . -name '*.mo' -delete
package_name="springboard_iogt"
declare -a unsupported_locales=("tgk_TJ")
mkdir -p ${package_name}/locale
# move out unsupported languages temporarily
for locale in "${unsupported_locales[@]}"
do
if [ -d "${package_name}/locale/${locale}/" ]; then
mv "${package_name}/locale/${locale}/" "${package_name}/unsupported_locale/"
fi
done
python setup.py extract_messages -o ${package_name}/locale/messages.pot
if [ -d "${package_name}/locale/fre_FR/" ]; then
mv "${package_name}/locale/fre_FR/" "${package_name}/locale/fra_FR/"
fi
for locale in "$@"
do
if [ ! -f "${package_name}/locale/${locale}/LC_MESSAGES/messages.po" ]; then
python setup.py init_catalog -i ${package_name}/locale/messages.pot -d ${package_name}/locale -l ${locale}
fi
done
python setup.py update_catalog -i ${package_name}/locale/messages.pot -d ${package_name}/locale
python setup.py compile_catalog -d ${package_name}/locale
if [ -d "${package_name}/locale/fra_FR/" ]; then
mv "${package_name}/locale/fra_FR/" "${package_name}/locale/fre_FR/"
fi
# move unsupported languages back and compile
for locale in "${unsupported_locales[@]}"
do
if [ -d "${package_name}/unsupported_locale/${locale}/" ]; then
mv "${package_name}/unsupported_locale/${locale}/" "${package_name}/locale/"
msgfmt -o "${package_name}/locale/${locale}/LC_MESSAGES/messages.mo" "${package_name}/locale/${locale}/LC_MESSAGES/messages.po"
fi
done
|
universalcore/springboard-iogt
|
extract_i18n.sh
|
Shell
|
bsd-2-clause
| 1,467 |
#!/bin/sh
### Dmitry Frolov <[email protected]>
### $Id: divert_node.sh,v 1.5 2002/07/31 07:53:43 romanp Exp $
IFACE="xl0"
THRESHOLD=1000
VERBOSE="1"
### to manipulate a node use "ipacctctl ipacct_${IFACE}:${IFACE} <command...>".
nodename=ipacct_${IFACE}
hookprefix=${IFACE}
case "$1" in
stop)
ipfw del 64021 64022
ngctl shutdown ${nodename}:
;;
show)
ipacctctl ${nodename}:${hookprefix} checkpoint
ipacctctl ${nodename}:${hookprefix} show
ipacctctl ${nodename}:${hookprefix} clear
;;
start|*)
### we must create two hooks, ${hookprefix}_in and
### ${hookprefix}_out to simulate input and output
### streams
ngctl -f- <<-SEQ
### dummy hook, to create a node
mkpeer ipacct ctl ctl
name .:ctl ${nodename}
### "incoming" hook
mkpeer ${nodename}: ksocket ${hookprefix}_in inet/raw/divert
name ${nodename}:${hookprefix}_in ${nodename}_in
msg ${nodename}_in: bind inet/0.0.0.0:3021
### "outgoing" hook
mkpeer ${nodename}: ksocket ${hookprefix}_out inet/raw/divert
name ${nodename}:${hookprefix}_out ${nodename}_out
msg ${nodename}_out: bind inet/0.0.0.0:3022
rmhook .:ctl
SEQ
ipacctctl ${nodename}:${hookprefix} dlt RAW
ipacctctl ${nodename}:${hookprefix} v ${VERBOSE}
ipacctctl ${nodename}:${hookprefix} th ${THRESHOLD}
### packets reaching tee are _accepted_,
### so use these rules _AFTER_ all deny rules
ipfw add 64021 tee 3021 ip from any to room101 via ${IFACE}
ipfw add 64022 tee 3022 ip from room101 to any via ${IFACE}
;;
esac
|
vstakhov/ng_ipacct
|
scripts/divert_node.sh
|
Shell
|
bsd-2-clause
| 1,539 |
#!/bin/bash
##############################################################################
#
# Simple test runner for measuring speed and memory usage of XlsxWriter
# and the Excel::Writer::XLSX modules.
#
# Copyright 2013-2016, John McNamara, [email protected]
echo ""
echo "Python and XlsxWriter. Speed only."
echo "Rows, Columns, Time, Memory"
sleep 1; python perf_pyx.py 200 0 0
sleep 1; python perf_pyx.py 400 0 0
sleep 1; python perf_pyx.py 800 0 0
sleep 1; python perf_pyx.py 1600 0 0
sleep 1; python perf_pyx.py 3200 0 0
sleep 1; python perf_pyx.py 6400 0 0
sleep 1; python perf_pyx.py 12800 0 0
echo ""
echo "Python and XlsxWriter. Memory only."
echo "Rows, Columns, Time, Memory"
sleep 1; python perf_pyx.py 200 0 1
sleep 1; python perf_pyx.py 400 0 1
sleep 1; python perf_pyx.py 800 0 1
sleep 1; python perf_pyx.py 1600 0 1
sleep 1; python perf_pyx.py 3200 0 1
sleep 1; python perf_pyx.py 6400 0 1
sleep 1; python perf_pyx.py 12800 0 1
echo ""
echo "Python and XlsxWriter in optimisation mode. Speed only."
echo "Rows, Columns, Time, Memory"
sleep 1; python perf_pyx.py 200 1 0
sleep 1; python perf_pyx.py 400 1 0
sleep 1; python perf_pyx.py 800 1 0
sleep 1; python perf_pyx.py 1600 1 0
sleep 1; python perf_pyx.py 3200 1 0
sleep 1; python perf_pyx.py 6400 1 0
sleep 1; python perf_pyx.py 12800 1 0
echo ""
echo "Python and XlsxWriter in optimisation mode. Memory only."
echo "Rows, Columns, Time, Memory"
sleep 1; python perf_pyx.py 200 1 1
sleep 1; python perf_pyx.py 400 1 1
sleep 1; python perf_pyx.py 800 1 1
sleep 1; python perf_pyx.py 1600 1 1
sleep 1; python perf_pyx.py 3200 1 1
sleep 1; python perf_pyx.py 6400 1 1
sleep 1; python perf_pyx.py 12800 1 1
echo ""
echo "Perl and Excel::Writer::XLSX"
echo "Rows, Columns, Time, Memory"
sleep 1; perl perf_ewx.pl 200
sleep 1; perl perf_ewx.pl 400
sleep 1; perl perf_ewx.pl 800
sleep 1; perl perf_ewx.pl 1600
sleep 1; perl perf_ewx.pl 3200
sleep 1; perl perf_ewx.pl 6400
sleep 1; perl perf_ewx.pl 12800
echo ""
echo "Perl Excel::Writer::XLSX in optimisation mode"
echo "Rows, Columns, Time, Memory"
sleep 1; perl perf_ewx.pl 200 1
sleep 1; perl perf_ewx.pl 400 1
sleep 1; perl perf_ewx.pl 800 1
sleep 1; perl perf_ewx.pl 1600 1
sleep 1; perl perf_ewx.pl 3200 1
sleep 1; perl perf_ewx.pl 6400 1
sleep 1; perl perf_ewx.pl 12800 1
echo ""
echo ""
|
jkyeung/XlsxWriter
|
dev/performance/perf_test.sh
|
Shell
|
bsd-2-clause
| 2,364 |
#!/bin/sh
# Install Eigen
eigen_version="3.3.7"
wget --no-check-certificate https://bitbucket.org/eigen/eigen/get/$eigen_version.tar.bz2
tar -xf $eigen_version.tar.bz2
mv eig* eigen
mkdir eigen/build_dir
cd eigen/build_dir
cmake ..
sudo make install
cd ../..
rm -rf eigen/ $eigen_version.tar.bz2
echo "Installed Eigen"
# Install Ceres
ceres_version="ceres-solver-1.14.0"
sudo apt-get -y install cmake
sudo apt-get -y install libgoogle-glog-dev
sudo apt-get -y install libatlas-base-dev
sudo apt-get -y install libsuitesparse-dev
wget http://ceres-solver.org/$ceres_version.tar.gz
tar zxf $ceres_version.tar.gz
rm $ceres_version.tar.gz
mkdir ceres-bin
cd ceres-bin
cmake ../$ceres_version
make -j4
sudo make install
cd ..
rm -rf $ceres_version/ ceres-bin/
echo "Installed ceres"
|
tsender/riptide_software
|
riptide_utilities/setup_scripts/install_eigen_ceres.sh
|
Shell
|
bsd-2-clause
| 781 |
apt-get -q -y update
apt-get install -q -y nginx
|
preflightsiren/hackathon-machine-images
|
scripts/setup.sh
|
Shell
|
bsd-3-clause
| 48 |
#!/usr/bin/env bash
while [ -s ../resources/diff_ids.txt ]
do
echo "Running python script"
./get_urls_from_ids.py
echo
echo "Running update"
./update_urls.sh
done
|
MIREL-UNC/mirel-scripts
|
preprocess/04_run_get.sh
|
Shell
|
bsd-3-clause
| 185 |
#!/bin/bash
sudo rm -Rf /usr/local/share/java.installer
sudo mkdir -p /usr/local/share/java.installer/
sudo cp "$2" /usr/local/share/java.installer/java.pkg
sudo pkggen -i com.oracle.java.installer -v "$1" --postinstall scripts/postinstall files out.pkg
chmod 666 out.pkg
mv out.pkg "$3"
|
korylprince/java.installer
|
mkpkg.sh
|
Shell
|
bsd-3-clause
| 290 |
#!/bin/bash
#Help
if [ "$1" = "-h" ]
then
echo "Locate aligned fragments within 2 full proteins"
echo "Arguments:"
echo " 1. NCBI Accession of first protein 1"
echo " 2. Aligned fragment of protein 1"
echo " 3. NCBI Accession of first protein 2"
echo " 4. Aligned fragment of protein 2"
echo " 5. (Optional) substitution matrix to use. (Defaul: BL50)"
echo " 6. (Optional) Indicate whether plots will be shown (Values: show/quiet; Default: show)"
exit 1
fi
#Define the substitution matrix to work with
mat="BL50"
mode=""
#Identify the type of substitution matrix, if given
if [[ ! -z "$5" ]] && ([[ "$5" != "quiet" ]] && [[ "$5" != "show" ]])
then
mat=$5
fi
#Check the mode of operation: quiet/show
if [[ "$5" == "quiet" ]] || [[ "$6" == "quiet" ]]
then
mode="-q"
fi
locateFragment.pl -a $1 -f $2 $mode
locateFragment.pl -a $3 -f $4 $mode
alignSeqsFiles.pl -q $1_frag.faa -ql $1_frag -s $3_frag.faa -sl $3_frag -e 0.1 -c 20 -cc X -m $mat
alignSeqsFiles.pl -q $1.faa -ql $1 -s $3.faa -sl $3 -e 0.1 -c 5 -cc X -m $mat
#Open html reports if appropriate
if [[ $mode != "-q" ]]
then
open ssearch*/*.html
fi
|
SaierLaboratory/TCDBtools
|
scripts/locfrag.sh
|
Shell
|
bsd-3-clause
| 1,131 |
#!/bin/bash
# Copyright (c) 2011 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
set -o nounset
set -o errexit
VERIFY=${VERIFY:-yes}
PREFIX=${PREFIX:-}
python2 ../prepare_input.py --config $(basename $(pwd)) ref
${PREFIX} $1 ${DASHDASH} data/ref/input/input.source 58 > input.source.out 2>stderr1.out
${PREFIX} $1 ${DASHDASH} data/ref/input/input.graphic 58 > input.graphic.out 2>stderr2.out
${PREFIX} $1 ${DASHDASH} data/ref/input/input.program 58 > input.program.out 2>stderr3.out
if [[ "${VERIFY}" != "no" ]] ; then
echo "VERIFY"
cmp input.source.out data/ref/output/input.source.out
cmp input.graphic.out data/ref/output/input.graphic.out
cmp input.program.out data/ref/output/input.program.out
fi
echo OK
|
Lind-Project/native_client
|
tests/spec2k/256.bzip2/run.ref.sh
|
Shell
|
bsd-3-clause
| 836 |
# Install dependencies
# Use conda **ONLY** for numpy and pandas (if not pulling from master), this
# speeds up the builds a lot. Use the normal pip install for the rest.
conda create -n odo numpy=1.11.2 python=$python
source activate odo
# update setuptools and pip
conda update setuptools pip
if [ -n "$PANDAS_VERSION" ];then
pip install cython==0.24.1
pip install $PANDAS_VERSION
else
conda install pandas=0.19.0
fi
conda install pytables=3.3.0
conda install h5py=2.6.0
# install the frozen ci dependencies
pip install -e .
pip install -r etc/requirements_ci.txt
# datashape
pip install git+git://github.com/blaze/datashape.git
|
quantopian/odo
|
etc/ci-install.sh
|
Shell
|
bsd-3-clause
| 647 |
#!/bin/bash
echo "Installing Git and Sumatra Test"
# sudo apt-get install git
pip install GitPython
if [[ $TRAVIS_PYTHON_VERSION == 3* ]]
then
pip install django
pip install pyyaml # otherwise smt init fails with yaml not defined error
pip install Sumatra
fi
|
SmokinCaterpillar/pypet
|
ciscripts/travis/install_gitpython.sh
|
Shell
|
bsd-3-clause
| 291 |
#!/bin/bash
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Script assumed to be run in native_client/
if [ "x${OSTYPE}" = "xcygwin" ]; then
cd "$(cygpath "${PWD}")"
fi
if [[ ${PWD} != */native_client ]]; then
echo "ERROR: must be run in native_client!"
exit 1
fi
if [ $# -ne 1 ]; then
echo "USAGE: $0 win/mac/linux"
exit 2
fi
readonly SCRIPT_DIR="$(dirname "$0")"
readonly SCRIPT_DIR_ABS="$(cd "${SCRIPT_DIR}" ; pwd)"
export TOOLCHAINLOC=sdk
export TOOLCHAINNAME=nacl-sdk
set -x
set -e
set -u
PLATFORM=$1
cd tools
export INSIDE_TOOLCHAIN=1
echo @@@BUILD_STEP clobber_toolchain@@@
rm -rf ../scons-out sdk-out sdk ../toolchain/*_newlib SRC/* BUILD/*
if [[ "${BUILDBOT_SLAVE_TYPE:-Trybot}" == "Trybot" ]]; then
echo @@@BUILD_STEP setup source@@@
./buildbot_patch-toolchain-tries.sh
fi
echo @@@BUILD_STEP compile_toolchain@@@
mkdir -p ../toolchain/${PLATFORM}_x86
make -j8 clean buildbot-build-with-newlib
if [[ ${PLATFORM} == win ]]; then
../mingw/msys/bin/sh.exe -c "export PATH=/mingw/bin:/bin:\$PATH &&
export TOOLCHAINLOC &&
export TOOLCHAINNAME &&
make -j8 gdb 2>&1"
fi
echo @@@BUILD_STEP canonicalize timestamps@@@
./canonicalize_timestamps.sh sdk
echo @@@BUILD_STEP tar_toolchain@@@
# We don't just use tar's z flag because we want to pass the -n option
# to gzip so that it won't embed a timestamp in the compressed file.
tar cvf - sdk | gzip -n -9 > naclsdk.tgz
chmod a+r naclsdk.tgz
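# Record the tarball's SHA1 alongside it; macOS ships no sha1sum, so the mac
# branch falls back to openssl and strips the "SHA1(...)= " prefix instead.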
if [ "$PLATFORM" = "mac" ] ; then
echo "$(SHA1=$(openssl sha1 naclsdk.tgz) ; echo ${SHA1/* /})"
else
echo "$(SHA1=$(sha1sum -b naclsdk.tgz) ; echo ${SHA1:0:40})"
fi > naclsdk.tgz.sha1hash
if [[ "${BUILDBOT_SLAVE_TYPE:-Trybot}" != "Trybot" ]]; then
# Upload the toolchain before running the tests, in case the tests
# fail. We do not want a flaky test or a non-toolchain-related bug to
# cause us to lose the toolchain snapshot, especially since this takes
# so long to build on Windows. We can always re-test a toolchain
# snapshot on the trybots.
echo @@@BUILD_STEP archive_build@@@
(
gsutil=../buildbot/gsutil.sh
GS_BASE=gs://nativeclient-archive2/toolchain
for destrevision in ${BUILDBOT_GOT_REVISION} latest ; do
for suffix in tgz tgz.sha1hash ; do
${gsutil} cp -a public-read \
naclsdk.${suffix} \
${GS_BASE}/${destrevision}/naclsdk_${PLATFORM}_x86.${suffix}
done
done
)
echo @@@STEP_LINK@download@http://gsdview.appspot.com/nativeclient-archive2/toolchain/${BUILDBOT_GOT_REVISION}/@@@
fi
echo @@@BUILD_STEP untar_toolchain@@@
mkdir -p ../toolchain/${PLATFORM}_x86_newlib/.tmp
cd ../toolchain/${PLATFORM}_x86_newlib/.tmp
tar xfz ../../../tools/naclsdk.tgz
mv sdk/nacl-sdk/* ../
cd ../../..
if [[ ${PLATFORM} == win ]]; then
# Explicitly call the depot tools version of Python to avoid cygwin issues.
python.bat buildbot/buildbot_standard.py opt 64 newlib
elif [[ ${PLATFORM} == mac ]]; then
python2 buildbot/buildbot_standard.py opt 32 newlib
elif [[ ${PLATFORM} == linux ]]; then
python2 buildbot/buildbot_standard.py opt 32 newlib
else
echo "ERROR, bad platform."
exit 1
fi
if [[ "${DONT_BUILD_COMPATIBLE_TOOLCHAINS:-no}" != "yes" ]]; then
echo @@@BUILD_STEP sync backports@@@
rm -rf tools/BACKPORTS/ppapi*
tools/BACKPORTS/build_backports.sh VERSIONS ${PLATFORM} newlib
fi
|
Lind-Project/native_client
|
buildbot/buildbot_toolchain.sh
|
Shell
|
bsd-3-clause
| 3,428 |
#!/bin/bash
function message () {
echo -e "\033[32m$1\033[0m"
}
function info () {
message "I: $1"
}
function error () {
>&2 echo -e "\033[31mE: $1\033[0m"
exit 1
}
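# Example usage (assuming this file is sourced by another script):
#   . sh/libtools.sh
#   info "starting build"        # green "I: starting build" on stdout
#   error "missing dependency"   # red "E: ..." on stderr, then exit 1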
|
dossier/sortingdesk
|
sh/libtools.sh
|
Shell
|
mit
| 176 |