code: string (2–1.05M chars) | repo_name: string (5–110) | path: string (3–922) | language: string (1 class) | license: string (15 classes) | size: int64 (2–1.05M) |
---|---|---|---|---|---|
#!/bin/bash
set -e
echo "################################################################################"
echo "# REBUILDING WITHOUT NODE_ENV"
echo "################################################################################"
unset NODE_ENV
docker run ${DOCKER_RUN_OPTS} npm run build
echo "################################################################################"
echo "# BUILDING DOCS"
echo "################################################################################"
npm run grunt:concurrent -- build:docs
| nickclar/spark-js-sdk | tooling/pre-release.sh | Shell | mit | 532 |
#!/bin/sh
PLATFORM="linux"
TREE_DIR="../../tree/libtess2"
SRC_DIR="$TREE_DIR/src"
BUILD_DIR="build/$PLATFORM"
INSTALL_DIR="tmp/$PLATFORM"
SRC_PATH="$(pwd)/$SRC_DIR"
INSTALL_PATH="$(pwd)/$INSTALL_DIR"
if [ ! -d "$SRC_PATH" ]; then
echo "SOURCE NOT FOUND!"
exit 1
fi
# ---
TOOLCHAIN_FILE="$CROSS_PATH/core/cmake/toolchains/linux.cmake"
cmake -H"$SRC_DIR" -B"$BUILD_DIR" \
-DCMAKE_TOOLCHAIN_FILE="$TOOLCHAIN_FILE" -G Ninja \
-DCMAKE_BUILD_TYPE=Release \
-DLIBRARY_OUTPUT_PATH="$INSTALL_PATH/lib"
if [ $? != 0 ]; then
echo "CONFIGURATION FAILED!"
exit 1
fi
# ---
rm -rf "$INSTALL_PATH"
cmake --build "$BUILD_DIR"
if [ $? != 0 ]; then
echo "BUILD FAILED!"
exit 1
fi
rm -rf "$TREE_DIR/$PLATFORM/lib"
mkdir -p "$TREE_DIR/$PLATFORM/lib"
mv "tmp/$PLATFORM/lib" "$TREE_DIR/$PLATFORM"
cd "$TREE_DIR/$PLATFORM"
ln -s "../src/Include" include
| arielm/chronotext-cross | deps/libtess2/build.linux.sh | Shell | mit | 865 |
#!/bin/bash
set -e
echo "SMELT BUILD MODEL SCRIPT"
MODEL_ROOT="model"
MAKE_NPROC=$(nproc)
export SMLT_HOSTNAME=$(hostname)
export SMLT_MACHINE=$(hostname -s)
if [[ "$1" == "-d" ]]; then
LOGFILE=/dev/stdout
else
LOGFILE=$(readlink -f $MODEL_ROOT/create_overlays_${SMLT_MACHINE}.log)
fi
LIKWID_PATH="$MODEL_ROOT/likwid"
LIKWID_REPOSITORY="https://github.com/RRZE-HPC/likwid.git"
SIM_REPOSITORY="https://github.com/libsmelt/Simulator.git"
SIM_PATH="$MODEL_ROOT/Simulator"
MACHINEDB=$SIM_PATH/machinedb
OUTDIR=$MACHINEDB/machine-data/$SMLT_MACHINE
export PATH=$PATH:$LIKWID_PATH:$LIKWID_PATH/ext/lua/:$LIKWID_PATH/ext/hwloc/:.
export LD_LIBRARY_PATH=$LIKWID_PATH:.
echo "SMLT_MACHINE=$SMLT_MACHINE"
echo "SMLT_HOSTNAME=$SMLT_HOSTNAME"
echo "MODEL_ROOT=$MODEL_ROOT"
echo "Creating overlays for $SMLT_MACHINE. logfile: $LOGFILE"
pushd $SIM_PATH
MODELS="adaptivetree badtree mst bintree cluster fibonacci sequential"
for m in $MODELS; do
echo $m
./simulator.py $SMLT_MACHINE $m
done
popd
| libsmelt/libsmelt | scripts/create_overlays.sh | Shell | mit | 1,015 |
#!/usr/bin/env bash
#
# Copyright (c) 2008-2022 the Urho3D project.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
$(dirname $0)/cmake_generic.sh "$@" -D ARM=1
# vi: set ts=4 sw=4 expandtab:
| urho3d/Urho3D | script/cmake_arm.sh | Shell | mit | 1,205 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for USN-2361-1
#
# Security announcement date: 2014-09-24 00:00:00 UTC
# Script generation date: 2017-01-04 21:04:14 UTC
#
# Operating System: Ubuntu 14.04 LTS
# Architecture: x86_64
#
# Vulnerable packages fixed in version:
# - libnss3:2:3.17.1-0ubuntu0.14.04.1
#
# Last versions recommended by security team:
# - libnss3:2:3.26.2-0ubuntu0.14.04.3
#
# CVE List:
# - CVE-2014-1568
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo apt-get install --only-upgrade libnss3=2:3.26.2-0ubuntu0.14.04.3 -y
| Cyberwatch/cbw-security-fixes | Ubuntu_14.04_LTS/x86_64/2014/USN-2361-1.sh | Shell | mit | 646 |
#!/bin/sh
# CYBERWATCH SAS - 2016
#
# Security fix for RHSA-2010:0938
#
# Security announcement date: 2010-12-01 23:38:06 UTC
# Script generation date: 2016-05-12 18:10:00 UTC
#
# Operating System: Red Hat 5
# Architecture: x86_64
#
# Vulnerable packages fixed in version:
# - glassfish-jaxb.noarch:2.1.4-1.17.patch04.ep1.el5
# - glassfish-jaxws.noarch:2.1.1-1jpp.ep1.13.el5
# - hibernate3.noarch:3.2.4-1.SP1_CP11.0jpp.ep2.0.el5
# - hibernate3-annotations.noarch:3.3.1-2.0.GA_CP04.ep1.el5
# - hibernate3-annotations-javadoc.noarch:3.3.1-2.0.GA_CP04.ep1.el5
# - hibernate3-javadoc.noarch:3.2.4-1.SP1_CP11.0jpp.ep2.0.el5
# - javassist.noarch:3.9.0-2.ep1.1.el5
# - jboss-common.noarch:1.2.2-1.ep1.1.el5
# - jboss-messaging.noarch:1.4.0-4.SP3_CP11.1.ep1.el5
# - jboss-remoting.noarch:2.2.3-4.SP3.ep1.el5
# - jboss-seam.noarch:1.2.1-3.JBPAPP_4_3_0_GA.ep1.22.el5.1
# - jboss-seam-docs.noarch:1.2.1-3.JBPAPP_4_3_0_GA.ep1.22.el5.1
# - jboss-seam2.noarch:2.0.2.FP-1.ep1.26.el5
# - jboss-seam2-docs.noarch:2.0.2.FP-1.ep1.26.el5
# - jbossas.noarch:4.3.0-8.GA_CP09.2.1.ep1.el5
# - jbossas-4.3.0.GA_CP09-bin.noarch:4.3.0-8.GA_CP09.2.1.ep1.el5
# - jbossas-client.noarch:4.3.0-8.GA_CP09.2.1.ep1.el5
# - jbossts.noarch:4.2.3-2.SP5_CP10.1jpp.ep1.1.el5
# - jbossweb.noarch:2.0.0-7.CP15.0jpp.ep1.1.el5
# - jbossws.noarch:2.0.1-6.SP2_CP09.2.ep1.el5
# - jbossws-common.noarch:1.0.0-3.GA_CP06.1.ep1.el5
# - jgroups.noarch:2.4.9-1.ep1.el5
# - quartz.noarch:1.5.2-1jpp.patch01.ep1.4.2.el5
# - rh-eap-docs.noarch:4.3.0-8.GA_CP09.ep1.3.el5
# - rh-eap-docs-examples.noarch:4.3.0-8.GA_CP09.ep1.3.el5
# - xalan-j2.noarch:2.7.1-4.ep1.1.el5
#
# Last versions recommended by security team:
# - glassfish-jaxb.noarch:2.2.5-14.redhat_5.ep6.el5
# - glassfish-jaxws.noarch:2.1.1-1jpp.ep1.13.el5
# - hibernate3.noarch:3.3.2-1.5.GA_CP05.ep5.el5
# - hibernate3-annotations.noarch:3.4.0-3.3.GA_CP05.ep5.el5
# - hibernate3-annotations-javadoc.noarch:3.4.0-3.3.GA_CP05.ep5.el5
# - hibernate3-javadoc.noarch:3.3.2-1.5.GA_CP05.ep5.el5
# - javassist.noarch:3.12.0-6.SP1.ep5.el5
# - jboss-common.noarch:1.2.2-1.ep1.1.el5
# - jboss-messaging.noarch:1.4.8-12.SP9.1.ep5.el5
# - jboss-remoting.noarch:2.5.4-11.SP4_patch01.ep5.el5
# - jboss-seam.noarch:1.2.1-3.JBPAPP_4_3_0_GA.ep1.22.el5.1
# - jboss-seam-docs.noarch:1.2.1-3.JBPAPP_4_3_0_GA.ep1.22.el5.1
# - jboss-seam2.noarch:2.2.6.EAP5-22_patch_01.ep5.el5
# - jboss-seam2-docs.noarch:2.2.6.EAP5-22_patch_01.ep5.el5
# - jbossas.noarch:5.2.0-14.ep5.el5
# - jbossas-4.3.0.GA_CP09-bin.noarch:4.3.0-8.GA_CP09.2.1.ep1.el5
# - jbossas-client.noarch:5.2.0-14.ep5.el5
# - jbossts.noarch:4.17.30-1.Final_redhat_1.1.ep6.el5
# - jbossweb.noarch:7.5.12-1.Final_redhat_1.1.ep6.el5
# - jbossws.noarch:3.1.2-14.SP15_patch_02.ep5.el5
# - jbossws-common.noarch:2.3.1-1.Final_redhat_1.1.ep6.el5
# - jgroups.noarch:3.2.13-1.Final_redhat_1.1.ep6.el5
# - quartz.noarch:1.5.2-1jpp.patch01.ep1.4.2.el5
# - rh-eap-docs.noarch:5.2.0-6.ep5.el5
# - rh-eap-docs-examples.noarch:5.2.0-6.ep5.el5
# - xalan-j2.noarch:2.7.1-12_patch_08.ep5.el5
#
# CVE List:
# - CVE-2010-3708
# - CVE-2010-3862
# - CVE-2010-3878
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo yum install glassfish-jaxb.noarch-2.2.5 -y
sudo yum install glassfish-jaxws.noarch-2.1.1 -y
sudo yum install hibernate3.noarch-3.3.2 -y
sudo yum install hibernate3-annotations.noarch-3.4.0 -y
sudo yum install hibernate3-annotations-javadoc.noarch-3.4.0 -y
sudo yum install hibernate3-javadoc.noarch-3.3.2 -y
sudo yum install javassist.noarch-3.12.0 -y
sudo yum install jboss-common.noarch-1.2.2 -y
sudo yum install jboss-messaging.noarch-1.4.8 -y
sudo yum install jboss-remoting.noarch-2.5.4 -y
sudo yum install jboss-seam.noarch-1.2.1 -y
sudo yum install jboss-seam-docs.noarch-1.2.1 -y
sudo yum install jboss-seam2.noarch-2.2.6.EAP5 -y
sudo yum install jboss-seam2-docs.noarch-2.2.6.EAP5 -y
sudo yum install jbossas.noarch-5.2.0 -y
sudo yum install jbossas-4.3.0.GA_CP09-bin.noarch-4.3.0 -y
sudo yum install jbossas-client.noarch-5.2.0 -y
sudo yum install jbossts.noarch-4.17.30 -y
sudo yum install jbossweb.noarch-7.5.12 -y
sudo yum install jbossws.noarch-3.1.2 -y
sudo yum install jbossws-common.noarch-2.3.1 -y
sudo yum install jgroups.noarch-3.2.13 -y
sudo yum install quartz.noarch-1.5.2 -y
sudo yum install rh-eap-docs.noarch-5.2.0 -y
sudo yum install rh-eap-docs-examples.noarch-5.2.0 -y
sudo yum install xalan-j2.noarch-2.7.1 -y
| Cyberwatch/cbw-security-fixes | Red_Hat_5/x86_64/2010/RHSA-2010:0938.sh | Shell | mit | 4,595 |
#!/bin/sh
cd /src
npm start
| newlight77/raspberry-cluster | server/run.sh | Shell | mit | 29 |
#!/bin/bash
# Base folder
mkdir -p ~/robocup/ultron
cd ~/robocup/ultron
# Add python 3.6 repository if necessary
case $(lsb_release -rs) in
18.04) echo "You have 18.04 you don't need an unofficial repo! Hooray!"
;;
*)
echo "You don't have 18.04 so you need an unofficial repo. sadf"
sudo add-apt-repository ppa:jonathonf/python-3.6 -y
sudo apt update
sudo apt-get install python3.6-venv
;;
esac
# Install dependencies
sudo -S apt-get install --yes git python3.6 python-pip python3-venv python-virtualenv build-essential cmake libqt4-dev libgl1-mesa-dev libglu1-mesa-dev libprotobuf-dev protobuf-compiler libode-dev libboost-dev
# Clone repos
git clone https://github.com/RoboCupULaval/UI-Debug.git
# Create and activate virtualenv
python3.6 -m venv virtualenv
source virtualenv/bin/activate
# Install requirements
pip install -r StrategyAI/requirements.txt
pip install -r UI-Debug/requirements.txt
# Add user to dialout group to enable access to serial ports
sudo adduser $USER dialout
| RoboCupULaval/StrategyIA | scripts/install_ultron.sh | Shell | mit | 997 |
#!/bin/sh
SCRIPT_PATH=`dirname $0`
echo "amqp-coffee Compiling coffeescript to bin/"
echo $SCRIPT_PATH
rm -rf $SCRIPT_PATH/../bin
mkdir $SCRIPT_PATH/../bin
cp -r $SCRIPT_PATH/../src $SCRIPT_PATH/../bin/
# compile all coffeescript files
find $SCRIPT_PATH/../bin -name "*.coffee" | xargs $SCRIPT_PATH/../node_modules/coffee-script/bin/coffee --compile
# remove all coffeescript files
find $SCRIPT_PATH/../bin -name "*.coffee" | xargs rm
| dropbox/amqp-coffee | scripts/compile.sh | Shell | mit | 438 |
# CFG SYS 1
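# PROCS: number of processes to launch for this workflow run (assumed from the variable name)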
PROCS=3
| ECP-CANDLE/Supervisor | archives/workflows/simple_uq/test/cfg-sys-1.sh | Shell | mit | 22 |
#!/usr/bin/env bash
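# Appends an nginx vhost for a Symfony app to /etc/nginx/sites/centstead-$1.conf.
# Args: $1 = site/cert name, $2 = server_name, $3 = document root, $4 = HTTP port (default 80), $5 = HTTPS port (default 443).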
block="server {
listen ${4:-80};
listen ${5:-443} ssl;
server_name $2;
root \"$3\";
index index.html index.htm index.php app_dev.php;
charset utf-8;
location / {
try_files \$uri \$uri/ /app_dev.php?\$query_string;
}
location = /favicon.ico { access_log off; log_not_found off; }
location = /robots.txt { access_log off; log_not_found off; }
access_log off;
error_log /var/log/nginx/$1-ssl-error.log error;
sendfile on;
client_max_body_size 100m;
# DEV
location ~ ^/(app_dev|app_test|config)\.php(/|\$) {
fastcgi_split_path_info ^(.+\.php)(/.+)\$;
fastcgi_pass unix:/var/run/php/php7.0-fpm.sock;
include fastcgi_params;
fastcgi_param SCRIPT_FILENAME \$document_root\$fastcgi_script_name;
fastcgi_intercept_errors off;
fastcgi_buffer_size 64k;
fastcgi_buffers 4 64k;
fastcgi_busy_buffers_size 128k;
fastcgi_temp_file_write_size 128k;
fastcgi_connect_timeout 300;
fastcgi_send_timeout 300;
fastcgi_read_timeout 300;
}
# PROD
location ~ ^/app\.php(/|$) {
fastcgi_split_path_info ^(.+\.php)(/.+)$;
fastcgi_pass unix:/var/run/php/php7.0-fpm.sock;
include fastcgi_params;
fastcgi_param SCRIPT_FILENAME \$document_root\$fastcgi_script_name;
fastcgi_intercept_errors off;
fastcgi_buffer_size 64k;
fastcgi_buffers 4 64k;
fastcgi_busy_buffers_size 128k;
fastcgi_temp_file_write_size 128k;
fastcgi_connect_timeout 300;
fastcgi_send_timeout 300;
fastcgi_read_timeout 300;
}
location ~ /\.ht {
deny all;
}
ssl_certificate /etc/nginx/ssl/$1.crt;
ssl_certificate_key /etc/nginx/ssl/$1.key;
}
"
echo "$block" >> "/etc/nginx/sites/centstead-$1.conf" | jason-chang/centstead | scripts/serves/symfony2.sh | Shell | mit | 1,876 |
if [ ! -n "$WERCKER_DATADOG_EVENT_TOKEN" ]; then
error 'Please specify token property'
exit 1
fi
if [ ! -n "$WERCKER_DATADOG_EVENT_TITLE" ]; then
error 'Please specify title property'
exit 1
fi
if [ ! -n "$WERCKER_DATADOG_EVENT_TEXT" ]; then
error 'Please specify text property'
exit 1
fi
if [ ! -n "$WERCKER_DATADOG_EVENT_PRIORITY" ]; then
error 'Please specify priority property'
exit 1
fi
if [ ! -n "$WERCKER_DATADOG_EVENT_ALERT_TYPE" ]; then
error 'Please specify alert_type property'
exit 1
fi
curl -X POST -H "Content-type: application/json" \
-d "{
\"title\": \"$WERCKER_DATADOG_EVENT_TITLE\",
\"text\": \"$WERCKER_DATADOG_EVENT_TEXT\",
\"priority\": \"$WERCKER_DATADOG_EVENT_PRIORITY\",
\"alert_type\": \"$WERCKER_DATADOG_EVENT_ALERT_TYPE\",
\"source_type_name\": \"jenkins\"
}" \
"https://app.datadoghq.com/api/v1/events?api_key=$WERCKER_DATADOG_EVENT_TOKEN"
| WeAreFarmGeek/wercker-datadog-event-step | run.sh | Shell | mit | 964 |
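# Shuffle the shard list and analyze every shard at depth 19 with Stockfish via GNU parallel, writing per-shard results, done markers, and logs under generated/analysis/19/.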
cat generated/positions/shard-list.txt | util/shuffle | parallel -j+0 --eta './generate-analysis.py --hash=512 --out=generated/analysis/19/{/.}-res.txt --done=generated/analysis/19/{/.}-done.txt --depth=19 --engine=/usr/local/bin/stockfish {} > generated/analysis/19/{/.}.out 2> generated/analysis/19/{/.}.err' >& d19.shh
| rozim/KaggleFindingElo | d19.sh | Shell | mit | 324 |
#!/bin/bash
# Configuration Setup
source ./config
# Install NTP
yum -y install ntp cronie
systemctl enable ntpd.service
systemctl start ntpd.service
systemctl enable crond.service
systemctl start crond.service
# Set up OpenStack repos
yum -y install yum-plugin-priorities epel-release ethtool
yum -y install http://rdo.fedorapeople.org/openstack-kilo/rdo-release-kilo.rpm
yum -y install openstack-utils
yum -y update
# Drop firewall and SELinux for now
systemctl stop firewalld.service
systemctl disable firewalld.service
sed -i 's/enforcing/disabled/g' /etc/selinux/config
setenforce 0
# Disable IPv6 for now
echo net.ipv6.conf.default.disable_ipv6=1 > /etc/sysctl.d/disable-ipv6.conf
sysctl -p
mkdir -pv /etc/openstack-uncharted/
touch /etc/openstack-uncharted/common-setup-done
| brad-x/openstack | openstack-network/common.sh | Shell | mit | 792 |
#!/bin/sh
# file: implement.sh
#
# (c) Copyright 2008 - 2011 Xilinx, Inc. All rights reserved.
#
# This file contains confidential and proprietary information
# of Xilinx, Inc. and is protected under U.S. and
# international copyright and other intellectual property
# laws.
#
# DISCLAIMER
# This disclaimer is not a license and does not grant any
# rights to the materials distributed herewith. Except as
# otherwise provided in a valid license issued to you by
# Xilinx, and to the maximum extent permitted by applicable
# law: (1) THESE MATERIALS ARE MADE AVAILABLE "AS IS" AND
# WITH ALL FAULTS, AND XILINX HEREBY DISCLAIMS ALL WARRANTIES
# AND CONDITIONS, EXPRESS, IMPLIED, OR STATUTORY, INCLUDING
# BUT NOT LIMITED TO WARRANTIES OF MERCHANTABILITY, NON-
# INFRINGEMENT, OR FITNESS FOR ANY PARTICULAR PURPOSE; and
# (2) Xilinx shall not be liable (whether in contract or tort,
# including negligence, or under any other theory of
# liability) for any loss or damage of any kind or nature
# related to, arising under or in connection with these
# materials, including for any direct, or any indirect,
# special, incidental, or consequential loss or damage
# (including loss of data, profits, goodwill, or any type of
# loss or damage suffered as a result of any action brought
# by a third party) even if such damage or loss was
# reasonably foreseeable or Xilinx had been advised of the
# possibility of the same.
#
# CRITICAL APPLICATIONS
# Xilinx products are not designed or intended to be fail-
# safe, or for use in any application requiring fail-safe
# performance, such as life-support or safety devices or
# systems, Class III medical devices, nuclear facilities,
# applications related to the deployment of airbags, or any
# other applications that could lead to death, personal
# injury, or severe property or environmental damage
# (individually and collectively, "Critical
# Applications"). Customer assumes the sole risk and
# liability of any use of Xilinx products in Critical
# Applications, subject only to applicable laws and
# regulations governing limitations on product liability.
#
# THIS COPYRIGHT NOTICE AND DISCLAIMER MUST BE RETAINED AS
# PART OF THIS FILE AT ALL TIMES.
#
#-----------------------------------------------------------------------------
# Script to synthesize and implement the RTL provided for the clocking wizard
#-----------------------------------------------------------------------------
# Clean up the results directory
rm -rf results
mkdir results
# Copy unisim_comp.v file to results directory
cp $XILINX/verilog/src/iSE/unisim_comp.v ./results/
# Synthesize the Verilog Wrapper Files
echo 'Synthesizing Clocking Wizard design with XST'
xst -ifn xst.scr
mv clocks_exdes.ngc results/
# Copy the constraints files generated by Coregen
echo 'Copying files from constraints directory to results directory'
cp ../example_design/clocks_exdes.ucf results/
cd results
echo 'Running ngdbuild'
ngdbuild -uc clocks_exdes.ucf clocks_exdes
echo 'Running map'
map -timing clocks_exdes -o mapped.ncd
echo 'Running par'
par -w mapped.ncd routed mapped.pcf
echo 'Running trce'
trce -e 10 routed -o routed mapped.pcf
echo 'Running design through bitgen'
bitgen -w routed
echo 'Running netgen to create gate level model for the clocking wizard example design'
netgen -ofmt vhdl -sim -tm clocks_exdes -w routed.ncd routed.vhd
cd ..
| chrismasters/fpga-space-invaders | project/ipcore_dir/clocks/implement/implement.sh | Shell | mit | 3,387 |
"$CLOUD_REBUILD" MsgLinux 32 dll debug same | xylsxyls/xueyelingshuang | src/MsgLinux/version_debug.sh | Shell | mit | 43 |
export PATH="/home/marcel/bin/:/usr/bin:/usr/local/bin:/usr/lib/jvm/java-7-openjdk/bin/:$PATH"
export XDG_CONFIG_HOME="$HOME/.config"
export EDITOR="/usr/sbin/vim"
export VISUAL="/usr/sbin/vim"
export CHEATCOLORS=true
export CHERE_INVOKING=1
export TERM=xterm-256color
# Prevent "Couldn't connect to accessibility bus" warnings
export NO_AT_BRIDGE=1
export NODE_ENV=development
export LANG=en_US.UTF-8
export LC_ALL=en_US.UTF-8
export FZF_DEFAULT_COMMAND='ag -l \
--nocolor \
--hidden \
--follow \
--skip-vcs-ignores \
--ignore .git \
--ignore node_modules \
--ignore public \
--ignore build \
--ignore vendor \
--ignore env \
--ignore __pycache__ \
-g ""'
export KEYTIMEOUT=1
#
# NPM
#
NPM_PACKAGES="/home/marcel/.npm-packages"
export PATH="$PATH:$NPM_PACKAGES/bin"
#
# Ruby
#
export PATH="$PATH:/home/marcel/.gem/ruby/2.4.0/bin"
#
# Weechat
#
export WEECHAT_HOME="$XDG_CONFIG_HOME/weechat"
| Iambecomeroot/dotfiles | omzsh/exports.sh | Shell | mit | 931 |
#!/usr/bin/env bash
cd "$(dirname "${BASH_SOURCE[0]}")" \
&& . "../utils.sh"
install_apps() {
brew_install "FFmpeg" "ffmpeg"
brew_install "Git" "git"
brew_install "Yarn" "yarn"
brew_install "MidnightCommander" "mc"
brew_install "tmux" "tmux"
brew_install "tmux (pasteboard)" "reattach-to-user-namespace"
brew_install "Web Font Tools: TTF/OTF → WOFF (Zopfli)" "sfnt2woff-zopfli" "bramstein/webfonttools"
brew_install "Web Font Tools: TTF/OTF → WOFF" "sfnt2woff" "bramstein/webfonttools"
brew_install "Web Font Tools: WOFF2" "woff2" "bramstein/webfonttools"
brew_install "ShellCheck" "shellcheck"
brew_install "ImageMagick" "imagemagick --with-webp"
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
main() {
print_in_purple "\n Miscellaneous\n\n"
install_apps
printf "\n"
brew_cleanup
}
main
| abogdan/dotfiles | setup/misc.sh | Shell | mit | 893 |
#!/usr/bin/env bash
source script.sh
function catch() {
case $1 in
0) echo "setup succeeded"
;;
1) echo "script.sh: failed @ clonePhoneCatRepo@14()";
;;
2) echo "script.sh: failed @ checkoutPhoneCatRepoStep12()";
;;
4) echo "script.sh: failed @ npmInstallPhoneCatRepo()";
;;
5) echo "script.sh; failed @ npmInstallPhoneCatRepo()";
;;
6) echo "script.sh; failed @ updatePhoneCatRepoWebDriver()";
;;
*) echo "fubar! Something went wrong."
;;
esac
exit $1
}
# try
(
clonePhoneCatRepo@14 || exit 1;
checkoutPhoneCatRepoStep12 || exit 2;
npmInstallPhoneCatRepo || exit 4;
bowerInstallPhoneCatRepo || exit 5;
updatePhoneCatRepoWebDriver || exit 6;
)
catch $?;
| bradyhouse/house | fiddles/angular/fiddle-0016-PhoneCatStep12/setup.sh | Shell | mit | 835 |
#!/bin/bash
#bootstrap a minimal chef environment
# installs:
# chef
# berkshelf
#reference: http://gettingstartedwithchef.com
DOTCHEF=~/.chef
#Git repo for your cookbooks
REPODIR=~/chef-repo
#Location of ruby install
RUBY_HOME=/opt/chef/embedded
function set_redhat {
PLATFORM="redhat"
PKG_INSTALLER="yum"
PKG_UNINSTALLER="yum"
PKG_INSTALL_ARGS="-y install"
PKG_UNINSTALL_ARGS="-y remove"
}
function set_ubuntu {
PLATFORM="ubuntu"
PKG_INSTALLER="apt-get"
PKG_UNINSTALLER="dpkg"
PKG_INSTALL_ARGS="-y install"
PKG_UNINSTALL_ARGS="-P"
}
if [ -f "/etc/redhat-release" ]; then
set_redhat
elif [ -f "/etc/lsb-release" ]; then
set_ubuntu
fi
#Are we running as root? If not, wrap commands with sudo
function check_root {
id=`id -u`
if [ "$id" != 0 ] ; then
SUDO="sudo"
else
SUDO=
fi
}
function get_chef {
curl -s -L https://www.opscode.com/chef/install.sh | sudo bash
}
#set up a base chef recipe directory
function get_base_repo {
local _dest=$1
if [ ! -d "${_dest}" ]; then
wget http://github.com/opscode/chef-repo/tarball/master
tar -zxvf master && mv opscode-chef-repo* ${_dest}
rm -f master opscode-chef-repo*
else
echo "${_dest} already exists, skipping chef-repo copy"
fi
}
#configure the .chef/knife.rb file to the location of your cookbook directory
function dotChef_cookbook {
local _dir=$1
local _repodir=$2
if [ ! -d "$1" ] ; then
mkdir "$1" && echo "cookbook_path [ '${_repodir}/cookbooks' ]" > "$1"/knife.rb
else
echo "${_dir} already exists, skipping knife.rb config"
fi
}
#installs OS packages
function install_packages {
${SUDO} ${PKG_INSTALLER} ${PKG_INSTALL_ARGS} $@
}
#Installs ubuntu build tools
function install_ubuntu_build {
install_packages build-essential git-core libxslt1-dev libxml2-dev
}
#Installs rhel build tools
function install_redhat_build {
install_packages gcc git libxml2 libxml2-devel libxslt libxslt-devel
}
#Installs gems using the chef embedded ruby
function chef_install_gem {
local _gem=$1
gem_opts="--no-ri --no-rdoc"
${SUDO} ${RUBY_HOME}/bin/gem install ${_gem} ${gem_opts}
}
check_root
get_chef
get_base_repo ${REPODIR}
dotChef_cookbook ${DOTCHEF} ${REPODIR}
if [ "$PLATFORM" = "ubuntu" ]; then
install_ubuntu_build
elif [ "$PLATFORM" = "redhat" ]; then
install_redhat_build
fi
chef_install_gem berkshelf
| stevendborrelli/chef-init | bootstrap.sh | Shell | mit | 2,345 |
#!/bin/bash
sudo yum -y install epel-release
sudo yum -y install gcc curl wget
sudo yum -y install kernel ntp logwatch
sudo yum -y install mod24_ssl openssl openssl-devel
sudo yum -y install httpd
| shifumin/setupfiles | setup_yum_packages.sh | Shell | mit | 199 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for RHSA-2016:1267
#
# Security announcement date: 2016-06-21 21:34:44 UTC
# Script generation date: 2017-01-01 21:17:17 UTC
#
# Operating System: Red Hat 6
# Architecture: x86_64
#
# Vulnerable packages fixed in version:
# - setroubleshoot.x86_64:3.0.47-12.el6_8
# - setroubleshoot-debuginfo.x86_64:3.0.47-12.el6_8
# - setroubleshoot-server.x86_64:3.0.47-12.el6_8
# - setroubleshoot-doc.x86_64:3.0.47-12.el6_8
#
# Last versions recommended by security team:
# - setroubleshoot.x86_64:3.0.47-12.el6_8
# - setroubleshoot-debuginfo.x86_64:3.0.47-12.el6_8
# - setroubleshoot-server.x86_64:3.0.47-12.el6_8
# - setroubleshoot-doc.x86_64:3.0.47-12.el6_8
#
# CVE List:
# - CVE-2016-4444
# - CVE-2016-4445
# - CVE-2016-4446
# - CVE-2016-4989
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo yum install setroubleshoot.x86_64-3.0.47 -y
sudo yum install setroubleshoot-debuginfo.x86_64-3.0.47 -y
sudo yum install setroubleshoot-server.x86_64-3.0.47 -y
sudo yum install setroubleshoot-doc.x86_64-3.0.47 -y
| Cyberwatch/cbw-security-fixes | Red_Hat_6/x86_64/2016/RHSA-2016:1267.sh | Shell | mit | 1,165 |
alias reload!='. ~/.zshrc'
alias p="cd $PROJECTS"
alias h="cd ~"
alias d="cd ~/dotfiles"
alias g="git"
alias v="vim"
# ssh into vuw server
# for auto-password entry
# 1) ssh-keygen -t rsa -b 2048
# a) specify file to save key to
# b) use blank passphrase
# 2) ssh-copy-id id@server
alias ssh-vuw="ssh [email protected]"
alias ssh-banana="ssh [email protected]"
alias ssh-rasp="ssh [email protected]"
alias ssh-notes="ssh -i $LIGHTSAIL [email protected]"
alias ssh-notes-ci="ssh -i $HOME/.local/lib/aws/ec2.pem [email protected]"
alias cls='clear' # Good 'ol Clear Screen command
| willhs/dotfiles | zsh/aliases.zsh | Shell | mit | 657 |
#!/bin/bash
# experiment 5: testing bigger LSTM network on 4096 2d and 3d matrix with pure random walk
th main.lua -w world_4096_3d.txt -model lstm -m model_4096_3d_lstm512_b200_l2 -l 2 -n 512 -seqlen 50 -batchsize 200 -maxiter 10000 -dropout 0 -maxnoturnsteps 1
th main.lua -w world_4096_2d.txt -model lstm -m model_4096_2d_lstm512_b200_l2 -l 2 -n 512 -seqlen 50 -batchsize 200 -maxiter 10000 -dropout 0 -maxnoturnsteps 1
th main.lua -w world_4096_3d.txt -model lstm -m model_4096_3d_lstm256_b200_l2 -l 2 -n 256 -seqlen 50 -batchsize 200 -maxiter 10000 -dropout 0 -maxnoturnsteps 1
th main.lua -w world_4096_2d.txt -model lstm -m model_4096_2d_lstm256_b200_l2 -l 2 -n 256 -seqlen 50 -batchsize 200 -maxiter 10000 -dropout 0 -maxnoturnsteps 1
th main.lua -w world_4096_3d.txt -model lstm -m model_4096_3d_lstm128_b200_l2 -l 2 -n 128 -seqlen 50 -batchsize 200 -maxiter 10000 -dropout 0 -maxnoturnsteps 1
th main.lua -w world_4096_2d.txt -model lstm -m model_4096_2d_lstm128_b200_l2 -l 2 -n 128 -seqlen 50 -batchsize 200 -maxiter 10000 -dropout 0 -maxnoturnsteps 1
th main.lua -w world_4096_3d.txt -model lstm -m model_4096_3d_lstm64_b200_l2 -l 2 -n 64 -seqlen 50 -batchsize 200 -maxiter 10000 -dropout 0 -maxnoturnsteps 1
th main.lua -w world_4096_2d.txt -model lstm -m model_4096_2d_lstm64_b200_l2 -l 2 -n 64 -seqlen 50 -batchsize 200 -maxiter 10000 -dropout 0 -maxnoturnsteps 1
th main.lua -w world_4096_3d.txt -model lstm -m model_4096_3d_lstm32_b200_l2 -l 2 -n 32 -seqlen 50 -batchsize 200 -maxiter 10000 -dropout 0 -maxnoturnsteps 1
th main.lua -w world_4096_2d.txt -model lstm -m model_4096_2d_lstm32_b200_l2 -l 2 -n 32 -seqlen 50 -batchsize 200 -maxiter 10000 -dropout 0 -maxnoturnsteps 1
| andreaskoepf/matrixwalk | run_experiment.sh | Shell | mit | 1,697 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for RHSA-2016:1944
#
# Security announcement date: 2016-09-28 13:40:22 UTC
# Script generation date: 2017-01-16 21:17:40 UTC
#
# Operating System: Red Hat 6
# Architecture: x86_64
#
# Vulnerable packages fixed in version:
# - bind-debuginfo.i686:9.8.2-0.47.rc1.el6_8.1
# - bind-libs.i686:9.8.2-0.47.rc1.el6_8.1
# - bind-debuginfo.x86_64:9.8.2-0.47.rc1.el6_8.1
# - bind-libs.x86_64:9.8.2-0.47.rc1.el6_8.1
# - bind-utils.x86_64:9.8.2-0.47.rc1.el6_8.1
# - bind-devel.i686:9.8.2-0.47.rc1.el6_8.1
# - bind.x86_64:9.8.2-0.47.rc1.el6_8.1
# - bind-chroot.x86_64:9.8.2-0.47.rc1.el6_8.1
# - bind-devel.x86_64:9.8.2-0.47.rc1.el6_8.1
# - bind-sdb.x86_64:9.8.2-0.47.rc1.el6_8.1
#
# Last versions recommended by security team:
# - bind-debuginfo.i686:9.8.2-0.47.rc1.el6_8.4
# - bind-libs.i686:9.8.2-0.47.rc1.el6_8.4
# - bind-debuginfo.x86_64:9.8.2-0.47.rc1.el6_8.4
# - bind-libs.x86_64:9.8.2-0.47.rc1.el6_8.4
# - bind-utils.x86_64:9.8.2-0.47.rc1.el6_8.4
# - bind-devel.i686:9.8.2-0.47.rc1.el6_8.4
# - bind.x86_64:9.8.2-0.47.rc1.el6_8.4
# - bind-chroot.x86_64:9.8.2-0.47.rc1.el6_8.4
# - bind-devel.x86_64:9.8.2-0.47.rc1.el6_8.4
# - bind-sdb.x86_64:9.8.2-0.47.rc1.el6_8.4
#
# CVE List:
# - CVE-2016-2776
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo yum install bind-debuginfo.i686-9.8.2 -y
sudo yum install bind-libs.i686-9.8.2 -y
sudo yum install bind-debuginfo.x86_64-9.8.2 -y
sudo yum install bind-libs.x86_64-9.8.2 -y
sudo yum install bind-utils.x86_64-9.8.2 -y
sudo yum install bind-devel.i686-9.8.2 -y
sudo yum install bind.x86_64-9.8.2 -y
sudo yum install bind-chroot.x86_64-9.8.2 -y
sudo yum install bind-devel.x86_64-9.8.2 -y
sudo yum install bind-sdb.x86_64-9.8.2 -y
| Cyberwatch/cbw-security-fixes | Red_Hat_6/x86_64/2016/RHSA-2016:1944.sh | Shell | mit | 1,859 |
#!/usr/bin/env bash
# Use this instead of Nodegit. It's big and unwieldy.
MISC_ASSET_PATH=$(grep "const MISC_PATH" _scripts/eleventy/config.js | cut -d\" -f2)
# Now sync the static assets
if [[ ! -e "$MISC_ASSET_PATH" ]]; then
echo "Could could find the misc assets folder."
exit 1
fi
DATE=$(date "+%Y-%m-%dT%H.%M.%S")
# Only building and pushing misc assets. Site is deployed by CircleCI.
echo "Pushing misc assets"
yarn borg
yarn push:misc
# Then commit stuff to Github. This kicks off a CI/CD build.
echo "Pushing log to remote repo"
git pull origin master
git add .
git commit -m "${DATE}"
git push origin master
| afreeorange/log | _scripts/push.sh | Shell | mit | 631 |
#! /usr/bin/env sh
set -eu
gnustep_install() {
git clone -b 1.9 https://github.com/gnustep/libobjc2.git
mkdir libobjc2/build
cd libobjc2/build
export CC="clang"
export CXX="clang++"
cmake -DCMAKE_INSTALL_PREFIX:PATH=$HOME/libobjc2_staging ../
make install
}
for arch in $IOS_ARCHS; do
rustup target add "${arch}-apple-ios"
done
if [ -n "$IOS_ARCHS" ]; then
curl -LO https://github.com/SSheldon/rust-test-ios/releases/download/0.1.1/rust-test-ios
chmod +x rust-test-ios
fi
if [ "$TRAVIS_OS_NAME" = "linux" ]; then
gnustep_install
fi
| SSheldon/rust-objc | travis_install.sh | Shell | mit | 581 |
#!/bin/bash
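# Precompile all widgets/*.handlebars templates into js/templates.js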
handlebars `ls widgets/*.handlebars` -f js/templates.js
| 42trees/godash | buildtemplates.sh | Shell | mit | 68 |
#!/usr/bin/env sh
set -e
git stash save
gulp --prod
REV=`git rev-parse HEAD`
git checkout gh-pages
git rm -rf .
git checkout master -- .gitignore
mv dist/* . && rm -rf dist # might need to be changed if you have hidden files
git add .
git commit -m "deployed $REV"
git push --all
git checkout master
git stash pop
| niko-matses/nikomatses.com | deploy.sh | Shell | mit | 308 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for RHSA-2016:1217
#
# Security announcement date: 2016-06-08 17:06:49 UTC
# Script generation date: 2017-01-25 21:23:43 UTC
#
# Operating System: Red Hat 5
# Architecture: x86_64
#
# Vulnerable packages fixed in version:
# - firefox.i386:45.2.0-1.el5_11
# - firefox-debuginfo.i386:45.2.0-1.el5_11
# - firefox.x86_64:45.2.0-1.el5_11
# - firefox-debuginfo.x86_64:45.2.0-1.el5_11
#
# Last versions recommended by security team:
# - firefox.i386:45.7.0-1.el5_11
# - firefox-debuginfo.i386:45.7.0-1.el5_11
# - firefox.x86_64:45.7.0-1.el5_11
# - firefox-debuginfo.x86_64:45.7.0-1.el5_11
#
# CVE List:
# - CVE-2016-2818
# - CVE-2016-2819
# - CVE-2016-2821
# - CVE-2016-2822
# - CVE-2016-2828
# - CVE-2016-2831
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo yum install firefox.i386-45.7.0 -y
sudo yum install firefox-debuginfo.i386-45.7.0 -y
sudo yum install firefox.x86_64-45.7.0 -y
sudo yum install firefox-debuginfo.x86_64-45.7.0 -y
| Cyberwatch/cbw-security-fixes | Red_Hat_5/x86_64/2016/RHSA-2016:1217.sh | Shell | mit | 1,106 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for USN-2191-1
#
# Security announcement date: 2014-05-01 00:00:00 UTC
# Script generation date: 2017-01-01 21:03:47 UTC
#
# Operating System: Ubuntu 12.04 LTS
# Architecture: i386
#
# Vulnerable packages fixed in version:
# - icedtea-6-jre-cacao:6b31-1.13.3-1ubuntu1~0.12.04.2
# - openjdk-6-jre-headless:6b31-1.13.3-1ubuntu1~0.12.04.2
# - openjdk-6-jre:6b31-1.13.3-1ubuntu1~0.12.04.2
# - openjdk-6-jre-lib:6b31-1.13.3-1ubuntu1~0.12.04.2
# - icedtea-6-jre-jamvm:6b31-1.13.3-1ubuntu1~0.12.04.2
# - openjdk-6-jre-zero:6b31-1.13.3-1ubuntu1~0.12.04.2
# - icedtea-6-jre-jamvm:6b31-1.13.3-1ubuntu1~0.12.04.2
# - openjdk-6-jre:6b31-1.13.3-1ubuntu1~0.12.04.2
# - openjdk-6-jre-headless:6b31-1.13.3-1ubuntu1~0.12.04.2
# - openjdk-6-jre-zero:6b31-1.13.3-1ubuntu1~0.12.04.2
# - openjdk-6-jre-lib:6b31-1.13.3-1ubuntu1~0.12.04.2
#
# Last versions recommended by security team:
# - icedtea-6-jre-cacao:6b40-1.13.12-0ubuntu0.12.04.2
# - openjdk-6-jre-headless:6b40-1.13.12-0ubuntu0.12.04.2
# - openjdk-6-jre:6b40-1.13.12-0ubuntu0.12.04.2
# - openjdk-6-jre-lib:6b40-1.13.12-0ubuntu0.12.04.2
# - icedtea-6-jre-jamvm:6b40-1.13.12-0ubuntu0.12.04.2
# - openjdk-6-jre-zero:6b31-1.13.3-1ubuntu1~0.12.04.2
# - icedtea-6-jre-jamvm:6b40-1.13.12-0ubuntu0.12.04.2
# - openjdk-6-jre:6b40-1.13.12-0ubuntu0.12.04.2
# - openjdk-6-jre-headless:6b40-1.13.12-0ubuntu0.12.04.2
# - openjdk-6-jre-zero:6b31-1.13.3-1ubuntu1~0.12.04.2
# - openjdk-6-jre-lib:6b40-1.13.12-0ubuntu0.12.04.2
#
# CVE List:
# - CVE-2014-0429
# - CVE-2014-0446
# - CVE-2014-0451
# - CVE-2014-0452
# - CVE-2014-0456
# - CVE-2014-0457
# - CVE-2014-0458
# - CVE-2014-0461
# - CVE-2014-0462
# - CVE-2014-2397
# - CVE-2014-2405
# - CVE-2014-2412
# - CVE-2014-2414
# - CVE-2014-2421
# - CVE-2014-2423
# - CVE-2014-2427
# - CVE-2014-0453
# - CVE-2014-0460
# - CVE-2014-0459
# - CVE-2014-1876
# - CVE-2014-2398
# - CVE-2014-2403
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo apt-get install --only-upgrade icedtea-6-jre-cacao=6b40-1.13.12-0ubuntu0.12.04.2 -y
sudo apt-get install --only-upgrade openjdk-6-jre-headless=6b40-1.13.12-0ubuntu0.12.04.2 -y
sudo apt-get install --only-upgrade openjdk-6-jre=6b40-1.13.12-0ubuntu0.12.04.2 -y
sudo apt-get install --only-upgrade openjdk-6-jre-lib=6b40-1.13.12-0ubuntu0.12.04.2 -y
sudo apt-get install --only-upgrade icedtea-6-jre-jamvm=6b40-1.13.12-0ubuntu0.12.04.2 -y
sudo apt-get install --only-upgrade openjdk-6-jre-zero=6b31-1.13.3-1ubuntu1~0.12.04.2 -y
sudo apt-get install --only-upgrade icedtea-6-jre-jamvm=6b40-1.13.12-0ubuntu0.12.04.2 -y
sudo apt-get install --only-upgrade openjdk-6-jre=6b40-1.13.12-0ubuntu0.12.04.2 -y
sudo apt-get install --only-upgrade openjdk-6-jre-headless=6b40-1.13.12-0ubuntu0.12.04.2 -y
sudo apt-get install --only-upgrade openjdk-6-jre-zero=6b31-1.13.3-1ubuntu1~0.12.04.2 -y
sudo apt-get install --only-upgrade openjdk-6-jre-lib=6b40-1.13.12-0ubuntu0.12.04.2 -y
| Cyberwatch/cbw-security-fixes | Ubuntu_12.04_LTS/i386/2014/USN-2191-1.sh | Shell | mit | 3,101 |
#!/bin/bash
sudo cp bm_util /usr/bin/bm_util
if [[ $? -ne 0 ]]; then
echo -e "Couldn't copy bm_util to /usr/bin/. Installation failed."
exit 1
fi
cp bm_bash ~/.bm_bash
if [[ $? -ne 0 ]]; then
echo -e "Couldn't copy bm_bash to ~/.bm_bash. Installation failed."
exit 1
fi
grep --quiet "^source ~/.bm_bash" ~/.bashrc;
if [[ $? -ne 0 ]]; then
echo -e "\nsource ~/.bm_bash" >> ~/.bashrc
fi
echo Installation Successful.
echo Please restart the terminal or do "source ~/.bashrc"
| shrsv/bm | install.sh | Shell | mit | 499 |
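# Build pci_maker.exe from pci_maker.c and strxtoi.c with C99, -Ofast, and all warnings treated as errors.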
gcc -std=c99 -Ofast -Wall -Werror -pedantic -o pci_maker.exe pci_maker.c strxtoi.c
| korun/dcs-system | pci/compile.sh | Shell | mit | 83 |
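# Clone the base16-shell color scripts and the prezto Zsh framework into their expected locations under $HOME.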
git clone https://github.com/chriskempson/base16-shell.git $HOME/.config/base16-shell
git clone --recursive https://github.com/sorin-ionescu/prezto.git $HOME/.zprezto
| DiogoDoreto/dotfiles | zsh/install.sh | Shell | mit | 167 |
#!/bin/sh
PLATFORM="linux"
TREE_DIR="../../tree/gtest"
SRC_DIR="$TREE_DIR/src"
BUILD_DIR="build/$PLATFORM"
INSTALL_DIR="tmp/$PLATFORM"
SRC_PATH="$(pwd)/$SRC_DIR"
INSTALL_PATH="$(pwd)/$INSTALL_DIR"
if [ ! -d "$SRC_PATH" ]; then
echo "SOURCE NOT FOUND!"
exit 1
fi
# ---
TOOLCHAIN_FILE="$CROSS_PATH/core/cmake/toolchains/linux.cmake"
cmake -H"$SRC_DIR" -B"$BUILD_DIR" \
-DCMAKE_TOOLCHAIN_FILE="$TOOLCHAIN_FILE" -G Ninja \
-DCMAKE_BUILD_TYPE=Release \
-DLIBRARY_OUTPUT_PATH="$INSTALL_PATH/lib"
if [ $? != 0 ]; then
echo "CONFIGURATION FAILED!"
exit 1
fi
# ---
rm -rf "$INSTALL_PATH"
cmake --build "$BUILD_DIR"
if [ $? != 0 ]; then
echo "BUILD FAILED!"
exit 1
fi
rm -rf "$TREE_DIR/$PLATFORM/lib"
mkdir -p "$TREE_DIR/$PLATFORM/lib"
mv "tmp/$PLATFORM/lib" "$TREE_DIR/$PLATFORM"
cd "$TREE_DIR/$PLATFORM"
ln -s "../src/include"
| arielm/chronotext-cross | deps/gtest/build.linux.sh | Shell | mit | 854 |
# generated from catkin/cmake/env-hooks/05.catkin-test-results.sh.develspace.in
export CATKIN_TEST_RESULTS_DIR="/home/ubuntu/ros/catkin_ws/src/rover5_ros/build/test_results"
export ROS_TEST_RESULTS_DIR="$CATKIN_TEST_RESULTS_DIR"
| chcbaram/rover5_ros | build/devel/etc/catkin/profile.d/05.catkin-test-results.sh | Shell | mit | 230 |
#!/usr/bin/env bash
script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
source $script_dir/../external/libshellscript/libshellscript/libshellscript.sh
install_path=$HOME/.shell/kube-ps1
mkdir -p $(dirname $install_path)
if ! [[ -e $install_path ]]; then
print_information "installing kube-ps1"
git clone https://github.com/jonmosco/kube-ps1.git $install_path
else
cd $install_path
git pull
fi
| colajam93/dotfiles | kube-ps1/install.sh | Shell | mit | 422 |
export GOPATH="${HOME}/.go"
export GOROOT="$(brew --prefix golang)/libexec"
path=("${GOPATH}/bin:${GOROOT}/bin" $path)
| cheremushki/dotfiles | golang/path.zsh | Shell | mit | 119 |
#!/bin/bash
# This script is designed to work with ubuntu 16.04 LTS
# ensure system is updated and has basic build tools
# sudo apt-get update
# sudo apt-get --assume-yes upgrade
# sudo apt-get --assume-yes install tmux build-essential gcc g++ make binutils
# sudo apt-get --assume-yes install software-properties-common
# download and install GPU drivers
CUDA_REPO_PKG=http://developer.download.nvidia.com/compute/cuda/repos/ubuntu1604/x86_64/cuda-repo-ubuntu1604_8.0.61-1_amd64.deb
wget "$CUDA_REPO_PKG" -O cuda-repo.deb
sudo dpkg -i cuda-repo.deb && rm -f cuda-repo.deb
sudo apt-get update
sudo apt-get -y install cuda
sudo modprobe nvidia
nvidia-smi
# install and configure theano
pip install theano
echo "[global]
device = gpu
floatX = float32
[cuda]
root = /usr/local/cuda" > ~/.theanorc
# install and configure keras
pip install keras==1.2.2
mkdir ~/.keras
echo '{
"image_dim_ordering": "th",
"epsilon": 1e-07,
"floatx": "float32",
"backend": "theano"
}' > ~/.keras/keras.json
# install cudnn libraries
wget "http://platform.ai/files/cudnn.tgz" -O "cudnn.tgz"
tar -zxf cudnn.tgz
cd cuda
sudo cp lib64/* /usr/local/cuda/lib64/
sudo cp include/* /usr/local/cuda/include/
# alternative way to install cudnn from official resource but take more spaces
# ML_REPO_PKG=http://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1604/x86_64/nvidia-machine-learning-repo-ubuntu1604_1.0.0-1_amd64.deb
# wget "$ML_REPO_PKG" -O ml-repo.deb
# sudo dpkg -i ml-repo.deb && rm -f ml-repo.deb
# sudo apt-get update
# sudo apt-get install digits # including cudnn
| stevenliuyi/dotfiles | install/ubuntu_gpu.sh | Shell | mit | 1,593 |
#!/bin/sh
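# Transpile src/ with Babel into .tmp/, then bundle the output as the "Sandy" module with Browserify.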
./node_modules/.bin/babel src/ --out-dir .tmp/
./node_modules/.bin/browserify -r ./.tmp/sandy.js:Sandy > dist/sandy.js
| parambirs/aui-demos | node_modules/@atlassian/aui/node_modules/sandy/build/build.sh | Shell | mit | 129 |
#!/bin/bash
#Usage ./run_testsolver.sh [problem] [nsims] [reps] [verbosity] [min_time] [max_time] [other_flags] [algorithm]
problem=$1
nsims=$2
reps=$3
verbosity=$4
min_time=$5
max_time=$6
other_flags=$7
algorithm=$8
echo "../testsolver.out $problem --heuristic=hmin --precompute-h --n=$nsims --reps=$reps --v=$verbosity --algorithm=$algorithm --min_time=$min_time --max_time=$max_time $other_flags"
../testsolver.out $problem \
--heuristic=hmin --precompute-h \
--n=$nsims --reps=$reps --v=$verbosity \
--algorithm=$algorithm \
    --min_time=$min_time --max_time=$max_time $other_flags
| luisenp/mdp-lib | scripts/run_testsolver.sh | Shell | mit | 594 |
#!/bin/bash
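# Render the countries layer from naturalearth.gpkg over a styled ocean layer and write the map to vector_draw.png.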
geoc vector draw -f vector_draw.png -i naturalearth.gpkg -l countries -m "layertype=layer file=naturalearth.gpkg layername=ocean style=ocean.sld"
| jericks/geoc | examples/vector_draw.sh | Shell | mit | 160 |
#!/bin/bash
# Automatically change a Blink(1)'s color based on system load
# Using which for convenience at the moment
# blink1-tool needs to be in your PATH
BLINK1_TOOL=`which blink1-tool`
# How long to sleep between polling
# If this is too low and the system is borderline it would swap
# back and forth. Also, since we are watching the load, there is
# no point in contributing more to it than necessary ;^)
SLEEP=5
# thresholds in percentage
THRESHOLD_MEM=25
THRESHOLD_LOAD=15
# total available memory and physical cores
TOTAL_MEM=$(free | grep Mem | awk '{print $2}')
TOTAL_CORE=$(grep 'model name' /proc/cpuinfo | wc -l)
# Calculate a value between 0-255 based on the arg1/arg2
get_value()
{
RESULT=`echo ${1} ${2} | awk '{print int((255 * ($1)/$2))}'`
echo $RESULT
}
invert_value()
{
RESULT=`echo ${1} | awk '{print int(255-($1))}'`
echo $RESULT
}
# return a value between 0-255 to represent used memory
memory()
{
free_mem=$(free | grep buffers/cach | awk '{print $3}')
echo $(get_value $free_mem $TOTAL_MEM)
}
# return a value between 0-255 to represent current load
load()
{
cur_load=$(uptime | awk '{ print $10 }' | cut -c1-4)
echo $(get_value $cur_load $TOTAL_CORE)
}
##signal trapping
cleanup()
{
# Turn the Blink(1) off
$BLINK1_TOOL --off > /dev/null 2>&1
exit $?
}
# trap keyboard interrupt (CTRL-c) or a SIGTERM (kill)
trap cleanup SIGINT SIGTERM
echo "Monitoring load with threasholds: memory>${THRESHOLD_MEM} (led 1) and load>${THRESHOLD_LOAD} (led 2)"
echo "CTRL-C to exit."
#infinite loop, stop with CTRL-c
while true; do
cur_mem=$(memory)
cur_load=$(load)
change_mem=$(echo "$cur_mem > $THRESHOLD_MEM*2.55" | bc)
#echo "mem =$cur_mem change=$change_mem"
change_load=$(echo "$cur_load > $THRESHOLD_LOAD*2.55" | bc)
#echo "load=$cur_load change=$change_load"
if [ "$change_mem" != "0" ]; then
$BLINK1_TOOL --hsb 255,255,$cur_mem --led 1 > /dev/null 2>&1
fi
if [ "$change_load" != "0" ]; then
$BLINK1_TOOL --hsb 255,255,$cur_load --led 2 > /dev/null 2>&1
fi
sleep $SLEEP
done
| peterfpeterson/dotfiles | bin/load-to-blink1.sh | Shell | mit | 2,086 |
#!/bin/sh
VERSION=5.3.0
tar --files-from=file.list -xjvf ../gcc-$VERSION.tar.bz2
mv gcc-$VERSION gcc-$VERSION-orig
cp -rf ./gcc-$VERSION-new ./gcc-$VERSION
diff -b --unified -Nr gcc-$VERSION-orig gcc-$VERSION > gcc-$VERSION-a2x-newlib.patch
mv gcc-$VERSION-a2x-newlib.patch ../../patches
rm -rf ./gcc-$VERSION
rm -rf ./gcc-$VERSION-orig
| radix-platform/toolchains | sources/GNU/gcc/gcc-5.3.0/create-5.3.0-a2x-newlib-patch/create.patch.sh | Shell | mit | 345 |
#!/usr/bin/env bash
WORKING_DIR=/your/path/to/MyDict
PYTHON=python3
${PYTHON} ${WORKING_DIR}/main.py
| zhuzhenpeng/MyDict | mydict.sh | Shell | mit | 102 |
#!/bin/bash
# Script to provide IP Address score as per projecthoneypot
# Input your HTTP:bl API Key from the site in the "KEY" field below:
KEY='';
SUFFIX="dnsbl.httpbl.org";
echo "Enter IP Address:";
read IPADDRESS;
REVERSEIP=$(echo "$IPADDRESS" | awk -F. '{print $4"."$3"."$2"."$1}');
RESULT=$(dig +short "$KEY.$REVERSEIP.$SUFFIX");
if [[ -z "$RESULT" ]]; then
echo "IP Address is Clean as per ProjectHoneyPot";
exit;
fi
# 2nd Octet Provides the no. of days back the IP Address was seen by ProjectHoneyPot
LASTSEEN=$(echo $RESULT | awk -F. '{print $2}');
echo "Last Seen = $LASTSEEN days back!";
# 4th Octet Provides the Category
CATEGORYOCTET=$(echo $RESULT | awk -F. '{print $4}');
if [[ $CATEGORYOCTET -eq 0 ]]; then
CATEGORY=SearchEngine;
elif [[ $CATEGORYOCTET -eq 1 ]]; then
CATEGORY=Suspicious;
elif [[ $CATEGORYOCTET -eq 2 ]]; then
CATEGORY=Harvester;
elif [[ $CATEGORYOCTET -eq 4 ]]; then
CATEGORY=CommentSpammer;
fi
echo "Category = $CATEGORY";
# 3rd Octet provides the Threat Score or the Search Engine Name if Category is Search Engine
SCORE=$(echo $RESULT | awk -F. '{print $3}');
if [[ $CATEGORYOCTET -eq 0 ]]; then
if [[ $SCORE -eq 0 ]]; then
SEARCHENGINE=Undocumented;
elif [[ $SCORE -eq 1 ]]; then
SEARCHENGINE=AltaVista;
elif [[ $SCORE -eq 2 ]]; then
SEARCHENGINE=Ask;
elif [[ $SCORE -eq 3 ]]; then
SEARCHENGINE=Baidu;
elif [[ $SCORE -eq 4 ]]; then
SEARCHENGINE=Excite;
elif [[ $SCORE -eq 5 ]]; then
SEARCHENGINE=Google;
elif [[ $SCORE -eq 6 ]]; then
SEARCHENGINE=Looksmart;
elif [[ $SCORE -eq 7 ]]; then
SEARCHENGINE=Lycos;
elif [[ $SCORE -eq 8 ]]; then
SEARCHENGINE=MSN;
elif [[ $SCORE -eq 9 ]]; then
SEARCHENGINE=Yahoo;
elif [[ $SCORE -eq 10 ]]; then
SEARCHENGINE=Cuil;
elif [[ $SCORE -eq 11 ]]; then
SEARCHENGINE=InfoSeek;
elif [[ $SCORE -eq 12 ]]; then
SEARCHENGINE=Miscellaneous;
fi
echo "Search Engine Name = $SEARCHENGINE";
else
echo "Threat Score = $SCORE"
fi
| aarvee11/ProjectHoneyPotTool | checkip.sh | Shell | mit | 1,998 |
source "$ZINIT_HOME/$ZINIT_BIN_DIR_NAME/zinit.zsh"
autoload -Uz _zinit
(( ${+_comps} )) && _comps[zinit]=_zinit
zinit light "zsh-users/zsh-autosuggestions"
zinit light "zdharma/fast-syntax-highlighting"
zinit light "zsh-users/zsh-completions"
zinit light "zsh-users/zsh-history-substring-search"
zinit load "sorin-ionescu/prezto"
zinit snippet OMZ::plugins/dotenv/dotenv.plugin.zsh
zinit ice atload"zpcdreplay" atclone'./zplug.zsh'
zinit light g-plane/zsh-yarn-autocompletions
zpcompinit
| atnanasi/dotfiles | .zsh/rc/30-zinit.zsh | Shell | mit | 490 |
#!/bin/bash
targetDir="iContainer"
if [ "$#" -gt 0 ]; then
targetDir=$1
fi
targetFile="$targetDir.tar.gz"
# 1. compile sources
echo " > compiling sources ..."
# 1a. compile less files
cd less
./style.sh
cd -
# 1b. compile project sources
grunt deploy
# 2. create deployment package
echo " > creating deployment directory ..."
mkdir $targetDir
cp -rL bin config models public routes services utils $targetDir/
cp app.js package.json README.md node_modules.tar.gz $targetDir/
mkdir $targetDir/scripts
cp -r scripts/import $targetDir/scripts
echo " > creating deployment package ..."
tar cfz $targetFile $targetDir
echo " > cleaning up ..."
rm -r $targetDir
echo "done" | icypher-zizek/icontainer | deploy.sh | Shell | mit | 672 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for DLA-387-1
#
# Security announcement date: 2016-01-14 00:00:00 UTC
# Script generation date: 2017-01-01 21:09:06 UTC
#
# Operating System: Debian 6 (Squeeze)
# Architecture: x86_64
#
# Vulnerable packages fixed in version:
# - openssh:1:5.5p1-6+squeeze8
#
# Last versions recommended by security team:
# - openssh:1:5.5p1-6+squeeze8
#
# CVE List:
# - CVE-2016-0777
# - CVE-2016-0778
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo apt-get install --only-upgrade openssh=1:5.5p1-6+squeeze8 -y
| Cyberwatch/cbw-security-fixes | Debian_6_(Squeeze)/x86_64/2016/DLA-387-1.sh | Shell | mit | 646 |
#!/bin/sh
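# Switch adb to TCP/IP, connect to the device at $IP, count down $timer seconds, then fire $events random monkey events throttled by $throttle ms.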
IP=192.168.0.101
timer=30
events=500
throttle=350
adb usb
sleep 2
adb tcpip 5555
sleep 5
#TODO: Get args from terminal
adb connect $IP
sleep 3
connectStatus=`adb devices|wc -l`
if [[ $connectStatus -gt 3 ]]; then
echo "Safe to Disconnect"
for i in `seq $timer 1`;
do
echo $i
sleep 1
done
echo "Operation Monkey Commenced"
#Throttle optional
adb shell monkey -v $events --throttle $throttle --ignore-crashes --ignore-timeouts --ignore-security-exceptions
else
echo "Couldn't Connect to device"
fi
| rahatm1/AndroidMonkeyPrank | AndroidMonkeyPrank.sh | Shell | mit | 532 |
# https://www.reddit.com/r/zsh/comments/eblqvq/del_pgup_and_pgdown_input_in_terminal/fb7337q/
# If NumLock is off, translate keys to make them appear the same as with NumLock on.
bindkey -s '^[OM' '^M' # enter
bindkey -s '^[Ok' '+'
bindkey -s '^[Om' '-'
bindkey -s '^[Oj' '*'
bindkey -s '^[Oo' '/'
bindkey -s '^[OX' '='
# If someone switches our terminal to application mode (smkx), translate keys to make
# them appear the same as in raw mode (rmkx).
bindkey -s '^[OH' '^[[H' # home
bindkey -s '^[OF' '^[[F' # end
bindkey -s '^[OA' '^[[A' # up
bindkey -s '^[OB' '^[[B' # down
bindkey -s '^[OD' '^[[D' # left
bindkey -s '^[OC' '^[[C' # right
# TTY sends different key codes. Translate them to regular.
bindkey -s '^[[1~' '^[[H' # home
bindkey -s '^[[4~' '^[[F' # end
# Bind Home, End and a bunch of other standard things
bindkey '^?' backward-delete-char # bs delete one char backward
bindkey '^[[3~' delete-char # delete delete one char forward
bindkey '^[[H' beginning-of-line # home go to the beginning of line
bindkey '^[[F' end-of-line # end go to the end of line
bindkey '^[[1;5C' forward-word # ctrl+right go forward one word
bindkey '^[[1;5D' backward-word # ctrl+left go backward one word
bindkey '^H' backward-kill-word # ctrl+bs delete previous word
bindkey '^[[3;5~' kill-word # ctrl+del delete next word
bindkey '^J' backward-kill-line # ctrl+j delete everything before cursor
bindkey '^[[D' backward-char # left move cursor one char backward
bindkey '^[[C' forward-char # right move cursor one char forward
bindkey '^[[A' up-line-or-beginning-search # up prev command in history
bindkey '^[[B' down-line-or-beginning-search # down next command in history
autoload -Uz up-line-or-beginning-search
autoload -Uz down-line-or-beginning-search
zle -N up-line-or-beginning-search
zle -N down-line-or-beginning-search
| pdelre/dotfiles | zsh/bindkey.zsh | Shell | mit | 2,100 |
echo 'Fusing and updating gtest sources/headers for NBA...'
echo 'NOTE: You need to run "git pull" in 3rdparty/googletest manually first.'
TMPDIR=/tmp/nba-gtest-update
NBADIR=..
mkdir -p $TMPDIR
python2 googletest/googletest/scripts/fuse_gtest_files.py $TMPDIR
cp googletest/googletest/src/gtest_main.cc $TMPDIR/gtest
# Replace relative include path to absolute path
sed -i 's/^#include "gtest\/gtest.h"/#include <gtest\/gtest.h>/' $TMPDIR/gtest/*.cc
cp $TMPDIR/gtest/gtest-all.cc $NBADIR/src/lib/gtest
cp $TMPDIR/gtest/gtest_main.cc $NBADIR/src/lib/gtest
cp $TMPDIR/gtest/gtest.h $NBADIR/include/gtest
rm -rf $TMPDIR
| ANLAB-KAIST/NBA | 3rdparty/update-googletest.sh | Shell | mit | 618 |
#!/bin/bash
# tiny script to check if all of the stations in
# the station list file still resolve as some
# channels are periodically removed.
STATIONS_FILE="stations"
for url in $(cut -d '|' -f 3 "$STATIONS_FILE" | awk '{printf "https://somafm.com%s\n", $1}') ; do
printf "checking %s..." "$url"
curl --max-time 10 --silent -o /dev/null "$url"
[[ "$?" -eq 0 ]] && printf "OK" || printf "FAILED"
echo
done
| bcicen/somacli | check_stations.sh | Shell | mit | 414 |
# 4
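# Heredoc templates used by the caddy manager: .gitignore, configfile, Dockerfile, Caddyfiles, docker-compose files, plugin examples, and landing-page HTML/JS assets.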
read -r -d '' INST_GITIGNORE <<EOM
# ignore logs
logs/
# tls certs
letsencrypt/
acme/
ocsp/
EOM
read -r -d '' INST_CONFIGFILE <<"EOM"
#
# configfile for caddy manager
#
#CADDY_DIR=caddy
#SERVICES_DIR=services
#PROJECT=demo
# Network for services to connect to caddy
#CADDYNET=CADDYNET
# Mail address for Let's Encrypt
#ACME_MAIL=ACME_MAIL
# Default server hostname for generating subdomains
#FQDN=domain.tld
# Settings for the caddy Docker image
#CADDY_FEATURES='DNS,cors,filemanager,git,hugo,ipfilter,jwt,locale,minify,ratelimit,realip,upload'
#CADDY_IMAGENAME=fciserver/caddy
#HUGO_VERSION=0.18 # only tested with 0.17 and 0.18 because the upstream tar archive names are inconsistent
EOM
read -r -d '' INST_DOCKERFILE <<"EOM"
ENV OPENSSL_VERSION 1.0.2e-r0
RUN apk add --no-cache \
bash \
bind-tools \
ca-certificates \
curl \
drill \
git \
openssh-client \
sudo \
"openssl>=${OPENSSL_VERSION}"
# Install hugo
ARG HUGO_VERSION
ENV HUGO_VERSION ${HUGO_VERSION:-0.18}
ENV URL="https://github.com/spf13/hugo/releases/download/v${HUGO_VERSION}/hugo_${HUGO_VERSION}_Linux-64bit.tar.gz"
RUN \
curl -sSLo /tmp/hugo.tgz ${URL} \
&& tar xzf /tmp/hugo.tgz -C /tmp hugo_${HUGO_VERSION}_linux_amd64/hugo_${HUGO_VERSION}_linux_amd64 \
&& mv /tmp/hugo_${HUGO_VERSION}_linux_amd64/hugo_${HUGO_VERSION}_linux_amd64 /usr/local/bin/hugo \
&& rm -rf /tmp/*
# Install caddy
ARG CADDY_FEATURES
ENV CADDY_FEATURES ${CADDY_FEATURES:-"DNS,cors,filemanager,git,hugo,ipfilter,jwt,locale,minify,ratelimit,realip,upload"}
RUN curl -fsSL https://getcaddy.com | bash -s ${CADDY_FEATURES}
# Fix to use git plugin
RUN mkdir /root/.ssh \
&& echo -e "\
StrictHostKeyChecking no\\n\
UserKnownHostsFile /dev/null\\n\
" > /root/.ssh/config
#RUN adduser -Du 1000 caddy \
# && mkdir /home/caddy/.ssh \
# && cp /root/.ssh/config /home/caddy/.ssh/config
#USER caddy
EXPOSE 53 53/udp 80 443 2015
ENTRYPOINT ["caddy"]
EOM
read -r -d '' INST_CADDYFILE <<EOM
http://start.domain.tld:80 http://:80 http://www.domain.tld:80 http://domain.tld:80 {
tls off
# add this if you like to enable tls
# tls ACME_MAIL
log / /data/logs/caddy.log "[startpage] - {when} - {remote} - {proto} {method} {path} - {status} {size}"
root /data/www
minify
redir /ip /ip.txt
mime .txt text/plain
templates /ip .txt
}#END_start
EOM
read -r -d '' INST_COMPOSE <<EOM
version: "2"
networks:
backend:
external:
name: CADDYNET
services:
caddy:
image: CADDY_IMAGENAME
restart: on-failure:5
cap_add:
- NET_BIND_SERVICE
user: root
networks:
- backend
ports:
- "80:80"
- "443:443"
# - "2015:2015"
# - "53:53"
# - "53:53/udp"
# command: -http2=false -conf /data/conf/caddyfile
command: -type http -port 80 -http2=false -conf /data/conf/caddyfile
# command: -type dns -port 53 -conf /data/conf/corefile
read_only: true
working_dir: /data
environment:
- CADDYPATH=/data
volumes:
- ./caddy:/data:rw
EOM
read -r -d '' NEW_CADDYFILE <<EOM
http://SERVICE.domain.tld:80 {
tls off
# add this if you like to enable tls
# tls ACME_MAIL
log / /data/logs/services.log "[SERVICE] - {when} - {remote} - {proto} {method} {path} - {status} {size}"
proxy / http://SERVICE:80/ {
transparent
}
}
EOM
read -r -d '' NEW_COMPOSE <<EOM
version: '2'
networks:
backend:
external:
name: CADDYNET
services:
SERVICE:
networks:
- backend
restart: on-failure:5
expose:
- 80
image: SERVICE
build:
context: ./docker/
dockerfile: Dockerfile
EOM
read -r -d '' NEW_DOCKERFILE <<EOM
FROM busybox
#FROM armhf/busybox
WORKDIR /www
COPY index.html /www/index.html
EXPOSE 80
ENTRYPOINT ["httpd"]
CMD ["-f","-v","-p","80","-h", "/www"]
EOM
read -r -d '' PLUGIN_CADDYFILE <<EOM
http://start.domain.tld:80/file {
tls off
# add this if you like to enable tls
# tls [email protected]
root /data/htdocs
log / /data/logs/plugins.log "[browse] - {when} - {remote} - {proto} {method} {path} - {status} {size}"
browse /
# protect using HTTP basic auth
basicauth / admin password
}
http://start.domain.tld:80/filemanager {
tls off
# add this if you like to enable tls
# tls [email protected]
root /data/htdocs/files
log / /data/logs/plugins.log "[filemanager] - {when} - {remote} - {proto} {method} {path} - {status} {size}"
filemanager {
show /data/htdocs/files/
}
# protect using HTTP basic auth
basicauth / admin password
}
http://start.domain.tld:80/hugo {
tls off
# add this if you like to enable tls
# tls [email protected]
root /data/htdocs/hugo/public
log / /data/logs/plugins.log "[hugo] - {when} - {remote} - {proto} {method} {path} - {status} {size}"
hugo /data/htdocs/hugo
# protect the admin area using HTTP basic auth
basicauth /admin admin password
}
http://start.domain.tld:80/git {
tls off
# add this if you like to enable tls
# tls [email protected]
root /data/htdocs/git/www
log / /data/logs/plugins.log "[git] - {when} - {remote} - {proto} {method} {path} - {status} {size}"
git {
# repo ssh://[email protected]:22/octocat/octocat.github.io.git
repo https://github.com/octocat/octocat.github.io.git
branch master
# path /data/htdocs/git/www
# ssh key for pulling private repos
# key /data/htdocs/git/key/id_rsa
hook_type github
# Webhook url: http://start.domain.tld:80/git/webhook
hook /webhook webhook-secret
interval 86400
}
}
EOM
read -r -d '' PLUGIN_WEBLINKS <<EOM
[
{
"name": "git",
"link": "/git",
"button": "btn-success",
"image": "empty"
},
{
"name": "hugo",
"link": "/hugo",
"button": "btn-success",
"image": "empty"
},
{
"name": "hugo admin",
"link": "/hugo/admin",
"button": "btn-danger",
"image": "empty"
},
{
"name": "filemanager",
"link": "/filemanager",
"button": "btn-danger",
"image": "empty"
},
{
"name": "filebrowser",
"link": "/file",
"button": "btn-warning",
"image": "empty"
}
]
EOM
read -r -d '' WEB_MAINJS <<"EOM"
jQuery.fn.extend({
linklist: function (kwargs) {
var self = $(this);
$.ajax({
url: dataurl,
async:true,
contentType:"application/json",
dataType: "json",
success: function(data){
$.each(data, function(key, attributes){
var my_link = (typeof attributes['link'] != 'undefined') ? attributes['link'] : "";
var my_button = (typeof attributes['button'] != 'undefined') ? attributes['button'] : "";
var my_name = (typeof attributes['name'] != 'undefined') ? attributes['name'] : "";
var my_image = ((typeof attributes['image'] != 'undefined') && (attributes['image'] == "empty")) ? my_name : attributes['image'];
var new_div = $("<li>");
var new_anchor = $("<a>");
$(new_anchor).attr("href", my_link);
$(new_anchor).addClass("btn");
$(new_anchor).addClass(my_button);
$(new_anchor).attr("style", "height:150px");
if ((typeof my_image != 'undefined') && (my_image != "empty")) {
var new_content = $("<span>");
if (my_image != my_name) {
$(new_anchor).html(my_name);
new_content = $("<img>");
$(new_content).addClass("img-responsive");
$(new_content).attr("src", my_image);
}else{
var new_br = $("<br>");
$(new_br).appendTo(new_anchor);
new_content = $("<div>");
$(new_content).attr("style", "width:100px;height:100px");
$(new_content).html(my_name);
}
$(new_content).appendTo(new_anchor);
}
$(new_anchor).appendTo(new_div);
console.log(new_div);
$(new_div).appendTo(self);
});
}
});
}
});
/*
jQuery.fn.extend({
impress: function (kwargs) {
var self = $(this);
$.ajax({
url: "vcf/api.php",
async:true,
contentType:"text/html",
dataType: "html",
success: function(data){
$(self).html(data);
}
});
}
});
*/
$(document).ready(function(){
$("#content").linklist();
// $("#impressum").impress();
});
EOM
read -r -d '' WEB_HTML <<"EOM"
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1">
<!-- The above 3 meta tags *must* come first in the head; any other head content must come *after* these tags -->
<title>Landingpage</title>
<!-- Bootstrap -->
<link href="https://cdnjs.cloudflare.com/ajax/libs/twitter-bootstrap/3.3.7/css/bootstrap.min.css" rel="stylesheet">
<!-- HTML5 shim and Respond.js for IE8 support of HTML5 elements and media queries -->
<!-- WARNING: Respond.js doesn't work if you view the page via file:// -->
<!--[if lt IE 9]>
<script src="https://cdnjs.cloudflare.com/ajax/libs/html5shiv/3.7.3/html5shiv.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/respond.js/1.4.2/respond.min.js"></script>
<![endif]-->
<style>
ul {
list-style-type: none;
}
li {
float:left;
padding:20px;
}
</style>
<script>
var dataurl="DATASOURCE"
</script>
</head>
<body>
<div class="container">
<div class="jumbotron">
<h1>landingpage</h1>
<p class="lead"></p>
</div>
<div class="container">
<ul id="content">
<li><a style="height:150px" class="btn btn-info" href="FIRSTLINK"><br><div style="width:100px;height:100px">FIRSTTITLE</div></a></li>
</ul>
</div>
</div><!--/.container-->
<hr>
<footer>
<p>© 2015 Company, Inc.</p>
<div id="impressum"></div>
</footer>
<!-- jQuery (necessary for Bootstrap's JavaScript plugins) -->
<script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/3.1.0/jquery.min.js"></script>
<!--<script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/2.2.0/jquery.min.js"></script>-->
<!-- Include all compiled plugins (below), or include individual files as needed -->
<script src="https://cdnjs.cloudflare.com/ajax/libs/twitter-bootstrap/3.3.7/js/bootstrap.min.js"></script>
<script src="main.js"></script>
</body>
</html>
EOM
| firecyberice/caddy | lib/templates.sh | Shell | mit | 11,047 |
#!/bin/bash
/usr/bin/hhvm --config ./hhvm.ini --mode server -u www-data | vearutop/pinba-pure-php | benchmark/start-hhvm.sh | Shell | mit | 72 |
#!/bin/bash
if [ -z "${BASH_VERSION+Defined?}" ]; then
echo "bash-pack: No shell interpreter other than Bash is supported"
exit 1
fi
# enable / disable bash-debug based on environment variable.
#
# Globals:
# bash_debug set to 1 to enable xtrace
enable_bash_debug() {
[ "${bash_debug:-0}" = 0 ] ||
set -x
}
# Check major version of bash to be at least a given number
#
# Arguments:
# 1. major_version version to check against
#
# Returns:
# 0 if greater than or equal to major_version, otherwise 1
is_bash_version() {
(( ${BASH_VERSION%%.*} >= "$1" ))
}
# initialise and load specified scripts; fenced by __bash_pack_loaded
[ -n "${__bash_pack_loaded:-}" ] || {
__bash_pack_loaded=1
modules_loading=(init)
in_array() {
for ((i=2; i<=$#; ++i)); do
[ "${!i}" != "$1" ] || return 0
done
return 1
}
# Locate components of a module (also the bash version is taken into account)
bash_pack_module_locate_scripts() {
[[ ${BASH_VERSION} =~ ^([0-9]+)\.([0-9]+)\.([0-9]+) ]]
local version_Mmp="${BASH_REMATCH[0]}" \
version_Mm="${BASH_REMATCH[1]}.${BASH_REMATCH[2]}" \
version_M="${BASH_REMATCH[1]}"
local paths=( $(printf "${BASH_PACK_DIR}/${1}-bv%s.sh\n" "${version_Mmp}" "${version_Mm}" "${version_M}") "${BASH_PACK_DIR}/${1}.sh" )
local path
for path in "${paths[@]}"; do
if [ -f "$path" ] && [ -r "$path" ]; then
echo -n "$path"
return 0
fi
done
return 1
}
bash_pack_module_get_new_required_modules() {
local module all_new_required_modules new_required_modules=()
all_new_required_modules=( $(sed -nr 's/.*^#require (.*)$.*/\1/p' "$@") )
for module in "${all_new_required_modules[@]+"${all_new_required_modules[@]}"}"; do
in_array "$module" "${modules_loading[@]}" "${new_required_modules[@]+"${new_required_modules[@]}"}" ||
new_required_modules+=("$module")
done
printf '%s\n' "${new_required_modules[@]+"${new_required_modules[@]}"}"
}
bash_pack_module_load_by_name() {
local path pre_path post_path this_module_paths=()
if in_array "$1" "${modules_loading[@]}"; then
return 0
fi
if ! path="$( bash_pack_module_locate_scripts "${1}" )"; then
echo "Could not find module: ${1}"
exit 1
fi
# locate and load the module
modules_loading+=("${1}")
if pre_path="$( bash_pack_module_locate_scripts "${1}-pre" )"; then
this_module_paths+=("$pre_path")
fi
this_module_paths+=( "$path" )
if post_path="$( bash_pack_module_locate_scripts "${1}-post" )"; then
this_module_paths+=("$post_path")
fi
# look for direct deps
local required_modules=( $(bash_pack_module_get_new_required_modules "${this_module_paths[@]}") )
local module
for module in "${required_modules[@]+"${required_modules[@]}"}"; do
bash_pack_module_load_by_name "$module"
done
printf '%s\n' "${this_module_paths[@]}"
}
BASH_PACK_DIR="${BASH_SOURCE[0]%/*}"
[ -d "$BASH_PACK_DIR" ] || BASH_PACK_DIR=.
enable_bash_debug
scripts_loading=()
# Find files to load for any module-names given as args
for ((i=1; i<=$#; ++i)); do
scripts_loading+=($(bash_pack_module_load_by_name "${!i}"))
done
# once all of them are found, load them
for script in "${scripts_loading[@]+"${scripts_loading[@]}"}"; do
# shellcheck disable=SC1090
. "$script"
done
}
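# Usage sketch (illustration only - the install path and the module names
# "logging" and "strings" are hypothetical, not modules shipped with bash-pack):
#
#   #!/bin/bash
#   . /path/to/bash-pack/init.sh logging strings
#
# Each name is resolved to $BASH_PACK_DIR/<name>.sh (a bash-version-specific
# variant such as <name>-bv4.sh wins if present), together with optional
# <name>-pre.sh / <name>-post.sh hooks. A module may declare dependencies via
# a "#require" comment, e.g. a hypothetical logging.sh:
#
#   #require strings
#   log_info() { printf 'INFO: %s\n' "$*"; }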
| codemedic/bash-pack | init.sh | Shell | mit | 3,702 |
#!/bin/bash
if [ -z "$BACKEND_ID" -o -z "$SERVER_ID" -o -z "$URL" -o -z "$VULCAN_URL" -o -z "$TIMEOUT_SECS" ]; then
echo "register-healthcheck requires the following variables:" >&2
echo ' $BACKEND_ID $SERVER_ID $URL $VULCAN_URL $TIMEOUT_SECS' >&2
exit 1
fi
function ttl_param {
if [ -n "$FRONTEND_TTL_SECS" ]; then
echo "--ttl ${FRONTEND_TTL_SECS}s"
fi
}
function disableBackend {
vctl server rm --backend $BACKEND_ID --id $SERVER_ID --vulcan $VULCAN_URL
}
function enableBackend {
vctl server upsert \
--id $SERVER_ID \
--backend $BACKEND_ID \
--url $URL \
--vulcan $VULCAN_URL \
$(ttl_param)
}
echo 'Adding backend, server, and frontend to vulcand'
vctl backend upsert --id $BACKEND_ID --vulcan $VULCAN_URL
while true; do
curl --silent -I $URL/healthcheck | head -n 1 | awk '{print $2}' | grep '200'
HEALTHCHECK_STATUS=$?
if [ $HEALTHCHECK_STATUS -eq 0 ]; then
enableBackend
else
disableBackend
fi
sleep $TIMEOUT_SECS
done
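# Example invocation (illustration only - the IDs, URLs and intervals below
# are placeholders, not defaults of this script):
#
#   BACKEND_ID=api SERVER_ID=api-1 \
#   URL=http://10.0.0.5:3000 \
#   VULCAN_URL=http://127.0.0.1:8182 \
#   TIMEOUT_SECS=10 FRONTEND_TTL_SECS=30 \
#   ./register-healthcheck.sh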
| octoblu/docker-register-sidekick | register-healthcheck.sh | Shell | mit | 1,004 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for CESA-2013:0831
#
# Security announcement date: 2013-05-17 00:35:37 UTC
# Script generation date: 2017-01-01 21:10:45 UTC
#
# Operating System: CentOS 6
# Architecture: x86_64
#
# Vulnerable packages fix on version:
# - libvirt-client.i686:0.10.2-18.el6_4.5
# - libvirt-devel.i686:0.10.2-18.el6_4.5
# - libvirt.x86_64:0.10.2-18.el6_4.5
# - libvirt-client.x86_64:0.10.2-18.el6_4.5
# - libvirt-devel.x86_64:0.10.2-18.el6_4.5
# - libvirt-lock-sanlock.x86_64:0.10.2-18.el6_4.5
# - libvirt-python.x86_64:0.10.2-18.el6_4.5
#
# Last versions recommended by security team:
# - libvirt-client.i686:0.10.2-46.el6_6.2
# - libvirt-devel.i686:0.10.2-46.el6_6.2
# - libvirt.x86_64:0.10.2-46.el6_6.2
# - libvirt-client.x86_64:0.10.2-46.el6_6.2
# - libvirt-devel.x86_64:0.10.2-46.el6_6.2
# - libvirt-lock-sanlock.x86_64:0.10.2-46.el6_6.2
# - libvirt-python.x86_64:0.10.2-46.el6_6.2
#
# CVE List:
# - CVE-2013-1962
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo yum install libvirt-client.i686-0.10.2 -y
sudo yum install libvirt-devel.i686-0.10.2 -y
sudo yum install libvirt.x86_64-0.10.2 -y
sudo yum install libvirt-client.x86_64-0.10.2 -y
sudo yum install libvirt-devel.x86_64-0.10.2 -y
sudo yum install libvirt-lock-sanlock.x86_64-0.10.2 -y
sudo yum install libvirt-python.x86_64-0.10.2 -y
| Cyberwatch/cbw-security-fixes | CentOS_6/x86_64/2013/CESA-2013:0831.sh | Shell | mit | 1,462 |
#!/bin/sh
# Hinomoto Installation Script
# Written in 2012 by 伴上段
#
# To the extent possible under law, the author(s) have dedicated all copyright
# and related and neighboring rights to this software to the public domain
# worldwide. This software is distributed without any warranty.
#
# You should have received a copy of the CC0 Public Domain Dedication along
# with this software. If not, see
# <http://creativecommons.org/publicdomain/zero/1.0/>.
if [ -z "$CC" ]; then
  echo "ERROR: The environment variable CC is not defined. Please define it before running this installation script." >&2
exit 1
fi
if [ $# -eq 0 ]; then
echo "ERROR: No installation directory specified. Please specify it as the first command-line argument for this script." >&2
exit 1
fi
if [ ! -d "$1" ]; then
echo "ERROR: $1 is not a directory." >&2
exit 1
fi
set -x
$CC $CFLAGS -o $1/eadate eadate.c
$CC $CFLAGS -o $1/dunicode dunicode.c
$CC $CFLAGS -o $1/eunicode eunicode.c
cp taguranges $1
cp taguhan $1
cp filteruhan $1
cp uniquhan $1
cp annouhan $1
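# Example (illustration only - any C compiler and writable target directory work):
#
#   CC=cc CFLAGS=-O2 ./install.sh /usr/local/bin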
| jtvaughan/hinomoto | install.sh | Shell | cc0-1.0 | 1,047 |
#!/bin/bash
#*******************************************************************************
# Copyright (c) 2010-2014, Gabor Szarnyas, Istvan Rath and Daniel Varro
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v1.0
# which accompanies this distribution, and is available at
# http://www.eclipse.org/legal/epl-v10.html
#
# Contributors:
#   Gabor Szarnyas - initial API and implementation
#*******************************************************************************
HOSTS=`avahi-browse -at | grep IPv4 | grep _4store | cut -d " " -f 6 | cut -d "-" -f 1`
# For each discovered 4store host, resolve its workstation entry to an IPv4 address.
while read -r HOST; do
	hostToIP="$HOST"; avahi-browse -atr 2> /dev/null | sed -n '/Workstation/,+2p' | sed -n '/IPv4/,+2p' | sed -n '/'$hostToIP'/,+2p' | grep 'address' | cut -d "[" -f 2 | cut -d "]" -f 1
done <<< "$HOSTS"
| FTSRG-archive/4store-graph-driver | scripts/4s-discover.sh | Shell | epl-1.0 | 872 |
#! /bin/sh
# Copyright (C) 1997-2018 Free Software Foundation, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# Test _DEPENDENCIES variable. From Lee Iverson.
. test-init.sh
cat >> configure.ac << 'END'
AC_PROG_CC
AC_SUBST(DEPS)
END
cat > Makefile.am << 'END'
bin_PROGRAMS = TerraVision
TerraVision_SOURCES = \
AboutDialog.c Clock.c Dialogs.c DrawModel.c \
TsmWidget.c Gats.c GATSDialogs.c Model.c ModelAnim.c \
ScannedMap.c \
TerraVision.c TerraVisionAvs.c TerraVisionCAVE.c \
Texture.c ThreeDControl.c ThreeDPanel.c \
ThreeDWidget.c ThreeDWidget1.c TileManager.c \
TileRequester.c TwoDWidget.c \
Visible.c RequestGenerator.c X11FrameGrab.c \
matrix.c pixmaps.c xpmhash.c xpmread.c xcolor.c xv24to8.c
DEPS = @DEPS@
TerraVision_DEPENDENCIES = $(DEPS)
END
$ACLOCAL
$AUTOMAKE
| komh/automake-os2 | t/depend3.sh | Shell | gpl-2.0 | 1,386 |
#!/bin/sh
export reportpath="/var/raptor/scan_results"
export zip_upload_dir="/var/raptor/uploads"
export git_clone_dir="/var/raptor/clones"
#IMPORTANT: Do NOT add the trailing slash after the URLs.
############PUBLIC###############
#your-public-github-endpoint-here
export ext_git_url="https://github.com"
#your-public-github-api-endpoint-here
export ext_git_apiurl="https://api.github.com"
#your-public-github-username-here
export ext_git_user="dpnishant"
#your-public-github-token-here
export ext_git_token="55230bdae78b690c187135e766a03a21d5e15d8c"
##############INTERNAL#############
#your-internal-github-endpoint-here
export int_git_url=""
#your-internal-github-api-endpoint-here
export int_git_apiurl=""
#your-internal-github-username-here
export int_git_user=""
#your-internal-github-token-here
export int_git_token=""
cd backend
gunicorn -c config.py server:app
| dpnishant/raptor | start.sh | Shell | gpl-2.0 | 885 |
#!/bin/bash
echo "vm.swappiness = 10" >> /etc/sysctl.conf
sysctl vm.swappiness=10
addgroup supergroup
adduser root supergroup
adduser vagrant supergroup
# Add group and user for Hadoop Monitor web application
export PASSWORD=`openssl passwd -1 password`
addgroup webgroup
useradd -m -s /bin/bash -g webgroup webuser -p $PASSWORD
##
## INSTALL PACKAGES
##
# setup a source for maven3 which is required by Accumulo.
echo "deb http://ppa.launchpad.net/natecarlson/maven3/ubuntu precise main" | tee -a /etc/apt/sources.list
echo "deb-src http://ppa.launchpad.net/natecarlson/maven3/ubuntu precise main" | tee -a /etc/apt/sources.list
apt-get update
apt-get -y install openjdk-6-jdk subversion expect git
#apt-get -y install curl git openssh-server openssh-client terminator openjdk-6-jdk subversion screen g++ make meld build-essential g++-multilib
apt-get -y --force-yes install maven3
#
# Set the locale
locale-gen en_US
# remove the symbolic link to maven2. You can still access it via /usr/share/maven2/bin/mvn
ln -s /usr/share/maven3/bin/mvn /usr/bin/mvn
export VFILES=/vagrant/files
export BASE_DIR=/home/vagrant/accumulo_home
cat > /etc/profile.d/accumulo_setup.sh <<EOF
export ACCUMULO_HOME=/home/vagrant/accumulo_home/bin/accumulo
export HADOOP_PREFIX=/home/vagrant/accumulo_home/bin/hadoop
export JAVA_HOME=/usr/lib/jvm/java-6-openjdk-amd64
export ZOOKEEPER_HOME=/home/vagrant/accumulo_home/bin/zookeeper
export PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/opt/vagrant_ruby/bin
export PATH=\$ACCUMULO_HOME/bin:\$PATH
export PATH=\$HADOOP_PREFIX/bin:\$PATH
export PATH=/usr/lib/jvm/java-6-openjdk-amd64/bin:\$PATH
export PATH=\$ZOOKEEPER_HOME/bin:\$PATH
EOF
source /etc/profile.d/accumulo_setup.sh
export HADOOP_VERSION=hadoop-1.2.1
export ZOOKEEPER_VERSION=zookeeper-3.4.3
export LOGFILE=$HOME/build.log
export PASSWORD=`openssl passwd -1 password`
mkdir -p $BASE_DIR/software $BASE_DIR/bin
mkdir -p /home/vagrant/.ssh
chmod 700 /home/vagrant/.ssh
chown -R vagrant:vagrant /home/vagrant/.ssh
su vagrant -c "ssh-keygen -t rsa -P '' -f /home/vagrant/.ssh/id_rsa"
mkdir -p /vagrant/files/ssh
cp /home/vagrant/.ssh/id_rsa.pub /vagrant/files/ssh/`hostname`.pub
| medined/Accumulo_Snapshot_By_Vagrant | files/setup.sh | Shell | gpl-2.0 | 2,200 |
#!/bin/sh
#collect_data.sh
#author: wgc
#Run an exhaustive paris-traceroute against every target listed in the input
#file ($1, one host or IP per line) and append the output to a dated log file.
filename=`date +%y%m%d`
outfile="paris-traceroute$filename"
touch "$outfile"
data=$1
while read -r line
do
../paris-traceroute --algo=exhaustive "$line" >> "$outfile" 2>&1
done < "$data"
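#Example (illustration only; the list name is arbitrary):
# ./collect_data.sh targets.txt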
| wgcitgkaka/paris_Cplus_0.1 | src/test/collect_data.sh | Shell | gpl-2.0 | 204 |
#!/bin/sh
if [ -z "${CFG_TARGET}" ] ; then
CFG_TARGET=`pwd`/target
echo "CFG_TARGET=${CFG_TARGET}"
fi
if [ -z "${CFG_SOURCE}" ] ; then
CFG_SOURCE=`pwd`/source
echo "CFG_SOURCE=${CFG_SOURCE}"
fi
if [ -z "${CFG_BUILD}" ] ; then
CFG_BUILD=`pwd`/build
echo "CFG_BUILD=${CFG_BUILD}"
fi
if [ -z "${CFG_LINUX_CONFIG}" ] ; then
CFG_LINUX_CONFIG=i386_defconfig
echo "CFG_LINUX_CONFIG=${CFG_LINUX_CONFIG}"
fi
if [ -z "${CFG_LINUX_IMAGE}" ] ; then
CFG_LINUX_IMAGE=bzImage
echo "CFG_LINUX_IMAGE=${CFG_LINUX_IMAGE}"
fi
mkdir -p $CFG_TARGET
mkdir -p $CFG_SOURCE
mkdir -p $CFG_BUILD
cd $CFG_SOURCE
make O=$CFG_BUILD mrproper || exit 1
make O=$CFG_BUILD $CFG_LINUX_CONFIG || exit 1
make O=$CFG_BUILD dep || exit 1
make O=$CFG_BUILD clean || exit 1
make O=$CFG_BUILD $CFG_LINUX_IMAGE modules || exit 1
make O=$CFG_BUILD INSTALL_MOD_PATH=$CFG_TARGET modules_install || exit 1
exit 0
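# Example (illustration only - paths and the defconfig name are placeholders):
#
#   CFG_SOURCE=$HOME/src/linux CFG_BUILD=/tmp/linux-build \
#   CFG_TARGET=/tmp/linux-target CFG_LINUX_CONFIG=x86_64_defconfig \
#   ./quick-make.sh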
| robacklin/linux-3.7.2 | quick-make.sh | Shell | gpl-2.0 | 878 |
#!/bin/bash
################################################
# Tool to program a polytouchdemo on Karo TX #
# Please send feedback to: #
# [email protected] #
# Dominik Peuker November 2014 #
# Glyn GmbH & Co. KG #
# #
#History #
#----------------------------------------------#
#0.1 - - Initial Version #
################################################
clear
echo "Program Polytouchdemo to TX28"
echo "-----------------------------"
echo
#Presetting
. "$HOME/PycharmProjects/practice/flasher.conf"
#IPH=192.168.15.176 #Host
#IPT=192.168.15.205 #Target
#port=/dev/ttyUSB0 #serial port for console
uboot= #Bootloader
image= #Environment
dtb= #Device Tree
kernel= #Linux kernel
rootfs= #...demo
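#Example flasher.conf (illustration only - the values simply mirror the
#commented-out defaults above; adjust them for your setup):
# IPH=192.168.15.176
# IPT=192.168.15.205
# port=/dev/ttyUSB0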
echo
#preparation
echo "Please check:"
echo "tftp - server running?"
echo "serial cable connected?"
echo "ethernet connected?"
echo "module TXxx (TX-XXXX) inserted?"
echo "power supply connected?"
echo "continue (y/n)"
read continue
if [ "$continue" != y ]
then
echo "exiting now!"
exit 0
else
clear
fi
#Keep or set IP adresses / serial port?
echo "IP addresses currently set to:"
echo "Host: "${IPH}
echo "Target: "${IPT}
echo "Serial port is currently set to "${port}
echo
echo "Keep these settings (y) or enter new addresses (n)?"
read settings
if [ "$settings" != y ]
then
#Host
echo "Please enter IP of your host (serverip):"
read IPH
echo
#Target
echo "Please enter IP of your target (ipaddr):"
read IPT
echo
#serial port
echo "Please enter your serial port like this: /dev/ttyS0:"
read port
echo
#correct?
echo "Host:"${IPH}
echo "Target:"${IPT}
echo "Port:"${port}
#wait and clear screen
sleep 4
clear
else
#clear screen
clear
fi
#Mainfunction
#cleanup
echo " 1/20 - Clean Partitions"
#delete kernel
echo 'nand erase.part linux' > ${port}
sleep 3
#delete rootfs
echo 'nand erase.part rootfs' > ${port}
sleep 3
echo " 2/20 - Set IP addresses"
echo 'setenv serverip '${IPH} > ${port}
echo 'setenv ipaddr '${IPT} > ${port}
echo " 3/20 - Change autostart / autoload"
echo 'setenv autoload no' > ${port}
echo 'setenv autostart no' > ${port}
echo 'saveenv' > ${port}
echo " 4/20 - Update Bootloader"
sleep 5
echo 'tftp ${loadaddr}' ${uboot} > ${port}
echo " 5/20 - Transfer Bootloader"
sleep 10
echo " 6/20 - Install Bootloader"
sleep 5
echo 'romupdate ${fileaddr}' > ${port}
sleep 5
echo " 7/20 - Reset"
echo 'reset' > ${port}
sleep 5
echo " 8/20 - Set default environment"
echo 'env default -f -a' > ${port}
echo " 9/20 - Set IP addresses"
sleep 5
echo 'setenv serverip '${IPH} > ${port}
echo 'setenv ipaddr '${IPT} > ${port}
echo "10/20 - Transfer Environment"
#copy and source predefinded environment
echo 'tftp ${loadaddr}' ${image} > ${port}
sleep 8
echo 'source ${fileaddr}' > ${port}
sleep 5
#override IP - Settings in predefined Environment
echo 'setenv serverip '${IPH} > ${port}
echo 'setenv ipaddr '${IPT} > ${port}
echo 'saveenv' > ${port}
echo "11/20 - Transfer device tree"
echo 'tftp ${loadaddr}' ${dtb} > ${port}
echo > ${port}
sleep 3
echo > ${port}
echo "12/20 - Save device tree"
echo 'nand erase.part dtb' > ${port}
echo > ${port}
sleep 3
echo 'nand write.jffs2 ${fileaddr} dtb ${filesize}' > ${port}
echo > ${port}
sleep 3
echo 'reset' > ${port}
echo > ${port}
sleep 5
echo > ${port}
#copy and install kernel
echo "13/20 - Transfer Linux Kernel"
echo 'tftp ${loadaddr}' ${kernel} > ${port}
sleep 15
echo 'nand erase.part linux' > ${port}
sleep 5
echo "14/20 - Save Linux Kernel"
echo 'nand write.jffs2 ${fileaddr} linux ${filesize}' > ${port}
sleep 5
#copy and install filesystem
echo "15/20 - Transfer Filesystem"
echo 'tftp ${loadaddr}' ${rootfs} > ${port}
sleep 25
echo 'nand erase.part rootfs' > ${port}
sleep 5
echo "16/20 - Save Filesystem"
echo 'nand write.trimffs ${fileaddr} rootfs ${filesize}' > ${port}
sleep 15
echo "17/20 - Reset and Reboot"
echo 'reset' > ${port}
sleep 3
echo > ${port}
echo > ${port}
#backlight is only 50% so far, set it to 100%
echo "18/20 - Set backlight to full brightness"
sleep 2
echo 'fdt set /backlight default-brightness-level <0x01>' > ${port}
sleep 2
echo > ${port}
echo "19/20 - Save Environment"
echo 'run fdtsave' > ${port}
echo > ${port}
sleep 3
echo "20/20 - Finished Programming!"
#ready for start
#change displaysettings
echo "Display currently set to EDT 5.7 inch (ETV570)"
echo "possible other video modes are:"
echo "1: ET0350 ET0350G0DH6"
echo "2: ET0430 ET0430G0DH6"
echo "3: ET0500 ET0500G0DH6"
echo "4: ETQ570 ETQ570G0DH6 or ETQ570G2DH6"
#add ETV570 if "y" was entered unintentionally
echo "5: ETV570 ETMV570"
echo "6: ET0700 ET0700G0DH6 or ET0700G0BDH6"
echo "7: VGA standard VGA"
echo "change video mode? (y/n)"
read video_decision
if [ "$video_decision" != y ]
then
echo "Video resolution set to ETV570, exiting now!"
exit 0
else
echo "Please enter the number of the desired video mode (1-7)"
read video_mode
if [ "$video_mode" = 1 ]
then
echo 'setenv video_mode ET0350' > ${port}
echo 'saveenv' > ${port}
sleep 3
echo "Finished!"
exit 0
elif [ "$video_mode" = 2 ]
then
echo 'setenv video_mode ET0430' > ${port}
echo 'saveenv' > ${port}
sleep 3
echo "Finished!"
exit 0
elif [ "$video_mode" = 3 ]
then
echo 'setenv video_mode ET0500' > ${port}
echo 'saveenv' > ${port}
sleep 3
echo "Finished!"
exit 0
elif [ "$video_mode" = 4 ]
then
echo 'setenv video_mode ETQ570' > ${port}
echo 'saveenv' > ${port}
sleep 3
echo "Finished!"
exit 0
elif [ "$video_mode" = 5 ]
then
echo 'setenv video_mode ETV570' > ${port}
echo 'saveenv' > ${port}
sleep 3
echo "Finished!"
exit 0
elif [ "$video_mode" = 6 ]
then
echo 'setenv video_mode ET0700' > ${port}
echo 'saveenv' > ${port}
echo > ${port}
sleep 3
#we need to invert the pixelclock for the newer 7"
#Otherwise the output won't be correct and some pixels are strange
echo "For newer EDT 7 inch displays the pixel clock needs to be inverted"
echo "Part number is: (G-)ETM0700G0BDH6"
echo "Invert pixelclock? (y/n)"
read invert
if [ ${invert} = y ]
then
echo 'fdt set display/display-timings/timing4/ pixelclk-active <0>' > ${port}
sleep 3
echo 'run fdtsave' > ${port}
echo > ${port}
echo "Finished!"
exit 0
else
echo "Finished!"
exit 0
fi
elif [ "$video_mode" = 7 ]
then
echo 'setenv video_mode VGA' > ${port}
echo 'saveenv' > ${port}
sleep 3
echo "Finished!"
exit 0
else
echo "Unknown video mode, nothing changed!"
exit 1
fi
fi
| jitter77/practice | template.sh | Shell | gpl-2.0 | 7,612 |
#!/bin/bash
. `dirname $0`/functions.sh
rm -f reloc3 reloc3lib*.so reloc3.log
rm -f prelink.cache
$CC -shared -O2 -fpic -o reloc3lib1.so $srcdir/reloc3lib1.c
$CC -shared -O2 -fpic -o reloc3lib2.so $srcdir/reloc1lib2.c reloc3lib1.so
BINS="reloc3"
LIBS="reloc3lib1.so reloc3lib2.so"
$CCLINK -o reloc3 $srcdir/reloc3.c -Wl,--rpath-link,. reloc3lib2.so
strip -g $BINS $LIBS
savelibs
echo $PRELINK ${PRELINK_OPTS--vm} ./reloc3 > reloc3.log
$PRELINK ${PRELINK_OPTS--vm} ./reloc3 >> reloc3.log 2>&1 || exit 1
grep -q ^`echo $PRELINK | sed 's/ .*$/: /'` reloc3.log && exit 2
LD_LIBRARY_PATH=. ./reloc3 >> reloc3.log || exit 3
readelf -a ./reloc3 >> reloc3.log 2>&1 || exit 4
# So that it is not prelinked again
chmod -x ./reloc3
comparelibs >> reloc3.log 2>&1 || exit 5
| ystk/debian-prelink | testsuite/reloc3.sh | Shell | gpl-2.0 | 762 |
#!/bin/sh
proj=$1
if [ -z "$proj" ]; then
echo oracc-project-runtime.sh: must give project on command line
exit 1
fi
if [ -w $ORACC_HOME/$proj/00any ]; then
destdir=$ORACC_HOME/$proj/00any
else
destdir=$ORACC/00any
fi
if [ ! -w $destdir ]; then
echo oracc-project-runtime.sh: cannot write to $destdir. Stop
exit 1
fi
fproj=`/bin/echo -n $proj | tr / -`
echo :$fproj:
cd $ORACC
tar --exclude www/$proj/estindex -Jcf $destdir/$fproj-runtime.tar.xz \
agg/projects/images/$proj.png \
bld/$proj/[PQX][0-9][0-9][0-9] \
pub/$proj \
xml/$proj \
www/$proj
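# Example (illustration only; "myproj" is a placeholder project name):
#   ./oracc-project-runtime.sh myproj
# A subproject path such as "myproj/subproj" also works; the slash is folded
# into a dash for the archive name.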
| oracc/oracc | misc/admin/oracc-project-runtime.sh | Shell | gpl-2.0 | 594 |
# Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to the
# Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston
# MA 02110-1301 USA.
##############################################################################
# Some common macro definitions
##############################################################################
# NOTE: "vendor" is used in upgrade/downgrade check, so you can't
# change these, has to be exactly as is.
%define mysql_old_vendor MySQL AB
%define mysql_vendor_2 Sun Microsystems, Inc.
%define mysql_vendor Oracle and/or its affiliates
%define mysql_version @VERSION@
%define mysqld_user mysql
%define mysqld_group mysql
%define mysqldatadir /var/lib/mysql
%define release 1
#
# Macros we use which are not available in all supported versions of RPM
#
# - defined/undefined are missing on RHEL4
#
%if %{expand:%{?defined:0}%{!?defined:1}}
%define defined() %{expand:%%{?%{1}:1}%%{!?%{1}:0}}
%endif
%if %{expand:%{?undefined:0}%{!?undefined:1}}
%define undefined() %{expand:%%{?%{1}:0}%%{!?%{1}:1}}
%endif
# ----------------------------------------------------------------------------
# RPM build tools now automatically detect Perl module dependencies. This
# detection causes problems as it is broken in some versions, and it also
# provides unwanted dependencies from mandatory scripts in our package.
# It might not be possible to disable this in all versions of RPM, but here we
# try anyway. We keep the "AutoReqProv: no" for the "test" sub package, as
# disabling here might fail, and that package has the most problems.
# See:
# http://fedoraproject.org/wiki/Packaging/Perl#Filtering_Requires:_and_Provides
# http://www.wideopen.com/archives/rpm-list/2002-October/msg00343.html
# ----------------------------------------------------------------------------
%undefine __perl_provides
%undefine __perl_requires
##############################################################################
# Command line handling
##############################################################################
#
# To set options:
#
# $ rpmbuild --define="option <x>" ...
#
# ----------------------------------------------------------------------------
# Commercial builds
# ----------------------------------------------------------------------------
%if %{undefined commercial}
%define commercial 0
%endif
# ----------------------------------------------------------------------------
# Source name
# ----------------------------------------------------------------------------
%if %{undefined src_base}
%define src_base mysql
%endif
%define src_dir %{src_base}-%{mysql_version}
# ----------------------------------------------------------------------------
# Feature set (storage engines, options). Default to community (everything)
# ----------------------------------------------------------------------------
%if %{undefined feature_set}
%define feature_set community
%endif
# ----------------------------------------------------------------------------
# Server comment strings
# ----------------------------------------------------------------------------
%if %{undefined compilation_comment_debug}
%define compilation_comment_debug MySQL Community Server - Debug (GPL)
%endif
%if %{undefined compilation_comment_release}
%define compilation_comment_release MySQL Community Server (GPL)
%endif
# ----------------------------------------------------------------------------
# Product and server suffixes
# ----------------------------------------------------------------------------
%if %{undefined product_suffix}
%if %{defined short_product_tag}
%define product_suffix -%{short_product_tag}
%else
%define product_suffix %{nil}
%endif
%endif
%if %{undefined server_suffix}
%define server_suffix %{nil}
%endif
# ----------------------------------------------------------------------------
# Distribution support
# ----------------------------------------------------------------------------
%if %{undefined distro_specific}
%define distro_specific 0
%endif
%if %{distro_specific}
%if %(test -f /etc/enterprise-release && echo 1 || echo 0)
%define oelver %(rpm -qf --qf '%%{version}\\n' /etc/enterprise-release | sed -e 's/^\\([0-9]*\\).*/\\1/g')
%if "%oelver" == "4"
%define distro_description Oracle Enterprise Linux 4
%define distro_releasetag oel4
%define distro_buildreq gcc-c++ gperf ncurses-devel perl time zlib-devel
%define distro_requires chkconfig coreutils grep procps shadow-utils net-tools
%else
%if "%oelver" == "5"
%define distro_description Oracle Enterprise Linux 5
%define distro_releasetag oel5
%define distro_buildreq gcc-c++ gperf ncurses-devel perl time zlib-devel
%define distro_requires chkconfig coreutils grep procps shadow-utils net-tools
%else
%{error:Oracle Enterprise Linux %{oelver} is unsupported}
%endif
%endif
%else
%if %(test -f /etc/oracle-release && echo 1 || echo 0)
%define elver %(rpm -qf --qf '%%{version}\\n' /etc/oracle-release | sed -e 's/^\\([0-9]*\\).*/\\1/g')
%if "%elver" == "6"
%define distro_description Oracle Linux 6
%define distro_releasetag el6
%define distro_buildreq gcc-c++ ncurses-devel perl time zlib-devel
%define distro_requires chkconfig coreutils grep procps shadow-utils net-tools
%else
%{error:Oracle Linux %{elver} is unsupported}
%endif
%else
%if %(test -f /etc/redhat-release && echo 1 || echo 0)
%define rhelver %(rpm -qf --qf '%%{version}\\n' /etc/redhat-release | sed -e 's/^\\([0-9]*\\).*/\\1/g')
%if "%rhelver" == "4"
%define distro_description Red Hat Enterprise Linux 4
%define distro_releasetag rhel4
%define distro_buildreq gcc-c++ gperf ncurses-devel perl time zlib-devel
%define distro_requires chkconfig coreutils grep procps shadow-utils net-tools
%else
%if "%rhelver" == "5"
%define distro_description Red Hat Enterprise Linux 5
%define distro_releasetag rhel5
%define distro_buildreq gcc-c++ gperf ncurses-devel perl time zlib-devel
%define distro_requires chkconfig coreutils grep procps shadow-utils net-tools
%else
%if "%rhelver" == "6"
%define distro_description Red Hat Enterprise Linux 6
%define distro_releasetag rhel6
%define distro_buildreq gcc-c++ ncurses-devel perl time zlib-devel
%define distro_requires chkconfig coreutils grep procps shadow-utils net-tools
%else
%{error:Red Hat Enterprise Linux %{rhelver} is unsupported}
%endif
%endif
%endif
%else
%if %(test -f /etc/SuSE-release && echo 1 || echo 0)
%define susever %(rpm -qf --qf '%%{version}\\n' /etc/SuSE-release | cut -d. -f1)
%if "%susever" == "10"
%define distro_description SUSE Linux Enterprise Server 10
%define distro_releasetag sles10
%define distro_buildreq gcc-c++ gdbm-devel gperf ncurses-devel openldap2-client zlib-devel
%define distro_requires aaa_base coreutils grep procps pwdutils
%else
%if "%susever" == "11"
%define distro_description SUSE Linux Enterprise Server 11
%define distro_releasetag sles11
%define distro_buildreq gcc-c++ gdbm-devel gperf ncurses-devel openldap2-client procps pwdutils zlib-devel
%define distro_requires aaa_base coreutils grep procps pwdutils
%else
%{error:SuSE %{susever} is unsupported}
%endif
%endif
%else
%{error:Unsupported distribution}
%endif
%endif
%endif
%endif
%else
%define glibc_version %(/lib/libc.so.6 | grep stable | cut -d, -f1 | cut -c38-)
%define distro_description Generic Linux (glibc %{glibc_version})
%define distro_releasetag linux_glibc%{glibc_version}
%define distro_buildreq gcc-c++ gperf ncurses-devel perl time zlib-devel
%define distro_requires coreutils grep procps /sbin/chkconfig /usr/sbin/useradd /usr/sbin/groupadd
%endif
# Avoid debuginfo RPMs, leaves binaries unstripped
%define debug_package %{nil}
# Hack to work around bug in RHEL5 __os_install_post macro, wrong inverted
# test for __debug_package
%define __strip /bin/true
# ----------------------------------------------------------------------------
# Support optional "tcmalloc" library (experimental)
# ----------------------------------------------------------------------------
%if %{defined malloc_lib_target}
%define WITH_TCMALLOC 1
%else
%define WITH_TCMALLOC 0
%endif
##############################################################################
# Configuration based upon above user input, not to be set directly
##############################################################################
%if %{commercial}
%define license_files_server %{src_dir}/LICENSE.mysql
%define license_type Commercial
%else
%define license_files_server %{src_dir}/COPYING %{src_dir}/README
%define license_type GPL
%endif
##############################################################################
# Main spec file section
##############################################################################
Name: MySQL%{product_suffix}
Summary: MySQL: a very fast and reliable SQL database server
Group: Applications/Databases
Version: @MYSQL_RPM_VERSION@
Release: %{release}%{?distro_releasetag:.%{distro_releasetag}}
Distribution: %{distro_description}
License: Copyright (c) 2000, @MYSQL_COPYRIGHT_YEAR@, %{mysql_vendor}. All rights reserved. Under %{license_type} license as shown in the Description field.
Source: http://www.mysql.com/Downloads/MySQL-@MYSQL_BASE_VERSION@/%{src_dir}.tar.gz
URL: http://www.mysql.com/
Packager: MySQL Release Engineering <[email protected]>
Vendor: %{mysql_vendor}
Provides: msqlormysql MySQL-server
BuildRequires: %{distro_buildreq}
# Regression tests may take a long time, override the default to skip them
%{!?runselftest:%global runselftest 1}
# Think about what you use here since the first step is to
# run a rm -rf
BuildRoot: %{_tmppath}/%{name}-%{version}-build
# From the manual
%description
The MySQL(TM) software delivers a very fast, multi-threaded, multi-user,
and robust SQL (Structured Query Language) database server. MySQL Server
is intended for mission-critical, heavy-load production systems as well
as for embedding into mass-deployed software. MySQL is a trademark of
%{mysql_vendor}
The MySQL software has Dual Licensing, which means you can use the MySQL
software free of charge under the GNU General Public License
(http://www.gnu.org/licenses/). You can also purchase commercial MySQL
licenses from %{mysql_vendor} if you do not wish to be bound by the terms of
the GPL. See the chapter "Licensing and Support" in the manual for
further info.
The MySQL web site (http://www.mysql.com/) provides the latest
news and information about the MySQL software. Also please see the
documentation and the manual for more information.
##############################################################################
# Sub package definition
##############################################################################
%package -n MySQL-server%{product_suffix}
Summary: MySQL: a very fast and reliable SQL database server
Group: Applications/Databases
Requires: %{distro_requires}
%if %{defined susever}
Provides: msqlormysql MySQL MySQL-server
Conflicts: mysql mysql-server mysql-advanced mysql-server-advanced
Obsoletes: MySQL MySQL-server
Obsoletes: MySQL-server-classic MySQL-server-community MySQL-server-enterprise
Obsoletes: MySQL-server-advanced MySQL-server-advanced-gpl MySQL-server-enterprise-gpl
%else
Obsoletes: MySQL < %{version}-%{release}
Obsoletes: MySQL-server < %{version}-%{release}
Obsoletes: MySQL-server-advanced < %{version}-%{release}
Obsoletes: mysql mysql-server mysql-advanced mysql-server-advanced
Obsoletes: MySQL-server-classic MySQL-server-community MySQL-server-enterprise
Obsoletes: MySQL-server-advanced-gpl MySQL-server-enterprise-gpl
Provides: msqlormysql MySQL MySQL-server MySQL-server-advanced
%endif
%description -n MySQL-server%{product_suffix}
The MySQL(TM) software delivers a very fast, multi-threaded, multi-user,
and robust SQL (Structured Query Language) database server. MySQL Server
is intended for mission-critical, heavy-load production systems as well
as for embedding into mass-deployed software. MySQL is a trademark of
%{mysql_vendor}
The MySQL software has Dual Licensing, which means you can use the MySQL
software free of charge under the GNU General Public License
(http://www.gnu.org/licenses/). You can also purchase commercial MySQL
licenses from %{mysql_vendor} if you do not wish to be bound by the terms of
the GPL. See the chapter "Licensing and Support" in the manual for
further info.
The MySQL web site (http://www.mysql.com/) provides the latest news and
information about the MySQL software. Also please see the documentation
and the manual for more information.
This package includes the MySQL server binary as well as related utilities
to run and administer a MySQL server.
If you want to access and work with the database, you have to install
package "MySQL-client%{product_suffix}" as well!
# ----------------------------------------------------------------------------
%package -n MySQL-client%{product_suffix}
Summary: MySQL - Client
Group: Applications/Databases
%if %{defined susever}
Provides: MySQL-client
Conflicts: mysql mysql-advanced
Obsoletes: MySQL-client
Obsoletes: MySQL-client-classic MySQL-client-community MySQL-client-enterprise
Obsoletes: MySQL-client-advanced MySQL-client-advanced-gpl MySQL-client-enterprise-gpl
%else
Obsoletes: mysql mysql-advanced
Obsoletes: MySQL-client < %{version}-%{release}
Obsoletes: MySQL-client-advanced < %{version}-%{release}
Obsoletes: MySQL-client-classic MySQL-client-community MySQL-client-enterprise
Obsoletes: MySQL-client-advanced-gpl MySQL-client-enterprise-gpl
Provides: MySQL-client MySQL-client-advanced
Provides: mysql
%endif
%description -n MySQL-client%{product_suffix}
This package contains the standard MySQL clients and administration tools.
For a description of MySQL see the base MySQL RPM or http://www.mysql.com/
# ----------------------------------------------------------------------------
%package -n MySQL-test%{product_suffix}
Summary: MySQL - Test suite
Group: Applications/Databases
%if %{defined susever}
Requires: MySQL-client perl
Provides: MySQL-test
Conflicts: mysql-test mysql-test-advanced
Obsoletes: MySQL-test
Obsoletes: MySQL-test-classic MySQL-test-community MySQL-test-enterprise
Obsoletes: MySQL-test-advanced MySQL-test-advanced-gpl MySQL-test-enterprise-gpl
AutoReqProv: no
%else
Requires: MySQL-client perl
Conflicts: mysql-test mysql-test-advanced
Obsoletes: MySQL-test < %{version}-%{release}
Obsoletes: MySQL-test-advanced < %{version}-%{release}
Obsoletes: MySQL-test-classic MySQL-test-community MySQL-test-enterprise
Obsoletes: MySQL-test-advanced-gpl MySQL-test-enterprise-gpl
Provides: MySQL-test MySQL-test-advanced
AutoReqProv: no
%endif
%description -n MySQL-test%{product_suffix}
This package contains the MySQL regression test suite.
For a description of MySQL see the base MySQL RPM or http://www.mysql.com/
# ----------------------------------------------------------------------------
%package -n MySQL-devel%{product_suffix}
Summary: MySQL - Development header files and libraries
Group: Applications/Databases
%if %{defined susever}
Provides: MySQL-devel
Conflicts: mysql-devel mysql-embedded-devel mysql-devel-advanced mysql-embedded-devel-advanced
Obsoletes: MySQL-devel
Obsoletes: MySQL-devel-classic MySQL-devel-community MySQL-devel-enterprise
Obsoletes: MySQL-devel-advanced MySQL-devel-advanced-gpl MySQL-devel-enterprise-gpl
%else
Conflicts: mysql-devel mysql-embedded-devel mysql-devel-advanced mysql-embedded-devel-advanced
Obsoletes: MySQL-devel < %{version}-%{release}
Obsoletes: MySQL-devel-advanced < %{version}-%{release}
Obsoletes: MySQL-devel-classic MySQL-devel-community MySQL-devel-enterprise
Obsoletes: MySQL-devel-advanced-gpl MySQL-devel-enterprise-gpl
Provides: MySQL-devel MySQL-devel-advanced
%endif
%description -n MySQL-devel%{product_suffix}
This package contains the development header files and libraries necessary
to develop MySQL client applications.
For a description of MySQL see the base MySQL RPM or http://www.mysql.com/
# ----------------------------------------------------------------------------
%package -n MySQL-shared%{product_suffix}
Summary: MySQL - Shared libraries
Group: Applications/Databases
%if %{defined susever}
Provides: MySQL-shared
Obsoletes: MySQL-shared-standard MySQL-shared-pro
Obsoletes: MySQL-shared-pro-cert MySQL-shared-pro-gpl
Obsoletes: MySQL-shared-pro-gpl-cert MySQL-shared
Obsoletes: MySQL-shared-classic MySQL-shared-community MySQL-shared-enterprise
Obsoletes: MySQL-shared-advanced MySQL-shared-advanced-gpl MySQL-shared-enterprise-gpl
%else
Obsoletes: MySQL-shared-standard MySQL-shared-pro
Obsoletes: MySQL-shared-pro-cert MySQL-shared-pro-gpl
Obsoletes: MySQL-shared < %{version}-%{release}
Obsoletes: MySQL-shared-advanced < %{version}-%{release}
Obsoletes: MySQL-shared-pro-gpl-cert
Obsoletes: MySQL-shared-classic MySQL-shared-community MySQL-shared-enterprise
Obsoletes: MySQL-shared-advanced-gpl MySQL-shared-enterprise-gpl
Provides: MySQL-shared MySQL-shared-advanced
%endif
%description -n MySQL-shared%{product_suffix}
This package contains the shared libraries (*.so*) which certain languages
and applications need to dynamically load and use MySQL.
# ----------------------------------------------------------------------------
%package -n MySQL-embedded%{product_suffix}
Summary: MySQL - Embedded library
Group: Applications/Databases
%if %{defined susever}
Requires: MySQL-devel
Provides: MySQL-embedded
Conflicts: mysql-embedded mysql-embedded-advanced
Obsoletes: MySQL-embedded
Obsoletes: MySQL-embedded-pro
Obsoletes: MySQL-embedded-classic MySQL-embedded-community MySQL-embedded-enterprise
Obsoletes: MySQL-embedded-advanced MySQL-embedded-advanced-gpl MySQL-embedded-enterprise-gpl
%else
Requires: MySQL-devel
Conflicts: mysql-embedded mysql-embedded-advanced
Obsoletes: MySQL-embedded-pro
Obsoletes: MySQL-embedded < %{version}-%{release}
Obsoletes: MySQL-embedded-advanced < %{version}-%{release}
Obsoletes: MySQL-embedded-classic MySQL-embedded-community MySQL-embedded-enterprise
Obsoletes: MySQL-embedded-advanced-gpl MySQL-embedded-enterprise-gpl
Provides: MySQL-embedded MySQL-embedded-advanced
%endif
%description -n MySQL-embedded%{product_suffix}
This package contains the MySQL server as an embedded library.
The embedded MySQL server library makes it possible to run a full-featured
MySQL server inside the client application. The main benefits are increased
speed and more simple management for embedded applications.
The API is identical for the embedded MySQL version and the
client/server version.
For a description of MySQL see the base MySQL RPM or http://www.mysql.com/
##############################################################################
%prep
%setup -T -a 0 -c -n %{src_dir}
##############################################################################
%build
# Fail quickly and obviously if user tries to build as root
%if %runselftest
if [ x"`id -u`" = x0 ]; then
echo "The MySQL regression tests may fail if run as root."
echo "If you really need to build the RPM as root, use"
echo "--define='runselftest 0' to skip the regression tests."
exit 1
fi
%endif
# Be strict about variables, bail at earliest opportunity, etc.
set -eu
# Optional package files
touch optional-files-devel
#
# Set environment in order of preference, MYSQL_BUILD_* first, then variable
# name, finally a default. RPM_OPT_FLAGS is assumed to be a part of the
# default RPM build environment.
#
# This is a hack, $RPM_OPT_FLAGS on ia64 hosts contains flags which break
# the compile in cmd-line-utils/libedit - needs investigation, but for now
# we simply unset it and use those specified directly in cmake.
%if "%{_arch}" == "ia64"
RPM_OPT_FLAGS=
%endif
export PATH=${MYSQL_BUILD_PATH:-$PATH}
export CC=${MYSQL_BUILD_CC:-${CC:-gcc}}
export CXX=${MYSQL_BUILD_CXX:-${CXX:-g++}}
export CFLAGS=${MYSQL_BUILD_CFLAGS:-${CFLAGS:-$RPM_OPT_FLAGS}}
export CXXFLAGS=${MYSQL_BUILD_CXXFLAGS:-${CXXFLAGS:-$RPM_OPT_FLAGS -felide-constructors}}
export LDFLAGS=${MYSQL_BUILD_LDFLAGS:-${LDFLAGS:-}}
export CMAKE=${MYSQL_BUILD_CMAKE:-${CMAKE:-cmake}}
export MAKE_JFLAG=${MYSQL_BUILD_MAKE_JFLAG:-}
# By default, a build will include the bundled "yaSSL" library for SSL.
# However, there may be a need to override.
# Protect against undefined variables if there is no override option.
%if %{undefined with_ssl}
%define ssl_option %{nil}
%else
%define ssl_option -DWITH_SSL=%{with_ssl}
%endif
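# For example (hypothetical invocation), building against the system OpenSSL
# rather than the bundled yaSSL could be requested with:
#   rpmbuild --define="with_ssl system" ...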
# Build debug mysqld and libmysqld.a
mkdir debug
(
cd debug
# Attempt to remove any optimisation flags from the debug build
CFLAGS=`echo " ${CFLAGS} " | \
sed -e 's/ -O[0-9]* / /' \
-e 's/ -unroll2 / /' \
-e 's/ -ip / /' \
-e 's/^ //' \
-e 's/ $//'`
CXXFLAGS=`echo " ${CXXFLAGS} " | \
sed -e 's/ -O[0-9]* / /' \
-e 's/ -unroll2 / /' \
-e 's/ -ip / /' \
-e 's/^ //' \
-e 's/ $//'`
# XXX: MYSQL_UNIX_ADDR should be in cmake/* but mysql_version is included before
# XXX: install_layout so we can't just set it based on INSTALL_LAYOUT=RPM
${CMAKE} ../%{src_dir} -DBUILD_CONFIG=mysql_release -DINSTALL_LAYOUT=RPM \
-DCMAKE_BUILD_TYPE=Debug \
-DMYSQL_UNIX_ADDR="%{mysqldatadir}/mysql.sock" \
-DFEATURE_SET="%{feature_set}" \
%{ssl_option} \
-DCOMPILATION_COMMENT="%{compilation_comment_debug}" \
-DMYSQL_SERVER_SUFFIX="%{server_suffix}"
echo BEGIN_DEBUG_CONFIG ; egrep '^#define' include/config.h ; echo END_DEBUG_CONFIG
make ${MAKE_JFLAG} VERBOSE=1
)
# Build full release
mkdir release
(
cd release
# XXX: MYSQL_UNIX_ADDR should be in cmake/* but mysql_version is included before
# XXX: install_layout so we can't just set it based on INSTALL_LAYOUT=RPM
${CMAKE} ../%{src_dir} -DBUILD_CONFIG=mysql_release -DINSTALL_LAYOUT=RPM \
-DCMAKE_BUILD_TYPE=RelWithDebInfo \
-DMYSQL_UNIX_ADDR="%{mysqldatadir}/mysql.sock" \
-DFEATURE_SET="%{feature_set}" \
%{ssl_option} \
-DCOMPILATION_COMMENT="%{compilation_comment_release}" \
-DMYSQL_SERVER_SUFFIX="%{server_suffix}"
echo BEGIN_NORMAL_CONFIG ; egrep '^#define' include/config.h ; echo END_NORMAL_CONFIG
make ${MAKE_JFLAG} VERBOSE=1
)
%if %runselftest
MTR_BUILD_THREAD=auto
export MTR_BUILD_THREAD
(cd release && make test-bt-fast || true)
%endif
##############################################################################
%install
RBR=$RPM_BUILD_ROOT
MBD=$RPM_BUILD_DIR/%{src_dir}
# Ensure that needed directories exists
install -d $RBR%{_sysconfdir}/{logrotate.d,init.d}
install -d $RBR%{mysqldatadir}/mysql
install -d $RBR%{_datadir}/mysql-test
install -d $RBR%{_datadir}/mysql/SELinux/RHEL4
install -d $RBR%{_includedir}
install -d $RBR%{_libdir}
install -d $RBR%{_mandir}
install -d $RBR%{_sbindir}
# Install all binaries
(
cd $MBD/release
make DESTDIR=$RBR install
)
# FIXME: at some point we should stop doing this and just install everything
# FIXME: directly into %{_libdir}/mysql - perhaps at the same time as renaming
# FIXME: the shared libraries to use libmysql*-$major.$minor.so syntax
mv -v $RBR/%{_libdir}/*.a $RBR/%{_libdir}/mysql/
# Install logrotate and autostart
install -m 644 $MBD/release/support-files/mysql-log-rotate $RBR%{_sysconfdir}/logrotate.d/mysql
install -m 755 $MBD/release/support-files/mysql.server $RBR%{_sysconfdir}/init.d/mysql
# Create a symlink "rcmysql", pointing to the init.script. SuSE users
# will appreciate that, as all services usually offer this.
ln -s %{_sysconfdir}/init.d/mysql $RBR%{_sbindir}/rcmysql
# Touch the place where the my.cnf config file might be located
# Just to make sure it's in the file list and marked as a config file
touch $RBR%{_sysconfdir}/my.cnf
# Install SELinux files in datadir
install -m 600 $MBD/%{src_dir}/support-files/RHEL4-SElinux/mysql.{fc,te} \
$RBR%{_datadir}/mysql/SELinux/RHEL4
%if %{WITH_TCMALLOC}
# Even though this is a shared library, put it under /usr/lib*/mysql, so it
# doesn't conflict with possible shared lib by the same name in /usr/lib*. See
# `mysql_config --variable=pkglibdir` and mysqld_safe for how this is used.
install -m 644 "%{malloc_lib_source}" \
"$RBR%{_libdir}/mysql/%{malloc_lib_target}"
%endif
# Remove man pages we explicitly do not want to package, avoids 'unpackaged
# files' warning.
# This has become obsolete: rm -f $RBR%{_mandir}/man1/make_win_bin_dist.1*
##############################################################################
# Post processing actions, i.e. when installed
##############################################################################
%pre -n MySQL-server%{product_suffix}
# This is the code running at the beginning of a RPM upgrade action,
# before replacing the old files with the new ones.
# ATTENTION: Parts of this are duplicated in the "triggerpostun" !
# There are users who deviate from the default file system layout.
# Check local settings to support them.
if [ -x %{_bindir}/my_print_defaults ]
then
mysql_datadir=`%{_bindir}/my_print_defaults server mysqld | grep '^--datadir=' | sed -n 's/--datadir=//p'`
PID_FILE_PATT=`%{_bindir}/my_print_defaults server mysqld | grep '^--pid-file=' | sed -n 's/--pid-file=//p'`
fi
if [ -z "$mysql_datadir" ]
then
mysql_datadir=%{mysqldatadir}
fi
if [ -z "$PID_FILE_PATT" ]
then
PID_FILE_PATT="$mysql_datadir/*.pid"
fi
# Check if we can safely upgrade. An upgrade is only safe if it's from one
# of our RPMs in the same version family.
# Handle both ways of spelling the capability.
installed=`rpm -q --whatprovides mysql-server 2> /dev/null`
if [ $? -ne 0 -o -z "$installed" ]; then
installed=`rpm -q --whatprovides MySQL-server 2> /dev/null`
fi
if [ $? -eq 0 -a -n "$installed" ]; then
installed=`echo $installed | sed 's/\([^ ]*\) .*/\1/'` # Tests have shown duplicated package names
vendor=`rpm -q --queryformat='%{VENDOR}' "$installed" 2>&1`
version=`rpm -q --queryformat='%{VERSION}' "$installed" 2>&1`
myoldvendor='%{mysql_old_vendor}'
myvendor_2='%{mysql_vendor_2}'
myvendor='%{mysql_vendor}'
myversion='%{mysql_version}'
old_family=`echo $version \
| sed -n -e 's,^\([1-9][0-9]*\.[0-9][0-9]*\)\..*$,\1,p'`
new_family=`echo $myversion \
| sed -n -e 's,^\([1-9][0-9]*\.[0-9][0-9]*\)\..*$,\1,p'`
[ -z "$vendor" ] && vendor='<unknown>'
[ -z "$old_family" ] && old_family="<unrecognized version $version>"
[ -z "$new_family" ] && new_family="<bad package specification: version $myversion>"
error_text=
if [ "$vendor" != "$myoldvendor" \
-a "$vendor" != "$myvendor_2" \
-a "$vendor" != "$myvendor" ]; then
error_text="$error_text
The current MySQL server package is provided by a different
vendor ($vendor) than $myoldvendor, $myvendor_2, or $myvendor.
Some files may be installed to different locations, including log
files and the service startup script in %{_sysconfdir}/init.d/.
"
fi
if [ "$old_family" != "$new_family" ]; then
error_text="$error_text
Upgrading directly from MySQL $old_family to MySQL $new_family may not
be safe in all cases. A manual dump and restore using mysqldump is
recommended. It is important to review the MySQL manual's Upgrading
section for version-specific incompatibilities.
"
fi
if [ -n "$error_text" ]; then
cat <<HERE >&2
******************************************************************
A MySQL server package ($installed) is installed.
$error_text
A manual upgrade is required.
- Ensure that you have a complete, working backup of your data and my.cnf
files
- Shut down the MySQL server cleanly
- Remove the existing MySQL packages. Usually this command will
list the packages you should remove:
rpm -qa | grep -i '^mysql-'
You may choose to use 'rpm --nodeps -ev <package-name>' to remove
the package which contains the mysqlclient shared library. The
library will be reinstalled by the MySQL-shared-compat package.
- Install the new MySQL packages supplied by $myvendor
- Ensure that the MySQL server is started
- Run the 'mysql_upgrade' program
This is a brief description of the upgrade process. Important details
can be found in the MySQL manual, in the Upgrading section.
******************************************************************
HERE
exit 1
fi
fi
# We assume that if there is exactly one ".pid" file,
# it contains the valid PID of a running MySQL server.
NR_PID_FILES=`ls $PID_FILE_PATT 2>/dev/null | wc -l`
case $NR_PID_FILES in
0 ) SERVER_TO_START='' ;; # No "*.pid" file == no running server
1 ) SERVER_TO_START='true' ;;
* ) SERVER_TO_START='' # Situation not clear
SEVERAL_PID_FILES=true ;;
esac
# That logic may be debated: We might check whether it is non-empty,
# contains exactly one number (possibly a PID), and whether "ps" finds it.
# OTOH, if there is no such process, it means a crash without a cleanup -
# is that a reason not to start a new server after upgrade?
STATUS_FILE=$mysql_datadir/RPM_UPGRADE_MARKER
if [ -f $STATUS_FILE ]; then
echo "Some previous upgrade was not finished:"
ls -ld $STATUS_FILE
echo "Please check its status, then do"
echo " rm $STATUS_FILE"
echo "before repeating the MySQL upgrade."
exit 1
elif [ -n "$SEVERAL_PID_FILES" ] ; then
echo "You have more than one PID file:"
ls -ld $PID_FILE_PATT
echo "Please check which one (if any) corresponds to a running server"
echo "and delete all others before repeating the MySQL upgrade."
exit 1
fi
NEW_VERSION=%{mysql_version}-%{release}
# The "pre" section code is also run on a first installation,
# when there is no data directory yet. Protect against error messages.
# Check for the existence of subdirectory "mysql/", the database of system
# tables like "mysql.user".
if [ -d $mysql_datadir/mysql ] ; then
echo "MySQL RPM upgrade to version $NEW_VERSION" > $STATUS_FILE
echo "'pre' step running at `date`" >> $STATUS_FILE
echo >> $STATUS_FILE
fcount=`ls -ltr $mysql_datadir/*.err 2>/dev/null | wc -l`
if [ $fcount -gt 0 ] ; then
echo "ERR file(s):" >> $STATUS_FILE
ls -ltr $mysql_datadir/*.err >> $STATUS_FILE
echo >> $STATUS_FILE
echo "Latest 'Version' line in latest file:" >> $STATUS_FILE
grep '^Version' `ls -tr $mysql_datadir/*.err | tail -1` | \
tail -1 >> $STATUS_FILE
echo >> $STATUS_FILE
fi
if [ -n "$SERVER_TO_START" ] ; then
# There is only one PID file, race possibility ignored
echo "PID file:" >> $STATUS_FILE
ls -l $PID_FILE_PATT >> $STATUS_FILE
cat $PID_FILE_PATT >> $STATUS_FILE
echo >> $STATUS_FILE
echo "Server process:" >> $STATUS_FILE
ps -fp `cat $PID_FILE_PATT` >> $STATUS_FILE
echo >> $STATUS_FILE
echo "SERVER_TO_START=$SERVER_TO_START" >> $STATUS_FILE
else
# Take a note we checked it ...
echo "PID file:" >> $STATUS_FILE
ls -l $PID_FILE_PATT >> $STATUS_FILE 2>&1
fi
fi
# Shut down a previously installed server first
# Note we *could* make that depend on $SERVER_TO_START, but we rather don't,
# so a "stop" is attempted even if there is no PID file.
# (Maybe the "stop" doesn't work then, but we might fix that in itself.)
if [ -x %{_sysconfdir}/init.d/mysql ] ; then
%{_sysconfdir}/init.d/mysql stop > /dev/null 2>&1
echo "Giving mysqld 5 seconds to exit nicely"
sleep 5
fi
%post -n MySQL-server%{product_suffix}
# This is the code running at the end of a RPM install or upgrade action,
# after the (new) files have been written.
# ATTENTION: Parts of this are duplicated in the "triggerpostun" !
# There are users who deviate from the default file system layout.
# Check local settings to support them.
if [ -x %{_bindir}/my_print_defaults ]
then
mysql_datadir=`%{_bindir}/my_print_defaults server mysqld | grep '^--datadir=' | sed -n 's/--datadir=//p'`
fi
if [ -z "$mysql_datadir" ]
then
mysql_datadir=%{mysqldatadir}
fi
NEW_VERSION=%{mysql_version}-%{release}
STATUS_FILE=$mysql_datadir/RPM_UPGRADE_MARKER
# ----------------------------------------------------------------------
# Create data directory if needed, check whether upgrade or install
# ----------------------------------------------------------------------
if [ ! -d $mysql_datadir ] ; then mkdir -m 755 $mysql_datadir; fi
if [ -f $STATUS_FILE ] ; then
SERVER_TO_START=`grep '^SERVER_TO_START=' $STATUS_FILE | cut -c17-`
else
SERVER_TO_START=''
fi
# echo "Analyzed: SERVER_TO_START=$SERVER_TO_START"
if [ ! -d $mysql_datadir/mysql ] ; then
mkdir $mysql_datadir/mysql $mysql_datadir/test
echo "MySQL RPM installation of version $NEW_VERSION" >> $STATUS_FILE
else
# If the directory exists, we may assume it is an upgrade.
echo "MySQL RPM upgrade to version $NEW_VERSION" >> $STATUS_FILE
fi
# ----------------------------------------------------------------------
# Make MySQL start/shutdown automatically when the machine does it.
# ----------------------------------------------------------------------
# NOTE: This still needs to be debated. Should we check whether these links
# for the other run levels exist(ed) before the upgrade?
# use chkconfig on Enterprise Linux and newer SuSE releases
if [ -x /sbin/chkconfig ] ; then
/sbin/chkconfig --add mysql
# use insserv for older SuSE Linux versions
elif [ -x /sbin/insserv ] ; then
/sbin/insserv %{_sysconfdir}/init.d/mysql
fi
# ----------------------------------------------------------------------
# Create a MySQL user and group. Do not report any problems if it already
# exists.
# ----------------------------------------------------------------------
groupadd -r %{mysqld_group} 2> /dev/null || true
useradd -M -r -d $mysql_datadir -s /bin/bash -c "MySQL server" \
-g %{mysqld_group} %{mysqld_user} 2> /dev/null || true
# The user may already exist, make sure it has the proper group nevertheless
# (BUG#12823)
usermod -g %{mysqld_group} %{mysqld_user} 2> /dev/null || true
# ----------------------------------------------------------------------
# Change permissions so that the user that will run the MySQL daemon
# owns all database files.
# ----------------------------------------------------------------------
chown -R %{mysqld_user}:%{mysqld_group} $mysql_datadir
# ----------------------------------------------------------------------
# Initiate databases if needed
# ----------------------------------------------------------------------
if ! grep '^MySQL RPM upgrade' $STATUS_FILE >/dev/null 2>&1 ; then
# Fix bug#45415: no "mysql_install_db" on an upgrade
# Do this as a negative to err towards more "install" runs
# rather than to miss one.
%{_bindir}/mysql_install_db --rpm --user=%{mysqld_user} --random-passwords
# Attention: Now 'root' is the only database user,
# its password is a random value found in ~/.mysql_secret,
# and the "password expired" flag is set:
# Any client needs that password, and the first command
# executed must be a new "set password"!
fi
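# Illustrative sketch only (not executed by this scriptlet) of what the
# administrator would typically do next; the new password is a placeholder
# and the exact format of ~/.mysql_secret may vary:
#   cat ~root/.mysql_secret          # shows the generated random password
#   mysql -u root -p                 # log in with that password, then:
#   mysql> SET PASSWORD = PASSWORD('new-secret');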
# ----------------------------------------------------------------------
# Upgrade databases if needed would go here - but it cannot be automated yet
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# Change permissions again to fix any new files.
# ----------------------------------------------------------------------
chown -R %{mysqld_user}:%{mysqld_group} $mysql_datadir
# ----------------------------------------------------------------------
# Fix permissions for the permission database so that only the user
# can read them.
# ----------------------------------------------------------------------
chmod -R og-rw $mysql_datadir/mysql
# ----------------------------------------------------------------------
# install SELinux files - but don't override existing ones
# ----------------------------------------------------------------------
SETARGETDIR=/etc/selinux/targeted/src/policy
SEDOMPROG=$SETARGETDIR/domains/program
SECONPROG=$SETARGETDIR/file_contexts/program
if [ -f /etc/redhat-release ] \
&& (grep -q "Red Hat Enterprise Linux .. release 4" /etc/redhat-release \
|| grep -q "CentOS release 4" /etc/redhat-release) ; then
echo
echo
echo 'Notes regarding SELinux on this platform:'
echo '========================================='
echo
echo 'The default policy might cause server startup to fail because it is'
echo 'not allowed to access critical files. In this case, please update'
echo 'your installation.'
echo
echo 'The default policy might also cause unavailability of SSL related'
echo 'features because the server is not allowed to access /dev/random'
echo 'and /dev/urandom. If this is a problem, please do the following:'
echo
echo ' 1) install selinux-policy-targeted-sources from your OS vendor'
echo ' 2) add the following two lines to '$SEDOMPROG/mysqld.te':'
echo ' allow mysqld_t random_device_t:chr_file read;'
echo ' allow mysqld_t urandom_device_t:chr_file read;'
echo ' 3) cd to '$SETARGETDIR' and issue the following command:'
echo ' make load'
echo
echo
fi
if [ -x sbin/restorecon ] ; then
sbin/restorecon -R var/lib/mysql
fi
# Was the server running before the upgrade? If so, restart the new one.
if [ "$SERVER_TO_START" = "true" ] ; then
# Restart in the same way that mysqld will be started normally.
if [ -x %{_sysconfdir}/init.d/mysql ] ; then
%{_sysconfdir}/init.d/mysql start
echo "Giving mysqld 5 seconds to start"
sleep 5
fi
fi
# Collect an upgrade history ...
echo "Upgrade/install finished at `date`" >> $STATUS_FILE
echo >> $STATUS_FILE
echo "=====" >> $STATUS_FILE
STATUS_HISTORY=$mysql_datadir/RPM_UPGRADE_HISTORY
cat $STATUS_FILE >> $STATUS_HISTORY
mv -f $STATUS_FILE ${STATUS_FILE}-LAST # for "triggerpostun"
#echo "Thank you for installing the MySQL Community Server! For Production
#systems, we recommend MySQL Enterprise, which contains enterprise-ready
#software, intelligent advisory services, and full production support with
#scheduled service packs and more. Visit www.mysql.com/enterprise for more
#information."
%preun -n MySQL-server%{product_suffix}
# Which '$1' does this refer to? Fedora docs have info:
# " ... a count of the number of versions of the package that are installed.
# Action Count
# Install the first time 1
# Upgrade 2 or higher (depending on the number of versions installed)
# Remove last version of package 0 "
#
# http://docs.fedoraproject.org/en-US/Fedora_Draft_Documentation/0.1/html/RPM_Guide/ch09s04s05.html
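# Sketch of the usual pattern for a "preun" script (illustrative only):
#   $1 = 0  -> the last copy is being removed (the only case handled below)
#   $1 >= 1 -> an upgrade is in progress, so the service is left untouched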
if [ $1 = 0 ] ; then
# Stop MySQL before uninstalling it
if [ -x %{_sysconfdir}/init.d/mysql ] ; then
%{_sysconfdir}/init.d/mysql stop > /dev/null
# Remove autostart of MySQL
# use chkconfig on Enterprise Linux and newer SuSE releases
if [ -x /sbin/chkconfig ] ; then
/sbin/chkconfig --del mysql
# For older SuSE Linux versions
elif [ -x /sbin/insserv ] ; then
/sbin/insserv -r %{_sysconfdir}/init.d/mysql
fi
fi
fi
# We do not remove the mysql user since it may still own a lot of
# database files.
%triggerpostun -n MySQL-server%{product_suffix} --MySQL-server-community
# Setup: We renamed this package, so any existing "server-community"
# package will be removed when this "server" is installed.
# Problem: RPM will first run the "pre" and "post" sections of this script,
# and only then the "preun" of that old community server.
# But this "preun" includes stopping the server and uninstalling the service,
# "chkconfig --del mysql" which removes the symlinks to the start script.
# Solution: *After* the community server got removed, restart this server
# and re-install the service.
#
# For information about triggers in spec files, see the Fedora docs:
# http://docs.fedoraproject.org/en-US/Fedora_Draft_Documentation/0.1/html/RPM_Guide/ch10s02.html
# For all details of this code, see the "pre" and "post" sections.
# There are users who deviate from the default file system layout.
# Check local settings to support them.
if [ -x %{_bindir}/my_print_defaults ]
then
mysql_datadir=`%{_bindir}/my_print_defaults server mysqld | grep '^--datadir=' | sed -n 's/--datadir=//p'`
fi
if [ -z "$mysql_datadir" ]
then
mysql_datadir=%{mysqldatadir}
fi
NEW_VERSION=%{mysql_version}-%{release}
STATUS_FILE=$mysql_datadir/RPM_UPGRADE_MARKER-LAST # Note the difference!
STATUS_HISTORY=$mysql_datadir/RPM_UPGRADE_HISTORY
if [ -f $STATUS_FILE ] ; then
SERVER_TO_START=`grep '^SERVER_TO_START=' $STATUS_FILE | cut -c17-`
else
# This should never happen, but let's be prepared
SERVER_TO_START=''
fi
echo "Analyzed: SERVER_TO_START=$SERVER_TO_START"
if [ -x /sbin/chkconfig ] ; then
/sbin/chkconfig --add mysql
# use insserv for older SuSE Linux versions
elif [ -x /sbin/insserv ] ; then
/sbin/insserv %{_sysconfdir}/init.d/mysql
fi
# Was the server running before the upgrade? If so, restart the new one.
if [ "$SERVER_TO_START" = "true" ] ; then
# Restart in the same way that mysqld will be started normally.
if [ -x %{_sysconfdir}/init.d/mysql ] ; then
%{_sysconfdir}/init.d/mysql start
echo "Giving mysqld 5 seconds to start"
sleep 5
fi
fi
echo "Trigger 'postun --community' finished at `date`" >> $STATUS_HISTORY
echo >> $STATUS_HISTORY
echo "=====" >> $STATUS_HISTORY
# ----------------------------------------------------------------------
# Clean up the BuildRoot after build is done
# ----------------------------------------------------------------------
%clean
[ "$RPM_BUILD_ROOT" != "/" ] && [ -d $RPM_BUILD_ROOT ] \
&& rm -rf $RPM_BUILD_ROOT;
##############################################################################
# Files section
##############################################################################
%files -n MySQL-server%{product_suffix} -f release/support-files/plugins.files
%defattr(-,root,root,0755)
%if %{defined license_files_server}
%doc %{license_files_server}
%endif
%doc %{src_dir}/Docs/ChangeLog
%doc %{src_dir}/Docs/INFO_SRC*
%doc release/Docs/INFO_BIN*
%doc release/support-files/my-default.cnf
%doc %attr(644, root, root) %{_infodir}/mysql.info*
%doc %attr(644, root, man) %{_mandir}/man1/innochecksum.1*
%doc %attr(644, root, man) %{_mandir}/man1/my_print_defaults.1*
%doc %attr(644, root, man) %{_mandir}/man1/myisam_ftdump.1*
%doc %attr(644, root, man) %{_mandir}/man1/myisamchk.1*
%doc %attr(644, root, man) %{_mandir}/man1/myisamlog.1*
%doc %attr(644, root, man) %{_mandir}/man1/myisampack.1*
%doc %attr(644, root, man) %{_mandir}/man1/mysql_convert_table_format.1*
%doc %attr(644, root, man) %{_mandir}/man1/mysql_fix_extensions.1*
%doc %attr(644, root, man) %{_mandir}/man8/mysqld.8*
%doc %attr(644, root, man) %{_mandir}/man1/mysqld_multi.1*
%doc %attr(644, root, man) %{_mandir}/man1/mysqld_safe.1*
%doc %attr(644, root, man) %{_mandir}/man1/mysqldumpslow.1*
%doc %attr(644, root, man) %{_mandir}/man1/mysql_install_db.1*
%doc %attr(644, root, man) %{_mandir}/man1/mysql_plugin.1*
%doc %attr(644, root, man) %{_mandir}/man1/mysql_secure_installation.1*
%doc %attr(644, root, man) %{_mandir}/man1/mysql_setpermission.1*
%doc %attr(644, root, man) %{_mandir}/man1/mysql_upgrade.1*
%doc %attr(644, root, man) %{_mandir}/man1/mysqlhotcopy.1*
%doc %attr(644, root, man) %{_mandir}/man1/mysqlman.1*
%doc %attr(644, root, man) %{_mandir}/man1/mysql.server.1*
%doc %attr(644, root, man) %{_mandir}/man1/mysqltest.1*
%doc %attr(644, root, man) %{_mandir}/man1/mysql_tzinfo_to_sql.1*
%doc %attr(644, root, man) %{_mandir}/man1/mysql_zap.1*
%doc %attr(644, root, man) %{_mandir}/man1/mysqlbug.1*
%doc %attr(644, root, man) %{_mandir}/man1/perror.1*
%doc %attr(644, root, man) %{_mandir}/man1/replace.1*
%doc %attr(644, root, man) %{_mandir}/man1/resolve_stack_dump.1*
%doc %attr(644, root, man) %{_mandir}/man1/resolveip.1*
%ghost %config(noreplace,missingok) %{_sysconfdir}/my.cnf
%attr(755, root, root) %{_bindir}/innochecksum
%attr(755, root, root) %{_bindir}/my_print_defaults
%attr(755, root, root) %{_bindir}/myisam_ftdump
%attr(755, root, root) %{_bindir}/myisamchk
%attr(755, root, root) %{_bindir}/myisamlog
%attr(755, root, root) %{_bindir}/myisampack
%attr(755, root, root) %{_bindir}/mysql_convert_table_format
%attr(755, root, root) %{_bindir}/mysql_fix_extensions
%attr(755, root, root) %{_bindir}/mysql_install_db
%attr(755, root, root) %{_bindir}/mysql_plugin
%attr(755, root, root) %{_bindir}/mysql_secure_installation
%attr(755, root, root) %{_bindir}/mysql_setpermission
%attr(755, root, root) %{_bindir}/mysql_tzinfo_to_sql
%attr(755, root, root) %{_bindir}/mysql_upgrade
%attr(755, root, root) %{_bindir}/mysql_zap
%attr(755, root, root) %{_bindir}/mysqlbug
%attr(755, root, root) %{_bindir}/mysqld_multi
%attr(755, root, root) %{_bindir}/mysqld_safe
%attr(755, root, root) %{_bindir}/mysqldumpslow
%attr(755, root, root) %{_bindir}/mysqlhotcopy
%attr(755, root, root) %{_bindir}/mysqltest
%attr(755, root, root) %{_bindir}/perror
%attr(755, root, root) %{_bindir}/replace
%attr(755, root, root) %{_bindir}/resolve_stack_dump
%attr(755, root, root) %{_bindir}/resolveip
%attr(755, root, root) %{_sbindir}/mysqld
%attr(755, root, root) %{_sbindir}/mysqld-debug
%attr(755, root, root) %{_sbindir}/rcmysql
%attr(755, root, root) %{_libdir}/mysql/plugin/daemon_example.ini
%if %{WITH_TCMALLOC}
%attr(755, root, root) %{_libdir}/mysql/%{malloc_lib_target}
%endif
%attr(644, root, root) %config(noreplace,missingok) %{_sysconfdir}/logrotate.d/mysql
%attr(755, root, root) %{_sysconfdir}/init.d/mysql
%attr(755, root, root) %{_datadir}/mysql/
# ----------------------------------------------------------------------------
%files -n MySQL-client%{product_suffix}
%defattr(-, root, root, 0755)
%attr(755, root, root) %{_bindir}/msql2mysql
%attr(755, root, root) %{_bindir}/mysql
%attr(755, root, root) %{_bindir}/mysql_find_rows
%attr(755, root, root) %{_bindir}/mysql_waitpid
%attr(755, root, root) %{_bindir}/mysqlaccess
# XXX: This should be moved to %{_sysconfdir}
%attr(644, root, root) %{_bindir}/mysqlaccess.conf
%attr(755, root, root) %{_bindir}/mysqladmin
%attr(755, root, root) %{_bindir}/mysqlbinlog
%attr(755, root, root) %{_bindir}/mysqlcheck
%attr(755, root, root) %{_bindir}/mysqldump
%attr(755, root, root) %{_bindir}/mysqlimport
%attr(755, root, root) %{_bindir}/mysqlshow
%attr(755, root, root) %{_bindir}/mysqlslap
%attr(755, root, root) %{_bindir}/mysql_config_editor
%doc %attr(644, root, man) %{_mandir}/man1/msql2mysql.1*
%doc %attr(644, root, man) %{_mandir}/man1/mysql.1*
%doc %attr(644, root, man) %{_mandir}/man1/mysql_find_rows.1*
%doc %attr(644, root, man) %{_mandir}/man1/mysql_waitpid.1*
%doc %attr(644, root, man) %{_mandir}/man1/mysqlaccess.1*
%doc %attr(644, root, man) %{_mandir}/man1/mysqladmin.1*
%doc %attr(644, root, man) %{_mandir}/man1/mysqlbinlog.1*
%doc %attr(644, root, man) %{_mandir}/man1/mysqlcheck.1*
%doc %attr(644, root, man) %{_mandir}/man1/mysqldump.1*
%doc %attr(644, root, man) %{_mandir}/man1/mysqlimport.1*
%doc %attr(644, root, man) %{_mandir}/man1/mysqlshow.1*
%doc %attr(644, root, man) %{_mandir}/man1/mysqlslap.1*
%doc %attr(644, root, man) %{_mandir}/man1/mysql_config_editor.1*
# ----------------------------------------------------------------------------
%files -n MySQL-devel%{product_suffix} -f optional-files-devel
%defattr(-, root, root, 0755)
%doc %attr(644, root, man) %{_mandir}/man1/comp_err.1*
%doc %attr(644, root, man) %{_mandir}/man1/mysql_config.1*
%attr(755, root, root) %{_bindir}/mysql_config
%dir %attr(755, root, root) %{_includedir}/mysql
%dir %attr(755, root, root) %{_libdir}/mysql
%{_includedir}/mysql/*
%{_datadir}/aclocal/mysql.m4
%{_libdir}/mysql/libmysqlclient.a
%{_libdir}/mysql/libmysqlclient_r.a
%{_libdir}/mysql/libmysqlservices.a
# ----------------------------------------------------------------------------
%files -n MySQL-shared%{product_suffix}
%defattr(-, root, root, 0755)
# Shared libraries (omit for architectures that don't support them)
%{_libdir}/libmysql*.so*
%post -n MySQL-shared%{product_suffix}
/sbin/ldconfig
%postun -n MySQL-shared%{product_suffix}
/sbin/ldconfig
# ----------------------------------------------------------------------------
%files -n MySQL-test%{product_suffix}
%defattr(-, root, root, 0755)
%attr(-, root, root) %{_datadir}/mysql-test
%attr(755, root, root) %{_bindir}/mysql_client_test
%attr(755, root, root) %{_bindir}/mysql_client_test_embedded
%attr(755, root, root) %{_bindir}/mysqltest_embedded
%doc %attr(644, root, man) %{_mandir}/man1/mysql_client_test.1*
%doc %attr(644, root, man) %{_mandir}/man1/mysql-stress-test.pl.1*
%doc %attr(644, root, man) %{_mandir}/man1/mysql-test-run.pl.1*
%doc %attr(644, root, man) %{_mandir}/man1/mysql_client_test_embedded.1*
%doc %attr(644, root, man) %{_mandir}/man1/mysqltest_embedded.1*
# ----------------------------------------------------------------------------
%files -n MySQL-embedded%{product_suffix}
%defattr(-, root, root, 0755)
%attr(755, root, root) %{_bindir}/mysql_embedded
%attr(644, root, root) %{_libdir}/mysql/libmysqld.a
%attr(644, root, root) %{_libdir}/mysql/libmysqld-debug.a
##############################################################################
# The spec file changelog only includes changes made to the spec file
# itself - note that they must be ordered by date (important when
# merging BK trees)
##############################################################################
%changelog
* Mon Nov 05 2012 Joerg Bruehe <[email protected]>
- Allow to override the default to use the bundled yaSSL by an option like
--define="with_ssl /path/to/ssl"
* Wed Oct 10 2012 Bjorn Munch <[email protected]>
- Replace old my-*.cnf config file examples with template my-default.cnf
* Fri Oct 05 2012 Joerg Bruehe <[email protected]>
- Let the installation use the new option "--random-passwords" of "mysql_install_db".
(Bug# 12794345 Ensure root password)
- Fix an inconsistency: "new install" vs "upgrade" are told from the (non)existence
of "$mysql_datadir/mysql" (holding table "mysql.user" and other system stuff).
* Tue Jul 24 2012 Joerg Bruehe <[email protected]>
- Add a macro "runselftest":
if set to 1 (default), the test suite will be run during the RPM build;
  this can be overridden via the command line by adding
--define "runselftest 0"
Failures of the test suite will NOT make the RPM build fail!
* Mon Jul 16 2012 Joerg Bruehe <[email protected]>
- Add the man page for the "mysql_config_editor".
* Mon Jun 11 2012 Joerg Bruehe <[email protected]>
- Make sure newly added "SPECIFIC-ULN/" directory does not disturb packaging.
* Wed Feb 29 2012 Brajmohan Saxena <[email protected]>
- Removal all traces of the readline library from mysql (BUG 13738013)
* Wed Sep 28 2011 Joerg Bruehe <[email protected]>
- Fix duplicate mentioning of "mysql_plugin" and its manual page,
it is better to keep alphabetic order in the files list (merging!).
* Wed Sep 14 2011 Joerg Bruehe <[email protected]>
- Let the RPM capabilities ("obsoletes" etc) ensure that an upgrade may replace
the RPMs of any configuration (of the current or the preceding release series)
by the new ones. This is done by not using the implicitly generated capabilities
(which include the configuration name) and relying on more generic ones which
just list the function ("server", "client", ...).
The implicit generation cannot be prevented, so all these capabilities must be
explicitly listed in "Obsoletes:"
* Tue Sep 13 2011 Jonathan Perkin <[email protected]>
- Add support for Oracle Linux 6 and Red Hat Enterprise Linux 6. Due to
changes in RPM behaviour ($RPM_BUILD_ROOT is removed prior to install)
this necessitated a move of the libmygcc.a installation to the install
phase, which is probably where it belonged in the first place.
* Tue Sep 13 2011 Joerg Bruehe <[email protected]>
- "make_win_bin_dist" and its manual are dropped, cmake does it different.
* Thu Sep 08 2011 Daniel Fischer <[email protected]>
- Add mysql_plugin man page.
* Tue Aug 30 2011 Tor Didriksen <[email protected]>
- Set CXX=g++ by default to add a dependency on libgcc/libstdc++.
Also, remove the use of the -fno-exceptions and -fno-rtti flags.
TODO: update distro_buildreq/distro_requires
* Tue Aug 30 2011 Joerg Bruehe <[email protected]>
- Add the manual page for "mysql_plugin" to the server package.
* Fri Aug 19 2011 Joerg Bruehe <[email protected]>
- Null-upmerge the fix of bug#37165: This spec file is not affected.
- Replace "/var/lib/mysql" by the spec file variable "%{mysqldatadir}".
* Fri Aug 12 2011 Daniel Fischer <[email protected]>
- Source plugin library files list from cmake-generated file.
* Mon Jul 25 2011 Chuck Bell <[email protected]>
- Added the mysql_plugin client - enables or disables plugins.
* Thu Jul 21 2011 Sunanda Menon <[email protected]>
- Fix bug#12561297: Added the MySQL embedded binary
* Thu Jul 07 2011 Joerg Bruehe <[email protected]>
- Fix bug#45415: "rpm upgrade recreates test database"
Let the creation of the "test" database happen only during a new installation,
not in an RPM upgrade.
This affects both the "mkdir" and the call of "mysql_install_db".
* Thu Feb 09 2011 Joerg Bruehe <[email protected]>
- Fix bug#56581: If an installation deviates from the default file locations
("datadir" and "pid-file"), the mechanism to detect a running server (on upgrade)
should still work, and use these locations.
The problem was that the fix for bug#27072 did not check for local settings.
* Mon Jan 31 2011 Joerg Bruehe <[email protected]>
- Install the new "manifest" files: "INFO_SRC" and "INFO_BIN".
* Tue Nov 23 2010 Jonathan Perkin <[email protected]>
- EXCEPTIONS-CLIENT has been deleted, remove it from here too
- Support MYSQL_BUILD_MAKE_JFLAG environment variable for passing
a '-j' argument to make.
* Mon Nov 1 2010 Georgi Kodinov <[email protected]>
- Added test authentication (WL#1054) plugin binaries
* Wed Oct 6 2010 Georgi Kodinov <[email protected]>
- Added example external authentication (WL#1054) plugin binaries
* Wed Aug 11 2010 Joerg Bruehe <[email protected]>
- With a recent spec file cleanup, names have changed: A "-community" part was dropped.
Reflect that in the "Obsoletes" specifications.
- Add a "triggerpostun" to handle the uninstall of the "-community" server RPM.
- This fixes bug#55015 "MySQL server is not restarted properly after RPM upgrade".
* Tue Jun 15 2010 Joerg Bruehe <[email protected]>
- Change the behaviour on installation and upgrade:
On installation, do not autostart the server.
*Iff* the server was stopped before the upgrade is started, this is taken as a
sign the administrator is handling that manually, and so the new server will
not be started automatically at the end of the upgrade.
The start/stop scripts will still be installed, so the server will be started
on the next machine boot.
This is the 5.5 version of fixing bug#27072 (RPM autostarting the server).
* Tue Jun 1 2010 Jonathan Perkin <[email protected]>
- Implement SELinux checks from distribution-specific spec file.
* Wed May 12 2010 Jonathan Perkin <[email protected]>
- Large number of changes to build using CMake
- Introduce distribution-specific RPMs
- Drop debuginfo, build all binaries with debug/symbols
- Remove __os_install_post, use native macro
- Remove _unpackaged_files_terminate_build, make it an error to have
unpackaged files
- Remove cluster RPMs
* Wed Mar 24 2010 Joerg Bruehe <[email protected]>
- Add "--with-perfschema" to the configure options.
* Mon Mar 22 2010 Joerg Bruehe <[email protected]>
- User "usr/lib*" to allow for both "usr/lib" and "usr/lib64",
mask "rmdir" return code 1.
- Remove "ha_example.*" files from the list, they aren't built.
* Wed Mar 17 2010 Joerg Bruehe <[email protected]>
- Fix a wrong path name in handling the debug plugins.
* Wed Mar 10 2010 Joerg Bruehe <[email protected]>
- Take the result of the debug plugin build and put it into the optimized tree,
so that it becomes part of the final installation;
include the files in the packlist. Part of the fixes for bug#49022.
* Mon Mar 01 2010 Joerg Bruehe <[email protected]>
- Set "Oracle and/or its affiliates" as the vendor and copyright owner,
accept upgrading from packages showing MySQL or Sun as vendor.
* Fri Feb 12 2010 Joerg Bruehe <[email protected]>
- Formatting changes:
Have a consistent structure of separator lines and of indentation
(8 leading blanks => tab).
- Introduce the variable "src_dir".
- Give the environment variables "MYSQL_BUILD_CC(CXX)" precedence
over "CC" ("CXX").
- Drop the old "with_static" argument analysis, this is not supported
in 5.1 since ages.
- Introduce variables to control the handlers individually, as well
as other options.
- Use the new "--with-plugin" notation for the table handlers.
- Drop handling "/etc/rc.d/init.d/mysql", the switch to "/etc/init.d/mysql"
was done back in 2002 already.
- Make "--with-zlib-dir=bundled" the default, add an option to disable it.
- Add missing manual pages to the file list.
- Improve the runtime check for "libgcc.a", protect it against being tried
with the Intel compiler "icc".
* Mon Jan 11 2010 Joerg Bruehe <[email protected]>
- Change RPM file naming:
- Suffix like "-m2", "-rc" becomes part of version as "_m2", "_rc".
- Release counts from 1, not 0.
* Wed Dec 23 2009 Joerg Bruehe <[email protected]>
- The "semisync" plugin file name has lost its introductory "lib",
adapt the file lists for the subpackages.
This is a part missing from the fix for bug#48351.
- Remove the "fix_privilege_tables" manual, it does not exist in 5.5
(and likely, the whole script will go, too).
* Mon Nov 16 2009 Joerg Bruehe <[email protected]>
- Fix some problems with the directives around "tcmalloc" (experimental),
remove erroneous traces of the InnoDB plugin (that is 5.1 only).
* Fri Oct 06 2009 Magnus Blaudd <[email protected]>
- Removed mysql_fix_privilege_tables
* Fri Oct 02 2009 Alexander Nozdrin <[email protected]>
- "mysqlmanager" got removed from version 5.4, all references deleted.
* Fri Aug 28 2009 Joerg Bruehe <[email protected]>
- Merge up from 5.1 to 5.4: Remove handling for the InnoDB plugin.
* Thu Aug 27 2009 Joerg Bruehe <[email protected]>
- This version does not contain the "Instance manager", "mysqlmanager":
Remove it from the spec file so that packaging succeeds.
* Mon Aug 24 2009 Jonathan Perkin <[email protected]>
- Add conditionals for bundled zlib and innodb plugin
* Fri Aug 21 2009 Jonathan Perkin <[email protected]>
- Install plugin libraries in appropriate packages.
- Disable libdaemon_example and ftexample plugins.
* Thu Aug 20 2009 Jonathan Perkin <[email protected]>
- Update variable used for mysql-test suite location to match source.
* Fri Nov 07 2008 Joerg Bruehe <[email protected]>
- Correct yesterday's fix, so that it also works for the last flag,
and fix a wrong quoting: un-quoted quote marks must not be escaped.
* Thu Nov 06 2008 Kent Boortz <[email protected]>
- Removed "mysql_upgrade_shell"
- Removed some copy/paste between debug and normal build
* Thu Nov 06 2008 Joerg Bruehe <[email protected]>
- Modify CFLAGS and CXXFLAGS such that a debug build is not optimized.
This should cover both gcc and icc flags. Fixes bug#40546.
* Fri Aug 29 2008 Kent Boortz <[email protected]>
- Removed the "Federated" storage engine option, and enabled in all
* Tue Aug 26 2008 Joerg Bruehe <[email protected]>
- Get rid of the "warning: Installed (but unpackaged) file(s) found:"
Some generated files aren't needed in RPMs:
- the "sql-bench/" subdirectory
Some files were missing:
- /usr/share/aclocal/mysql.m4 ("devel" subpackage)
- Manual "mysqlbug" ("server" subpackage)
- Program "innochecksum" and its manual ("server" subpackage)
- Manual "mysql_find_rows" ("client" subpackage)
- Script "mysql_upgrade_shell" ("client" subpackage)
- Program "ndb_cpcd" and its manual ("ndb-extra" subpackage)
- Manuals "ndb_mgm" + "ndb_restore" ("ndb-tools" subpackage)
* Mon Mar 31 2008 Kent Boortz <[email protected]>
- Made the "Federated" storage engine an option
- Made the "Cluster" storage engine and sub packages an option
* Wed Mar 19 2008 Joerg Bruehe <[email protected]>
- Add the man pages for "ndbd" and "ndb_mgmd".
* Mon Feb 18 2008 Timothy Smith <[email protected]>
- Require a manual upgrade if the already-installed mysql-server is
from another vendor, or is of a different major version.
* Wed May 02 2007 Joerg Bruehe <[email protected]>
- "ndb_size.tmpl" is not needed any more,
"man1/mysql_install_db.1" lacked the trailing '*'.
* Sat Apr 07 2007 Kent Boortz <[email protected]>
- Removed man page for "mysql_create_system_tables"
* Wed Mar 21 2007 Daniel Fischer <[email protected]>
- Add debug server.
* Mon Mar 19 2007 Daniel Fischer <[email protected]>
- Remove Max RPMs; the server RPMs contain a mysqld compiled with all
features that previously only were built into Max.
* Fri Mar 02 2007 Joerg Bruehe <[email protected]>
- Add several man pages for NDB which are now created.
* Fri Jan 05 2007 Kent Boortz <[email protected]>
- Put back "libmygcc.a", found no real reason it was removed.
- Add CFLAGS to gcc call with --print-libgcc-file, to make sure the
correct "libgcc.a" path is returned for the 32/64 bit architecture.
* Mon Dec 18 2006 Joerg Bruehe <[email protected]>
- Fix the move of "mysqlmanager" to section 8: Directory name was wrong.
* Thu Dec 14 2006 Joerg Bruehe <[email protected]>
- Include the new man pages for "my_print_defaults" and "mysql_tzinfo_to_sql"
in the server RPM.
- The "mysqlmanager" man page got moved from section 1 to 8.
* Thu Nov 30 2006 Joerg Bruehe <[email protected]>
- Call "make install" using "benchdir_root=%{_datadir}",
because that is affecting the regression test suite as well.
* Thu Nov 16 2006 Joerg Bruehe <[email protected]>
- Explicitly note that the "MySQL-shared" RPMs (as built by MySQL AB)
replace "mysql-shared" (as distributed by SuSE) to allow easy upgrading
(bug#22081).
* Mon Nov 13 2006 Joerg Bruehe <[email protected]>
- Add "--with-partition" to all server builds.
- Use "--report-features" in one test run per server build.
* Tue Aug 15 2006 Joerg Bruehe <[email protected]>
- The "max" server is removed from packages, effective from 5.1.12-beta.
Delete all steps to build, package, or install it.
* Mon Jul 10 2006 Joerg Bruehe <[email protected]>
- Fix a typing error in the "make" target for the Perl script to run the tests.
* Tue Jul 04 2006 Joerg Bruehe <[email protected]>
- Use the Perl script to run the tests, because it will automatically check
whether the server is configured with SSL.
* Tue Jun 27 2006 Joerg Bruehe <[email protected]>
- move "mysqldumpslow" from the client RPM to the server RPM (bug#20216)
- Revert all previous attempts to call "mysql_upgrade" during RPM upgrade,
there are some more aspects which need to be solved before this is possible.
For now, just ensure the binary "mysql_upgrade" is delivered and installed.
* Thu Jun 22 2006 Joerg Bruehe <[email protected]>
- Close a gap of the previous version by explicitly using
a newly created temporary directory for the socket to be used
in the "mysql_upgrade" operation, overriding any local setting.
* Tue Jun 20 2006 Joerg Bruehe <[email protected]>
- To run "mysql_upgrade", we need a running server;
start it in isolation and skip password checks.
* Sat May 20 2006 Kent Boortz <[email protected]>
- Always compile for PIC, position independent code.
* Wed May 10 2006 Kent Boortz <[email protected]>
- Use character set "all" when compiling with Cluster, to make Cluster
nodes independent on the character set directory, and the problem
that two RPM sub packages both wants to install this directory.
* Mon May 01 2006 Kent Boortz <[email protected]>
- Use "./libtool --mode=execute" instead of searching for the
executable in current directory and ".libs".
* Fri Apr 28 2006 Kent Boortz <[email protected]>
- Install and run "mysql_upgrade"
* Wed Apr 12 2006 Jim Winstead <[email protected]>
- Remove sql-bench, and MySQL-bench RPM (will be built as an independent
project from the mysql-bench repository)
* Tue Apr 11 2006 Jim Winstead <[email protected]>
- Remove old mysqltestmanager and related programs
* Sat Apr 01 2006 Kent Boortz <[email protected]>
- Set $LDFLAGS from $MYSQL_BUILD_LDFLAGS
* Wed Mar 07 2006 Kent Boortz <[email protected]>
- Changed product name from "Community Edition" to "Community Server"
* Mon Mar 06 2006 Kent Boortz <[email protected]>
- Fast mutexes is now disabled by default, but should be
used in Linux builds.
* Mon Feb 20 2006 Kent Boortz <[email protected]>
- Reintroduced a max build
- Limited testing of 'debug' and 'max' servers
- Berkeley DB only in 'max'
* Mon Feb 13 2006 Joerg Bruehe <[email protected]>
- Use "-i" on "make test-force";
this is essential for later evaluation of this log file.
* Thu Feb 09 2006 Kent Boortz <[email protected]>
- Pass '-static' to libtool, link static with our own libraries, dynamic
with system libraries. Link with the bundled zlib.
* Wed Feb 08 2006 Kristian Nielsen <[email protected]>
- Modified RPM spec to match new 5.1 debug+max combined community packaging.
* Sun Dec 18 2005 Kent Boortz <[email protected]>
- Added "client/mysqlslap"
* Mon Dec 12 2005 Rodrigo Novo <[email protected]>
- Added zlib to the list of (static) libraries installed
- Added check against libtool weirdness (WRT: sql/mysqld || sql/.libs/mysqld)
- Compile MySQL with bundled zlib
- Fixed %packager name to "MySQL Production Engineering Team"
* Mon Dec 05 2005 Joerg Bruehe <[email protected]>
- Avoid using the "bundled" zlib on "shared" builds:
As it is not installed (on the build system), this gives dependency
problems with "libtool" causing the build to fail.
(Change was done on Nov 11, but left uncommented.)
* Tue Nov 22 2005 Joerg Bruehe <[email protected]>
- Extend the file existence check for "init.d/mysql" on un-install
to also guard the call to "insserv"/"chkconfig".
* Thu Oct 27 2005 Lenz Grimmer <[email protected]>
- added more man pages
* Wed Oct 19 2005 Kent Boortz <[email protected]>
- Made yaSSL support an option (off by default)
* Wed Oct 19 2005 Kent Boortz <[email protected]>
- Enabled yaSSL support
* Sat Oct 15 2005 Kent Boortz <[email protected]>
- Give mode arguments the same way in all places
- Moved copy of mysqld.a to "standard" build, but
disabled it as we don't do embedded yet in 5.0
* Fri Oct 14 2005 Kent Boortz <[email protected]>
- For 5.x, always compile with --with-big-tables
- Copy the config.log file to location outside
the build tree
* Fri Oct 14 2005 Kent Boortz <[email protected]>
- Removed unneeded/obsolete configure options
- Added archive engine to standard server
- Removed the embedded server from experimental server
- Changed suffix "-Max" => "-max"
- Changed comment string "Max" => "Experimental"
* Thu Oct 13 2005 Lenz Grimmer <[email protected]>
- added a usermod call to assign a potentially existing mysql user to the
correct user group (BUG#12823)
- Save the perror binary built during Max build so it supports the NDB
error codes (BUG#13740)
- added a separate macro "mysqld_group" to be able to define the
  user group of the mysql user separately, if desired.
* Thu Sep 29 2005 Lenz Grimmer <[email protected]>
- fixed the removing of the RPM_BUILD_ROOT in the %clean section (the
$RBR variable did not get expanded, thus leaving old build roots behind)
* Thu Aug 04 2005 Lenz Grimmer <[email protected]>
- Fixed the creation of the mysql user group account in the postinstall
section (BUG 12348)
- Fixed enabling the Archive storage engine in the Max binary
* Tue Aug 02 2005 Lenz Grimmer <[email protected]>
- Fixed the Requires: tag for the server RPM (BUG 12233)
* Fri Jul 15 2005 Lenz Grimmer <[email protected]>
- create a "mysql" user group and assign the mysql user account to that group
in the server postinstall section. (BUG 10984)
* Tue Jun 14 2005 Lenz Grimmer <[email protected]>
- Do not build statically on i386 by default, only when adding either "--with
static" or "--define '_with_static 1'" to the RPM build options. Static
linking really only makes sense when linking against the specially patched
glibc 2.2.5.
* Mon Jun 06 2005 Lenz Grimmer <[email protected]>
- added mysql_client_test to the "bench" subpackage (BUG 10676)
- added the libndbclient static and shared libraries (BUG 10676)
* Wed Jun 01 2005 Lenz Grimmer <[email protected]>
- use "mysqldatadir" variable instead of hard-coding the path multiple times
- use the "mysqld_user" variable on all occasions a user name is referenced
- removed (incomplete) Brazilian translations
- removed redundant release tags from the subpackage descriptions
* Wed May 25 2005 Joerg Bruehe <[email protected]>
- Added a "make clean" between separate calls to "BuildMySQL".
* Thu May 12 2005 Guilhem Bichot <[email protected]>
- Removed the mysql_tableinfo script made obsolete by the information schema
* Wed Apr 20 2005 Lenz Grimmer <[email protected]>
- Enabled the "blackhole" storage engine for the Max RPM
* Wed Apr 13 2005 Lenz Grimmer <[email protected]>
- removed the MySQL manual files (html/ps/texi) - they have been removed
  from the MySQL sources and are now available separately.
* Mon Apr 4 2005 Petr Chardin <[email protected]>
- old mysqlmanager, mysqlmanagerc and mysqlmanager-pwger renamed into
mysqltestmanager, mysqltestmanager and mysqltestmanager-pwgen respectively
* Fri Mar 18 2005 Lenz Grimmer <[email protected]>
- Disabled RAID in the Max binaries once and for all (it has finally been
removed from the source tree)
* Sun Feb 20 2005 Petr Chardin <[email protected]>
- Install MySQL Instance Manager together with mysqld, touch mysqlmanager
password file
* Mon Feb 14 2005 Lenz Grimmer <[email protected]>
- Fixed the compilation comments and moved them into the separate build sections
for Max and Standard
* Mon Feb 7 2005 Tomas Ulin <[email protected]>
- enabled the "Ndbcluster" storage engine for the max binary
- added extra make install in ndb subdir after Max build to get ndb binaries
- added packages for ndbcluster storage engine
* Fri Jan 14 2005 Lenz Grimmer <[email protected]>
- replaced obsoleted "BuildPrereq" with "BuildRequires" instead
* Thu Jan 13 2005 Lenz Grimmer <[email protected]>
- enabled the "Federated" storage engine for the max binary
* Tue Jan 04 2005 Petr Chardin <[email protected]>
- ISAM and merge storage engines were purged, as well as the appropriate
  tools and manpages (isamchk and isamlog)
* Thu Dec 31 2004 Lenz Grimmer <[email protected]>
- enabled the "Archive" storage engine for the max binary
- enabled the "CSV" storage engine for the max binary
- enabled the "Example" storage engine for the max binary
* Thu Aug 26 2004 Lenz Grimmer <[email protected]>
- MySQL-Max now requires MySQL-server instead of MySQL (BUG 3860)
* Fri Aug 20 2004 Lenz Grimmer <[email protected]>
- do not link statically on IA64/AMD64 as these systems do not have
a patched glibc installed
* Tue Aug 10 2004 Lenz Grimmer <[email protected]>
- Added libmygcc.a to the devel subpackage (required to link applications
against the the embedded server libmysqld.a) (BUG 4921)
* Mon Aug 09 2004 Lenz Grimmer <[email protected]>
- Added EXCEPTIONS-CLIENT to the "devel" package
* Thu Jul 29 2004 Lenz Grimmer <[email protected]>
- disabled OpenSSL in the Max binaries again (the RPM packages were the
only exception to this anyway) (BUG 1043)
* Wed Jun 30 2004 Lenz Grimmer <[email protected]>
- fixed server postinstall (mysql_install_db was called with the wrong
parameter)
* Thu Jun 24 2004 Lenz Grimmer <[email protected]>
- added mysql_tzinfo_to_sql to the server subpackage
- run "make clean" instead of "make distclean"
* Mon Apr 05 2004 Lenz Grimmer <[email protected]>
- added ncurses-devel to the build prerequisites (BUG 3377)
* Thu Feb 12 2004 Lenz Grimmer <[email protected]>
- when using gcc, _always_ use CXX=gcc
- replaced Copyright with License field (Copyright is obsolete)
* Tue Feb 03 2004 Lenz Grimmer <[email protected]>
- added myisam_ftdump to the Server package
* Tue Jan 13 2004 Lenz Grimmer <[email protected]>
- link the mysql client against libreadline instead of libedit (BUG 2289)
* Mon Dec 22 2003 Lenz Grimmer <[email protected]>
- marked /etc/logrotate.d/mysql as a config file (BUG 2156)
* Fri Dec 13 2003 Lenz Grimmer <[email protected]>
- fixed file permissions (BUG 1672)
* Thu Dec 11 2003 Lenz Grimmer <[email protected]>
- made testing for gcc3 a bit more robust
* Fri Dec 05 2003 Lenz Grimmer <[email protected]>
- added missing file mysql_create_system_tables to the server subpackage
* Fri Nov 21 2003 Lenz Grimmer <[email protected]>
- removed dependency on MySQL-client from the MySQL-devel subpackage
as it is not really required. (BUG 1610)
* Fri Aug 29 2003 Lenz Grimmer <[email protected]>
- Fixed BUG 1162 (removed macro names from the changelog)
- Really fixed BUG 998 (disable the checking for installed but
unpackaged files)
* Tue Aug 05 2003 Lenz Grimmer <[email protected]>
- Fixed BUG 959 (libmysqld not being compiled properly)
- Fixed BUG 998 (RPM build errors): added missing files to the
distribution (mysql_fix_extensions, mysql_tableinfo, mysqldumpslow,
mysql_fix_privilege_tables.1), removed "-n" from install section.
* Wed Jul 09 2003 Lenz Grimmer <[email protected]>
- removed the GIF Icon (file was not included in the sources anyway)
- removed unused variable shared_lib_version
- do not run automake before building the standard binary
(should not be necessary)
- add server suffix '-standard' to standard binary (to be in line
with the binary tarball distributions)
- Use more RPM macros (_exec_prefix, _sbindir, _libdir, _sysconfdir,
_datadir, _includedir) throughout the spec file.
- allow overriding CC and CXX (required when building with other compilers)
* Fri May 16 2003 Lenz Grimmer <[email protected]>
- re-enabled RAID again
* Wed Apr 30 2003 Lenz Grimmer <[email protected]>
- disabled MyISAM RAID (--with-raid) - it throws an assertion which
needs to be investigated first.
* Mon Mar 10 2003 Lenz Grimmer <[email protected]>
- added missing file mysql_secure_installation to server subpackage
(BUG 141)
* Tue Feb 11 2003 Lenz Grimmer <[email protected]>
- re-added missing pre- and post(un)install scripts to server subpackage
- added config file /etc/my.cnf to the file list (just for completeness)
- make sure to create the datadir with 755 permissions
* Mon Jan 27 2003 Lenz Grimmer <[email protected]>
- removed unused CC and CXX variables
- CFLAGS and CXXFLAGS should honor RPM_OPT_FLAGS
* Fri Jan 24 2003 Lenz Grimmer <[email protected]>
- renamed package "MySQL" to "MySQL-server"
- fixed Copyright tag
- added mysql_waitpid to client subpackage (required for mysql-test-run)
* Wed Nov 27 2002 Lenz Grimmer <[email protected]>
- moved init script from /etc/rc.d/init.d to /etc/init.d (the majority of
Linux distributions now support this scheme as proposed by the LSB either
directly or via a compatibility symlink)
- Use new "restart" init script action instead of starting and stopping
separately
- Be more flexible in activating the automatic bootup - use insserv (on
older SuSE versions) or chkconfig (Red Hat, newer SuSE versions and
others) to create the respective symlinks
* Wed Sep 25 2002 Lenz Grimmer <[email protected]>
- MySQL-Max now requires MySQL >= 4.0 to avoid version mismatches
(mixing 3.23 and 4.0 packages)
* Fri Aug 09 2002 Lenz Grimmer <[email protected]>
- Turn off OpenSSL in MySQL-Max for now until it works properly again
- enable RAID for the Max binary instead
- added compatibility link: safe_mysqld -> mysqld_safe to ease the
transition from 3.23
* Thu Jul 18 2002 Lenz Grimmer <[email protected]>
- Reworked the build steps a little bit: the Max binary is supposed
to include OpenSSL, which cannot be linked statically, thus trying
to statically link against a special glibc is futile anyway
- because of this, it is not required to make yet another build run
just to compile the shared libs (saves a lot of time)
- updated package description of the Max subpackage
- clean up the BuildRoot directory afterwards
* Mon Jul 15 2002 Lenz Grimmer <[email protected]>
- Updated Packager information
- Fixed the build options: the regular package is supposed to
include InnoDB and linked statically, while the Max package
should include BDB and SSL support
* Fri May 03 2002 Lenz Grimmer <[email protected]>
- Use more RPM macros (e.g. infodir, mandir) to make the spec
file more portable
- reorganized the installation of documentation files: let RPM
take care of this
- reorganized the file list: actually install man pages along
with the binaries of the respective subpackage
- do not include libmysqld.a in the devel subpackage as well, if we
have a special "embedded" subpackage
- reworked the package descriptions
* Mon Oct 8 2001 Monty
- Added embedded server as a separate RPM
* Fri Apr 13 2001 Monty
- Added mysqld-max to the distribution
* Tue Jan 2 2001 Monty
- Added mysql-test to the bench package
* Fri Aug 18 2000 Tim Smith <[email protected]>
- Added separate libmysql_r directory; now both a threaded
and non-threaded library is shipped.
* Wed Sep 28 1999 David Axmark <[email protected]>
- Added the support-files/my-example.cnf to the docs directory.
- Removed devel dependency on base since it is about client
development.
* Wed Sep 8 1999 David Axmark <[email protected]>
- Cleaned up some for 3.23.
* Thu Jul 1 1999 David Axmark <[email protected]>
- Added support for shared libraries in a separate sub
package. Original fix by David Fox ([email protected])
- The --enable-assembler switch is now automatically disabled on
  platforms where assembler code is unavailable. This should allow
  building this RPM on non i386 systems.
* Mon Feb 22 1999 David Axmark <[email protected]>
- Removed unportable cc switches from the spec file. The defaults can
now be overridden with environment variables. This feature is used
to compile the official RPM with optimal (but compiler version
specific) switches.
- Removed the repetitive description parts for the sub rpms. Maybe add
again if RPM gets a multiline macro capability.
- Added support for a pt_BR translation. Translation contributed by
Jorge Godoy <[email protected]>.
* Wed Nov 4 1998 David Axmark <[email protected]>
- A lot of changes in all the rpm and install scripts. This may even
be a working RPM :-)
* Sun Aug 16 1998 David Axmark <[email protected]>
- A developers changelog for MySQL is available in the source RPM. And
  there is a history of major user visible changes in the Reference
Manual. Only RPM specific changes will be documented here.
| mmplayer/MySQL | support-files/mysql.spec.sh | Shell | gpl-2.0 | 82,091 |
#!/usr/bin/env bash
# This is a test
###CLEAR VARIABLES
DOWNLOAD_SET=""
DOWNLOADER=""
PKGMAN=""
TEST="0"
# !!!TEST copy this line wherever you need the script to stop in a test
if [ "$TEST" == "1" ] ; then return 0 ; fi
DOWNLOAD_SELECTION=""
UNKNOWN_OPT=""
###SET VARIABLES
DOWNLOAD_DATE="`date +%Y-%m`"
WORKINGDIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
###CONFIGURATION
DOWNLOAD_DIRECTORY="$WORKINGDIR/downloads"
mkdir -p "$DOWNLOAD_DIRECTORY"
mkdir -p "$DOWNLOAD_DIRECTORY/$DOWNLOAD_DATE"
LOGFILE="$DOWNLOAD_DIRECTORY/$DOWNLOAD_DATE/download_progress.log"
touch $LOGFILE
printlog () {
echo $1
if [ "$2" == "failed" ] ; then
echo "FAIL: $1" >> $LOGFILE
else
echo $1 >> $LOGFILE
fi
}
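# Usage examples (both forms appear further down in this script):
#   printlog "Dependency check of wget success"      # plain entry in $LOGFILE
#   printlog "Download incomplete: $URL" "failed"    # logged with a "FAIL:" prefix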
###OPTIONS PROCESSING
SKIP_ARG="0"
for ARG ; do
if [ -n != "$ARG" ] ; then
UNKNOWN_OPT="1"
if [ "$SKIP_ARG" == "1" ] ; then
SKIP_ARG="0"
UNKNOWN_OPT="0"
fi
if [ "$ARG" == "-h" ] || [ "$ARG" == "--help" ] ; then
echo "Usage: pdu.sh -i [USER'S INITIALS]... -s [DOWNLOAD SET]... -c -h"
echo " -h, --help prints this help message"
echo " -i, --initials Specifies initials to be appended to file names"
echo " -s, --set allows you to choose from a predefined set of downloads"
echo " -c, --configure walks you through the configuration process"
echo " -r, --reset resets all logs"
echo "Welcome to the Program Downloader Utility (PDU). This program was created to automatically download programs from the internet using the terminal-based Lynx web browser."
echo "Configuration files can be found in the support/ directory. Every URL given in the categories will be downloaded into a matching subfolder. At this time, only websites from majorgeeks.com are supported, and you will want to put the download page in line, NOT the general information page. This allows you to choose which mirror you'd like to download. For all other direct downloads, you can put them in 'unsorted', and they will be downloaded via wget."
return 0
fi
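# Sketch of how a support/ list appears to be laid out (URLs are hypothetical);
# progdownload() reads one line per iteration and its loop stops at a line
# containing just "exit", so each list is expected to end with such a line:
#   support/antivirus.txt:
#     http://www.majorgeeks.com/files/details/some_av_tool.html
#     http://www.majorgeeks.com/files/details/another_scanner.html
#     exit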
if [ "$ARG" == "-c" ] || [ "$ARG" == "--configure" ] ; then
echo "Please enter the path to the folder you would like your new downloads to be dropped off:"
read DOWNLOAD_DIRECTORY
UNKNOWN_OPT="0"
shift
fi
if [ "$ARG" == "-i" ] || [ "$ARG" == "--initials" ] ; then
if [ -z "$2" ] ; then
echo "Please specify initials to be placed on downloads" && return 1
else
DOWNLOADER="$2"
echo "$DOWNLOADER will be appended to filenames."
UNKNOWN_OPT="0"
SKIP_ARG="1"
fi
fi
if [ "$ARG" == "-s" ] || [ "$ARG" == "--set" ] ; then
if [ -z "$2" ] ; then
echo "Please specify set to download" && return 1
else
echo "Downloading $2..."
DOWNLOAD_SET="$2"
UNKNOWN_OPT="0"
SKIP_ARG="1"
fi
fi
if [ "$ARG" == "-r" ] || [ "$ARG" == "--reset" ] ; then
UNKNOWN_OPT="0"
printlog "renaming download_progress.log to download_progress_`date`.log"
mv $DOWNLOAD_DIRECTORY/$DOWNLOAD_DATE/download_progress.log "$DOWNLOAD_DIRECTORY/$DOWNLOAD_DATE/download_progress_`date`.log"
printlog "logs cleared: renamed download_progress.log to download_progress_`date`.log"
if [ -n != "`ls $DOWNLOAD_DIRECTORY/$DOWNLOAD_DATE/badfiles/`" ] ; then
rm -f $WORKINGDIR/logs/badfiles/* && printlog "files cleared"
else printlog "no files to clear"
fi
fi
if [ "$ARG" == "-t" ] || [ "$ARG" == "--test" ] ; then
echo "Test mode!"
TEST="1"
UNKNOWN_OPT="0"
fi
if [ "$UNKNOWN_OPT" == "1" ] ; then
echo "unkown option!" && return 1
fi
fi
done
###FUNCTIONS
pckmgrchk () {
which rpm
if [ "$?" == "0" ] ; then
PKGMAN="rpm"
printlog "Package Manager: $PKGMAN"
fi
which apt
if [ "$?" == "0" ] ; then
PKGMAN="apt"
printlog "Package Manager: $PKGMAN"
fi
if [ -z "$PKGMAN" ] ; then
printlog "Package manager not recognized! Please make sure rpm or apt are installed and working!" && return 1
fi
}
depcheck () {
which "$1" >> /dev/null
if [ "$?" != "0" ] ; then
printlog "This program requires $1 to be installed in order to run properly. You can install it by typing:"
if [ "$PKGMAN" == "apt" ] ; then
printlog "sudo apt-get install $1"
INSACTN="apt-get"
elif [ "$PKGMAN" == "rpm" ] ; then
printlog "yum install $1"
INSACTN="yum"
else printlog "Package manager not recognized! Please make sure rpm or apt are installed and working!" && return 1
fi
printlog "Or we can try to install it right now. Would you like to? (Y/N)"
UINPUT=0
read UINPUT # grab first letter of input, upper or lower it, and check for THAT input. Shorter.
until [ $UINPUT == "exit" ] ; do
if [ $UINPUT == "Y" ] || [ $UINPUT == "y" ] || [ $UINPUT == "yes" ] || [ $UINPUT == "Yes" ] || [ $UINPUT == "YES" ] ; then
printlog "Installing $1..."
sudo $INSACTN install $1
UINPUT="exit"
elif [ $UINPUT == "N" ] || [ $UINPUT == "n" ] || [ $UINPUT == "no" ] || [ $UINPUT == "No" ] || [ $UINPUT == "NO" ] ; then
printlog "Package install cancelled." && return 0
else echo "I beg your pardon?"
fi
done
else printlog "Dependency check of $1 success"
fi }
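# e.g. depcheck wget ; depcheck lynx   (called below, right after pckmgrchk)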
progupdatechk () {
if [ -f "$2" ] ; then
printlog "File already in download directory! Deleting new file and skipping..."
rm -f $1
else
printlog "Moving $1 to $2..."
mv "$1" "$2"
fi
}
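# Arguments as used below: $1 = freshly downloaded file in tmp/, $2 = final
# destination path; if the destination already exists, the new copy is deleted.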
progdownload () {
mkdir "$DOWNLOAD_DIRECTORY/$DOWNLOAD_DATE/$2"
NOW=$(date +"%Y_%m_%d") && printlog "$DOWNLOAD_SET started at $NOW"
MYNUM="0"
until [ "$URL" == "exit" ] ; do
echo "" >> $LOGFILE
MYNUM=$((MYNUM + 1))
URL="$(sed ''$MYNUM'q;d' $1)" && printlog "$MYNUM) downloading $URL"
mkdir "$WORKINGDIR/tmp" 2> /dev/null
cd "$WORKINGDIR/tmp"
lynx -cmd_script="$WORKINGDIR/support/mgcmd.txt" --accept-all-cookies $URL
FILE=`(ls | head -n 1)`
if [ -z "$FILE" ] ; then
printlog "Download incomplete: $URL" "failed"
else
EXT=`echo -n $FILE | tail -c 3`
BAD=`cat "$WORKINGDIR/support/whiteexts.txt" | grep -v "#" | grep -cim1 "$EXT"`
until [ -z "$FILE" ] ; do
if [ $BAD == "0" ] ; then
progupdatechk "$FILE" "$DOWNLOAD_DIRECTORY/$DOWNLOAD_DATE/badfiles/$FILE"
printlog "Download $FILE is of unknown type. $URL" "failed"
else
if [ -z "$DOWNLOADER" ] ; then
progupdatechk "$FILE" "$DOWNLOAD_DIRECTORY/$DOWNLOAD_DATE/$2/$FILE"
else
progupdatechk "$FILE" "$DOWNLOAD_DIRECTORY/$DOWNLOAD_DATE/$2/${FILE%%.*}($DOWNLOADER).${FILE#*.}"
fi
printlog "Download success of $FILE from $URL"
fi
FILE=`(ls | head -n 1)`
done
fi
cd "$WORKINGDIR"
done
}
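# Usage (see the menu below): progdownload <file with one URL per line> <subfolder>
#   e.g. progdownload "$WORKINGDIR/support/office.txt" "office"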
###PROGRAM START
cd $WORKINGDIR
echo "" >> $LOGFILE
printlog "lpd started at `date`"
###DEPENDENCY CHECK
pckmgrchk
depcheck wget
depcheck lynx
###MENU
mkdir "$DOWNLOAD_DIRECTORY/`date +%Y-%m`"
mkdir "$DOWNLOAD_DIRECTORY/$DOWNLOAD_DATE/badfiles"
if [ -z $DOWNLOAD_SET ] ; then
until [ "$DOWNLOAD_SET" == "exit" ] ; do
UNKNOWN_OPT="1"
echo "Which batch would you like to download?"
echo "all antivirus creative utilities office clear_logs configure help exit"
# DOWNLOAD_SELECTION="All Majorgeeks Wgets Antivirus Creative Utilities Office Clear_logs Configure Exit"
# select opt in $DOWNLOAD_SELECTION; do
# DOWNLOAD_SET="$opt"
# done
read DOWNLOAD_SET
if [ "$DOWNLOAD_SET" == "all" ] ; then
UNKNOWN_OPT="0"
printlog "Downloading $DOWNLOAD_SET ..."
progdownload "$WORKINGDIR/wgetadrs.txt" "unsorted"
progdownload "$WORKINGDIR/support/antivirus.txt" "antivirus"
progdownload "$WORKINGDIR/support/creative.txt" "creative"
progdownload "$WORKINGDIR/support/utilities.txt" "utilities"
progdownload "$WORKINGDIR/support/office.txt" "office"
fi
if [ "$DOWNLOAD_SET" == "antivirus" ]; then
UNKNOWN_OPT="0"
printlog "Downloading $DOWNLOAD_SET ..."
progdownload "$WORKINGDIR/support/antivirus.txt" "antivirus"
fi
if [ "$DOWNLOAD_SET" == "creative" ]; then
UNKNOWN_OPT="0"
printlog "Downloading $DOWNLOAD_SET ..."
progdownload "$WORKINGDIR/support/creative.txt" "creative"
fi
if [ "$DOWNLOAD_SET" == "utilities" ]; then
UNKNOWN_OPT="0"
printlog "Downloading $DOWNLOAD_SET ..."
progdownload "$WORKINGDIR/support/utilities.txt" "utilities"
fi
if [ "$DOWNLOAD_SET" == "office" ]; then
UNKNOWN_OPT="0"
printlog "Downloading $DOWNLOAD_SET ..."
progdownload "$WORKINGDIR/support/office.txt" "office"
fi
if [ "$DOWNLOAD_SET" == "clear_logs" ]; then
UNKNOWN_OPT="0"
printlog "renaming download_progress.log to download_progress_`date`.log"
mv $DOWNLOAD_DIRECTORY/$DOWNLOAD_DATE/download_progress.log "$DOWNLOAD_DIRECTORY/$DOWNLOAD_DATE/download_progress_`date`.log"
printlog "logs cleared: renamed download_progress.log to download_progress_`date`.log"
if [ -n != "`ls $DOWNLOAD_DIRECTORY/$DOWNLOAD_DATE/badfiles/`" ] ; then
rm -f $WORKINGDIR/logs/badfiles/* && printlog "files cleared"
else printlog "no files to clear"
fi
fi
if [ "$DOWNLOAD_SET" == "configure" ]; then
UNKNOWN_OPT="0"
echo "Please enter the path to the folder you would like your new downloads to be dropped off:"
read DOWNLOAD_DIRECTORY
export DOWNLOAD_DIRECTORY
fi
if [ "$DOWNLOAD_SET" == "help" ]; then
UNKNOWN_OPT="0"
echo "Usage: pdu.sh -i [USER'S INITIALS]... -s [DOWNLOAD SET]... -c -h"
echo " -h, --help prints this help message"
echo " -i, --initials Specifies initials to be appended to file names"
echo " -s, --set allows you to choose from a predefined set of downloads"
echo " -c, --configure walks you through the configuration process"
echo " -r, --reset resets all logs"
echo "Welcome to the Program Downloader Utility (PDU). This program was created to automatically download programs from the internet using the terminal-based Lynx web browser."
echo "Configuration files can be found in the support/ directory. Every URL given in the categories will be downloaded into a matching subfolder. At this time, only websites from majorgeeks.com are supported, and you will want to put the download page in line, NOT the general information page. This allows you to choose which mirror you'd like to download. For all other direct downloads, you can put them in 'unsorted', and they will be downloaded via wget."
fi
if [ "$DOWNLOAD_SET" == "exit" ]; then
UNKNOWN_OPT="0"
printlog "Goodbye!"
DOWNLOAD_SET="exit"
fi
if [ "$UNKNOWN_OPT" == "1" ] ; then
echo "I beg your pardon?"
fi
done
else
printlog "Downloading $DOWNLOAD_SET ..."
progdownload "$WORKINGDIR/support/$DOWNLOAD_SET.txt" "$DOWNLOAD_SET"
fi
| qbit/Lynx-Program-Downloader | lpd.sh | Shell | gpl-2.0 | 11,074 |
#!/bin/bash
#
#FILE
# /usr/sbin/container-rsync.sh
# $Id$
#
#PURPOSE
# Test alpha dev script for unxsVZ hot spare container sync
# Keep a container on this host rsync'd with a source container on a remote host
#
#AUTHOR/LEGAL
# (C) 2009- Gary Wallis for Unixservice. GPLv2 Licensed.
#
#
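#EXAMPLE
# Hypothetical invocation (VEIDs and hostname are placeholders):
# ./container-rsync.sh 1101 101 hn-prod.example.com
# keeps the local clone container 1101 synced from container 101 on hn-prod.example.com
#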
if [ "$1" == "" ] || [ "$2" == "" ] || [ "$3" == "" ];then
echo "usage $0 <local clone veid> <remote production veid> <remote-server hostname/ip>";
exit 0;
fi
ssh $3 "ls /vz/private/$2" > /dev/null 2>&1;
if [ $? != 0 ]; then
echo "no $3 /vz/private/$2";
exit 1;
fi
grep -l "\-clone" /etc/vz/conf/$1.conf > /dev/null 2>&1;
if [ $? != 0 ]; then
echo "local veid:$1 does not appear to be a clone";
exit 1;
fi
ls /vz/private/$1 > /dev/null 2>&1;
if [ $? != 0 ]; then
echo "no local /vz/private/$1";
exit 1;
fi
#/usr/bin/rsync -avxlH --dry-run\
/usr/bin/rsync -avxlH \
--delete \
--exclude "/proc" --exclude "/sys" --exclude "/dev" --exclude "/tmp" \
--exclude "/var/run" --exclude "/etc/sysconfig/network" --exclude "/etc/sysconfig/network-scripts/" \
--exclude "/var/lock" --exclude "etc/hosts" --exclude "etc/resolv.conf" \
--exclude "/var/lib/mlocate/mlocate.db" --exclude "/etc/rc.d/rc6.d/S00vzreboot" \
--exclude "/var/log/" --exclude "/var/lib/random-seed" --exclude "/var/spool/" \
--exclude "/etc/aliases.db" \
-e 'ssh -c blowfish -ax' \
$3:/vz/private/$2/* \
/vz/private/$1
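# Example (hypothetical VEIDs and host): ./container-rsync.sh 101 201 hn2.example.com
# mirrors the remote production container 201 on hn2.example.com into the local clone 101.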
| unxs0/unxsVZ | tools/openvz/container-rsync.sh | Shell | gpl-2.0 | 1,414 |
set -e # Abort on error
./bin/trailing_whitespace_test.sh
./bin/indentation.sh
./bin/hlint_health.sh
./bin/sanity.sh
| NorfairKing/the-notes | bin/code_health.sh | Shell | gpl-2.0 | 117 |
#! /bin/sh -e
# tup - A file-based build system
#
# Copyright (C) 2011-2018 Mike Shal <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# File creation events (such as the first time we run a command), should not
# result in the monitor re-running an autoupdate.
. ./tup.sh
check_monitor_supported
monitor --autoupdate > .tup/.monitor.output 2>&1
tup flush
cat > main.c << HERE
#include <stdio.h>
int main(void)
{
return 0;
}
HERE
cat > Tupfile << HERE
: foreach *.c |> gcc -c %f -o %o |> %B.o
: *.o |> gcc %f -o %o |> prog
HERE
tup flush
check_exist prog
if ! cat .tup/.monitor.output | grep Updated | wc -l | grep 1 > /dev/null; then
sleep 0.5
if ! cat .tup/.monitor.output | grep Updated | wc -l | grep 1 > /dev/null; then
echo "Monitor output:" 1>&2
cat .tup/.monitor.output 1>&2
echo "Error: tup should only update once" 1>&2
exit 1
fi
fi
eotup
| jonatanolofsson/tup | test/t7052-autoupdate8.sh | Shell | gpl-2.0 | 1,479 |
#!/bin/sh
#Install Docker
#sudo which wget
#sudo apt-get update && sudo apt-get install wget
#wget -qO- https://get.docker.com/ | sh
#echo "Docker Setup complete"
#install NodeJs
#sudo apt-get update
#sudo apt-get install nodejs
#sudo apt-get install npm
#echo "NodeJS setup Complete"
###########################
# Start Docker
###########################
sudo chmod 777 ../API/DockerTimeout.sh
sudo chmod 777 ../API/Payload/script.sh
sudo chmod 777 ../API/Payload/javaRunner.sh
sudo chmod 777 UpdateDocker.sh
sudo service docker restart
sudo ./UpdateDocker.sh
| ManoloBrn/Open-CloudCompiler | Setup/Install.sh | Shell | gpl-2.0 | 567 |
#!/bin/bash -x
echo "[Toromino's Scripts] TCleaner will clean your Downloads folder!"
mkdir $HOME/Downloads/TCleaner/
mkdir $HOME/Downloads/TCleaner/Music/
mkdir $HOME/Downloads/TCleaner/Pictures/
mkdir $HOME/Downloads/TCleaner/Compressed/
mkdir $HOME/Downloads/TCleaner/Packages/
mkdir $HOME/Downloads/TCleaner/Shell-Scripts/
mkdir $HOME/Downloads/TCleaner/Windows-Wine/
mkdir $HOME/Downloads/TCleaner/Videos/
mkdir $HOME/Downloads/TCleaner/Other/
mv $HOME/Downloads/*.mp3 $HOME/Downloads/TCleaner/Music/
mv $HOME/Downloads/*.ogg $HOME/Downloads/TCleaner/Music/
mv $HOME/Downloads/*.wav $HOME/Downloads/TCleaner/Music/
mv $HOME/Downloads/*.jpg $HOME/Downloads/TCleaner/Pictures/
mv $HOME/Downloads/*.png $HOME/Downloads/TCleaner/Pictures/
mv $HOME/Downloads/*.zip $HOME/Downloads/TCleaner/Compressed/
mv $HOME/Downloads/*.tar $HOME/Downloads/TCleaner/Compressed/
mv $HOME/Downloads/*.tar.gz $HOME/Downloads/TCleaner/Compressed/
mv $HOME/Downloads/*.run $HOME/Downloads/TCleaner/Shell-Scripts/
mv $HOME/Downloads/*.sh $HOME/Downloads/TCleaner/Shell-Scripts/
mv $HOME/Downloads/*.deb $HOME/Downloads/TCleaner/Packages/
mv $HOME/Downloads/*.rpm $HOME/Downloads/TCleaner/Packages/
mv $HOME/Downloads/*.mp4 $HOME/Downloads/TCleaner/Videos/
mv $HOME/Downloads/*.avi $HOME/Downloads/TCleaner/Videos/
mv $HOME/Downloads/*.exe $HOME/Downloads/TCleaner/Windows-Wine/
mv $HOME/Downloads/*.msi $HOME/Downloads/TCleaner/Windows-Wine/
mv $HOME/Downloads/*.bat $HOME/Downloads/TCleaner/Windows-Wine/
mv $HOME/Downloads/* $HOME/Downloads/TCleaner/Other/
| Toromino/TCleaner | TCleaner.sh | Shell | gpl-2.0 | 1,547 |
# No bug for the moment, want to raise libtool issue in general.
EAPI="5"
source "${PORTDIR}/${CATEGORY}/${PN}/${BASH_SOURCE[0]##*/}"
EAPI="5"
inherit libtool
eval "
src_prepare() {
$(function_body src_prepare)
elibtoolize
# Fool make to use g-ir-scanner from PATH.
touch gtk/g-ir-scanner || die
}"
| chewi/cross-boss | overlay/x11-libs/gtk+/ebuild:3.sh | Shell | gpl-2.0 | 307 |
#!/bin/bash
cd "$( dirname "${BASH_SOURCE[0]}" )"
PROJECT="org.kde.activeWindowControl"
NAME="plasma_applet_$PROJECT"
# svn checkout svn://anonsvn.kde.org/home/kde/trunk/l10n-kf5/scripts
# export PATH=/path/to/l10n-kf5/scripts:$PATH
extract-messages.sh
sed -e "s,Report-Msgid-Bugs-To: http://bugs.kde.org,Report-Msgid-Bugs-To: https://github.com/kotelnik/plasma-applet-active-window-control/issues," -i "po/$NAME.pot"
echo "Merging translations"
catalogs=`find ./po -name '*.po'`
for cat in $catalogs; do
echo $cat
msgmerge -o $cat.new $cat po/$NAME.pot
mv $cat.new $cat
done
echo "Done merging translations"
| kotelnik/plasma-applet-active-window-control | translations/update-translations.sh | Shell | gpl-2.0 | 618 |
#!/bin/sh
echo "Starting thiolsTemp app"
echo "Creating scanning thread"
sudo hcitool -i hci0 lescan --passive --duplicates >> /dev/null &
echo "Creating parser scanning thread"
sudo hcidump -i hci0 -R | ./parser/bleParser
| Slyde/thiolsTemp | thiosTemp.sh | Shell | gpl-2.0 | 227 |
###
# Library for managing Docker containers
# ==============================================================================
# @package olixsh
# @author Olivier
##
###
# Parameters
##
OLIX_DOCKER_NAME=
###
# Checks whether the docker binary is installed
##
function Docker.installed()
{
debug "Docker.installed ()"
System.binary.exists 'docker'
return $?
}
###
# Checks whether the Docker daemon is reachable
##
function Docker.running()
{
debug "Docker.daemon ()"
docker info > /dev/null 2>&1
return $?
}
###
# Checks whether a container exists
# @param $1 : Container name
##
function Docker.Container.exists()
{
debug "Docker.Container.exists ($1)"
docker inspect $1 > /dev/null 2>&1
return $?
}
###
# Checks whether a container is currently running
# @param $1 : Container name
##
function Docker.Container.running()
{
debug "Docker.Container.running ($1)"
local RUNNING
RUNNING=$(docker inspect --format="{{.State.Running}}" $1 2>&1)
[[ "${RUNNING}" == "true" ]] && return 0
return 1
}
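###
# Usage sketch (hypothetical container name "myapp"; assumes the olixsh helpers
# debug and System.binary.exists are already loaded):
#   if Docker.installed && Docker.running; then
#       Docker.Container.exists "myapp" && Docker.Container.running "myapp" \
#           && echo "container 'myapp' is up"
#   fi
##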
| sabinus52/olixsh | utils/docker.sh | Shell | gpl-2.0 | 1,070 |
#!/bin/bash
echo "Building help..."
basedir=$(pwd)
cd promet/help
rm -r $BUILD_DIR
mkdir $BUILD_DIR
cp help.db $BUILD_DIR
$SQLITE3 $BUILD_DIR/help.db "delete from DOCUMENTS where TYPE<>'W';delete from HISTORY;delete from REPORTS;delete from ACCHISTORY;delete from TEMPLATES;delete from DOCPAGES;delete from DELETEDITEMS;delete from THUMBNAILS;delete from TASKS;delete from OPTIONS;delete from MESSAGEIDX;delete from MESSAGES;delete from ORDERS;vacuum;"
cd $BUILD_DIR
target=help
targetfile=$target-$BUILD_VERSION.zip
targetcur=$target-current.zip
zip -rq $basedir/promet/setup/output/$targetfile .
cd $basedir/promet/output/$TARGET_CPU-$TARGET_OS
if [ "$1" = "upload" ]; then
. ../../setup/build-tools/doupload.sh $targetfile $targetcur
fi
cd $basedir
| cutec-chris/promet-erp | promet/help/build.sh | Shell | gpl-2.0 | 753 |
#!/bin/bash
exitnode=0
# Signal handler: request the restart loop below to exit
function finish {
echo "[acs] Stopping Automatic Cinema Server"
exitnode=1
}
trap finish 2 3 8 15
# startup mongo & node
while (( $exitnode == 0 ))
do
./AutomaticCinemaServer
done
| urshofer/automatic-cinema-server | start.sh | Shell | gpl-2.0 | 220 |
#!/bin/sh
./scripts/generator.py > Good-songs-to-play.ad
sudo docker run --privileged=true -v $PWD:/documents asciidoctor/docker-asciidoctor asciidoctor -v -a stylesheet=stylesheets/minimal.css Good-songs-to-play.ad -o index.html
sudo docker run --privileged=true -v $PWD:/documents asciidoctor/docker-asciidoctor asciidoctor -r asciidoctor-pdf -b pdf Good-songs-to-play.ad -o index.pdf
sudo docker run --privileged=true -v $PWD:/documents asciidoctor/docker-asciidoctor asciidoctor -v -a stylesheet=stylesheets/minimal.css SetList.ad -o setlist.html
| TheVasin/good-songs-to-play | scripts/publish_with_docker.sh | Shell | gpl-2.0 | 551 |
#!/bin/sh
# Start telnet server
eval `flash TELNET_ACCESS_PORT WEB_WAN_ACCESS_PORT TR069_ENABLED`
if [ -x "/usr/sbin/telnetd" ]; then
echo -n $TELNET_ACCESS_PORT > /var/telnet_port
/usr/sbin/telnetd -l /bin/cli -p $TELNET_ACCESS_PORT
fi
# Start debug telnet server
#eval `flash DEBUG`
#if [ $DEBUG = 1 ]; then
# telnetd -p 1023
#fi
# start web server
echo -n $WEB_WAN_ACCESS_PORT > /var/websv_port
ln -sf /web /var/www
watcher /bin/websv -p $WEB_WAN_ACCESS_PORT &
# Daemon to monitor reset button and reset config
# btnreset &
# Remote tr069 management
if [ "$TR069_ENABLED" = 'Enabled' ]; then
echo 'Starting TR069 agent'
watcher /bin/cpeagent -F /etc/tr069/ -W /tmp/ &
fi
# Sysctl parameters
sysctl.sh
# Ready
ledctl 4
| xandlom/keenetic-firmware | package/base-scripts/all/services.sh | Shell | gpl-2.0 | 732 |
#!/usr/bin/env bash
# Script information
SCRIPT_NAME="WordPress Plugin The Test Suite Installer"
SCRIPT_VERSION="1.0.1"
# Scripts defining custom functions
source $(dirname $0)/include/download.sh
source $(dirname $0)/include/info.sh
source $(dirname $0)/include/downloadWPCLI.sh
source $(dirname $0)/include/downloadCodeception.sh
# Parse arguments
CONFIGURATION_FILE_PATH="settings.cfg"
REINSTALL_PROJECT_FILES=0
while getopts "hpvt:c:" OPTION
do
case $OPTION in
h)
printUsage
exit 1
;;
v)
printVersion
exit 1
;;
p)
echo "Reinstalling project files."
REINSTALL_PROJECT_FILES=1
;;
c)
CONFIGURATION_FILE_PATH=$OPTARG
;;
?)
echo option not found
printUsage
exit 1
;;
esac
done
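# Example (hypothetical paths/flags): "./install.sh -c settings.cfg" performs a full
# install, while "./install.sh -c settings.cfg -p" only re-installs the project files.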
# Configuration File
if [ ! -f "$CONFIGURATION_FILE_PATH" ]; then
echo The setting file could not be loaded.
exit 1
fi
source "$CONFIGURATION_FILE_PATH"
echo "Using the configuration file: $CONFIGURATION_FILE_PATH"
# Variables
WORKING_DIR=$(pwd)
if [[ -z "$PROJECT_DIR" ]]; then
PROJECT_DIR=$(cd "$WORKING_DIR/.."; pwd)
fi
# convert it to an absolute path
PROJECT_DIR="$(cd "$(dirname "$PROJECT_DIR")"; pwd)/$(basename "$PROJECT_DIR")"
cd "$WORKING_DIR"
TEMP=$([ -z "${TEMP}" ] && echo "/tmp" || echo "$TEMP")
WP_CLI="$TEMP/wp-cli.phar"
CODECEPT="$TEMP/codecept.phar"
C3="$TEMP/c3.php"
TEMP_PROJECT_DIR="$TEMP/$PROJECT_SLUG"
# Fix: Fatal error: Class 'WP_REST_Server' not found in .../wordpress-tests-lib/includes/spy-rest-server.php on line 3
if echo $WP_VERSION | grep -E '[0-9]+\.[0-9]+(\.[0-9]+)?' > /dev/null
then
WP_TESTS_TAG="tags/$WP_VERSION"
else
# http serves a single offer, whereas https serves multiple. we only want one
download http://api.wordpress.org/core/version-check/1.7/ "$TEMP/wp-latest.json"
if [[ $(uname -s) == 'Darwin' ]]; then
SED_REGEXOPTION='-E' #For Mac OS X
else
SED_REGEXOPTION='-r' #Other OSes
fi
LATEST_VERSION=$(sed $SED_REGEXOPTION 's/.*"version":"([0-9]+\.[0-9]+(\.[0-9]+)?)".*$/\1/g' "$TEMP/wp-latest.json")
if [[ -z "$LATEST_VERSION" ]]; then
echo "The latest WordPress version could not be found. Script exiting."
exit 1
fi
WP_TESTS_TAG="tags/$LATEST_VERSION"
fi
# convert any relative path or Windows path to linux/unix path to be usable for some path related commands such as basename
if [ ! -d "$WP_TEST_DIR" ]; then
mkdir -p "$WP_TEST_DIR"
fi
cd "$WP_TEST_DIR"
WP_TEST_DIR=$(pwd)
cd "$WORKING_DIR"
echo "Project Dir: $PROJECT_DIR"
echo "Working Dir: $WORKING_DIR"
echo "WP Test Dir: $WP_TEST_DIR"
echo "WP TESTS TAG: $WP_TESTS_TAG"
# Exit on errors, xtrace
# set -x
# set -ex
set -e
# On Travis, the working directory looks like:
# /home/travis/build/michaeluno/sample-tdd-plugin
installWordPress() {
# Remove the destination folder if exists to perform a clean install
# If the project directory path is the test site directory, which is the case of on Travis CI, do not delete it.
if [ ! "$PROJECT_DIR" == "$WP_TEST_DIR" ]; then
rm -rf "$WP_TEST_DIR"
fi
# We use wp-cli command
php "$WP_CLI" core download --force --path="$WP_TEST_DIR"
# Change directory to the test WordPres install directory.
cd "$WP_TEST_DIR"
rm -f wp-config.php
dbpass=
if [[ $DB_PASS ]]; then
echo 'db pass is not empty'
dbpass=--dbpass="${DB_PASS}"
fi
php "$WP_CLI" core config --dbname=$DB_NAME --dbuser="$DB_USER" $dbpass --extra-php <<PHP
define( 'WP_DEBUG', true );
define( 'WP_DEBUG_LOG', true );
\$table_prefix = '$WP_TABLE_PREFIX';
PHP
# Renew the database table
setup_database_table
# Create/renew the database - if the environment variable WP_MULTISITE is set, install multi site network Wordpress.
if [[ $WP_MULTISITE = 1 ]]; then
php "$WP_CLI" core multisite-install --url="$WP_URL" --title="$WP_SITE_TITLE" --admin_user="$WP_ADMIN_USER_NAME" --admin_password="$WP_ADMIN_PASSWORD" --admin_email="$WP_ADMIN_EMAIL"
else
php "$WP_CLI" core install --url="$WP_URL" --title="$WP_SITE_TITLE" --admin_user="$WP_ADMIN_USER_NAME" --admin_password="$WP_ADMIN_PASSWORD" --admin_email="$WP_ADMIN_EMAIL"
fi
}
setup_database_table(){
# If the database table already exists, drop it.
# if [[ -z "$DB_PASS" ]]; then
# DB_PASS="\"\""
# fi
dbpass=
if [[ $DB_PASS ]]; then
echo 'db pass is not empty'
dbpass="-p${DB_PASS}"
fi
# RESULT=`mysql -u$DB_USER -p$DB_PASS --skip-column-names -e "SHOW DATABASES LIKE '$DB_NAME'"`
RESULT=`mysql -u$DB_USER $dbpass --skip-column-names -e "SHOW DATABASES LIKE '$DB_NAME'"`
if [ "$RESULT" == "$DB_NAME" ]; then
php "$WP_CLI" db drop --yes
fi
# mysql -u $DB_USER -p$DB_PASS -e --f "DROP $DB_NAME"
# mysqladmin -u$#DB_USER -p$DB_PASS drop -f $DB_NAME
php "$WP_CLI" db create
}
# Installs WordPress test suite utilities
installWPTestSuite() {
# portable in-place argument for both GNU sed and Mac OSX sed
if [[ $(uname -s) == 'Darwin' ]]; then
local ioption='-i .bak'
else
local ioption='-i'
fi
# Download WordPress unit test suite library
local WP_TEST_SUITES_TEMP_DIR="$TEMP/wordpress-tests-lib"
svn export --force --quiet "https://develop.svn.wordpress.org/${WP_TESTS_TAG}/tests/phpunit/includes/" "$WP_TEST_SUITES_TEMP_DIR/includes"
svn export --force --quiet "https://develop.svn.wordpress.org/${WP_TESTS_TAG}/tests/phpunit/data/" "$WP_TEST_SUITES_TEMP_DIR/data"
# if [[ $WP_MULTISITE = 1 ]]; then
# may download multisite.xml for phpUnit
# fi
# Set up WordPress testing suite library
cd "$WP_TEST_DIR"
# Some paths written in wp-tests-config.php needs to be modified and on Windows systems, it is difficult to modify them to absolute path.
# so in order to use dirname(), the test suites library needs to be placed close to the test site directory.
local WP_TEST_SUITES_DIR="$(pwd)/wordpress-tests-lib"
# Copy the downloaded files to the test WordPress site directory
cp -r "$WP_TEST_SUITES_TEMP_DIR/" "$WP_TEST_DIR"
# Make sure the configuration file does not exist.
if [ -f "$WP_TEST_SUITES_DIR/wp-tests-config.php" ]; then
rm -f "$WP_TEST_SUITES_DIR/wp-tests-config.php"
fi
download https://develop.svn.wordpress.org/${WP_TESTS_TAG}/wp-tests-config-sample.php "$WP_TEST_SUITES_DIR/wp-tests-config.php"
# Edit the tests configuration file.
cd "$WP_TEST_SUITES_DIR"
sed $ioption "s:dirname( __FILE__ ) . '/src/':dirname( dirname( __FILE__ ) ) . '/':" wp-tests-config.php
sed $ioption "s/youremptytestdbnamehere/$DB_NAME/" wp-tests-config.php
sed $ioption "s/yourusernamehere/$DB_USER/" wp-tests-config.php
sed $ioption "s/yourpasswordhere/$DB_PASS/" wp-tests-config.php
sed $ioption "s|localhost|${DB_HOST}|" wp-tests-config.php
# Set the environment variable which is accessed from the unit test bootstrap script.
export WP_TESTS_DIR="$WP_TEST_SUITES_DIR"
}
# Uninstalls default plugins
uninstallPlugins() {
cd "$WP_TEST_DIR"
php "$WP_CLI" plugin uninstall akismet
php "$WP_CLI" plugin uninstall hello
}
# Evacuates plugin project files.
# This needs to be done before installing the Wordpress files.
# When the test WordPress site needs to be placed under the project directory such as on Travis CI,
# simply copying the entire project files into the sub-directory of itself is not possible.
# so evacuate the project files to a temporary location first and then after installing WordPress, copy them back to the WordPress plugin directory.
evacuateProjectFiles() {
# Make sure no old file exists.
if [ -d "$TEMP_PROJECT_DIR" ]; then
rm -rf "$TEMP_PROJECT_DIR"
fi
# The `ln` command gives "Protocol Error" on Windows hosts so use the cp command.
# The below cp command appends an asterisk to drop hidden items especially the .git directory but in that case, the destination directory needs to exist.
mkdir -p "$TEMP_PROJECT_DIR"
# Drop hidden files from being copied
cp -r "$PROJECT_DIR/"* "$TEMP_PROJECT_DIR" 2>/dev/null
}
# Installs the project plugin
installPlugins() {
# Install user specified plugins
if [[ $WP_MULTISITE = 1 ]]; then
local OPTION_ACTIVATE="--activate-network"
else
local OPTION_ACTIVATE="--activate"
fi
for _INSTALL_PLUGIN in "${INSTALL_PLUGINS[@]}"
do :
php "$WP_CLI" plugin install "$_INSTALL_PLUGIN" $OPTION_ACTIVATE
done
}
## This Project Plugin
installProjectFiles() {
# Make sure no old file exists.
if [ -d "$WP_TEST_DIR/wp-content/plugins/$PROJECT_SLUG" ]; then
# Directly removing the directory sometimes fails saying it's not empty. So move it to a different location and then remove.
# mv -f "$WP_TEST_DIR/wp-content/plugins/$PROJECT_SLUG" "$TEMP/$PROJECT_SLUG"
# rm -rf "$TEMP/$PROJECT_SLUG"
# Sometimes moving fails so remove the directory in case.
rm -rf "$WP_TEST_DIR/wp-content/plugins/$PROJECT_SLUG"
fi
# The `ln` command gives "Protocol Error" on Windows hosts so use the cp command.
# The below cp command appends an asterisk to drop hidden items especially the .git directory but in that case, the destination directory needs to exist.
mkdir -p "$WP_TEST_DIR/wp-content/plugins/$PROJECT_SLUG"
# drop hidden files from being copied
# cp -r "$PROJECT_DIR/"* "$WP_TEST_DIR/wp-content/plugins/$PROJECT_SLUG"
cp -r "$TEMP_PROJECT_DIR" "$WP_TEST_DIR/wp-content/plugins"
# wp cli command
cd $WP_TEST_DIR
if [[ $WP_MULTISITE = 1 ]]; then
php "$WP_CLI" plugin activate --network $PROJECT_SLUG
else
php "$WP_CLI" plugin activate $PROJECT_SLUG
fi
echo Installed project files.
}
installCodeception() {
# Make sure no old test files exist
# if [ -d "$WP_TEST_DIR/wp-content/plugins/$PROJECT_SLUG/test/tests" ]; then
# rm -rf "$WP_TEST_DIR/wp-content/plugins/$PROJECT_SLUG/test/tests"
# fi
# Rename `codeception.dist.yaml` temporarily to avoid an error.
# @see https://github.com/Codeception/Codeception/issues/5253
CONFIG_DIST_FILE="$WP_TEST_DIR/wp-content/plugins/$PROJECT_SLUG/test/codeception.dist.yml"
CONFIG_DIST_FILE_BACKUP="$WP_TEST_DIR/wp-content/plugins/$PROJECT_SLUG/test/codeception.dist.yml.bak"
mv "$CONFIG_DIST_FILE" "$CONFIG_DIST_FILE_BACKUP"
# Run the bootstrap to generate necessary files.
echo Creating Codeception configuration files.
php "$CODECEPT" bootstrap "$WP_TEST_DIR/wp-content/plugins/$PROJECT_SLUG/test/"
# Restore the global configuration distribution file. (codeception.dist.yml)
mv "$CONFIG_DIST_FILE_BACKUP" "$CONFIG_DIST_FILE"
# @deprecated This seems to be redundant as the entire `tests` directory is copied.
# Copy bootstrap scripts.
# cp -r "$PROJECT_DIR/test/tests/acceptance/_bootstrap.php" "$WP_TEST_DIR/wp-content/plugins/$PROJECT_SLUG/test/tests/acceptance/_bootstrap.php"
# cp -r "$PROJECT_DIR/test/tests/functional/_bootstrap.php" "$WP_TEST_DIR/wp-content/plugins/$PROJECT_SLUG/test/tests/functional/_bootstrap.php"
# cp -r "$PROJECT_DIR/test/tests/unit/_bootstrap.php" "$WP_TEST_DIR/wp-content/plugins/$PROJECT_SLUG/test/tests/unit/_bootstrap.php"
# Create an acceptance setting file.
FILE="$WP_TEST_DIR/wp-content/plugins/$PROJECT_SLUG/test/tests/acceptance.suite.yml"
cat <<EOM >$FILE
modules:
config:
PhpBrowser:
url: '$WP_URL'
EOM
# Create a Codeception global setting file
FILE="$WP_TEST_DIR/wp-content/plugins/$PROJECT_SLUG/test/codeception.yml"
cat <<EOM >$FILE
modules:
config:
Db:
dsn: 'mysql:host=$DB_HOST;dbname=$DB_NAME'
user: '$DB_USER'
password: '$DB_PASS'
EOM
# Make it load c3.php
# cp -r "$C3" "$WP_TEST_DIR/c3.php"
# cd "$WP_TEST_DIR"
# sed -i "s:<?php:<?php require( dirname( __FILE__ ) . '/c3.php' );:" index.php
# Create sub-directories used by c3
# mkdir -p "$WP_TEST_DIR/report"
# mkdir -p "$WP_TEST_DIR/c3tmp"
# Dump sql database file.
cd "$WP_TEST_DIR"
mkdir -p "$WP_TEST_DIR/wp-content/plugins/$PROJECT_SLUG/test/tests/_data/"
php "$WP_CLI" db export "$WP_TEST_DIR/wp-content/plugins/$PROJECT_SLUG/test/tests/_data/dump.sql"
}
# Download necessary applications
downloadWPCLI "$WP_CLI"
downloadCodeception "$CODECEPT"
evacuateProjectFiles
# Install components
if [[ $REINSTALL_PROJECT_FILES -ne 1 ]]; then
installWordPress
echo Installed WordPress
installWPTestSuite
echo Installed WordPress test suite utilities
uninstallPlugins
echo Uninstalled default plugins
installPlugins
echo Installed plugins
fi
installProjectFiles
installCodeception
# Let the user know it's finished
if [[ $REINSTALL_PROJECT_FILES -eq 1 ]]; then
echo Reinstallation has been completed!
else
echo Installation has been completed!
fi
| michaeluno/custom-scrollbar | test/install.sh | Shell | gpl-2.0 | 13,448 |
if [ -d tmp ]; then
echo -n "Restarting Nginx ... "
rm -f tmp/restart.txt
touch tmp/restart.txt
fi
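# Touching tmp/restart.txt follows the usual Phusion Passenger convention: the app
# server is assumed to watch this file's mtime and reload the application on the next
# request; this script only manages the marker file itself.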
| taq/traquitana | config/nginx.sh | Shell | gpl-2.0 | 111 |
#!/bin/sh
logger "Called $0"
exit 0
| olvrlrnz/exec_process | scripts/ret0.sh | Shell | gpl-2.0 | 37 |
#!/bin/bash
while true
do
./webservice.sh >log.txt 2>error_log.txt
echo "Restarting.. `date`"
done
| LUMII-AILab/Webservices | production.sh | Shell | gpl-3.0 | 107 |
#!/usr/bin/env bash
mkdir -p bundle
mkdir -p autoload
mkdir -p colors
# Install pathogen.
pathogenFile='autoload/pathogen.vim'
echo $pathogenFile
if [ -f $pathogenFile ]
then
echo "Pathogen.vim exists, out with the old and in with the new."
rm $pathogenFile
fi
wget -P autoload https://raw.githubusercontent.com/tpope/vim-pathogen/master/autoload/pathogen.vim
curl -fLo ~/.config/nvim/autoload/plug.vim --create-dirs \
https://raw.githubusercontent.com/junegunn/vim-plug/master/plug.vim
# Install critical color scheme (vim does not work without this, I swear).
molokai='colors/molokai.vim'
if [ -f $molokai ]
then
echo $molokai"exists, out with the old and in with the new."
rm $molokai
fi
wget -P colors https://raw.githubusercontent.com/tomasr/molokai/master/colors/molokai.vim
# Install plugins via vim-plug.
nvim +PlugInstall
| dinnerTime/vimrc | setup_env.sh | Shell | gpl-3.0 | 856 |
#!/system/bin/sh
export TMPDIR=/tmp/
mktempS() {
v=$TMPDIR/tmp.$RANDOM
mkdir -p $v
echo $v
}
if ! which mktemp;then
alias mktemp=mktempS
fi
if [ "$#" == 0 ];then
echo "Usage: $0 <original boot.img> [eng|user]"
exit 1
fi
set -e
if [ -f "$2" ];then
scr="$(readlink -f "$2")"
used_scr=1
else
scr="$PWD/changes.sh"
fi
cleanup() {
rm -Rf "$bootimg_extract" "$d2"
}
trap cleanup EXIT
#Ensure binaries are executable
scriptdir="$(dirname "$(readlink -f "$0")")"
for i in sepolicy-inject sepolicy-inject-v2 bootimg-repack bootimg-extract strip-cpio;do
chmod 0755 $scriptdir/bin/$i || true
done
startBootImgEdit() {
f="$(readlink -f "$1")"
homedir="$PWD"
bootimg_extract="$(mktemp -d)"
cd "$bootimg_extract"
"$scriptdir/bin/bootimg-extract" "$f"
[ -f chromeos ] && CHROMEOS=1
d2="$(mktemp -d)"
cd "$d2"
if [ -f "$bootimg_extract"/ramdisk.gz ];then
gunzip -c < "$bootimg_extract"/ramdisk.gz |cpio -i
gunzip -c < "$bootimg_extract"/ramdisk.gz > ramdisk1
elif [ -f "$bootimg_extract"/ramdisk.lzma ];then
lzcat "$bootimg_extract"/ramdisk.lzma |cpio -i
lzcat "$bootimg_extract"/ramdisk.lzma > ramdisk1
else
echo "Unknown ramdisk format"
cd "$homedir"
rm -Rf "$bootimg_extract" "$d2"
exit 1
fi
INITRAMFS_FILES=""
if file init |grep -q Intel;then
DST_ARCH=x86
else
DST_ARCH=arm
fi
}
addFile() {
#Slower, but avoids adding duplicate entries (and the resulting warnings)
if ! echo $INITRAMFS_FILES |grep -qE "\b$1\b";then
INITRAMFS_FILES="$INITRAMFS_FILES $*"
fi
}
doneBootImgEdit() {
find . -type f -exec touch -t 197001011200 {} \;
#List of files to replace \n separated
echo $INITRAMFS_FILES |tr ' ' '\n' | cpio -o -H newc > ramdisk2
#TODO: Why can't I recreate initramfs from scratch?
#Instead I use the append method. Files get overwritten by the last version if they appear twice
#Hence sepolicy/su/init.rc are our version
#There is a trailer in CPIO file format. Hence strip-cpio
rm -f cpio-*
"$scriptdir/bin/strip-cpio" ramdisk1 $INITRAMFS_FILES
cat cpio-* ramdisk2 > ramdisk.tmp
touch -t 197001011200 ramdisk.tmp
#output is called ramdisk.gz, because repack doesn't care about the file format
if [ -f "$bootimg_extract"/ramdisk.gz ];then
gzip -9 -c -n ramdisk.tmp > "$bootimg_extract"/ramdisk.gz
elif [ -f "$bootimg_extract"/ramdisk.lzma ];then
lzma -7 -c ramdisk.tmp > "$bootimg_extract"/ramdisk.gz
else
exit 1
fi
cd "$bootimg_extract"
rm -Rf "$d2"
"$scriptdir/bin/bootimg-repack" "$f"
cp new-boot.img "$homedir"
cd "$homedir"
rm -Rf "$bootimg_extract"
}
#allow <list of scontext> <list of tcontext> <class> <list of perm>
allow() {
addFile sepolicy
[ -z "$1" -o -z "$2" -o -z "$3" -o -z "$4" ] && false
for s in $1;do
for t in $2;do
"$scriptdir"/bin/sepolicy-inject$SEPOLICY -s $s -t $t -c $3 -p $(echo $4|tr ' ' ',') -P sepolicy
done
done
}
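# Example (hypothetical source/target contexts): allow "init shell" "system_file" file "$rx_file_perms"
# expands to one sepolicy-inject call per (source, target) pair, i.e. two calls in this case.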
noaudit() {
addFile sepolicy
for s in $1;do
for t in $2;do
for p in $4;do
"$scriptdir"/bin/sepolicy-inject"$SEPOLICY" -s $s -t $t -c $3 -p $p -P sepolicy
done
done
done
}
#Extracted from global_macros
r_file_perms="getattr open read ioctl lock"
x_file_perms="getattr execute execute_no_trans"
rx_file_perms="$r_file_perms $x_file_perms"
w_file_perms="open append write"
rw_file_perms="$r_file_perms $w_file_perms"
rwx_file_perms="$rx_file_perms $w_dir_perms"
rw_socket_perms="ioctl read getattr write setattr lock append bind connect getopt setopt shutdown"
create_socket_perms="create $rw_socket_perms"
rw_stream_socket_perms="$rw_socket_perms listen accept"
create_stream_socket_perms="create $rw_stream_socket_perms"
r_dir_perms="open getattr read search ioctl"
w_dir_perms="open search write add_name remove_name"
ra_dir_perms="$r_dir_perms add name write"
rw_dir_perms="$r_dir_perms $w_dir_perms"
create_dir_perms="create reparent rename rmdir setattr $rw_dir_perms"
allowFSR() {
allow "$1" "$2" dir "$r_dir_perms"
allow "$1" "$2" file "$r_file_perms"
allow "$1" "$2" lnk_file "read getattr"
}
allowFSRW() {
allow "$1" "$2" dir "$rw_dir_perms create"
allow "$1" "$2" file "$rw_file_perms create setattr unlink rename"
allow "$1" "$2" lnk_file "read getattr"
}
allowFSRWX() {
allowFSRW "$1" "$2"
allow "$1" "$2" file "$x_file_perms"
}
startBootImgEdit "$1"
if [ -f sepolicy ] && \
"$scriptdir/bin/sepolicy-inject-v2" -e -s untrusted_app_25 -P sepolicy;then
#Android O
SEPOLICY="-v2"
ANDROID=26
elif [ -f sepolicy ] && \
! "$scriptdir/bin/sepolicy-inject" -e -c filesystem -P sepolicy && \
"$scriptdir/bin/sepolicy-inject-v2" -e -c filesystem -P sepolicy;then
#Android N
SEPOLICY="-v2"
ANDROID=24
elif "$scriptdir/bin/sepolicy-inject" -e -s gatekeeper_service -P sepolicy;then
#Android M
ANDROID=23
elif "$scriptdir/bin/sepolicy-inject" -e -c service_manager -P sepolicy;then
#Android L MR1
ANDROID=21
#TODO: Android 5.0? Android 4.3?
else
#Assume KitKat
ANDROID=19
fi
shift
[ -n "$used_scr" ] && shift
. $scr
if [ -n "$VERSIONED" ];then
if [ -f "$scriptdir"/gitversion ];then
rev="$(cat $scriptdir/gitversion)"
else
pushd $scriptdir
rev="$(git rev-parse --short HEAD)"
popd
fi
echo $rev > super-bootimg
addFile super-bootimg
fi
doneBootImgEdit
if [ -f $scriptdir/keystore.x509.pem -a -f $scriptdir/keystore.pk8 -a -z "$NO_SIGN" -a -z "$CHROMEOS" ];then
java -jar $scriptdir/keystore_tools/BootSignature.jar /boot new-boot.img $scriptdir/keystore.pk8 $scriptdir/keystore.x509.pem new-boot.img.signed
fi
if [ -n "$CHROMEOS" ];then
echo " " > toto1
echo " " > toto2
#TODO: Properly detect ARCH
if $scriptdir/bin/futility-arm version > /dev/null;then
ARCH=arm
else
ARCH=x86
fi
$scriptdir/bin/futility-$ARCH vbutil_keyblock --pack output.keyblock --datapubkey $scriptdir/kernel_data_key.vbpubk --signprivate $scriptdir/kernel_subkey.vbprivk --flags 0x7
$scriptdir/bin/futility-$ARCH vbutil_kernel --pack new-boot.img.signed --keyblock output.keyblock --signprivate $scriptdir/kernel_data_key.vbprivk --version 1 --vmlinuz new-boot.img --config toto1 --arch arm --bootloader toto2 --flags 0x1
rm -f toto1 toto2 output.keyblock
fi
# Call custom boot image patch script
if [ -f "/data/custom_boot_image_patch.sh" ]; then
sh -x /data/custom_boot_image_patch.sh new-boot.img
fi
# Silence warning when boot on Samsung phones
# XXX: This check ONLY works on LIVE devices, not from script
# Here this is not a problem because the change is purely cosmetic, but don't rely on this if for anything else
if getprop ro.product.manufacturer | grep -iq '^samsung$'; then
echo "SEANDROIDENFORCE" >> "new-boot.img"
fi
| DavisNT/super-bootimg | scripts/bootimg.sh | Shell | gpl-3.0 | 6,501 |
#!/usr/bin/env bash
#
# This script assumes a linux environment
set -e
DES=$1
mkdir -p $DES/js
cp src/js/base64-custom.js $DES/js
cp src/js/biditrie.js $DES/js
cp src/js/dynamic-net-filtering.js $DES/js
cp src/js/filtering-context.js $DES/js
cp src/js/hnswitches.js $DES/js
cp src/js/hntrie.js $DES/js
cp src/js/static-filtering-parser.js $DES/js
cp src/js/static-net-filtering.js $DES/js
cp src/js/static-filtering-io.js $DES/js
cp src/js/tasks.js $DES/js
cp src/js/text-utils.js $DES/js
cp src/js/uri-utils.js $DES/js
cp src/js/url-net-filtering.js $DES/js
mkdir -p $DES/lib
cp -R src/lib/punycode.js $DES/lib/
cp -R src/lib/regexanalyzer $DES/lib/
cp -R src/lib/publicsuffixlist $DES/lib/
# Convert wasm modules into json arrays
mkdir -p $DES/js/wasm
cp src/js/wasm/* $DES/js/wasm/
node -pe "JSON.stringify(Array.from(fs.readFileSync('src/js/wasm/hntrie.wasm')))" \
> $DES/js/wasm/hntrie.wasm.json
node -pe "JSON.stringify(Array.from(fs.readFileSync('src/js/wasm/biditrie.wasm')))" \
> $DES/js/wasm/biditrie.wasm.json
node -pe "JSON.stringify(Array.from(fs.readFileSync('src/lib/publicsuffixlist/wasm/publicsuffixlist.wasm')))" \
> $DES/lib/publicsuffixlist/wasm/publicsuffixlist.wasm.json
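# Each *.wasm.json written above is just the module bytes as a JSON array (a .wasm
# file starts with the "\0asm" magic, so the arrays begin with 0,97,115,109),
# presumably so the Node.js build can load the modules without binary file handling.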
cp platform/nodejs/*.js $DES/
cp platform/nodejs/README.md $DES/
cp LICENSE.txt $DES/
| gorhill/uBlock | tools/make-nodejs.sh | Shell | gpl-3.0 | 1,465 |
#!/bin/bash
java -jar c:/my/local/repos/chair/droidmate/dev/droidmate/projects/core/src/main/resources/apktool.jar decode --no-src --force $1 | konrad-jamrozik/droidmate | dev/droidmate/scripts/decode_apk.sh | Shell | gpl-3.0 | 142 |
# 18_shutdown_hpsmh.sh
if (( PREVIEW )) ; then
Log "Stopping HP System Mgmt Homepage processes [not done in preview]"
else
LogPrint "Stopping HP System Mgmt Homepage processes"
[[ -x /sbin/init.d/hpsmh ]] && /sbin/init.d/hpsmh stop >&2
# because SMH has caused update-ux to hang, do a second check
netstat -a | grep -q 2301
if (( $? == 0 )) ; then
LogPrint "smh is still running - force a kill of smh"
KillProc smh
fi
fi
| gdha/upgrade-ux | opt/upgrade-ux/scripts/preinstall/hp/18_shutdown_hpsmh.sh | Shell | gpl-3.0 | 449 |
sudo apt-get install build-essential g++
| williford/.settings | common-apt-packages-install.sh | Shell | gpl-3.0 | 41 |
#!/bin/bash
get_tarball_path() {
if [ $# -lt 1 ]; then
echo "ERROR: Failed to get tarball path. No search path argument given"
return 1
fi
echo "$(find $1 -name procalc_*.orig.tar.gz)"
}
get_tarball_name() {
if [ $# -lt 1 ]; then
echo "ERROR: Failed to get tarball path. No search path argument given"
return 1
fi
tarball_path="$(get_tarball_path $1)"
echo "${tarball_path##*/}"
}
get_tarball_version() {
if [ $# -lt 1 ]; then
echo "ERROR: Failed to get tarball path. No search path argument given"
return 1
fi
tarball_name="$(get_tarball_name $1)"
regex="^procalc_(.*)\.orig\.tar\.gz$"
if [[ ! "$tarball_name" =~ $regex ]]; then
echo "ERROR: Tarball does not exist or has non-conforming file name"
return 1
fi
echo "${BASH_REMATCH[1]}"
}
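# Usage sketch (hypothetical layout with ../procalc_1.0.0.orig.tar.gz present):
#   get_tarball_name ..    # prints "procalc_1.0.0.orig.tar.gz"
#   get_tarball_version .. # prints "1.0.0"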
| RobJinman/pro_office_calc | release/functions.sh | Shell | gpl-3.0 | 807 |
#!/bin/bash
# the round function:
round()
{
echo $(printf %.$2f $(echo "scale=$2;(((10^$2)*$1)+0.5)/(10^$2)" | bc))
};
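# e.g. "round 3.14159 2" prints 3.14 and "round 2.5 0" prints 3 (half-up rounding via bc)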
# Check the number of parameters
if [ $# = 0 ]; then
rep="."
typ="-s"
elif [ $# = 1 ]; then
rep=$1
typ="-s"
elif [ $# = 2 ]; then
if [ $2 != "-s" ] && [ $2 != "-r" ]; then
echo "Usage: script [directory] [type]"
echo "Usage: type: -s:sort -r:rename"
exit 1
fi
rep=$1
typ=$2
else
echo "Usage: script [directory] [type]"
echo "Usage: type: -s:sort -r:rename"
exit 1
fi
# List of dangerous extensions
dan=("exe" "pif" "application" "gadget" "msi" "msp" "com" "scr" "hta" "cpl" "msc" "jar" "bat" "cmd" "vb" "vbs" "vbe" "js" "jse" "ws" "wsf" "wsc" "wsh" "ps1" "ps1xml" "ps2" "ps2xml" "psc1" "psc2" "msh" "msh1" "msh1" "mshxml" "msh1xml" "msh2xml" "scf" "lnk" "inf" "reg" "docm" "dotm" "xlsm" "xltm" "xlam" "pptm" "potm" "ppam" "ppsm" "sldm")
# Create the level directories (lvl0..lvl10)
if [ $typ = "-s" ]; then
for (( i=0; i<11; i++ )); do mkdir -p lvl$i; done
fi
echo "repertoire:$rep"
# Build the list of files
find $rep/ -type f > tmp.txt
# Analyse all files
while read line
do
val=0;
sum=0;
com=0;
# type of file
ft=`file -bz --mime-type $line | cut -d'/' -f2`
# danger level
ave=0;
cd cve-search
# python3.3 search.py -p microsoft -o json | jq -r ".cvss"
python3.3 search.py -f microsoft | grep -o "cvss': '[0-9]*.[0-9]" | cut -d"'" -f3 > tmp2.txt
while read line2
do
sum=`echo "scale=1;($sum+$line2)" | bc`
com=$(($com+1))
# end of loop
done < tmp2.txt
ave=`echo "($sum/$com)" | bc`
lvl=$(round $ave 0)
echo "moyenne:$lvl"
cd ..
echo "type:$ft"
# check the extension
extention="${line##*.}"
for i in ${dan[@]}; do
if [ "$i" = "$extention" ]; then
val=$(($val+5))
fi
done
echo "val:$val"
lvl=5;
if [ $typ = "-s" ]; then
# move the file into the matching level directory
# mv $line $lvl
echo "move"
else
# add the security level before the extension
filename="${line%.*}"
# mv $line $filename.$lvl.$extention
fi
# end of loop
done < tmp.txt
exit 0
| PokerChichi/TidyMyKey | test_files/script.sh | Shell | gpl-3.0 | 2,113 |
g++ -Wall -Wl,-rpath=libevent/libevent-2.0.21-stable/lib/:cppfastdevelop/cppfoundation/bin/ \
-g server.cpp io_cb.cpp thread_worker.cpp -O0 -o server \
-Llibevent/libevent-2.0.21-stable/lib -Lcppfastdevelop/cppfoundation/bin \
-Ilibevent/libevent-2.0.21-stable/include -Icppfastdevelop/cppfoundation/include \
-levent -lcfclass_d -lcfstub_d
| dungeonsnd/test-code | dev_examples/libevent_multithreads/build.sh | Shell | gpl-3.0 | 345 |
#!/bin/bash
echo '===> Running pre-commit checks for amd' | doppler yellow
mk sniff-amd
| nbloomf/nbloomf.md | hooks/pre-commit.sh | Shell | gpl-3.0 | 90 |
Rscript method1-PigeonAllPups-1.R 11B_splitXY/100A-rmXY/0_cov5reads 1011B_splitXY/100A-rmXY/0_cov5reads/method1-PigeonAllPups-1/NC-vs-IVFfresh
Rscript method1-PigeonAllPups-1.R 11B_splitXY/100A-rmXY/1_cov10reads 1011B_splitXY/100A-rmXY/1_cov10reads/method1-PigeonAllPups-1/NC-vs-IVFfresh
Rscript method1-PigeonAllPups-1.R 11B_splitXY/100A-rmXY/2_cov20reads 1011B_splitXY/100A-rmXY/2_cov20reads/method1-PigeonAllPups-1/NC-vs-IVFfresh
Rscript method1-PigeonAllPups-2.R 11B_splitXY/100A-rmXY/0_cov5reads 1011B_splitXY/100A-rmXY/0_cov5reads/method1-PigeonAllPups-2/NC-vs-IVFfrozen
Rscript method1-PigeonAllPups-2.R 11B_splitXY/100A-rmXY/1_cov10reads 1011B_splitXY/100A-rmXY/1_cov10reads/method1-PigeonAllPups-2/NC-vs-IVFfrozen
Rscript method1-PigeonAllPups-2.R 11B_splitXY/100A-rmXY/2_cov20reads 1011B_splitXY/100A-rmXY/2_cov20reads/method1-PigeonAllPups-2/NC-vs-IVFfrozen
Rscript method1-PigeonAllPups-3.R 11B_splitXY/100A-rmXY/0_cov5reads 1011B_splitXY/100A-rmXY/0_cov5reads/method1-PigeonAllPups-3/NC-vs-ICSIfresh
Rscript method1-PigeonAllPups-3.R 11B_splitXY/100A-rmXY/1_cov10reads 1011B_splitXY/100A-rmXY/1_cov10reads/method1-PigeonAllPups-3/NC-vs-ICSIfresh
Rscript method1-PigeonAllPups-3.R 11B_splitXY/100A-rmXY/2_cov20reads 1011B_splitXY/100A-rmXY/2_cov20reads/method1-PigeonAllPups-3/NC-vs-ICSIfresh
Rscript method1-PigeonAllPups-4.R 11B_splitXY/100A-rmXY/0_cov5reads 1011B_splitXY/100A-rmXY/0_cov5reads/method1-PigeonAllPups-4/NC-vs-ICSIfrozen
Rscript method1-PigeonAllPups-4.R 11B_splitXY/100A-rmXY/1_cov10reads 1011B_splitXY/100A-rmXY/1_cov10reads/method1-PigeonAllPups-4/NC-vs-ICSIfrozen
Rscript method1-PigeonAllPups-4.R 11B_splitXY/100A-rmXY/2_cov20reads 1011B_splitXY/100A-rmXY/2_cov20reads/method1-PigeonAllPups-4/NC-vs-ICSIfrozen
| CTLife/SomeRecords | forRRBS/method1-PigeonAllPups.sh | Shell | gpl-3.0 | 1,961 |
Rscript analyzeDMRs.R \
--file=5_DMR/2E_AlldiffMesites_q0.05_diff0.txt \
--outDir=5_DMR_Annotation \
--RefGenome=hg38 \
--RegionName=1000_Twins_AllPups_essential/A-rmXY/6_Analyze_Tiles_by_methylKit/1_NC1_vs_NC2/5_DMR \
> analyzeDMRs.runLog.txt 2>&1
| CTLife/SomeRecords | forRRBS/merge_annotation/analyzeDMRs.sh | Shell | gpl-3.0 | 281 |
#!/bin/sh
#
# This file is protected by Copyright. Please refer to the COPYRIGHT file
# distributed with this source distribution.
#
# This file is part of GNUHAWK.
#
# GNUHAWK is free software: you can redistribute it and/or modify is under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# GNUHAWK is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see http://www.gnu.org/licenses/.
#
if [ "$1" = "rpm" ]; then
# A very simplistic RPM build scenario
if [ -e streams_to_vector_cc_3i.spec ]; then
mydir=`dirname $0`
tmpdir=`mktemp -d`
cp -r ${mydir} ${tmpdir}/streams_to_vector_cc_3i-1.0.0
tar czf ${tmpdir}/streams_to_vector_cc_3i-1.0.0.tar.gz --exclude=".svn" -C ${tmpdir} streams_to_vector_cc_3i-1.0.0
rpmbuild -ta ${tmpdir}/streams_to_vector_cc_3i-1.0.0.tar.gz
rm -rf $tmpdir
else
echo "Missing RPM spec file in" `pwd`
exit 1
fi
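# e.g. "./build.sh rpm" tars the tree up as version 1.0.0 and hands it to rpmbuild -ta,
# which typically drops the resulting packages under ~/rpmbuild/RPMS (the default topdir).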
else
for impl in cpp ; do
cd $impl
if [ -e build.sh ]; then
./build.sh $*
elif [ -e reconf ]; then
./reconf && ./configure && make
else
echo "No build.sh found for $impl"
fi
cd -
done
fi
| RedhawkSDR/integration-gnuhawk | components/streams_to_vector_cc_3i/build.sh | Shell | gpl-3.0 | 1,606 |