code (stringlengths 2–1.05M) | repo_name (stringlengths 5–110) | path (stringlengths 3–922) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 2–1.05M)
---|---|---|---|---|---|
#!/bin/sh
# file: simulate_mti.sh
#
# (c) Copyright 2008 - 2011 Xilinx, Inc. All rights reserved.
#
# This file contains confidential and proprietary information
# of Xilinx, Inc. and is protected under U.S. and
# international copyright and other intellectual property
# laws.
#
# DISCLAIMER
# This disclaimer is not a license and does not grant any
# rights to the materials distributed herewith. Except as
# otherwise provided in a valid license issued to you by
# Xilinx, and to the maximum extent permitted by applicable
# law: (1) THESE MATERIALS ARE MADE AVAILABLE "AS IS" AND
# WITH ALL FAULTS, AND XILINX HEREBY DISCLAIMS ALL WARRANTIES
# AND CONDITIONS, EXPRESS, IMPLIED, OR STATUTORY, INCLUDING
# BUT NOT LIMITED TO WARRANTIES OF MERCHANTABILITY, NON-
# INFRINGEMENT, OR FITNESS FOR ANY PARTICULAR PURPOSE; and
# (2) Xilinx shall not be liable (whether in contract or tort,
# including negligence, or under any other theory of
# liability) for any loss or damage of any kind or nature
# related to, arising under or in connection with these
# materials, including for any direct, or any indirect,
# special, incidental, or consequential loss or damage
# (including loss of data, profits, goodwill, or any type of
# loss or damage suffered as a result of any action brought
# by a third party) even if such damage or loss was
# reasonably foreseeable or Xilinx had been advised of the
# possibility of the same.
#
# CRITICAL APPLICATIONS
# Xilinx products are not designed or intended to be fail-
# safe, or for use in any application requiring fail-safe
# performance, such as life-support or safety devices or
# systems, Class III medical devices, nuclear facilities,
# applications related to the deployment of airbags, or any
# other applications that could lead to death, personal
# injury, or severe property or environmental damage
# (individually and collectively, "Critical
# Applications"). Customer assumes the sole risk and
# liability of any use of Xilinx products in Critical
# Applications, subject only to applicable laws and
# regulations governing limitations on product liability.
#
# THIS COPYRIGHT NOTICE AND DISCLAIMER MUST BE RETAINED AS
# PART OF THIS FILE AT ALL TIMES.
#
# set up the working directory
set work work
vlib work
# compile all of the files
vcom -work work ../../implement/results/routed.vhd
vcom -work work clkGen_tb.vhd
# run the simulation
vsim -c -t ps +transport_int_delays -voptargs="+acc" -L secureip -L simprim -sdfmax clkGen_tb/dut=../../implement/results/routed.sdf +no_notifier work.clkGen_tb
| rohit91/HDMI2USB | ipcore_dir/clkGen/simulation/timing/simulate_mti.sh | Shell | bsd-2-clause | 2,561 |
#!/bin/bash
# From: http://stackoverflow.com/a/246128
# - To resolve finding the directory after symlinks
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ]; do
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
SOURCE="$(readlink "$SOURCE")"
[[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE"
done
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
CP=$( echo $DIR/../lib/*.jar . | sed 's/ /:/g')
CP=$CP:$( echo $DIR/../ext/*.jar . | sed 's/ /:/g')
#echo $CP
# Find Java
if [ "$JAVA_HOME" = "" ] ; then
JAVA="java -server"
else
JAVA="$JAVA_HOME/bin/java -server"
fi
# Set Java options
if [ "$JAVA_OPTIONS" = "" ] ; then
JAVA_OPTIONS="-Xms32m -Xmx512m"
fi
# Execute the application and return its exit code
exec $JAVA $JAVA_OPTIONS -cp $CP com.tinkerpop.rexster.console.RexsterConsole "$@"
| alszeb/rexster | rexster-console/src/main/bin/rexster-console.sh | Shell | bsd-3-clause | 802 |
#!/bin/sh
################################################################################
# This file is part of the package effrb. It is subject to the license
# terms in the LICENSE.md file found in the top-level directory of
# this distribution and at https://github.com/pjones/effrb. No part of
# the effrb package, including this file, may be copied, modified,
# propagated, or distributed except according to the terms contained
# in the LICENSE.md file.
################################################################################
. `dirname $0`/../common.sh
################################################################################
run_irb_replace_nil <<EOF
nil.to_a
nil.to_i
nil.to_f
EOF
| nfredrik/effrb | irb/ruby/nil-tox.sh | Shell | bsd-3-clause | 712 |
#!/bin/sh
# Copyright 2005-2013 Splunk, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
. `dirname $0`/common.sh
SELINUX_FILE=/etc/sysconfig/selinux
PRINTF='END {printf "%s app=selinux %s %s %s %s\n", DATE, FILEHASH, SELINUX, SELINUXTYPE, SETLOCALDEFS}'
if [ "x$KERNEL" = "xLinux" -a -f $SELINUX_FILE ] ; then
assertHaveCommand cat
# Get file hash
CMD='eval date ; eval LD_LIBRARY_PATH=$SPLUNK_HOME/lib $SPLUNK_HOME/bin/openssl sha1 $SELINUX_FILE ; cat $SELINUX_FILE'
# Get the date.
PARSE_0='NR==1 {DATE=$0}'
# Try to use cross-platform case-insensitive matching for text. Note
# that "match", "tolower", IGNORECASE and other common awk commands or
# options are actually nawk/gawk extensions so avoid them if possible.
PARSE_1='/^[Ss][Ee][Ll][Ii][Nn][Uu][Xx]\=/ { SELINUX="selinux=" substr($0,index($0,"=")+1,length($0)) } '
PARSE_2='/^[Ss][Ee][Ll][Ii][Nn][Uu][Xx][Tt][Yy][Pp][Ee]\=/ { SELINUXTYPE="selinuxtype=" substr($0,index($0,"=")+1,length($0)) } '
PARSE_3='/^[Ss][Ee][Tt][Ll][Oo][Cc][Aa][Ll][Dd][Ee][Ff][Ss]\=/ { SETLOCALDEFS="setlocaldefs=" substr($0,index($0,"=")+1,length($0)) } '
PARSE_4='/^SHA1/ {FILEHASH="file_hash=" $2}'
MASSAGE="$PARSE_0 $PARSE_1 $PARSE_2 $PARSE_3 $PARSE_4"
$CMD | tee $TEE_DEST | $AWK "$MASSAGE $PRINTF"
echo "Cmd = [$CMD]; | $AWK '$MASSAGE $PRINTF'" >> $TEE_DEST
else
echo "SELinux not configured." >> $TEE_DEST
fi | huit/puppet-splunk | files/ta/Splunk_TA_nix/bin/selinuxChecker.sh | Shell | mit | 2,810 |
#!/bin/bash
FN="hapmapsnp6_1.32.0.tar.gz"
URLS=(
"https://bioconductor.org/packages/3.12/data/experiment/src/contrib/hapmapsnp6_1.32.0.tar.gz"
"https://bioarchive.galaxyproject.org/hapmapsnp6_1.32.0.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-hapmapsnp6/bioconductor-hapmapsnp6_1.32.0_src_all.tar.gz"
)
MD5="11f85eea7363a487738610586e622ecf"
# Use a staging area in the conda dir rather than temp dirs, both to avoid
# permission issues as well as to have things downloaded in a predictable
# manner.
STAGING=$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM
mkdir -p $STAGING
TARBALL=$STAGING/$FN
SUCCESS=0
for URL in ${URLS[@]}; do
curl $URL > $TARBALL
[[ $? == 0 ]] || continue
# Platform-specific md5sum checks.
if [[ $(uname -s) == "Linux" ]]; then
if md5sum -c <<<"$MD5  $TARBALL"; then
SUCCESS=1
break
fi
elif [[ $(uname -s) == "Darwin" ]]; then
if [[ $(md5 $TARBALL | cut -f4 -d " ") == "$MD5" ]]; then
SUCCESS=1
break
fi
fi
done
if [[ $SUCCESS != 1 ]]; then
echo "ERROR: post-link.sh was unable to download any of the following URLs with the md5sum $MD5:"
printf '%s\n' "${URLS[@]}"
exit 1
fi
# Install and clean up
R CMD INSTALL --library=$PREFIX/lib/R/library $TARBALL
rm $TARBALL
rmdir $STAGING
| bebatut/bioconda-recipes | recipes/bioconductor-hapmapsnp6/post-link.sh | Shell | mit | 1,302 |
#!/bin/bash
ROOT="$1"
if [[ ! -d $ROOT ]]; then
echo "Usage: $0 <rootdir>"
exit 1
fi
if [[ $ROOT -ef / ]]; then
echo "Can't convert the running system."
echo "Please boot with 'rd.convertfs' on the kernel command line,"
echo "to update with the help of the initramfs,"
echo "or run this script from a rescue system."
exit 1
fi
while [[ $ROOT != "${ROOT%/}" ]]; do
ROOT=${ROOT%/}
done
if [ ! -L "$ROOT"/var/run -a -e "$ROOT"/var/run ]; then
echo "Converting /var/run to symlink"
mv -f "$ROOT"/var/run "$ROOT"/var/run.runmove~
ln -sfn ../run "$ROOT"/var/run
fi
if [ ! -L "$ROOT"/var/lock -a -e "$ROOT"/var/lock ]; then
echo "Converting /var/lock to symlink"
mv -f "$ROOT"/var/lock "$ROOT"/var/lock.lockmove~
ln -sfn ../run/lock "$ROOT"/var/lock
fi
needconvert() {
for dir in "$ROOT/bin" "$ROOT/sbin" "$ROOT/lib" "$ROOT/lib64"; do
if [[ -e $dir ]]; then
[[ -L $dir ]] || return 0
fi
done
return 1
}
if ! [ -e "$ROOT/usr/bin" ]; then
echo "$ROOT/usr/bin does not exist!"
echo "Make sure, the kernel command line has enough information"
echo "to mount /usr (man dracut.cmdline)"
exit 1
fi
if ! needconvert; then
echo "Your system is already converted."
exit 0
fi
testfile="$ROOT/.usrmovecheck$$"
rm -f -- "$testfile"
: > "$testfile"
if [[ ! -e $testfile ]]; then
echo "Cannot write to $ROOT/"
exit 1
fi
rm -f -- "$testfile"
testfile="$ROOT/usr/.usrmovecheck$$"
rm -f -- "$testfile"
: > "$testfile"
if [[ ! -e $testfile ]]; then
echo "Cannot write to $ROOT/usr/"
exit 1
fi
rm -f -- "$testfile"
find_mount() {
local dev wanted_dev
wanted_dev="$(readlink -e -q "$1")"
while read -r dev _ || [ -n "$dev" ]; do
[ "$dev" = "$wanted_dev" ] && echo "$dev" && return 0
done < /proc/mounts
return 1
}
# usage: ismounted <mountpoint>
# usage: ismounted /dev/<device>
if command -v findmnt > /dev/null; then
ismounted() {
findmnt "$1" > /dev/null 2>&1
}
else
ismounted() {
if [ -b "$1" ]; then
find_mount "$1" > /dev/null && return 0
return 1
fi
while read -r _ m _ || [ -n "$m" ]; do
[ "$m" = "$1" ] && return 0
done < /proc/mounts
return 1
}
fi
# clean up after ourselves no matter how we die.
cleanup() {
echo "Something failed. Move back to the original state"
for dir in "$ROOT/bin" "$ROOT/sbin" "$ROOT/lib" "$ROOT/lib64" \
"$ROOT/usr/bin" "$ROOT/usr/sbin" "$ROOT/usr/lib" \
"$ROOT/usr/lib64"; do
[[ -d "${dir}.usrmove-new" ]] && rm -fr -- "${dir}.usrmove-new"
if [[ -d "${dir}.usrmove-old" ]]; then
mv "$dir" "${dir}.del~"
mv "${dir}.usrmove-old" "$dir"
rm -fr -- "${dir}.del~"
fi
done
}
trap 'ret=$?; [[ $ret -ne 0 ]] && cleanup;exit $ret;' EXIT
trap 'exit 1;' SIGINT
ismounted "$ROOT/usr" || CP_HARDLINK="-l"
set -e
# merge / and /usr in new dir in /usr
for dir in bin sbin lib lib64; do
rm -rf -- "$ROOT/usr/${dir}.usrmove-new"
[[ -L "$ROOT/$dir" ]] && continue
[[ -d "$ROOT/$dir" ]] || continue
echo "Make a copy of \`$ROOT/usr/$dir'."
[[ -d "$ROOT/usr/$dir" ]] \
&& cp -ax -l "$ROOT/usr/$dir" "$ROOT/usr/${dir}.usrmove-new"
echo "Merge the copy with \`$ROOT/$dir'."
[[ -d "$ROOT/usr/${dir}.usrmove-new" ]] \
|| mkdir -p "$ROOT/usr/${dir}.usrmove-new"
cp -axT $CP_HARDLINK --backup --suffix=.usrmove~ "$ROOT/$dir" "$ROOT/usr/${dir}.usrmove-new"
echo "Clean up duplicates in \`$ROOT/usr/$dir'."
# delete all symlinks that have been backed up
find "$ROOT/usr/${dir}.usrmove-new" -type l -name '*.usrmove~' -delete || :
# replace symlink with backed up binary
# shellcheck disable=SC2156
find "$ROOT/usr/${dir}.usrmove-new" \
-name '*.usrmove~' \
-type f \
-exec bash -c 'p="{}";o=${p%%.usrmove~};
[[ -L "$o" ]] && mv -f "$p" "$o"' ';' || :
done
# switch over merged dirs in /usr
for dir in bin sbin lib lib64; do
[[ -d "$ROOT/usr/${dir}.usrmove-new" ]] || continue
echo "Switch to new \`$ROOT/usr/$dir'."
rm -fr -- "$ROOT/usr/${dir}.usrmove-old"
mv "$ROOT/usr/$dir" "$ROOT/usr/${dir}.usrmove-old"
mv "$ROOT/usr/${dir}.usrmove-new" "$ROOT/usr/$dir"
done
# replace dirs in / with links to /usr
for dir in bin sbin lib lib64; do
[[ -L "$ROOT/$dir" ]] && continue
[[ -d "$ROOT/$dir" ]] || continue
echo "Create \`$ROOT/$dir' symlink."
rm -fr -- "$ROOT/${dir}.usrmove-old" || :
mv "$ROOT/$dir" "$ROOT/${dir}.usrmove-old"
ln -sfn usr/$dir "$ROOT/$dir"
done
echo "Clean up backup files."
# everything seems to work; cleanup
for dir in bin sbin lib lib64; do
# if we get killed in the middle of "rm -rf", ensure not to leave
# an incomplete directory, which is moved back by cleanup()
[[ -d "$ROOT/usr/${dir}.usrmove-old" ]] \
&& mv "$ROOT/usr/${dir}.usrmove-old" "$ROOT/usr/${dir}.usrmove-old~"
[[ -d "$ROOT/${dir}.usrmove-old" ]] \
&& mv "$ROOT/${dir}.usrmove-old" "$ROOT/${dir}.usrmove-old~"
done
for dir in bin sbin lib lib64; do
if [[ -d "$ROOT/usr/${dir}.usrmove-old~" ]]; then
rm -rf -- "$ROOT/usr/${dir}.usrmove-old~"
fi
if [[ -d "$ROOT/${dir}.usrmove-old~" ]]; then
rm -rf -- "$ROOT/${dir}.usrmove-old~"
fi
done
for dir in lib lib64; do
[[ -d "$ROOT/$dir" ]] || continue
for lib in "$ROOT"/usr/"${dir}"/lib*.so*.usrmove~; do
[[ -f $lib ]] || continue
mv "$lib" "${lib/.so/_so}"
done
done
set +e
echo "Run ldconfig."
ldconfig -r "$ROOT"
if [[ -f "$ROOT"/etc/selinux/config ]]; then
# shellcheck disable=SC1090
. "$ROOT"/etc/selinux/config
fi
if [ -n "$(command -v setfiles)" ] && [ "$SELINUX" != "disabled" ] && [ -f /etc/selinux/"${SELINUXTYPE}"/contexts/files/file_contexts ]; then
echo "Fixing SELinux labels"
setfiles -r "$ROOT" -p /etc/selinux/"${SELINUXTYPE}"/contexts/files/file_contexts "$ROOT"/sbin "$ROOT"/bin "$ROOT"/lib "$ROOT"/lib64 "$ROOT"/usr/lib "$ROOT"/usr/lib64 "$ROOT"/etc/ld.so.cache "$ROOT"/var/cache/ldconfig || :
fi
echo "Done."
exit 0
| FGrose/dracut | modules.d/30convertfs/convertfs.sh | Shell | gpl-2.0 | 6,204 |
#!/system/bin/sh
# Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Linux Foundation nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Set platform variables
target=`getprop ro.board.platform`
if [ -f /sys/devices/soc0/hw_platform ]; then
soc_hwplatform=`cat /sys/devices/soc0/hw_platform` 2> /dev/null
else
soc_hwplatform=`cat /sys/devices/system/soc/soc0/hw_platform` 2> /dev/null
fi
if [ -f /sys/devices/soc0/soc_id ]; then
soc_hwid=`cat /sys/devices/soc0/soc_id` 2> /dev/null
else
soc_hwid=`cat /sys/devices/system/soc/soc0/id` 2> /dev/null
fi
if [ -f /sys/devices/soc0/platform_version ]; then
soc_hwver=`cat /sys/devices/soc0/platform_version` 2> /dev/null
else
soc_hwver=`cat /sys/devices/system/soc/soc0/platform_version` 2> /dev/null
fi
# Dynamic Memory Management (DMM) provides a sys file system to the userspace
# that can be used to plug in/out memory that has been configured as unstable.
# This unstable memory can be in an Active or Inactive state,
# either of which the userspace can request by writing to a sys file.
#
# ro.dev.dmm = 1; Indicates that DMM is enabled in the Android User Space. This
# property is set in the Android system properties file.
#
# If ro.dev.dmm.dpd.start_address is set here then the target has a memory
# configuration that supports DynamicMemoryManagement.
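#
# For illustration, the sysfs interaction looks roughly like this (the block
# index and the address below are hypothetical; the real values are derived
# from movable_start_bytes and block_size_bytes in init_DMM):
#
#   echo 0x30000000 > /sys/devices/system/memory/probe          # physical hotplug
#   echo online     > /sys/devices/system/memory/memory12/state # activate block
#   echo offline    > /sys/devices/system/memory/memory12/state # deactivate block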
init_DMM()
{
block=-1
case "$target" in
"msm7630_surf" | "msm7630_1x" | "msm7630_fusion" | "msm8960")
;;
*)
return
;;
esac
mem="/sys/devices/system/memory"
op=`cat $mem/movable_start_bytes`
case "$op" in
"0")
log -p i -t DMM DMM Disabled. movable_start_bytes not set: $op
;;
"$mem/movable_start_bytes: No such file or directory ")
log -p i -t DMM DMM Disabled. movable_start_bytes does not exist: $op
;;
*)
log -p i -t DMM DMM available. movable_start_bytes at $op
movable_start_bytes=0x`cat $mem/movable_start_bytes`
block_size_bytes=0x`cat $mem/block_size_bytes`
block=$((${movable_start_bytes}/${block_size_bytes}))
chown -h system.system $mem/memory$block/state
chown -h system.system $mem/probe
chown -h system.system $mem/active
chown -h system.system $mem/remove
case "$target" in
"msm7630_surf" | "msm7630_1x" | "msm7630_fusion")
echo $movable_start_bytes > $mem/probe
case "$?" in
"0")
log -p i -t DMM $movable_start_bytes to physical hotplug succeeded.
;;
*)
log -p e -t DMM $movable_start_bytes to physical hotplug failed.
return
;;
esac
echo online > $mem/memory$block/state
case "$?" in
"0")
log -p i -t DMM \'echo online\' to logical hotplug succeeded.
;;
*)
log -p e -t DMM \'echo online\' to logical hotplug failed.
return
;;
esac
;;
esac
setprop ro.dev.dmm.dpd.start_address $movable_start_bytes
setprop ro.dev.dmm.dpd.block $block
;;
esac
case "$target" in
"msm8960")
return
;;
esac
# For 7X30 targets:
# ro.dev.dmm.dpd.start_address is set when the target has a 2x256Mb memory
# configuration. This is also used to indicate that the target is capable of
# setting EBI-1 to Deep Power Down or Self Refresh.
op=`cat $mem/low_power_memory_start_bytes`
case "$op" in
"0")
log -p i -t DMM Self-Refresh-Only Disabled. low_power_memory_start_bytes not set:$op
;;
"$mem/low_power_memory_start_bytes No such file or directory ")
log -p i -t DMM Self-Refresh-Only Disabled. low_power_memory_start_bytes does not exist:$op
;;
*)
log -p i -t DMM Self-Refresh-Only available. low_power_memory_start_bytes at $op
;;
esac
}
#
# For controlling console and shell on console on 8960 - persist.serial.enable 8960
# On other target use default ro.debuggable property.
#
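# For example (hypothetical, run from an adb shell on an 8960 device):
#   setprop persist.serial.enable 1   # enable the console and shell on console
#   setprop persist.serial.enable 0   # disable the console
#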
serial=`getprop persist.serial.enable`
dserial=`getprop ro.debuggable`
case "$target" in
"msm8960")
case "$serial" in
"0")
echo 0 > /sys/devices/platform/msm_serial_hsl.0/console
;;
"1")
echo 1 > /sys/devices/platform/msm_serial_hsl.0/console
start console
;;
*)
case "$dserial" in
"1")
start console
;;
esac
;;
esac
;;
"msm8610" | "msm8974" | "msm8226")
case "$serial" in
"0")
echo 0 > /sys/devices/f991f000.serial/console
;;
"1")
echo 1 > /sys/devices/f991f000.serial/console
start console
;;
*)
case "$dserial" in
"1")
start console
;;
esac
;;
esac
;;
*)
case "$dserial" in
"1")
start console
;;
esac
;;
esac
#
# Allow persistent faking of bms
# User needs to set fake bms charge in persist.bms.fake_batt_capacity
#
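# For example (hypothetical, forces a reported battery charge of 50%):
#   setprop persist.bms.fake_batt_capacity 50
#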
fake_batt_capacity=`getprop persist.bms.fake_batt_capacity`
case "$fake_batt_capacity" in
"") ;; #Do nothing here
* )
case $target in
"msm8960")
echo "$fake_batt_capacity" > /sys/module/pm8921_bms/parameters/bms_fake_battery
;;
"msm8974")
echo "$fake_batt_capacity" > /sys/module/qpnp_bms/parameters/bms_fake_battery
;;
"msm8226")
echo "$fake_batt_capacity" > /sys/class/power_supply/battery/capacity
;;
"msm8610")
echo "$fake_batt_capacity" > /sys/module/qpnp_bms/parameters/bms_fake_battery
;;
esac
esac
case "$target" in
"msm7630_surf" | "msm7630_1x" | "msm7630_fusion")
insmod /system/lib/modules/ss_mfcinit.ko
insmod /system/lib/modules/ss_vencoder.ko
insmod /system/lib/modules/ss_vdecoder.ko
chmod -h 0666 /dev/ss_mfc_reg
chmod -h 0666 /dev/ss_vdec
chmod -h 0666 /dev/ss_venc
init_DMM
;;
"msm8960")
init_DMM
;;
esac
| newhor1z0n/furnace_kernel_motorola_falcon | ramdisk/init.qcom.class_core.sh | Shell | gpl-2.0 | 7,762 |
# added 2014-10-01 by Rgerhards
# This file is part of the rsyslog project, released under ASL 2.0
source $srcdir/diag.sh init
source $srcdir/diag.sh startup fac_invld1.conf
source $srcdir/diag.sh tcpflood -m1000 -P 1011
source $srcdir/diag.sh shutdown-when-empty # shut down rsyslogd when done processing messages
source $srcdir/diag.sh wait-shutdown # and wait for it to terminate
source $srcdir/diag.sh seq-check 0 999
source $srcdir/diag.sh exit
| teifler/rsyslog | tests/fac_invld1.sh | Shell | gpl-3.0 | 458 |
#!/usr/bin/env bash
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
# Tests Arm Cortex-M55 microprocessor code with CMSIS-NN optimized kernels using FVP based on Arm Corstone-300 software.
set -e
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
ROOT_DIR=${SCRIPT_DIR}/../../../../..
cd "${ROOT_DIR}"
source tensorflow/lite/micro/tools/ci_build/helper_functions.sh
TARGET=cortex_m_corstone_300
TARGET_ARCH=cortex-m55
OPTIMIZED_KERNEL_DIR=cmsis_nn
# TODO(b/143715361): downloading first to allow for parallel builds.
readable_run make -f tensorflow/lite/micro/tools/make/Makefile CO_PROCESSOR=ethos_u OPTIMIZED_KERNEL_DIR=${OPTIMIZED_KERNEL_DIR} TARGET=${TARGET} TARGET_ARCH=${TARGET_ARCH} third_party_downloads
# Avoid running tests in parallel.
readable_run make -f tensorflow/lite/micro/tools/make/Makefile clean
readable_run make -j$(nproc) -f tensorflow/lite/micro/tools/make/Makefile CO_PROCESSOR=ethos_u OPTIMIZED_KERNEL_DIR=${OPTIMIZED_KERNEL_DIR} TARGET=${TARGET} TARGET_ARCH=${TARGET_ARCH} build
readable_run make -f tensorflow/lite/micro/tools/make/Makefile CO_PROCESSOR=ethos_u OPTIMIZED_KERNEL_DIR=${OPTIMIZED_KERNEL_DIR} TARGET=${TARGET} TARGET_ARCH=${TARGET_ARCH} test
| google/CFU-Playground | third_party/tflite-micro/tensorflow/lite/micro/tools/ci_build/test_cortex_m_corstone_300.sh | Shell | apache-2.0 | 1,842 |
#!/bin/bash
# This script uploads sourcemaps to rollbar.com. Sourcemaps
# make it easier to debug client side errors that occur in
# production.
WEBSITE=$(dirname $0)/../website/a/p/p
ROLLBAR_TOKEN=`cat ROLLBAR_TOKEN`
VERSION=`cat VERSION`
FOLDERPATH=$WEBSITE/$VERSION
if [ ! -d $FOLDERPATH ]; then
echo "ERROR: '$FOLDERPATH' dir doesn't exists...sourcemaps won't be uploaded to Rollbar"
exit 1
fi
cd $FOLDERPATH
curl https://api.rollbar.com/api/1/sourcemap \
-F access_token=$ROLLBAR_TOKEN\
-F version=$VERSION \
-F minified_url=https://koding.com/a/p/p/$VERSION/bundle.js \
-F [email protected]
rm -f *.map
| kwagdy/koding-1 | scripts/upload-sm-rollbar.sh | Shell | apache-2.0 | 634 |
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e
cd `dirname "$0"`
case "$1" in
test)
npm install
npm run cover
;;
dist)
npm pack
mkdir -p ../../dist/js
mv avro-js-*.tgz ../../dist/js
;;
clean)
rm -rf coverage
;;
*)
echo "Usage: $0 {test|dist|clean}" >&2
exit 1
esac
exit 0
| ecatmur/avro | lang/js/build.sh | Shell | apache-2.0 | 1,082 |
#!/bin/bash
# iterate over receptor conformers directly rather than parsing ls output
for f in ../../../receptors/wt-ensemble/*.pdbqt; do
prefix=`basename $f .pdbqt`
vina --config config3.txt --receptor $f --ligand ../../../ligands/osel.pdbqt --out $prefix.pdbqt --log $prefix.log --cpu 4
done
| ttdtrang/SlidingBindingBox | vina-ensemble/wt/3/run3.sh | Shell | mit | 220 |
#!/bin/bash
FN="xenopuslaeviscdf_2.18.0.tar.gz"
URLS=(
"https://bioconductor.org/packages/3.8/data/annotation/src/contrib/xenopuslaeviscdf_2.18.0.tar.gz"
"https://bioarchive.galaxyproject.org/xenopuslaeviscdf_2.18.0.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-xenopuslaeviscdf/bioconductor-xenopuslaeviscdf_2.18.0_src_all.tar.gz"
)
MD5="9d09ff76471ae60faf71090e0638f240"
# Use a staging area in the conda dir rather than temp dirs, both to avoid
# permission issues as well as to have things downloaded in a predictable
# manner.
STAGING=$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM
mkdir -p $STAGING
TARBALL=$STAGING/$FN
SUCCESS=0
for URL in ${URLS[@]}; do
wget -O- -q $URL > $TARBALL
[[ $? == 0 ]] || continue
# Platform-specific md5sum checks.
if [[ $(uname -s) == "Linux" ]]; then
if md5sum -c <<<"$MD5  $TARBALL"; then
SUCCESS=1
break
fi
elif [[ $(uname -s) == "Darwin" ]]; then
if [[ $(md5 $TARBALL | cut -f4 -d " ") == "$MD5" ]]; then
SUCCESS=1
break
fi
fi
done
if [[ $SUCCESS != 1 ]]; then
echo "ERROR: post-link.sh was unable to download any of the following URLs with the md5sum $MD5:"
printf '%s\n' "${URLS[@]}"
exit 1
fi
# Install and clean up
R CMD INSTALL --library=$PREFIX/lib/R/library $TARBALL
rm $TARBALL
rmdir $STAGING
| ivirshup/bioconda-recipes | recipes/bioconductor-xenopuslaeviscdf/post-link.sh | Shell | mit | 1,338 |
#!/bin/bash
# Populate site
echo Generating site
mkdir -p out
cp -R site out
mkdir -p out/site/images
cp -R images/tutorial images/concepts out/site/images
cp -R images/faq out/site/images
cp -R images/version-2.0.0 out/site/images/version-2.0.0
for name in `ls *.md`; do
echo Processing ${name} into out/site/${name}
sed -e 's/\$images\$/\/images\/tutorial\//g' ${name} > out/site/${name}
done
# Clean and Build Site with Hakyll
rm -r site/_site
rm -r site/_cache
cd out/site
stack runghc -- site.hs build
cd ../..
| bjhargrave/bndtools | bndtools.manual/generate.sh | Shell | epl-1.0 | 522 |
# Refs of upstream : main(A)
# Refs of workbench: main(A) tags/v123
# git-push : main(B) next(A)
test_expect_success "git-push ($PROTOCOL)" '
git -C workbench push origin \
$B:refs/heads/main \
HEAD:refs/heads/next \
>out 2>&1 &&
make_user_friendly_and_stable_output <out >actual &&
cat >expect <<-EOF &&
remote: # pre-receive hook
remote: pre-receive< <COMMIT-A> <COMMIT-B> refs/heads/main
remote: pre-receive< <ZERO-OID> <COMMIT-A> refs/heads/next
remote: # post-receive hook
remote: post-receive< <COMMIT-A> <COMMIT-B> refs/heads/main
remote: post-receive< <ZERO-OID> <COMMIT-A> refs/heads/next
To <URL/of/upstream.git>
<OID-A>..<OID-B> <COMMIT-B> -> main
* [new branch] HEAD -> next
EOF
test_cmp expect actual &&
git -C "$upstream" show-ref >out &&
make_user_friendly_and_stable_output <out >actual &&
cat >expect <<-EOF &&
<COMMIT-B> refs/heads/main
<COMMIT-A> refs/heads/next
EOF
test_cmp expect actual
'
# Refs of upstream : main(B) next(A)
# Refs of workbench: main(A) tags/v123
# git-push --atomic: main(A) next(B)
test_expect_success "git-push --atomic ($PROTOCOL)" '
test_must_fail git -C workbench push --atomic origin \
main \
$B:refs/heads/next \
>out 2>&1 &&
filter_out_user_friendly_and_stable_output \
-e "/^To / { p; }" \
-e "/^ ! / { p; }" \
<out >actual &&
cat >expect <<-EOF &&
To <URL/of/upstream.git>
! [rejected] main -> main (non-fast-forward)
! [rejected] <COMMIT-B> -> next (atomic push failed)
EOF
test_cmp expect actual &&
git -C "$upstream" show-ref >out &&
make_user_friendly_and_stable_output <out >actual &&
cat >expect <<-EOF &&
<COMMIT-B> refs/heads/main
<COMMIT-A> refs/heads/next
EOF
test_cmp expect actual
'
# Refs of upstream : main(B) next(A)
# Refs of workbench: main(A) tags/v123
# git-push : main(A) next(B)
test_expect_success "non-fast-forward git-push ($PROTOCOL)" '
test_must_fail git \
-C workbench \
-c advice.pushUpdateRejected=false \
push origin \
main \
$B:refs/heads/next \
>out 2>&1 &&
make_user_friendly_and_stable_output <out >actual &&
cat >expect <<-EOF &&
remote: # pre-receive hook
remote: pre-receive< <COMMIT-A> <COMMIT-B> refs/heads/next
remote: # post-receive hook
remote: post-receive< <COMMIT-A> <COMMIT-B> refs/heads/next
To <URL/of/upstream.git>
<OID-A>..<OID-B> <COMMIT-B> -> next
! [rejected] main -> main (non-fast-forward)
EOF
test_cmp expect actual &&
git -C "$upstream" show-ref >out &&
make_user_friendly_and_stable_output <out >actual &&
cat >expect <<-EOF &&
<COMMIT-B> refs/heads/main
<COMMIT-B> refs/heads/next
EOF
test_cmp expect actual
'
# Refs of upstream : main(B) next(B)
# Refs of workbench: main(A) tags/v123
# git-push -f : main(A) NULL tags/v123 refs/review/main/topic(A) a/b/c(A)
test_expect_success "git-push -f ($PROTOCOL)" '
git -C workbench push -f origin \
refs/tags/v123 \
:refs/heads/next \
main \
main:refs/review/main/topic \
HEAD:refs/heads/a/b/c \
>out 2>&1 &&
make_user_friendly_and_stable_output <out >actual &&
cat >expect <<-EOF &&
remote: # pre-receive hook
remote: pre-receive< <COMMIT-B> <COMMIT-A> refs/heads/main
remote: pre-receive< <COMMIT-B> <ZERO-OID> refs/heads/next
remote: pre-receive< <ZERO-OID> <TAG-v123> refs/tags/v123
remote: pre-receive< <ZERO-OID> <COMMIT-A> refs/review/main/topic
remote: pre-receive< <ZERO-OID> <COMMIT-A> refs/heads/a/b/c
remote: # post-receive hook
remote: post-receive< <COMMIT-B> <COMMIT-A> refs/heads/main
remote: post-receive< <COMMIT-B> <ZERO-OID> refs/heads/next
remote: post-receive< <ZERO-OID> <TAG-v123> refs/tags/v123
remote: post-receive< <ZERO-OID> <COMMIT-A> refs/review/main/topic
remote: post-receive< <ZERO-OID> <COMMIT-A> refs/heads/a/b/c
To <URL/of/upstream.git>
+ <OID-B>...<OID-A> main -> main (forced update)
- [deleted] next
* [new tag] v123 -> v123
* [new reference] main -> refs/review/main/topic
* [new branch] HEAD -> a/b/c
EOF
test_cmp expect actual &&
git -C "$upstream" show-ref >out &&
make_user_friendly_and_stable_output <out >actual &&
cat >expect <<-EOF &&
<COMMIT-A> refs/heads/a/b/c
<COMMIT-A> refs/heads/main
<COMMIT-A> refs/review/main/topic
<TAG-v123> refs/tags/v123
EOF
test_cmp expect actual
'
# Refs of upstream : main(A) tags/v123 refs/review/main/topic(A) a/b/c(A)
# Refs of workbench: main(A) tags/v123
test_expect_success "cleanup ($PROTOCOL)" '
(
cd "$upstream" &&
git update-ref -d refs/review/main/topic &&
git update-ref -d refs/tags/v123 &&
git update-ref -d refs/heads/a/b/c
)
'
| tacker66/git | t/t5411/test-0000-standard-git-push.sh | Shell | gpl-2.0 | 4,612 |
#!/bin/bash
# Copyright (c) 2014 ThoughtWorks, Inc.
#
# Pixelated is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pixelated is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Pixelated. If not, see <http://www.gnu.org/licenses/>.
# prepare files for .deb package
set -e
export PIXELATED_BUILD='package'
mkdir -p dist
# initial npm tasks
./go clean
./go compass
./go handlebars
./go imagemin
./go minify_html
./go add_git_version
./go buildmain
# copy files
cd app
cp --parents 404.html fonts/* locales/**/* bower_components/font-awesome/css/font-awesome.min.css bower_components/font-awesome/fonts/* ../dist
cd -
# concat js files and minify
cat \
app/bower_components/modernizr/modernizr.js \
app/bower_components/lodash/dist/lodash.js \
app/bower_components/jquery/dist/jquery.js \
app/js/lib/highlightRegex.js \
app/bower_components/handlebars/handlebars.min.js \
app/bower_components/typeahead.js/dist/typeahead.bundle.min.js \
app/bower_components/foundation/js/foundation.js \
app/bower_components/foundation/js/foundation/foundation.reveal.js \
app/bower_components/foundation/js/foundation/foundation.offcanvas.js \
app/js/foundation/initialize_foundation.js \
.tmp/app.concatenated.js > dist/app.js
node_modules/.bin/minify dist/app.js > dist/app.min.js
rm dist/app.js
| SamuelToh/pixelated-user-agent | web-ui/config/package.sh | Shell | agpl-3.0 | 1,755 |
#!/bin/bash
# TODO: This follows the initial demo pieces and uses a bash script to
# generate the keepalived config - rework this into a template
# similar to how it is done for the haproxy configuration.
# Includes.
source "$(dirname "${BASH_SOURCE[0]}")/utils.sh"
# Constants.
readonly CHECK_SCRIPT_NAME="chk_${HA_CONFIG_NAME//-/_}"
readonly CHECK_INTERVAL_SECS="${HA_CHECK_INTERVAL}"
readonly VRRP_SLAVE_PRIORITY=42
readonly DEFAULT_PREEMPTION_STRATEGY="preempt_delay 300"
#
# Generate global config section.
#
# Example:
# generate_global_config arparp
#
function generate_global_config() {
local routername ; routername=$(scrub "$1")
echo "global_defs {"
echo " notification_email {"
for email in ${ADMIN_EMAILS[@]}; do
echo " $email"
done
echo " }"
echo ""
echo " notification_email_from ${EMAIL_FROM:-"[email protected]"}"
echo " smtp_server ${SMTP_SERVER:-"127.0.0.1"}"
echo " smtp_connect_timeout ${SMTP_CONNECT_TIMEOUT:-"30"}"
echo " router_id ${routername}"
echo "}"
}
#
# Generate VRRP checker script configuration section.
# When a check script is provided use it instead of default script
# The default script is suppressed When port is 0
#
# Example:
# generate_script_config
# generate_script_config "10.1.2.3" 8080
#
function generate_script_config() {
local serviceip ; serviceip=${1:-"127.0.0.1"}
local port=${2:-80}
echo ""
echo "vrrp_script ${CHECK_SCRIPT_NAME} {"
if [[ -n "${HA_CHECK_SCRIPT}" ]]; then
echo " script \"${HA_CHECK_SCRIPT}\""
else
if [[ "${port}" == "0" ]]; then
echo " script \"true\""
else
echo " script \"</dev/tcp/${serviceip}/${port}\""
fi
fi
echo " interval ${CHECK_INTERVAL_SECS}"
echo "}"
}
#
# Generate authentication information section.
#
# Example:
# generate_authentication_info
#
function generate_authentication_info() {
local creds=${1:-"R0ut3r"}
echo ""
echo " authentication {"
echo " auth_type PASS"
echo " auth_pass ${creds}"
echo " }"
}
#
# Generate track script section.
#
# Example:
# generate_track_script
#
function generate_track_script() {
echo ""
echo " track_script {"
echo " ${CHECK_SCRIPT_NAME}"
echo " }"
}
#
# Generate multicast + unicast options section based on the values of the
# MULTICAST_SOURCE_IPADDRESS, UNICAST_SOURCE_IPADDRESS and UNICAST_PEERS
# environment variables.
#
# Examples:
# generate_mucast_options
#
# UNICAST_SOURCE_IPADDRESS=10.1.1.1 UNICAST_PEERS="10.1.1.2,10.1.1.3" \
# generate_mucast_options
#
function generate_mucast_options() {
echo ""
if [[ -n "${MULTICAST_SOURCE_IPADDRESS}" ]]; then
echo " mcast_src_ip ${MULTICAST_SOURCE_IPADDRESS}"
fi
if [[ -n "${UNICAST_SOURCE_IPADDRESS}" ]]; then
echo " unicast_src_ip ${UNICAST_SOURCE_IPADDRESS}"
fi
if [[ -n "${UNICAST_PEERS}" ]]; then
echo ""
echo " unicast_peer {"
OLD_IFS=$IFS
IFS=","
for ip in ${UNICAST_PEERS}; do
echo " ${ip}"
done
IFS=$OLD_IFS
echo " }"
fi
}
#
# Generate virtual ip address section.
#
# Examples:
# generate_vip_section "10.245.2.3" "enp0s8"
#
# generate_vip_section "10.1.1.1 10.1.2.2" "enp0s8"
#
# generate_vip_section "10.42.42.42-45, 10.9.1.1"
#
function generate_vip_section() {
local interface ; interface=${2:-"$(get_network_device)"}
echo ""
echo " virtual_ipaddress {"
for ip in ${1}; do
echo " ${ip} dev ${interface}"
done
echo " }"
}
#
# Generate vrrpd instance configuration section.
#
# Examples:
# generate_vrrpd_instance_config arp 1 "10.1.2.3" enp0s8 "252" "master"
#
# generate_vrrpd_instance_config arp 1 "10.1.2.3" enp0s8 "3" "slave"
#
# generate_vrrpd_instance_config ipf-1 4 "10.1.2.3-4" enp0s8 "7"
#
function generate_vrrpd_instance_config() {
local servicename=$1
local iid=${2:-"1"}
local vips=$3
local interface=$4
local priority=${5:-"10"}
local instancetype=${6:-"slave"}
local vipname ; vipname=$(scrub "$1")
local initialstate=""
local preempt=${PREEMPTION:-"${DEFAULT_PREEMPTION_STRATEGY}"}
local vrrpidoffset=${HA_VRRP_ID_OFFSET:-0}
[ "${instancetype}" = "master" ] && initialstate="state MASTER"
local instance_name ; instance_name=$(generate_vrrp_instance_name "${servicename}" "${iid}")
local auth_section ; auth_section=$(generate_authentication_info "${servicename}")
local vip_section ; vip_section=$(generate_vip_section "${vips}" "${interface}")
# Emit instance
echo "
vrrp_instance ${instance_name} {
interface ${interface}
${initialstate}
virtual_router_id $((vrrpidoffset + iid))
priority ${priority}
${preempt}
${auth_section}
$(generate_track_script)
"
if [[ -n $HA_NOTIFY_SCRIPT ]]; then
echo " notify \"${HA_NOTIFY_SCRIPT}\""
fi
echo " $(generate_mucast_options)
${vip_section}
}
"
}
#
# Generate failover configuration.
#
# Examples:
# generate_failover_config
#
function generate_failover_config() {
local vips ; vips=$(expand_ip_ranges "${HA_VIPS}")
local vip_groups ; vip_groups="${HA_VIP_GROUPS}"
local interface ; interface=$(get_network_device "${NETWORK_INTERFACE}")
local ipaddr ; ipaddr=$(get_device_ip_address "${interface}")
local port="${HA_MONITOR_PORT//[^0-9]/}"
echo "! Configuration File for keepalived
$(generate_global_config "${HA_CONFIG_NAME}")
$(generate_script_config "${ipaddr}" "${port}")
"
local ipkey ; ipkey=$(echo "${ipaddr}" | cut -f 4 -d '.')
local ipslot=$((ipkey % 128))
local nodecount
if [[ "${HA_REPLICA_COUNT}" -gt 0 ]]; then
nodecount="${HA_REPLICA_COUNT}"
else
nodecount="1"
fi
local idx=$((ipslot % nodecount))
idx=$((idx + 1))
local counter=1
local previous="none"
local vip_counter=0
local total_vips=( $vips )
local vips_per_group=1
local vips_mod=0
if [[ $vip_groups -gt 0 ]]; then
vips_per_group=$((${#total_vips[@]} / vip_groups))
vips_mod=$((${#total_vips[@]} % vip_groups))
fi
while [[ "${vip_counter}" -lt "${#total_vips[@]}" ]]; do
local cur_vip_count=$vips_per_group
if [[ ${vips_mod} -gt 0 ]]; then
((cur_vip_count++))
((vips_mod--))
fi
vip_group=("${total_vips[@]:vip_counter:cur_vip_count}")
vip_counter=$((vip_counter + cur_vip_count))
local offset=$((RANDOM % 32))
local priority=$((ipslot % 64 + offset))
local instancetype="slave"
local n=$((counter % idx))
if [[ ${n} -eq 0 ]]; then
instancetype="master"
if [[ "${previous}" == "master" ]]; then
# Inverse priority + reset, so that we can flip-flop priorities.
priority=$((ipslot + 1))
previous="flip-flop"
else
priority=$((255 - ipslot))
previous=${instancetype}
fi
fi
generate_vrrpd_instance_config "${HA_CONFIG_NAME}" "${counter}" "${vip_group[*]}" \
"${interface}" "${priority}" "${instancetype}"
((counter++))
done
}
| maxamillion/origin | images/ipfailover/keepalived/lib/config-generators.sh | Shell | apache-2.0 | 7,053 |
#!/bin/sh
# Verify behavior of env.
# Copyright (C) 2009-2016 Free Software Foundation, Inc.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
. "${srcdir=.}/tests/init.sh"; path_prepend_ ./src
print_ver_ env
# A simple shebang program to call "echo" from symlinks like "./-u" or "./--".
echo "#!$abs_top_builddir/src/echo simple_echo" > simple_echo \
|| framework_failure_
chmod a+x simple_echo || framework_failure_
# Verify we can run the shebang which is not the case if
# there are spaces in $abs_top_builddir.
./simple_echo || skip_ "Error running simple_echo script"
# Verify clearing the environment
a=1
export a
env - > out || fail=1
compare /dev/null out || fail=1
env -i > out || fail=1
compare /dev/null out || fail=1
env -u a -i -u a -- > out || fail=1
compare /dev/null out || fail=1
env -i -- a=b > out || fail=1
echo a=b > exp || framework_failure_
compare exp out || fail=1
# These tests verify exact status of internal failure.
env --- # unknown option
test $? = 125 || fail=1
env -u # missing option argument
test $? = 125 || fail=1
env sh -c 'exit 2' # exit status propagation
test $? = 2 || fail=2
env . # invalid command
test $? = 126 || fail=1
env no_such # no such command
test $? = 127 || fail=1
# POSIX is clear that environ may, but need not be, sorted.
# Environment variable values may contain newlines, which cannot be
# observed by merely inspecting output from env.
# Cygwin requires a minimal environment to launch new processes: execve
# adds missing variables SYSTEMROOT and WINDIR, which show up in a
# subsequent env. Cygwin also requires /bin to always be part of PATH,
# and attempts to unset or reduce PATH may cause execve to fail.
#
# For these reasons, it is more portable to grep that our desired changes
# took place, rather than comparing output of env over an entire environment.
if env | grep '^ENV_TEST' >/dev/null ; then
skip_ "environment has potential interference from ENV_TEST*"
fi
ENV_TEST1=a
export ENV_TEST1
>out || framework_failure_
env ENV_TEST2= > all || fail=1
grep '^ENV_TEST' all | LC_ALL=C sort >> out || framework_failure_
env -u ENV_TEST1 ENV_TEST3=c > all || fail=1
grep '^ENV_TEST' all | LC_ALL=C sort >> out || framework_failure_
env ENV_TEST1=b > all || fail=1
grep '^ENV_TEST' all | LC_ALL=C sort >> out || framework_failure_
env ENV_TEST2= env > all || fail=1
grep '^ENV_TEST' all | LC_ALL=C sort >> out || framework_failure_
env -u ENV_TEST1 ENV_TEST3=c env > all || fail=1
grep '^ENV_TEST' all | LC_ALL=C sort >> out || framework_failure_
env ENV_TEST1=b env > all || fail=1
grep '^ENV_TEST' all | LC_ALL=C sort >> out || framework_failure_
cat <<EOF >exp || framework_failure_
ENV_TEST1=a
ENV_TEST2=
ENV_TEST3=c
ENV_TEST1=b
ENV_TEST1=a
ENV_TEST2=
ENV_TEST3=c
ENV_TEST1=b
EOF
compare exp out || fail=1
# PATH modifications affect exec.
mkdir unlikely_name || framework_failure_
cat <<EOF > unlikely_name/also_unlikely || framework_failure_
#!/bin/sh
echo pass
EOF
chmod +x unlikely_name/also_unlikely || framework_failure_
returns_ 127 env also_unlikely || fail=1
test x$(PATH=$PATH:unlikely_name env also_unlikely) = xpass || fail=1
test x$(env PATH="$PATH":unlikely_name also_unlikely) = xpass || fail=1
# Explicitly put . on the PATH for the rest of this test.
PATH=$PATH:
export PATH
# Use -- to end options (but not variable assignments).
# On some systems, execve("-i") invokes a shebang script ./-i on PATH as
# '/bin/sh -i', rather than '/bin/sh -- -i', which doesn't do what we want.
# Avoid the issue by using a shebang to 'echo' passing a second parameter
# before the '-i'. See the definition of simple_echo before.
# Test -u, rather than -i, to minimize PATH problems.
ln -s "simple_echo" ./-u || framework_failure_
case $(env -u echo echo good) in
good) ;;
*) fail=1 ;;
esac
case $(env -u echo -- echo good) in
good) ;;
*) fail=1 ;;
esac
case $(env -- -u pass) in
*pass) ;;
*) fail=1 ;;
esac
# After options have ended, the first argument not containing = is a program.
env a=b -- true
test $? = 127 || fail=1
ln -s "simple_echo" ./-- || framework_failure_
case $(env a=b -- true || echo fail) in
*true) ;;
*) fail=1 ;;
esac
# No way to directly invoke program name containing =.
cat <<EOF >./c=d || framework_failure_
#!/bin/sh
echo pass
EOF
chmod +x c=d || framework_failure_
test "x$(env c=d echo fail)" = xfail || fail=1
test "x$(env -- c=d echo fail)" = xfail || fail=1
test "x$(env ./c=d echo fail)" = xfail || fail=1
test "x$(env sh -c 'exec "$@"' sh c=d echo fail)" = xpass || fail=1
test "x$(sh -c '\c=d echo fail')" = xpass && #dash 0.5.4 fails so check first
{ test "x$(env sh -c '\c=d echo fail')" = xpass || fail=1; }
# catch unsetenv failure, broken through coreutils 8.0
returns_ 125 env -u a=b true || fail=1
returns_ 125 env -u '' true || fail=1
Exit $fail
| yuxuanchen1997/coreutils | tests/misc/env.sh | Shell | gpl-3.0 | 5,385 |
#!/bin/bash
[ -r ../shell_helper.sh ] || { echo "Cannot source shell_helper.sh"; exit -1; }
source ../shell_helper.sh
python this_test/run_test.py
die 0
| islog/leosac | test_helper/test-wiegand-pin/run_test.sh | Shell | agpl-3.0 | 156 |
#!/bin/sh
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Called with following variables set:
# - CORE_PATH is absolute path to @apache-mynewt-core
# - BSP_PATH is absolute path to hw/bsp/bsp_name
# - BIN_BASENAME is the path to prefix to target binary,
# .elf appended to name is the ELF file
# - FEATURES holds the target features string
# - EXTRA_JTAG_CMD holds extra parameters to pass to jtag software
# - RESET set if target should be reset when attaching
# - NO_GDB set if we should not start gdb to debug
#
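# Example invocation (hypothetical paths and target name; normally the newt
# tool sets these variables and calls this script for you):
#   CORE_PATH=/path/to/apache-mynewt-core \
#   BSP_PATH=$CORE_PATH/hw/bsp/stm32f429discovery \
#   BIN_BASENAME=bin/targets/disco/app/apps/blinky/blinky \
#   sh stm32f429discovery_debug.sh
#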
. $CORE_PATH/hw/scripts/openocd.sh
FILE_NAME=$BIN_BASENAME.elf
CFG="-f board/stm32f429discovery.cfg"
# Exit openocd when gdb detaches.
EXTRA_JTAG_CMD="$EXTRA_JTAG_CMD; stm32f4x.cpu configure -event gdb-detach {if {[stm32f4x.cpu curstate] eq \"halted\"} resume;shutdown}"
openocd_debug
| IMGJulian/incubator-mynewt-core | hw/bsp/stm32f429discovery/stm32f429discovery_debug.sh | Shell | apache-2.0 | 1,555 |
#!/bin/bash
hbase shell << EOF
disable 'hbvoter'
drop 'hbvoter'
create 'hbvoter', 'onecf', 'twocf', 'threecf', 'fourcf', {SPLITS => ['1','2','3','4','5','6','7','8','9']}
exit
EOF
| vmarkman/drill-test-framework | framework/resources/Datasources/hive_storage/hbase/createHbaseTable.sh | Shell | apache-2.0 | 181 |
#!/bin/bash
# $Id$
base=`dirname $0`/..
cd $base
VERSION=`grep 'VERSION =' setup.py | cut -d "'" -f2`
# Source dists
python setup.py sdist --formats=gztar,zip
# Eggs
python2.4 setup.py bdist_egg --exclude-source-files
python2.5 setup.py bdist_egg --exclude-source-files
python2.6 setup.py bdist_egg --exclude-source-files
# Build docs archive
python setup.py sdist --manifest-only
rm dist/pyglet-$VERSION-docs.zip
grep -v ^pyglet MANIFEST | zip dist/pyglet-$VERSION-docs.zip -@
| seeminglee/pyglet64 | tools/gendist.sh | Shell | bsd-3-clause | 483 |
#!/bin/bash
# test from-to substring extraction with the lowercase property option
# added 2016-03-28 by RGerhards, released under ASL 2.0
. $srcdir/diag.sh init
. $srcdir/diag.sh generate-conf
. $srcdir/diag.sh add-conf '
module(load="../plugins/imtcp/.libs/imtcp")
input(type="imtcp" port="13514")
template(name="outfmt" type="string" string="%msg:9:16:lowercase%\n")
:msg, contains, "msgnum:" action(type="omfile" template="outfmt"
file="rsyslog.out.log")
'
. $srcdir/diag.sh startup
. $srcdir/diag.sh tcpflood -m9
. $srcdir/diag.sh shutdown-when-empty
. $srcdir/diag.sh wait-shutdown
. $srcdir/diag.sh seq-check 0 8
. $srcdir/diag.sh exit
| sematext/rsyslog | tests/template-pos-from-to-lowercase.sh | Shell | gpl-3.0 | 624 |
#!/bin/bash
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
(set -o igncr) 2>/dev/null && set -o igncr; # the comment absorbs a trailing CR so this line parses on CRLF checkouts (e.g. Cygwin)
##
# Set any variables that my be needed higher up the chain
##
export shell_extension=
##
# Bring in the BRANCH environment variables
##
. ../all/environment.sh
export platform=linux
export CXX=arm-none-linux-gnueabi-g++
export CC=arm-none-linux-gnueabi-gcc
export AR=arm-none-linux-gnueabi-ar
export LD=arm-none-linux-gnueabi-ld
export shell_release=avmshell_neon_arm
export shell_release_debugger=avmshell_neon_arm
export shell_debug=avmshell_neon_arm_d
export shell_debug_debugger=avmshell_neon_arm_d
export shell_selftest=avmshell_neon_arm
export ssh_proc_names="avmshell_neon_arm avmshell_neon_arm_d"
# Override this, default is avmshell* and since this slave runs on a machine
# with other slaves, the process cleaner /can/ find avmshell processes,
# BUT they will NEVER belong to this slave since the shell is run on a device
# not on the host machine. Reset this to something that will never be found/killed
export proc_names="fake_never_find_me"
#export PYTHON_RUNTESTS=python3
export threads=2
export SSH_SHELL_REMOTE_HOST0=asteambeagle5
export SSH_SHELL_REMOTE_USER0=build
export SSH_SHELL_REMOTE_BASEDIR0=/home/build
export SSH_SHELL_REMOTE_DIR0=/home/build/app1
export SSH_SHELL_REMOTE_HOST1=asteambeagle5
export SSH_SHELL_REMOTE_USER1=build
export SSH_SHELL_REMOTE_BASEDIR1=/home/build
export SSH_SHELL_REMOTE_DIR1=/home/build/app2
| adobe-flash/avmplus | build/buildbot/slaves/linux-arm-ssh/scripts/environment.sh | Shell | mpl-2.0 | 1,636 |
#!/bin/bash
RUN='123456'
PERIOD='LHC10a'
PASS='pass1'
YEAR='2010'
ALICE_ITS='$ALICE_ROOT/ITS'
TMPPLACE='/tmp'
TMPFOLDER='1'
EXECFOLDER='$HOME/macroQAshifter'
MAXFILES='300'
echo "Run Number :[${RUN}]"
read
if [ "$REPLY" != "" ]; then
RUN=$REPLY
echo "Run $RUN"
fi
echo "Period :[${PERIOD}]"
read
if [ "$REPLY" != "" ]; then
PERIOD=$REPLY
echo "Period $PERIOD"
fi
echo "Pass : [${PASS}]"
read
if [ "$REPLY" != "" ]; then
PASS=$REPLY
echo "Pass $PASS "
fi
echo "Year :[${YEAR}]"
read
if [ "$REPLY" != "" ]; then
YEAR=$REPLY
echo "Year $YEAR"
fi
echo "folder with macros :[${EXECFOLDER}]"
read
if [ "$REPLY" != "" ]; then
EXECFOLDER=$REPLY
echo "Folder: $EXECFOLDER"
fi
echo "local or lxplus (1=local 2=lxplus) :[${TMPFOLDER}]"
read
if [ "$REPLY" != "" ]; then
TMPFOLDER=$REPLY
fi
if [ "$TMPFOLDER" == "1" ]; then
TMPPLACE='/tmp'
else
TMPPLACE="/tmp/$USERNAME"
fi
GOOD=130
echo "Max number of files (Insert a number >0) :[${MAXFILES}]"
read
#case $REPLY in
# *[a-zA-Z]*|*[!0-9]*|*[-]*)
# echo "Wrong: it is not a number > 0";;
# *[0-9]*)
# MAXFILES=$REPLY
# echo "Max number of files: $MAXFILES"
# export GOOD=0
# ;;
#esac
if [ "$REPLY" != "" ]; then
MAXFILES=$REPLY
echo "Max number of files: $MAXFILES"
fi
if [ -d "run$RUN" ]; then
echo "directory run$RUN exists "
else
mkdir "run$RUN"
fi
cd "run$RUN"
if [ -d "$PASS" ]; then
echo "directory $PASS exists"
else
mkdir "$PASS"
fi
cd "$PASS"
time aliroot -l <<EOI|tee merge$RUN.log
.x $EXECFOLDER/ReadQASDD.C($RUN,$YEAR,"${PERIOD}","${PASS}",$MAXFILES);
.q
EOI
time aliroot -l <<EOI|tee plot$RUN.log
.x $EXECFOLDER/PlotQASDD.C("File.QA.${YEAR}.${PERIOD}.${PASS}.Run.${RUN}.root");
.q
EOI
rm File.QA.${YEAR}.${PERIOD}.${PASS}.Run.${RUN}.root
if [ -d "images" ]; then
echo "directory images exists"
else
mkdir images
fi
mv *.ps images/.
cd images
for i in *.ps;
do
gv $i &
sleep 2
done
echo "Plots Done!!"
rm -rf $TMPPLACE/*.root
cd ../../../
| ecalvovi/AliRoot | ITS/macrosSDD/ShowSDDQA.sh | Shell | bsd-3-clause | 2,058 |
#!/bin/sh
install_resource()
{
case $1 in
*.storyboard)
echo "ibtool --errors --warnings --notices --output-format human-readable-text --compile ${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$1\" .storyboard`.storyboardc ${PODS_ROOT}/$1 --sdk ${SDKROOT}"
ibtool --errors --warnings --notices --output-format human-readable-text --compile "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$1\" .storyboard`.storyboardc" "${PODS_ROOT}/$1" --sdk "${SDKROOT}"
;;
*.xib)
echo "ibtool --errors --warnings --notices --output-format human-readable-text --compile ${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$1\" .xib`.nib ${PODS_ROOT}/$1 --sdk ${SDKROOT}"
ibtool --errors --warnings --notices --output-format human-readable-text --compile "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$1\" .xib`.nib" "${PODS_ROOT}/$1" --sdk "${SDKROOT}"
;;
*.framework)
echo "rsync -rp ${PODS_ROOT}/$1 ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
rsync -rp "${PODS_ROOT}/$1" "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
;;
*)
echo "cp -R ${PODS_ROOT}/$1 ${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
cp -R "${PODS_ROOT}/$1" "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
;;
esac
}
| chriscsc/motion-plot | examples/vendor/Pods/Pods-resources.sh | Shell | mit | 1,429 |
#!/bin/sh
if [ -f Makefile ]; then
echo "Making make distclean..."
make distclean
fi
echo "Removing autogenned files..."
rm -f config.guess config.sub configure install-sh missing mkinstalldirs Makefile.in ltmain.sh stamp-h.in */Makefile.in ltconfig stamp-h config.h.in
echo "Done."
| xzwang/fem | tslib/autogen-clean.sh | Shell | gpl-2.0 | 285 |
__cont_source_scripts()
{
local i
local dir="$1"
for i in "$dir"/*.sh; do
if test -r "$i"; then
. "$i"
fi
done
}
# CONT_SOURCE_HOOKS HOOKDIR [PROJECT]
# -----------------------------------
# Source '*.sh' files from the following directories (in this order):
# a. /usr/share/cont-layer/PROJECT/HOOK/
# b. /usr/share/cont-volume/PROJECT/HOOK/
#
# The PROJECT argument is optional because it may be set globally by
# $CONT_PROJECT environment variable. The need for PROJECT argument is
# basically to push people to install script into theirs own directories,
# which will allow easier multi-project containers maintenance.
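#
# Usage sketch (the project and hook names here are hypothetical):
#   CONT_PROJECT=postgresql
#   cont_source_hooks pre-init          # sources .../postgresql/pre-init/*.sh
#   cont_source_hooks post-init mysql   # explicit PROJECT argument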
cont_source_hooks()
{
local i dir
local hook="$1"
local project="$CONT_PROJECT"
local dir
test -z "$hook" && return
test -n "$2" && project="$2"
for dir in /usr/share/cont-layer /usr/share/cont-volume; do
dir="$dir/$project/$hook"
cont_debug2 "loading scripts from $dir"
__cont_source_scripts "$dir"
done
}
__cont_msg()
{
echo "$*" >&2
}
__cont_dbg()
{
test -z "$CONT_DEBUG" && CONT_DEBUG=0
test "$CONT_DEBUG" -lt "$1" && return
local lvl="$1"
shift
__cont_msg "debug_$lvl: $*"
}
cont_info() { __cont_msg " * $*" ; }
cont_warn() { __cont_msg "warn: $*" ; }
cont_error() { __cont_msg "error: $*"; }
cont_fatal() { __cont_msg "fatal: $*"; exit 1; }
cont_debug() { __cont_dbg 1 "$*" ; }
cont_debug2() { __cont_dbg 2 "$*" ; }
cont_debug3() { __cont_dbg 3 "$*" ; }
__cont_encode_env()
{
local i
for i in $1
do
eval local val="\$$i"
printf ": \${%s=%q}\n" "$i" "$val"
done
}
# CONT_STORE_ENV VARIABLES FILENAME
# ---------------------------------
# Create source-able script conditionally setting specified VARIABLES by
# inheritting the values from current environment; Create the file on path
# FILENAME. Already existing variables will not be changed by sourcing the
# resulting script. The argument VARIABLES expects list of space separated
# variable names.
#
# Usage:
# $ my_var=my_value
# $ my_var2="my value2"
# $ cont_store_env "my_var my_var2" ~/.my-environment
# $ cat ~/.my-environment
# : ${my_var=my_value}
# : ${my_var2=my\ value2}
cont_store_env()
{
cont_debug "creating env file '$2'"
__cont_encode_env "$1" > "$2" \
|| cont_warn "can't store environment $1 into $2 file"
}
__cont_source_scripts "/usr/share/cont-lib/autoload"
| docent-net/Fedora-Dockerfiles | postgresql/root/usr/share/cont-lib/cont-lib.sh | Shell | gpl-2.0 | 2,473 |
if [ $# -lt 1 ]; then
echo "Usage: $0 CRL_path"
exit 1
fi
ln -s "$1" "$(openssl crl -hash -noout -in "$1").r0"
| mhrivnak/pulp | playpen/certs/create_crl_sym_link.sh | Shell | gpl-2.0 | 113 |
#!/bin/bash
#
# Checks to make sure SSLv3 is not allowed by a server.
#
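# Usage sketch (defaults shown; options take key=value form):
#   ./nosslv3.sh --host=localhost --port=9090
#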
THRIFTHOST=localhost
THRIFTPORT=9090
while [[ $# -ge 1 ]]; do
arg="$1"
argIN=(${arg//=/ })
case ${argIN[0]} in
-h|--host)
THRIFTHOST=${argIN[1]}
shift # past argument
;;
-p|--port)
THRIFTPORT=${argIN[1]}
shift # past argument
;;
*)
# unknown option ignored
;;
esac
shift # past argument or value
done
function nosslv3
{
local nego
local negodenied
local opensslv
opensslv=$(openssl version | cut -d' ' -f2)
if [[ $opensslv > "1.0" ]]; then
echo "[pass] OpenSSL 1.1 or later - no need to check ssl3"
return 0
fi
# echo "openssl s_client -connect $THRIFTHOST:$THRIFTPORT -CAfile ../keys/CA.pem -ssl3 2>&1 < /dev/null"
nego=$(openssl s_client -connect $THRIFTHOST:$THRIFTPORT -CAfile ../keys/CA.pem -ssl3 2>&1 < /dev/null)
negodenied=$?
if [[ $negodenied -ne 0 ]]; then
echo "[pass] SSLv3 negotiation disabled"
echo $nego
return 0
fi
echo "[fail] SSLv3 negotiation enabled! stdout:"
echo $nego
return 1
}
nosslv3
exit $?
| yuewko/themis | vendor/github.com/apache/thrift/test/features/nosslv3.sh | Shell | apache-2.0 | 1,112 |
#!/bin/bash
mkdir -p $PREFIX/bin
for i in transIndel_*.py; do
    sed -i.bak '1s|^|#!/usr/bin/env python\'$'\n|g' "${i}";
    chmod 755 "${i}";
    cp "${i}" "$PREFIX/bin/";
done
| cokelaer/bioconda-recipes | recipes/transindel/build.sh | Shell | mit | 167 |
#!/bin/sh
SPOTYPING_DIR=${PREFIX}/share/SpoTyping
mkdir -p $SPOTYPING_DIR
cp -r SpoTyping-v${PKG_VERSION}-commandLine/* $SPOTYPING_DIR
mkdir -p ${PREFIX}/bin
PLOT_CMD=${PREFIX}/bin/SpoTyping_plot.r
echo '#!/usr/bin/env Rscript' > $PLOT_CMD
cat SpoTyping-v${PKG_VERSION}-commandLine/SpoTyping_plot.r >> $PLOT_CMD
chmod a+x $PLOT_CMD
MAIN_CMD=${PREFIX}/bin/SpoTyping.py
echo '#!/usr/bin/env python' >$MAIN_CMD
cat SpoTyping-v${PKG_VERSION}-commandLine/SpoTyping.py >> $MAIN_CMD
chmod a+x $MAIN_CMD
| cokelaer/bioconda-recipes | recipes/spotyping/build.sh | Shell | mit | 499 |
#!/bin/bash
CSHOST="localhost"
CSPORT="988"
USR="user"
PWD="passwd"
NETCAT="nc"
DELAY=5
get_geo()
{
eval "`echo "$2" | sed -e \"s/^.*${1} /${1}=/g\" -e 's/;.*$//g'`"
}
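# e.g. `get_geo rows "speed 38400 baud; rows 24; columns 80;"` rewrites the
# matching "rows 24" fragment into an eval-able assignment and sets rows=24.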
do_init()
{
clear
sline="`stty -a 2>/dev/null | grep rows 2>/dev/null`"
get_geo rows "$sline"
get_geo columns "$sline"
[ "$rows" -eq 0 ] && rows=25
[ "$columns" -eq 0 ] && columns=80
rows=`expr $rows - 1`
export rows columns
tput init 2>/dev/null
TI_ED="`tput ed 2>/dev/null`"
TI_SC="`tput sc 2>/dev/null`"
TI_RC="`tput rc 2>/dev/null`"
TI_B0="`tput setb 0 2>/dev/null`"
TI_B1="`tput setb 5 2>/dev/null`"
TI_B2="`tput setb 1 2>/dev/null`"
TI_IL="`tput il1 2>/dev/null`"
TI_DL="`tput dl1 1 2>/dev/null`"
TI_EL="`tput el 2>/dev/null`"
export TI_ED TI_B0 TI_B1 TI_B2 TI_IL TI_DL TI_SC TI_RC TI_EL
}
monitor()
{
$NETCAT -u $CSHOST $CSPORT | awk -W interactive -F"|" '
BEGIN{
line="---------------------------------------------------------------------";
nuser=0;
tabsize=(ENVIRON["columns"]-length(line))/2;
tab=sprintf("%-*.*s", tabsize, tabsize, "");
rows=ENVIRON["rows"];
il=ENVIRON["TI_IL"];
dl=ENVIRON["TI_DL"];
sc=ENVIRON["TI_SC"];
rc=ENVIRON["TI_RC"];
b0=ENVIRON["TI_B0"];
b1=ENVIRON["TI_B1"];
b2=ENVIRON["TI_B2"];
ed=ENVIRON["TI_ED"];
el=ENVIRON["TI_EL"];
csr(0, rows);
printf("\n%s%s\n", b2, ed);
print(tab "Nr User A C Modus Online Sender");
print(tab line);
csr(5+nuser, rows);
cup(5+nuser, 0);
printf("%s%s", b0, ed);
cup(rows, 0);
}
function csr(row1, row2)
{
system("tput csr "row1" "row2);
}
function cup(crow, ccol)
{
system("tput cup "crow" "ccol);
}
/^\[IB....\]/{
nuser=0;
}
/^\[I.....\]/{
if (($2!="c") && ($2!="m"))
next;
printf("%s", sc);
cup(4+nuser, 0);
ot=$12/60;
otm=ot%60; ot/=60;
oth=ot%24; ot/=24;
if (ot<1)
ots=sprintf("%d:%02dh", oth, otm);
else
ots=sprintf("%dt %dh", ot, oth);
austate=0+$5;
if (austate<0) austate=-austate;
printf("%s%s%s%2d %-12.12s%d %d %-10.10s %8.8s %s\n", b2, el,
tab, $3, $4, austate, $6, $9, ots, $14);
printf("%s", el);
nuser++;
csr(5+nuser, rows);
printf("%s%s", rc, b0);
next;
}
/^\[LOG...\]/{
printf("%s%s\n", substr($0, 20, 8), substr($0, 35));
next;
}
{
next;
}'
}
do_exit()
{
trap - 1 2 15
tput csr 0 $rows 2>/dev/null
tput sgr0 2>/dev/null
clear
exit 0
}
do_init
trap do_exit 1 2 15
[ -n "$1" ] && CSHOST="$1"
[ -n "$2" ] && CSPORT="$2"
while true
do
(
while true
do
echo "login $USR $PWD"
sleep 1
echo "log on"
sleep 1
echo "status"
sleep $DELAY
done
) | monitor
done
| bas-t/oscam | Distribution/monitor/mpcsmon.sh | Shell | gpl-3.0 | 2,895 |
R CMD REMOVE --library=$PREFIX/lib/R/library/ pd.hg.focus
| joachimwolff/bioconda-recipes | recipes/bioconductor-pd.hg.focus/pre-unlink.sh | Shell | mit | 58 |
#!/bin/bash
echo 'Username?'
read username
echo 'Password?'
read -s password # -s flag hides password text
echo 'Repo name?'
read reponame
echo -e "The username ${username}\n\n"
echo -e "The password ${password}\n\n"
echo -e "The reponame ${reponame}\n\n"
curl --user $username:$password https://api.bitbucket.org/1.0/repositories/ --data name=$reponame --data is_private='true'
git remote add origin [email protected]:$username/$reponame.git
git push -u origin --all
git push -u origin --tags | lightblueseas/linuxstuff | src/main/resources/git/impgitrep_iterate_interactive.sh | Shell | mit | 498 |
#!/bin/sh
sudo cp raspberry_garden.conf /etc/init.d/raspberry_garden
sudo chmod a+x /etc/init.d/raspberry_garden
sudo update-rc.d raspberry_garden defaults
| plokk/raspberry-garden | install.sh | Shell | mit | 147 |
#!/bin/sh
export HOSTED_ZONE_NAME=$(cat $(pwd)/config/main.json | jq -r ".hosted_zone_name")
export ENVIRONMENT=$(cat $(pwd)/config/main.json | jq -r ".environment")
export COLOUR=$(cat $(pwd)/config/main.json | jq -r ".colour")
export ENVIRONMENT_SECRETS_PATH=$(pwd)/secrets/environments/${ENVIRONMENT}/${COLOUR}
export MANAGER_A=${ENVIRONMENT}-${COLOUR}-swarm-manager-a.${HOSTED_ZONE_NAME}
export DOCKER_HOST=tcp://${MANAGER_A}:2376
export DOCKER_TLS=1
export DOCKER_CERT_PATH=${ENVIRONMENT_SECRETS_PATH}/swarm
docker swarm init --advertise-addr $(host ${ENVIRONMENT}-${COLOUR}-swarm-manager-a.${HOSTED_ZONE_NAME} | grep -m1 " has address " | awk '{ print $4 }')
export MANAGER_TOKEN=$(docker swarm join-token manager | grep "docker swarm join" | awk '{ print $5 }')
export WORKER_TOKEN=$(docker swarm join-token worker | grep "docker swarm join" | awk '{ print $5 }')
export MANAGER_B=${ENVIRONMENT}-${COLOUR}-swarm-manager-b.${HOSTED_ZONE_NAME}
export DOCKER_HOST=tcp://${MANAGER_B}:2376
docker swarm join --token $MANAGER_TOKEN ${MANAGER_A}:2377
export MANAGER_C=${ENVIRONMENT}-${COLOUR}-swarm-manager-c.${HOSTED_ZONE_NAME}
export DOCKER_HOST=tcp://${MANAGER_C}:2376
docker swarm join --token $MANAGER_TOKEN ${MANAGER_A}:2377
# Join all worker nodes (internal and external) to the swarm
for WORKER_NODE in swarm-worker-int-a swarm-worker-int-b swarm-worker-int-c \
                   swarm-worker-ext-a swarm-worker-ext-b swarm-worker-ext-c; do
  export DOCKER_HOST=tcp://${ENVIRONMENT}-${COLOUR}-${WORKER_NODE}.${HOSTED_ZONE_NAME}:2376
  docker swarm join --token $WORKER_TOKEN ${MANAGER_A}:2377
done
| nextbreakpoint/infrastructure-as-code | swarm_join.sh | Shell | mit | 2,322 |
#!/usr/bin/env bash
#
# This script is executed inside the builder image
export LC_ALL=C.UTF-8
set -e
PASS_ARGS="$*"
source ./ci/dash/matrix.sh
if [ "$RUN_INTEGRATIONTESTS" != "true" ]; then
echo "Skipping integration tests"
exit 0
fi
export LD_LIBRARY_PATH=$BUILD_DIR/depends/$HOST/lib
cd build-ci/dashcore-$BUILD_TARGET
if [ "$SOCKETEVENTS" = "" ]; then
# Let's switch socketevents mode to some random mode
R=$((RANDOM%3))
if [ "$R" == "0" ]; then
SOCKETEVENTS="select"
elif [ "$R" == "1" ]; then
SOCKETEVENTS="poll"
else
SOCKETEVENTS="epoll"
fi
fi
echo "Using socketevents mode: $SOCKETEVENTS"
EXTRA_ARGS="--dashd-arg=-socketevents=$SOCKETEVENTS"
set +e
./test/functional/test_runner.py --ci --combinedlogslen=4000 ${TEST_RUNNER_EXTRA} --failfast --nocleanup --tmpdir=$(pwd)/testdatadirs $PASS_ARGS $EXTRA_ARGS
RESULT=$?
set -e
echo "Collecting logs..."
BASEDIR=$(ls testdatadirs)
if [ "$BASEDIR" != "" ]; then
mkdir testlogs
TESTDATADIRS=$(ls testdatadirs/$BASEDIR)
for d in $TESTDATADIRS; do
[[ "$d" ]] || break # found nothing
[[ "$d" != "cache" ]] || continue # skip cache dir
mkdir testlogs/$d
PYTHONIOENCODING=UTF-8 ./test/functional/combine_logs.py -c ./testdatadirs/$BASEDIR/$d > ./testlogs/$d/combined.log
PYTHONIOENCODING=UTF-8 ./test/functional/combine_logs.py --html ./testdatadirs/$BASEDIR/$d > ./testlogs/$d/combined.html
cd testdatadirs/$BASEDIR/$d
LOGFILES="$(find . -name 'debug.log' -or -name "test_framework.log")"
cd ../../..
for f in $LOGFILES; do
d2="testlogs/$d/$(dirname $f)"
mkdir -p $d2
cp testdatadirs/$BASEDIR/$d/$f $d2/
done
done
fi
mv testlogs ../../
exit $RESULT
| thelazier/dash | ci/dash/test_integrationtests.sh | Shell | mit | 1,703 |
#!/bin/bash
if test $(which brew)
then
echo "Installing vscode..."
brew install --cask visual-studio-code
echo "Installing vscode extensions..."
# Run `code --list-extensions` to check current extensions
declare -a CODE_EXTENSIONS=(
TsumiNa.Seti-theme
donjayamanne.githistory
esbenp.prettier-vscode
GitHub.vscode-pull-request-github
jpoissonnier.vscode-styled-components
ms-python.python
ms-vscode.atom-keybindings
qinjia.seti-icons
sensourceinc.vscode-sql-beautify
vscodevim.vim
)
  for ext in "${CODE_EXTENSIONS[@]}"; do
    code --install-extension "$ext"
  done
fi
exit 0
| kmctown/dotfiles | vscode/install.sh | Shell | mit | 654 |
#!/bin/bash
DB_USER="patrickbalestra"
DB_NAME="patrickbalestra"
DB_PORT=5433
set -e
psql -U "$DB_USER" -d "$DB_NAME" -p $DB_PORT -a -f seed_categories.sql || { echo "🚨 Failed seed categories"; exit 1; }
psql -U "$DB_USER" -d "$DB_NAME" -p $DB_PORT -a -f seed_manufacturers.sql || { echo "🚨 Failed seed manufacturers"; exit 1; }
psql -U "$DB_USER" -d "$DB_NAME" -p $DB_PORT -a -f seed_bridges.sql || { echo "🚨 Failed seed bridges"; exit 1; }
psql -U "$DB_USER" -d "$DB_NAME" -p $DB_PORT -a -f seed_lights.sql || { echo "🚨 Failed seed lights"; exit 1; }
psql -U "$DB_USER" -d "$DB_NAME" -p $DB_PORT -a -f seed_outlets.sql || { echo "🚨 Failed seed outlets"; exit 1; }
psql -U "$DB_USER" -d "$DB_NAME" -p $DB_PORT -a -f seed_regions.sql || { echo "🚨 Failed seed regions"; exit 1; }
psql -U "$DB_USER" -d "$DB_NAME" -p $DB_PORT -a -f seed_accessories_count.sql || { echo "🚨 Failed seed accessories count"; exit 1; }
echo "✅ Seed completed ✅"
| BalestraPatrick/HomeKitty | Scripts/seed.sh | Shell | mit | 958 |
#!/bin/sh
echo "Purging old build...."
rm -rf dist
rm -rf build
echo "Creating new app...."
python setup.py py2app
cp HVR* dist/gORAnalysis.app/Contents/Resources/.
echo "Done!"
| BioSeq/Genetics-Of-Race-Mac-App | compile.sh | Shell | mit | 181 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for DSA-3261-1
#
# Security announcement date: 2015-05-15 00:00:00 UTC
# Script generation date: 2017-01-01 21:07:23 UTC
#
# Operating System: Debian 8 (Jessie)
# Architecture: x86_64
#
# Vulnerable packages fix on version:
# - libmodule-signature-perl:0.73-1+deb8u1
#
# Last versions recommended by security team:
# - libmodule-signature-perl:0.73-1+deb8u2
#
# CVE List:
# - CVE-2015-3406
# - CVE-2015-3407
# - CVE-2015-3408
# - CVE-2015-3409
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo apt-get install --only-upgrade libmodule-signature-perl=0.73-1+deb8u2 -y
| Cyberwatch/cbw-security-fixes | Debian_8_(Jessie)/x86_64/2015/DSA-3261-1.sh | Shell | mit | 722 |
#!/bin/bash
set -e
UDIR=${Q_SRC_ROOT}/UTILS/lua
test -d $UDIR
rm -rf ../gen_inc/; mkdir -p ../gen_inc/
rm -rf ../gen_src; mkdir -p ../gen_src
lua $UDIR/cli_extract_func_decl.lua mmap.c ../gen_inc
lua $UDIR/cli_extract_func_decl.lua is_valid_chars_for_num.c ../gen_inc
lua $UDIR/cli_extract_func_decl.lua f_mmap.c ../gen_inc
lua $UDIR/cli_extract_func_decl.lua f_munmap.c ../gen_inc
lua $UDIR/cli_extract_func_decl.lua bytes_to_bits.c ../gen_inc
lua $UDIR/cli_extract_func_decl.lua bits_to_bytes.c ../gen_inc
lua $UDIR/cli_extract_func_decl.lua get_bit.c ../gen_inc
lua $UDIR/cli_extract_func_decl.lua set_bit.c ../gen_inc
lua $UDIR/cli_extract_func_decl.lua clear_bit.c ../gen_inc
lua $UDIR/cli_extract_func_decl.lua copy_bits.c ../gen_inc
lua $UDIR/cli_extract_func_decl.lua write_bits_to_file.c ../gen_inc
lua $UDIR/cli_extract_func_decl.lua get_bits_from_array.c ../gen_inc
lua bin_search_generator.lua
#--------
# TODO: Improve below
rm -f _x
echo "mmap.c " >> _x
echo "is_valid_chars_for_num.c " >> _x
echo "f_mmap.c " >> _x
echo "f_munmap.c " >> _x
echo "get_bit.c " >> _x
echo "set_bit.c " >> _x
echo "clear_bit.c " >> _x
echo "copy_bits.c " >> _x
echo "bytes_to_bits.c " >> _x
echo "bits_to_bytes.c " >> _x
echo "write_bits_to_file.c " >> _x
echo "get_bits_from_array.c " >> _x
#-------------------
while read line; do
echo $line
gcc -c $line $QC_FLAGS -I../gen_inc -I../inc/
done< _x
#-------------------
cd ../gen_src/
ls *.c > _x
while read line; do
echo $line
gcc -c $line $QC_FLAGS -I../gen_inc -I../inc/
done< _x
gcc $Q_LINK_FLAGS ../gen_src/*.o ../src/*.o -o libutils.so
cp libutils.so $Q_ROOT/lib/
cd -
#-------------------
echo "Completed $0 in $PWD"
| NerdWalletOSS/Q | UTILS/src/DEPRECATED/gen_files.sh | Shell | mit | 1,680 |
#!/usr/bin/env bash
# ensure that we have the server software
if [ ! -f /data/minecraft_server.jar ]; then
echo "Downloading minecraft server ..."
curl --fail --silent "https://s3.amazonaws.com/Minecraft.Download/versions/1.8.1/minecraft_server.1.8.1.jar" -o /data/minecraft_server.jar
fi
# accept eula
if [ ! -f /data/eula.txt ]; then
echo "Adding eula ..."
echo "eula=true" > /data/eula.txt;
fi
cd /data; java -Xmx2G -jar minecraft_server.jar nogui
| monsendag/minecraft-server | server.sh | Shell | mit | 463 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for CESA-2013:1476
#
# Security announcement date: 2013-10-30 04:43:50 UTC
# Script generation date: 2017-01-01 21:10:49 UTC
#
# Operating System: CentOS 6
# Architecture: i386
#
# Vulnerable packages fix on version:
# - xulrunner.i686:17.0.10-1.el6.centos
# - xulrunner-devel.i686:17.0.10-1.el6.centos
#
# Last versions recommended by security team:
# - xulrunner.i686:17.0.10-1.el6.centos
# - xulrunner-devel.i686:17.0.10-1.el6.centos
#
# CVE List:
# - CVE-2013-5590
# - CVE-2013-5595
# - CVE-2013-5597
# - CVE-2013-5599
# - CVE-2013-5600
# - CVE-2013-5601
# - CVE-2013-5602
# - CVE-2013-5604
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo yum install xulrunner.i686-17.0.10 -y
sudo yum install xulrunner-devel.i686-17.0.10 -y
| Cyberwatch/cbw-security-fixes | CentOS_6/i386/2013/CESA-2013:1476.sh | Shell | mit | 901 |
#!/usr/bin/env bash
set -e
if [[ -z ${1} ]]; then
echo "usage: scripts/get_swagger_json.sh <version>"
exit 1
fi
curl -f "http://kubernetes.io/swagger-spec/api/${1}/" -o resources/swagger/${1}.json
| yanatan16/clj-kubernetes-api | scripts/get_swagger_json.sh | Shell | mit | 207 |
mkdir -p build
# Add the -fpic compiler flag, which tells the compiler to generate
# object modules using floating (relocatable) addresses. The acronym PIC
# stands for "position independent code".
g++ -c say_util.cpp -fpic -o build/say_util.o
g++ -c say_hello_func.cpp -fpic -o build/say_hello_func.o
g++ -shared build/say_util.o build/say_hello_func.o -o build/libsay.so
rm build/*.o
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./build/
g++ say_hello_main.cpp build/libsay.so -o build/say_hello_main_with_so
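# Run the demo (relies on the LD_LIBRARY_PATH export above so the dynamic
# loader can find libsay.so at runtime):
#   ./build/say_hello_main_with_so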
| YcheLanguageStudio/CPP11Study | Guidance/GccUsage/build_say_hello_with_shared_object.sh | Shell | mit | 524 |
#!/bin/bash
# SysScout - A simple menu driven shell script to get information about your Linux-based System.
# Author: Josh Brunty
# Email: josh [dot] brunty [at] marshall [dot] edu
# Twitter: @joshbrunty @MUDigForensics
# Version 1.0.3
# Updated 31October2016
# v.1.0.3 fix- fixed minor issue with date output (line 78)
# https://github.com/joshbrunty/SysScout
# Define variables
# Display pause prompt
# Displays $Message upon enter key read
function pause(){
local message="$@"
	[ -z "$message" ] && message="Press [Enter] key to continue..."
read -p "$message" readEnterKey
}
# Display the startup menu on screen
function show_menu(){
echo "-------------------------------------------"
echo "
_____ _____ _
/ ____| / ____| | |
| (___ _ _ ___ | (___ ___ ___ _ _ | |_
\___ \ | | | |/ __| \___ \ / __|/ _ \ | | | || __|
____) || |_| |\__ \ ____) || (__| (_) || |_| || |_
|_____/ \__, ||___/|_____/ \___|\___/ \__,_| \__|
__/ |v.1.0.3
|___/ "
echo "-------------------------------------------"
echo "A Network Forensics/Incident Response Tool"
echo "By: Josh Brunty: josh [dot] brunty [at] marshall [dot] edu"
echo "-------------------------------------------"
echo "Current Local Machine Date & Time : $(date)"
echo "---------------------------"
echo " Main Menu"
echo "---------------------------"
echo "1. Operating System Info"
echo "2. Time Info"
echo "3. HOST and DNS Info"
echo "4. Network Info"
echo "5. Who is Online"
echo "6. Last Logged In Users"
echo "7. Memory Information"
echo "8. Exit"
}
# Display the header message
# $1 - message
function write_header(){
local h="$@"
echo "---------------------------------------------------------------"
echo " ${h}"
echo "---------------------------------------------------------------"
}
# Get info about Local Machine Operating System
function os_info(){
write_header " Operating System Information "
echo "Operating system : $(uname -no)"
echo "Operating System Version : $(uname -mv)"
#pause "Press [Enter] key to continue..."
pause
}
# Get info about local machine time, date, and timezone
function time_info(){
write_header " Time Information "
echo "Local Machine Time : $(date +%R)"
echo "Local Machine Timezone : $(date +%Z)"
echo "Local Machine Date : $(date +"%m-%d-%y")"
#pause "Press [Enter] key to continue..."
pause
}
# Get information about localhost
function host_info(){
local dnsips=$(sed -e '/^$/d' /etc/resolv.conf | awk '{if (tolower($1)=="nameserver") print $2}')
write_header " Hostname and DNS information "
echo "Hostname : $(hostname -s)"
echo "DNS domain : $(hostname -d)"
echo "Fully qualified domain name : $(hostname -f)"
echo "Network address (IP) : $(hostname -i)"
echo "DNS name servers (DNS IP) : ${dnsips}"
pause
}
# Network Inferface/Routing/MAC Address info (i.e. IP & NetStat)
function net_info(){
devices=$(netstat -i | cut -d" " -f1 | egrep -v "^Kernel|Iface|lo")
write_header " Network information "
echo "Total network interfaces found : $(wc -w <<<${devices})"
echo "-----------------------"
echo "--- IP Address Info ---"
echo "-----------------------"
ip -4 address show
echo "-----------------------"
echo "--- Network Routing ---"
echo "-----------------------"
netstat -nr
echo "--------------------------------------"
echo "--- Interface Traffic information ---"
echo "--------------------------------------"
netstat -i
echo "------------------------------"
echo "--- MAC/Hardware Addresses ---"
echo "------------------------------"
cat /sys/class/net/*/address
pause
}
# Display a list of users currently logged on
# Display a list of recently logged in users
function user_info(){
local cmd="$1"
case "$cmd" in
who) write_header " Who is online "; who -H; pause ;;
last) write_header " List of last logged in users "; last ; pause ;;
esac
}
# Display used and free memory info
function mem_info(){
write_header " Free and used memory "
free -m
echo "---------------------------------"
echo "--- Virtual Memory Statistics ---"
echo "---------------------------------"
vmstat
echo "----------------------------------------"
echo "--- Top 5 Memory Utilizing Processes ---"
echo "----------------------------------------"
ps auxf | sort -nr -k 4 | head -5
pause
}
# Get input via the keyboard and make a decision.
function read_input(){
local c
read -p "Enter your choice [ 1 - 8 ]: " c
case $c in
1) os_info ;;
2) time_info ;;
3) host_info ;;
4) net_info ;;
5) user_info "who" ;;
6) user_info "last" ;;
7) mem_info ;;
8) echo "Happy Forensicating. Go Herd! Follow us on Twitter: @joshbrunty @MUDigForensics"; exit 0 ;;
*)
echo "Please select between 1 to 8: "
pause
esac
}
# ignore CTRL+C, CTRL+Z and quit signals using the trap command. This prohibits unwanted interrupts
trap '' SIGINT SIGQUIT SIGTSTP
# logic for program input
while true
do
clear
	show_menu	# display menu
read_input # wait for user input
done
| joshbrunty/SysScout | SysScout.sh | Shell | mit | 5,206 |
#!/bin/sh
cd "$(dirname "$0")"
if [ "$(ps -C "mono" -o "cmd" --no-headers | grep "EmuHawk.exe")" ]; then
echo "EmuHawk is already running, exiting..."
exit 0
fi
libpath=""
if [ "$(command -v lsb_release)" ]; then
case "$(lsb_release -i | cut -c17- | tr -d "\n")" in
"Arch"|"ManjaroLinux") libpath="/usr/lib/wine";;
"Debian"|"LinuxMint") libpath="/usr/lib/x86_64-linux-gnu/wine";;
"Ubuntu") libpath="/usr/lib/x86_64-linux-gnu/wine"; export MONO_WINFORMS_XIM_STYLE=disabled;; # see https://bugzilla.xamarin.com/show_bug.cgi?id=28047#c9
esac
else
printf "Distro does not provide LSB release info API! (You've met with a terrible fate, haven't you?)\n"
fi
if [ -z "$libpath" ]; then
printf "%s\n" "Unknown distro, assuming WINE library location is /usr/lib/wine..."
libpath="/usr/lib/wine"
fi
LD_LIBRARY_PATH="$libpath" mono ./EmuHawk.exe >EmuHawkMono_laststdout.txt
| ircluzar/RTC3 | Real-Time Corruptor/BizHawk_RTC/Assets/EmuHawkMono.sh | Shell | mit | 876 |
#!/bin/bash
ls /sys/class/net 2> /dev/null | while read interface
do
speed=$(cat /sys/class/net/$interface/speed 2> /dev/null)
if [[ -z "$speed" ]]; then
continue
fi
    # the speed is reported in megabits per second (Mb/s)
echo "net,interface=$interface,speed=$speed interface_speed=$speed"
done
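# Example output line (assuming an interface eth0 negotiated at 1 Gb/s), in
# line-protocol style as emitted by the echo above:
#   net,interface=eth0,speed=1000 interface_speed=1000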
| monitoring-tools/telegraf-plugins | netspeed/netspeed.sh | Shell | mit | 306 |
#!/bin/bash
echo 10 > /sys/class/gpio/export
echo out > /sys/class/gpio/gpio10/direction
echo 1 > /sys/class/gpio/gpio10/value
sleep 1
echo 0 > /sys/class/gpio/gpio10/value
echo 10 > /sys/class/gpio/unexport
| mech0s/room-defence-system | testscripts/gunfire.sh | Shell | mit | 216 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for DSA-3283-1
#
# Security announcement date: 2015-06-09 00:00:00 UTC
# Script generation date: 2017-01-01 21:07:26 UTC
#
# Operating System: Debian 7 (Wheezy)
# Architecture: armv7l
#
# Vulnerable packages fix on version:
# - cups:1.5.3-5+deb7u6
# - libcups2:1.5.3-5+deb7u6
# - libcupsimage2:1.5.3-5+deb7u6
# - libcupscgi1:1.5.3-5+deb7u6
# - libcupsdriver1:1.5.3-5+deb7u6
# - libcupsmime1:1.5.3-5+deb7u6
# - libcupsppdc1:1.5.3-5+deb7u6
# - cups-client:1.5.3-5+deb7u6
# - libcups2-dev:1.5.3-5+deb7u6
# - libcupsimage2-dev:1.5.3-5+deb7u6
# - libcupscgi1-dev:1.5.3-5+deb7u6
# - libcupsdriver1-dev:1.5.3-5+deb7u6
# - libcupsmime1-dev:1.5.3-5+deb7u6
# - libcupsppdc1-dev:1.5.3-5+deb7u6
# - cups-bsd:1.5.3-5+deb7u6
# - cups-common:1.5.3-5+deb7u6
# - cups-ppdc:1.5.3-5+deb7u6
# - cups-dbg:1.5.3-5+deb7u6
# - cupsddk:1.5.3-5+deb7u6
#
# Last versions recommended by security team:
# - cups:1.5.3-5+deb7u6
# - libcups2:1.5.3-5+deb7u6
# - libcupsimage2:1.5.3-5+deb7u6
# - libcupscgi1:1.5.3-5+deb7u6
# - libcupsdriver1:1.5.3-5+deb7u6
# - libcupsmime1:1.5.3-5+deb7u6
# - libcupsppdc1:1.5.3-5+deb7u6
# - cups-client:1.5.3-5+deb7u6
# - libcups2-dev:1.5.3-5+deb7u6
# - libcupsimage2-dev:1.5.3-5+deb7u6
# - libcupscgi1-dev:1.5.3-5+deb7u6
# - libcupsdriver1-dev:1.5.3-5+deb7u6
# - libcupsmime1-dev:1.5.3-5+deb7u6
# - libcupsppdc1-dev:1.5.3-5+deb7u6
# - cups-bsd:1.5.3-5+deb7u6
# - cups-common:1.5.3-5+deb7u6
# - cups-ppdc:1.5.3-5+deb7u6
# - cups-dbg:1.5.3-5+deb7u6
# - cupsddk:1.5.3-5+deb7u6
#
# CVE List:
# - CVE-2015-1158
# - CVE-2015-1159
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo apt-get install --only-upgrade cups=1.5.3-5+deb7u6 -y
sudo apt-get install --only-upgrade libcups2=1.5.3-5+deb7u6 -y
sudo apt-get install --only-upgrade libcupsimage2=1.5.3-5+deb7u6 -y
sudo apt-get install --only-upgrade libcupscgi1=1.5.3-5+deb7u6 -y
sudo apt-get install --only-upgrade libcupsdriver1=1.5.3-5+deb7u6 -y
sudo apt-get install --only-upgrade libcupsmime1=1.5.3-5+deb7u6 -y
sudo apt-get install --only-upgrade libcupsppdc1=1.5.3-5+deb7u6 -y
sudo apt-get install --only-upgrade cups-client=1.5.3-5+deb7u6 -y
sudo apt-get install --only-upgrade libcups2-dev=1.5.3-5+deb7u6 -y
sudo apt-get install --only-upgrade libcupsimage2-dev=1.5.3-5+deb7u6 -y
sudo apt-get install --only-upgrade libcupscgi1-dev=1.5.3-5+deb7u6 -y
sudo apt-get install --only-upgrade libcupsdriver1-dev=1.5.3-5+deb7u6 -y
sudo apt-get install --only-upgrade libcupsmime1-dev=1.5.3-5+deb7u6 -y
sudo apt-get install --only-upgrade libcupsppdc1-dev=1.5.3-5+deb7u6 -y
sudo apt-get install --only-upgrade cups-bsd=1.5.3-5+deb7u6 -y
sudo apt-get install --only-upgrade cups-common=1.5.3-5+deb7u6 -y
sudo apt-get install --only-upgrade cups-ppdc=1.5.3-5+deb7u6 -y
sudo apt-get install --only-upgrade cups-dbg=1.5.3-5+deb7u6 -y
sudo apt-get install --only-upgrade cupsddk=1.5.3-5+deb7u6 -y
| Cyberwatch/cbw-security-fixes | Debian_7_(Wheezy)/armv7l/2015/DSA-3283-1.sh | Shell | mit | 3,061 |
echo "Import Pocci Template"
${BIN_DIR}/oneoff nodejs bash ./import-pocci-template.sh
| xpfriend/pocci-template-examples | services/core/zabbix/update-container.sh | Shell | mit | 86 |
#!/bin/sh
basedir=$(cd `dirname $0`; pwd)
export GOPATH=$(pwd)
export GOBIN=$(pwd)/bin
go get github.com/go-martini/martini
go install src/main.go
| Delostik/SimpleWebHook | build.sh | Shell | mit | 151 |
#export NNN_FCOLORS='0000E631000000000000000'
alias nnn='nnn -e'
#alias ls='nnn -e'
NNN_FIFO='/tmp/nnn.fifo'
| hypebeast/dotfiles | nnn/.oh-my-zsh/custom/nnn.zsh | Shell | mit | 109 |
#!/bin/sh
set -e
echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
SWIFT_STDLIB_PATH="${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}"
install_framework()
{
if [ -r "${BUILT_PRODUCTS_DIR}/$1" ]; then
local source="${BUILT_PRODUCTS_DIR}/$1"
elif [ -r "${BUILT_PRODUCTS_DIR}/$(basename "$1")" ]; then
local source="${BUILT_PRODUCTS_DIR}/$(basename "$1")"
elif [ -r "$1" ]; then
local source="$1"
fi
local destination="${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
if [ -L "${source}" ]; then
echo "Symlinked..."
source="$(readlink "${source}")"
fi
  # use filter instead of exclude so missing patterns don't throw errors
echo "rsync -av --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${destination}\""
rsync -av --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${destination}"
local basename
basename="$(basename -s .framework "$1")"
binary="${destination}/${basename}.framework/${basename}"
if ! [ -r "$binary" ]; then
binary="${destination}/${basename}"
fi
# Strip invalid architectures so "fat" simulator / device frameworks work on device
if [[ "$(file "$binary")" == *"dynamically linked shared library"* ]]; then
strip_invalid_archs "$binary"
fi
# Resign the code if required by the build settings to avoid unstable apps
code_sign_if_enabled "${destination}/$(basename "$1")"
# Embed linked Swift runtime libraries. No longer necessary as of Xcode 7.
if [ "${XCODE_VERSION_MAJOR}" -lt 7 ]; then
local swift_runtime_libs
swift_runtime_libs=$(xcrun otool -LX "$binary" | grep --color=never @rpath/libswift | sed -E s/@rpath\\/\(.+dylib\).*/\\1/g | uniq -u && exit ${PIPESTATUS[0]})
for lib in $swift_runtime_libs; do
echo "rsync -auv \"${SWIFT_STDLIB_PATH}/${lib}\" \"${destination}\""
rsync -auv "${SWIFT_STDLIB_PATH}/${lib}" "${destination}"
code_sign_if_enabled "${destination}/${lib}"
done
fi
}
# Signs a framework with the provided identity
code_sign_if_enabled() {
if [ -n "${EXPANDED_CODE_SIGN_IDENTITY}" -a "${CODE_SIGNING_REQUIRED}" != "NO" -a "${CODE_SIGNING_ALLOWED}" != "NO" ]; then
# Use the current code_sign_identitiy
echo "Code Signing $1 with Identity ${EXPANDED_CODE_SIGN_IDENTITY_NAME}"
local code_sign_cmd="/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} ${OTHER_CODE_SIGN_FLAGS} --preserve-metadata=identifier,entitlements '$1'"
if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
code_sign_cmd="$code_sign_cmd &"
fi
echo "$code_sign_cmd"
eval "$code_sign_cmd"
fi
}
# Strip invalid architectures
strip_invalid_archs() {
binary="$1"
# Get architectures for current file
archs="$(lipo -info "$binary" | rev | cut -d ':' -f1 | rev)"
stripped=""
for arch in $archs; do
if ! [[ "${VALID_ARCHS}" == *"$arch"* ]]; then
# Strip non-valid architectures in-place
lipo -remove "$arch" -output "$binary" "$binary" || exit 1
stripped="$stripped $arch"
fi
done
if [[ "$stripped" ]]; then
echo "Stripped $binary of architectures:$stripped"
fi
}
if [[ "$CONFIGURATION" == "Debug" ]]; then
install_framework "$BUILT_PRODUCTS_DIR/CocoaSecurity/CocoaSecurity.framework"
install_framework "${PODS_ROOT}/../../RKBluetoothLEKit/RKBluetoothLE_iOS.framework"
install_framework "$BUILT_PRODUCTS_DIR/ReactiveObjC/ReactiveObjC.framework"
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
install_framework "$BUILT_PRODUCTS_DIR/CocoaSecurity/CocoaSecurity.framework"
install_framework "${PODS_ROOT}/../../RKBluetoothLEKit/RKBluetoothLE_iOS.framework"
install_framework "$BUILT_PRODUCTS_DIR/ReactiveObjC/ReactiveObjC.framework"
fi
if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
wait
fi
| yuanzj/RKBluetoothLEKit | Example/Pods/Target Support Files/Pods-RKBluetoothLEKit_Tests/Pods-RKBluetoothLEKit_Tests-frameworks.sh | Shell | mit | 4,055 |
#!/usr/bin/env bash
echo "$@" | pbcopy
| dunn/mb-dotfiles | bin/copy.sh | Shell | mit | 40 |
#!/usr/bin/env bash
# always immediately exit upon error
set -e
# start in project root
cd "`dirname $0`/.."
./bin/require-clean-working-tree.sh
git reset HEAD --hard
read -p "Enter the version you want to publish, with no 'v' (for example '1.0.1'): " version
if [[ ! "$version" ]]
then
echo "Aborting."
exit 1
fi
# push the current branch (assumes tracking is set up) and the tag
git push
git push origin "$version"
success=0
# save reference to current branch
current_branch=$(git symbolic-ref --quiet --short HEAD)
# publish to Bower by re-registering the package
bower unregister matlab-utils
bower register matlab-utils https://github.com/bmelo/matlab-utils.git
echo "Success."
| bmelo/matlab-utils | bin/publish-release.sh | Shell | mit | 704 |
#!/bin/sh
# Copyright (c) 2013 The Bitcoin Core developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Helper script for pull-tester.
#Param 1: path to bitcoin srcroot
#Param ...: arguments for build-test.sh
if [ $# -lt 1 ]; then
  echo "usage: $0 [bitcoin srcroot] build-test arguments..."
  exit 1
fi
killall -q globalboost-cli
killall -q globalboostd
cd $1
shift
./autogen.sh
./configure
./qa/pull-tester/build-tests.sh "$@"
| getcoin/globalboosty | qa/pull-tester/pull-tester.sh | Shell | mit | 524 |
# remotely run latest version of this script
# wget -O - https://github.com/boardstretcher/bash-redis/blob/master/install-arch.sh | bash
# installation on vanilla arch linux
# virtual machine provided by rackspace
if [[ $EUID -ne 0 ]]; then
echo "script must be run as root"
exit
fi
if [ ! -f /etc/arch-release ]; then
echo "must be run on arch linux"
exit
fi
pacman -Syu --noconfirm
pacman --noconfirm -S inetutils
rm /usr/bin/python
ln -s /usr/bin/python2 /usr/bin/python
wget http://nodejs.org/dist/v0.10.17/node-v0.10.17.tar.gz
wget http://redis.googlecode.com/files/redis-2.6.14.tar.gz
tar zxvf node-v0.10.17.tar.gz
tar zxvf redis-2.6.14.tar.gz
cd redis-2.6.14/src
make && make install && cd
redis-server &
cd node-v0.10.17
./configure
make && make install && cd
npm install -g redis-commander
redis-commander &
echo "you will want to reboot to apply system updates"
| boardstretcher/perf2redis | install-arch.sh | Shell | mit | 885 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for USN-2743-4
#
# Security announcement date: 2015-10-05 00:00:00 UTC
# Script generation date: 2017-01-01 21:04:49 UTC
#
# Operating System: Ubuntu 14.04 LTS
# Architecture: i686
#
# Vulnerable packages fix on version:
# - firefox:41.0.1+build2-0ubuntu0.14.04.1
#
# Last versions recommanded by security team:
# - firefox:50.0+build2-0ubuntu0.14.04.2
#
# CVE List:
# - CVE-2015-4500
# - CVE-2015-4501
# - CVE-2015-4502
# - CVE-2015-4504
# - CVE-2015-4506
# - CVE-2015-4507
# - CVE-2015-4508
# - CVE-2015-4509
# - CVE-2015-4510
# - CVE-2015-4512
# - CVE-2015-4516
# - CVE-2015-4517
# - CVE-2015-4521
# - CVE-2015-4522
# - CVE-2015-7174
# - CVE-2015-7175
# - CVE-2015-7176
# - CVE-2015-7177
# - CVE-2015-7180
# - CVE-2015-4519
# - CVE-2015-4520
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo apt-get install --only-upgrade firefox=50.0+build2-0ubuntu0.14.04.2 -y
| Cyberwatch/cbw-security-fixes | Ubuntu_14.04_LTS/i686/2015/USN-2743-4.sh | Shell | mit | 1,055 |
#!/bin/zsh
# set variable identifying the chroot you work in (used in the prompt below)
if [ -z "${debian_chroot:-}" ] && [ -r /etc/debian_chroot ]; then
debian_chroot=$(cat /etc/debian_chroot)
fi
# set a fancy prompt (non-color, unless we know we "want" color)
case "$TERM" in
xterm-color|*-256color) color_prompt=yes;;
esac
# uncomment for a colored prompt, if the terminal has the capability; turned
# off by default to not distract the user: the focus in a terminal window
# should be on the output of commands, not on the prompt
#force_color_prompt=yes
if [ -n "$force_color_prompt" ]; then
if [ -x /usr/bin/tput ] && tput setaf 1 >&/dev/null; then
# We have color support; assume it's compliant with Ecma-48
# (ISO/IEC-6429). (Lack of such support is extremely rare, and such
# a case would tend to support setf rather than setaf.)
color_prompt=yes
else
color_prompt=
fi
fi
if [ "$color_prompt" = yes ]; then
# PS1='${debian_chroot:+($debian_chroot)}\[\033[01;32m\]\u@\h\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]\$ '
PS1='${debian_chroot:+($debian_chroot)}%b%{$fg[green]%}%n@%m%{$reset_color%}:%{$fg[blue]%}%~%{$reset_color%}\$ '
else
PS1='${debian_chroot:+($debian_chroot)}%n@%m:%~\$ '
fi
unset color_prompt force_color_prompt
# If this is an xterm set the title to user@host:dir
case "$TERM" in
xterm*|rxvt*)
precmd () {print -Pn "\e]0;${debian_chroot:+($debian_chroot)}%n@%m: %~\a"}
;;
*)
;;
esac
| mendlik/dotfiles | zsh/.zsh/prompts/basic.zsh | Shell | mit | 1,464 |
#!/bin/sh
docker rm -f edvid
docker run -P -d -p 80:5000 --name edvid --link mongo:mongo danheidel/edvid
| danheidel/education-video.net | startedvid.sh | Shell | mit | 107 |
#!/usr/bin/env bash
work_dir="$(pwd)"
tools_dir="$(cd "$(dirname "$0")" && pwd)"
tmp="/tmp/$$"
URL_REDIRECTORS="$(cat << FIN
1drv.ms
amzn.to
bit.ly
boog.io
bugzil.la
g.co
gigaz.in
go.ascii.jp
goo.gl
fb.me
is.gd
kuku.lu
macaf.ee
nico.ms
nico.sc
num.to
ow.ly
p.tl
prt.nu
r10.to
s.nikkei.com
sdrv.ms
t.asahi.com
t.co
tiny.cc
tinyurl.com
urx.nu
ustre.am
wolfr.am
y2u.be
youtu.be
FIN
)"
log() {
[ "$DEBUG" = '' ] && return 0
echo "$*" 1>&2
}
exist_command() {
type "$1" > /dev/null 2>&1
}
load_keys() {
if [ "$CONSUMER_KEY" = '' -a \
-f "$work_dir/tweet.client.key" ]
then
log 'Using client key at the current directory.'
source "$work_dir/tweet.client.key"
fi
if [ "$CONSUMER_KEY" = '' -a \
-f ~/.tweet.client.key ]
then
log 'Using client key at the home directory.'
source ~/.tweet.client.key
fi
if [ "$CONSUMER_KEY" = '' -a \
-f "$tools_dir/tweet.client.key" ]
then
log 'Using client key at the tools directory.'
source "$tools_dir/tweet.client.key"
fi
export MY_SCREEN_NAME
export MY_LANGUAGE
export CONSUMER_KEY
export CONSUMER_SECRET
export ACCESS_TOKEN
export ACCESS_TOKEN_SECRET
}
case $(uname) in
Darwin|*BSD|CYGWIN*)
esed="sed -E"
;;
*)
esed="sed -r"
;;
esac
ensure_available() {
local fatal_error=0
load_keys
if [ "$MY_SCREEN_NAME" = '' ]
then
echo 'FATAL ERROR: You need to specify your screen name via an environment variable "MY_SCREEN_NAME".' 1>&2
fatal_error=1
fi
if [ "$MY_LANGUAGE" = '' ]
then
echo 'FATAL ERROR: You need to specify your language (like "en") via an environment variable "MY_LANGUAGE".' 1>&2
fatal_error=1
fi
if [ "$CONSUMER_KEY" = '' ]
then
echo 'FATAL ERROR: You need to specify a consumer key via an environment variable "CONSUMER_KEY".' 1>&2
fatal_error=1
fi
if [ "$CONSUMER_SECRET" = '' ]
then
echo 'FATAL ERROR: You need to specify a consumer secret via an environment variable "CONSUMER_SECRET".' 1>&2
fatal_error=1
fi
if [ "$ACCESS_TOKEN" = '' ]
then
echo 'FATAL ERROR: You need to specify an access token via an environment variable "ACCESS_TOKEN".' 1>&2
fatal_error=1
fi
if [ "$ACCESS_TOKEN_SECRET" = '' ]
then
echo 'FATAL ERROR: You need to specify an access token secret via an environment variable "ACCESS_TOKEN_SECRET".' 1>&2
fatal_error=1
fi
if ! exist_command nkf
then
echo 'FATAL ERROR: A required command "nkf" is missing.' 1>&2
fatal_error=1
fi
if ! exist_command curl
then
echo 'FATAL ERROR: A required command "curl" is missing.' 1>&2
fatal_error=1
fi
if ! exist_command openssl
then
echo 'FATAL ERROR: A required command "openssl" is missing.' 1>&2
fatal_error=1
fi
if ! exist_command jq
then
echo 'FATAL ERROR: A required command "jq" is missing.' 1>&2
fatal_error=1
fi
[ $fatal_error = 1 ] && exit 1
}
check_errors() {
if echo "$1" | grep '^\[' > /dev/null
then
return 0
fi
if [ "$(echo "$1" | jq -r '.errors | length')" = '0' ]
then
return 0
else
return 1
fi
}
post() {
ensure_available
local media_params=''
local OPTIND OPTARG OPT
while getopts m: OPT
do
case $OPT in
m )
media_params="media_ids=$OPTARG"
shift 2
;;
esac
done
local params="$(cat << FIN
status $*
$media_params
FIN
)"
local result="$(echo "$params" |
call_api POST https://api.twitter.com/1.1/statuses/update.json)"
echo "$result"
check_errors "$result"
}
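# Percent-encode each input line: `nkf -W8MQ` emits MIME quoted-printable
# ("=XX" hex escapes), which is then rewritten into "%XX" URL escapes while
# the RFC 3986 unreserved characters (~ _ - .) are restored.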
url_encode() {
while read -r line
do
echo "$line" |
nkf -W8MQ |
sed 's/=$//' |
tr '=' '%' |
paste -s -d '\0' - |
sed -e 's/%7E/~/g' \
-e 's/%5F/_/g' \
-e 's/%2D/-/g' \
-e 's/%2E/./g'
done
}
to_encoded_list() {
local delimiter="$1"
[ "$delimiter" = '' ] && delimiter='\&'
local transformed="$( \
sort -k 1 -t ' ' |
grep -v '^\s*$' |
url_encode |
sed 's/%20/=/' |
paste -s -d "$delimiter" - |
tr -d '\n')"
echo "$transformed"
log "to_encoded_list: $transformed"
}
call_api() {
local method=$1
local url=$2
local file=$3
local params=''
if [ -p /dev/stdin ]
then
params="$(cat)"
fi
local oauth="$(echo "$params" | generate_oauth_header "$method" "$url")"
local headers="Authorization: OAuth $oauth"
params="$(echo "$params" | to_encoded_list)"
log "METHOD : $method"
log "URL : $url"
log "HEADERS: $headers"
log "PARAMS : $params"
local file_params=''
if [ "$file" != '' ]
then
local file_param_name="$(echo "$file" | $esed 's/=.+$//')"
local file_path="$(echo "$file" | $esed 's/^[^=]+=//')"
file_params="--form $file_param_name=@$file_path"
log "FILE : $file_path (as $file_param_name)"
fi
local debug_params=''
if [ "$DEBUG" != '' ]
then
debug_params='--dump-header /dev/stderr --verbose'
fi
local curl_params
if [ "$method" = 'POST' ]
then
local main_params=''
if [ "$params" = '' ]
then
params='""'
fi
if [ "$file_params" = '' ]
then
main_params="--data \"$params\""
else
main_params="--form \"$params\""
fi
curl_params="--header \"$headers\" \
--silent \
$main_params \
$file_params \
$debug_params \
$url"
else
curl_params="--get \
--header \"$headers\" \
--data \"$params\" \
--silent \
--http1.1 \
$debug_params \
$url"
fi
curl_params="$(echo "$curl_params" | tr -d '\n' | $esed 's/ +/ /g')"
log "curl $curl_params"
eval "curl $curl_params"
}
generate_oauth_header() {
local method=$1
local url=$2
local common_params="$(common_params)"
local signature=$(cat - <(echo "$common_params") | generate_signature "$method" "$url")
local header=$(cat <(echo "$common_params") <(echo "oauth_signature $signature") |
to_encoded_list ',' |
tr -d '\n')
echo -n "$header"
log "HEADER: $header"
}
generate_signature() {
local method=$1
local url=$2
local signature_key="${CONSUMER_SECRET}&${ACCESS_TOKEN_SECRET}"
local encoded_url="$(echo "$url" | url_encode)"
local signature_source="${method}&${encoded_url}&$( \
to_encoded_list |
url_encode |
tr -d '\n')"
log "SIGNATURE SOURCE: $signature_source"
# generate signature
local signature=$(echo -n "$signature_source" |
openssl sha1 -hmac $signature_key -binary |
openssl base64 |
tr -d '\n')
echo -n "$signature"
log "SIGNATURE: $signature"
}
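# This follows OAuth 1.0a (RFC 5849): the base string is
# METHOD&urlencode(URL)&urlencode(sorted params), signed with HMAC-SHA1
# using "consumer_secret&token_secret" as the key.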
common_params() {
cat << FIN
oauth_consumer_key $CONSUMER_KEY
oauth_nonce $(date +%s%N)
oauth_signature_method HMAC-SHA1
oauth_timestamp $(date +%s)
oauth_token $ACCESS_TOKEN
oauth_version 1.0
FIN
}
kill_descendants() {
local target_pid=$1
local children=$(ps --no-heading --ppid $target_pid -o pid)
for child in $children
do
kill_descendants $child
done
if [ $target_pid != $$ ]
then
kill $target_pid 2>&1 > /dev/null
fi
}
if [ "$(basename "$0")" = "tweet.sh" ]
then
command="$1"
shift
self_pid=$$
trap 'kill_descendants $self_pid; exit 0' HUP INT QUIT KILL TERM
case "$command" in
post|tweet|tw )
post "$@"
;;
esac
fi
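# Usage sketch (assumes credentials are loaded from tweet.client.key or the
# corresponding environment variables):
#   ./tweet.sh post hello world
#   ./tweet.sh post -m <media_id> "tweet with an attached media id"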
| 84115/twooo | tweet.sh | Shell | mit | 7,265 |
#!/usr/bin/env bash
set -euo pipefail
# Test if $1 is available
isavailable() {
type "$1" &>/dev/null
}
LOGFILE="/tmp/dotfiles.log"
echo "Running '$0' $(date)" | tee -a $LOGFILE
make all
| benmezger/dotfiles | install.sh | Shell | mit | 191 |
#!/bin/bash
ip2dec () {
local a b c d ip=$@
IFS=. read -r a b c d <<< "$ip"
printf '%d' "$((a * 256 ** 3 + b * 256 ** 2 + c * 256 + d))"
}
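# e.g. `ip2dec 10.0.0.1` prints 167772161 (10*256^3 + 0 + 0 + 1)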
IPS=`dashd masternode list | grep : | cut -d \" -f 2 | cut -d : -f 1`
printf '<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<!DOCTYPE plist PUBLIC \"-//Apple//DTD PLIST 1.0//EN\" \"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">\n<plist version=\"1.0\">\n<array>\n'
for ip in $IPS;
do
printf "<integer>"
ip2dec $ip
printf "</integer>\n"
done
printf '</array>\n</plist>'
| UdjinM6/dashwallet | BreadWallet/masternodeList.sh | Shell | mit | 539 |
#!/bin/bash
BATCHFILE="$1"
if [ ! -f "$BATCHFILE" ]; then
echo "Cannot find batch file (batch: $BATCHFILE)" >&2
exit 1
fi
source $BATCHFILE
DATAFILE="$(Rscript gen_data.R $BATCHSET)"
if [ $? -ne 0 ] || [ ! -f "$DATAFILE" ]; then
echo "Cannot generate data file (batch: $BATCHFILE)" >&2
exit 1
fi
METHODS_TORUN="scclust_EXU_CSE scclust_LEX_ANY"
if [ "$TORUN" == "level1" ]; then
METHODS_TORUN="$METHODS_TORUN opt_kmatch"
fi
if [ "$TORUN" == "level1" ] || [ "$TORUN" == "level2" ]; then
METHODS_TORUN="$METHODS_TORUN opt_pairmatch"
fi
if [ "$TORUN" == "level1" ] || [ "$TORUN" == "level2" ] || [ "$TORUN" == "level3" ]; then
METHODS_TORUN="$METHODS_TORUN opt_fullmatch"
fi
if [ "$TORUN" == "level1" ] || [ "$TORUN" == "level2" ] || [ "$TORUN" == "level3" ] || [ "$TORUN" == "level4" ]; then
METHODS_TORUN="$METHODS_TORUN gre_kmatch gre_pairmatch rep_pairmatch"
fi
for match_method in $METHODS_TORUN; do
./time.sh R --vanilla --slave "--args $match_method $DATAFILE" < do_matching.R
if [ $? -ne 0 ]; then
echo "Error when running batch (batch: $BATCHFILE, method: $match_method)" >&2
exit 1
fi
done
## Current version of optmatch doesn't work with Rscript.
## For future versions, use Rscript if possible.
#for match_method in $METHODS_TORUN; do
# ./time.sh Rscript do_matching.R $match_method $DATAFILE
# if [ $? -ne 0 ]; then
# echo "Error when running batch (batch: $BATCHFILE, method: $match_method)" >&2
# exit 1
# fi
#done
rm $DATAFILE
| fsavje/sim_gfm | complexity/batch.sh | Shell | mit | 1,465 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for CESA-2012:1483
#
# Security announcement date: 2012-11-22 06:01:09 UTC
# Script generation date: 2017-02-03 21:12:36 UTC
#
# Operating System: CentOS 5
# Architecture: x86_64
#
# Vulnerable packages fix on version:
# - thunderbird.x86_64:10.0.11-1.el5.centos
#
# Last versions recommended by security team:
# - thunderbird.x86_64:45.7.0-1.el5.centos
#
# CVE List:
# - CVE-2012-4201
# - CVE-2012-4202
# - CVE-2012-4207
# - CVE-2012-4209
# - CVE-2012-4214
# - CVE-2012-4215
# - CVE-2012-4216
# - CVE-2012-5829
# - CVE-2012-5830
# - CVE-2012-5833
# - CVE-2012-5835
# - CVE-2012-5839
# - CVE-2012-5840
# - CVE-2012-5841
# - CVE-2012-5842
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo yum install thunderbird.x86_64-45.7.0 -y
| Cyberwatch/cbw-security-fixes | CentOS_5/x86_64/2012/CESA-2012:1483.sh | Shell | mit | 907 |
#!/bin/bash
DATADIR="/home/jack/src/SideCoin/.sidecoin"
rm -rf "$DATADIR"
mkdir -p "$DATADIR"/regtest
touch "$DATADIR/regtest/debug.log"
tail -q -n 1 -F "$DATADIR/regtest/debug.log" | grep -m 1 -q "Done loading" &
WAITER=$!
PORT=`expr $BASHPID + 10000`
"/home/jack/src/SideCoin/src/sidecoind" -connect=0.0.0.0 -datadir="$DATADIR" -rpcuser=user -rpcpassword=pass -listen -keypool=3 -debug -debug=net -logtimestamps -port=$PORT -regtest -rpcport=`expr $PORT + 1` &
SIDECOIND=$!
#Install a watchdog.
(sleep 10 && kill -0 $WAITER 2>/dev/null && kill -9 $SIDECOIND $$)&
wait $WAITER
if [ -n "$TIMEOUT" ]; then
timeout "$TIMEOUT"s "$@" $PORT
RETURN=$?
else
"$@" $PORT
RETURN=$?
fi
(sleep 15 && kill -0 $SIDECOIND 2>/dev/null && kill -9 $SIDECOIND $$)&
kill $SIDECOIND && wait $SIDECOIND
# timeout returns 124 on timeout, otherwise the return value of the child
exit $RETURN
| AugurProject/sidecoin | qa/pull-tester/run-sidecoind-for-test.sh | Shell | mit | 880 |
function ramSummarizer() {
  # Sum RSS of $USER's processes (ps reports rss in KB) and print usage in MB
ram_used=$(ps -u $USER -o rss= | awk '{rss += $1} END {printf("%0.f", rss / 1000)}')
echo "\033[34m$ram_used MB\033[0m used"
}
registerSummarizer "RAM" ramSummarizer
| lukasbestle/my-welcome | summarizers/05-ram.sh | Shell | mit | 239 |
#!/bin/bash
# This file is managed by salt, do not edit.
[ -f /etc/default/softether-vpnclient ] && source /etc/default/softether-vpnclient
/opt/vpnclient/vpnclient start
PID=$?
sleep 2
ifup $IFUP_INTERFACE
exit 0
| pcdummy/saltstack-softether-formula | softether/files/systemd-start-script.sh | Shell | mit | 215 |
#!/bin/bash
set -o xtrace
scripts/bootstrap-roles.sh
scripts/bootstrap-inventory.sh
scripts/bootstrap-jira.sh
| pantarei/ansible-playbook-jira | scripts/bootstrap-aio.sh | Shell | mit | 112 |
#!/bin/sh
#install global
sudo apt-get update && sudo apt-get install -y git curl tmux vim i3 dmenu dunst zsh build-essential vim thunderbird htop arc-theme lxappearance weechat
#powerline fonts
# git clone https://github.com/powerline/fonts.git
# cd fonts || exit
# ./install.sh
#
# #oh-my-zsh
# sh -c "$(curl -fsSL https://raw.githubusercontent.com/robbyrussell/oh-my-zsh/master/tools/install.sh)"
#
# #termite
# curl -sL https://raw.githubusercontent.com/Corwind/termite-install/master/termite-install.sh | sudo -E sh
#
# #node
# curl -sL https://deb.nodesource.com/setup_7.x | sudo -E bash -
#
# #resolve npm permission problems
# mkdir ~/.npm-global
# npm config set prefix "$HOME/.npm-global"
# export PATH=~/.npm-global/bin:$PATH
# source "$HOME/.zshrc"
#
# #ssh
# # cp .ssh ~
# # ssh-add
#
# #import gpg
# # gpg --import private.key
#
# #rust (problem with zsh: export SHELL=/bin/bash, then switch back to SHELL=/usr/bin/zsh and follow the instructions (source cargo))
# curl https://sh.rustup.rs -sSf | sh
#
# #YARN
# curl -sS https://dl.yarnpkg.com/debian/pubkey.gpg | sudo apt-key add -
# echo "deb https://dl.yarnpkg.com/debian/ stable main" | sudo tee /etc/apt/sources.list.d/yarn.list
#
# #heroku
# sudo add-apt-repository "deb https://cli-assets.heroku.com/branches/stable/apt ./"
# curl -L https://cli-assets.heroku.com/apt/release.key | sudo apt-key add -
#
# #arc-icons
# git clone https://github.com/horst3180/arc-icon-theme --depth 1 && cd arc-icon-theme || exit
# ./autogen.sh --prefix=/usr
# sudo make install
#
# #spotify
# sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys BBEBDCB318AD50EC6865090613B00F1FD2C19886
# echo deb http://repository.spotify.com stable non-free | sudo tee /etc/apt/sources.list.d/spotify.list
#
# sudo apt update
# sudo apt install -y spotify-client heroku python-pip yarn nodejs
#
# sudo pip install py3status
| malko42/dotfiles | src/script_init.sh | Shell | mit | 1,871 |
#!/usr/bin/env bash
# -*- coding: utf-8 -*-
# vi: set expandtab shiftwidth=4 :
# author: shun kawai
#===============================
# smart-install.sh
#===============================
# err masage
function usage_exit() {
    echo 'Usage: smart-install package-name'
exit 1
}
function already_installed() {
echo $package' is already installed'
exit 0
}
# functions
function install_Ubuntu() {
    dpkg -l $package > /dev/null 2>&1
if [ $? -eq 0 ]; then
already_installed
fi
sudo apt install $package -y
}
function install_CentOS() {
echo $package
}
# main
if [ $# -ne 1 ]; then
usage_exit
fi
package=$1
case `get-os` in
Ubuntu)
install_Ubuntu
;;
CentOS)
install_CentOS
;;
*)
echo 'Unknown OS'
echo ''
usage_exit
;;
esac
| mypaceshun/practice | bin/smart-install.sh | Shell | mit | 833 |
#!/bin/sh
# Exit if an error is encountered
set -e -x
# Unzip EAP to the version-generic home directory
# TODO make more generic so this doesn't have to change for upgrades
echo "unzipping files..."
unzip -q ${INSTALL_DIR}/jboss-eap-7.2.0.zip -d ${EAP_PARENT}
mv ${EAP_PARENT}/jboss-eap-7.2 ${EAP_PARENT}/jboss-eap
chmod +x ${EAP_HOME}/bin/*.sh
# install the patch
$EAP_HOME/bin/jboss-cli.sh --command="patch apply $INSTALL_DIR/jboss-eap-7.2.6-patch.zip"
# Create ActiveMQ module
#mv ${INSTALL_DIR}/activemq-rar*.rar ${EAP_HOME}/standalone/deployments/activemq-rar.rar
# Put adjusted configuration files into the appropriate directory. Some will be adjusted at startup
#cp -rf domain/host*.xml ${EAP_HOME}/domain/configuration/
#rm -rf host*.xml
echo "export JBOSS_USER_APP_MODULES_HOME=\"\$EAP_PARENT/modules/app-modules\"" >> ${EAP_HOME}/bin/standalone.conf
echo "export JBOSS_USER_SEC_MODULES_HOME=\"\$EAP_PARENT/modules/sec-modules\"" >> ${EAP_HOME}/bin/standalone.conf
echo "export JBOSS_USER_DATABASES_MODULES_HOME=\"\$EAP_PARENT/modules/db-modules\"" >> ${EAP_HOME}/bin/standalone.conf
echo "export JBOSS_MODULES_HOME=\"\${EAP_HOME}/modules\"" >> ${EAP_HOME}/bin/standalone.conf
echo "export JBOSS_MODULEPATH=\"\$EAP_MODULES:\$JBOSS_MODULES_HOME:\$JBOSS_USER_APP_MODULES_HOME:\$JBOSS_USER_SEC_MODULES_HOME:\$JBOSS_USER_DATABASES_MODULES_HOME\"" >> ${EAP_HOME}/bin/standalone.conf
echo "export JBOSS_USER_APP_MODULES_HOME=\"\$EAP_PARENT/modules/app-modules\"" >> ${EAP_HOME}/bin/domain.conf
echo "export JBOSS_USER_SEC_MODULES_HOME=\"\$EAP_PARENT/modules/sec-modules\"" >> ${EAP_HOME}/bin/domain.conf
echo "export JBOSS_USER_DATABASES_MODULES_HOME=\"\$EAP_PARENT/modules/db-modules\"" >> ${EAP_HOME}/bin/domain.conf
echo "export JBOSS_MODULES_HOME=\"\${EAP_HOME}/modules\"" >> ${EAP_HOME}/bin/domain.conf
echo "export JBOSS_MODULEPATH=\"\$EAP_MODULES:\$JBOSS_MODULES_HOME:\$JBOSS_USER_APP_MODULES_HOME:\$JBOSS_USER_SEC_MODULES_HOME:\$JBOSS_USER_DATABASES_MODULES_HOME\"" >> ${EAP_HOME}/bin/domain.conf
# Necessary so that the flattening doesn't keep these
rm -rf ${INSTALL_DIR}
# Move the startup scripts to EAP_HOME
mv entrypoint.sh ${EAP_HOME}/
| jlgrock/docker.jboss-eap | eap-files/resources/install_eap.sh | Shell | mit | 2,163 |
#!/bin/sh
LIME_FETCH_URL=http://trac.symfony-project.org/browser/tools/lime/trunk/lib/lime.php?format=txt
LIME_TEST=`dirname $0`/test/lib/lime.php
if [ ! -e $LIME_TEST ];
then
wget -O $LIME_TEST $LIME_FETCH_URL
fi
| kjim/phrack | sync_vendor_libs.sh | Shell | mit | 219 |
#!/bin/bash
set -x
ANDROID_TARGET=android-19
ANDROID_ABI=armeabi-v7a
echo no | android create avd --force -n test -t $ANDROID_TARGET --abi $ANDROID_ABI
emulator -avd test -no-skin -no-audio -no-window &
adb wait-for-device
adb shell input keyevent 82 &
find /home/travis/build/ -name "*.apk"
adb devices -l
if [ "$(adb shell dumpsys power | grep mScreenOn= | grep -oE '(true|false)')" == false ] ; then
echo "Screen is off. Turning on."
adb shell input keyevent 26 # wakeup
adb shell input touchscreen swipe 930 380 1080 380 # unlock
echo "OK, should be on now."
else
echo "Screen is already on."
echo "Turning off."
adb shell input keyevent 26 # sleep
adb shell input keyevent 26 # wakeup
adb shell input touchscreen swipe 930 380 1080 380 # unlock
fi
adb get-state
adb shell pm list packages
adb install /home/travis/build/snowch/android-tdd-playground/build/apk/android-tdd-playground-debug-unaligned.apk
adb shell pm list packages
adb shell am start -a android.intent.action.MAIN -n org.pestrada.android_tdd_playground/.MainActivity
adb shell netstat -nalt
curl -v http://$(adb shell netcfg | grep 'eth0' | awk '{ print $3 }' | cut -d'/' -f1):8182
| snowch/android-tdd-playground | connect_to_device.sh | Shell | mit | 1,206 |
#!/bin/bash
function usage() {
echo "Usage: $0 token chat message ..."
echo " $0 token chat @filename"
exit ${1:-0}
}
[ -z "$3" ] && usage
### Just hard coded parameter order...
token=$1
chat_id=$2
### Is the 3rd given parameter a file name (starting with @)?
if [ ${3:0:1} == @ ]; then
### Use file direct as message
text=${3:1}
else
### Use all remaining parameters as message
shift 2 # token and chat id
text=$(mktemp)
trap 'rm $text' 0
echo "$@" >$text
fi
### Send message, returns JSON string
curl --data chat_id=$chat_id --data-urlencode text@$text https://api.telegram.org/bot$token/sendMessage
| KKoPV/PVLng-scripts | bin/telegram.sh | Shell | mit | 653 |
#!/usr/bin/env bash
#set -x
CURRENT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
source $CURRENT_DIR/../tmuxomatic.sh
source $CURRENT_DIR/../helpers.sh
tmuxomatic__begin begin_hook
begin_with_conf "benchmark"
init_pane
tmuxomatic__exec "cat ./test/fixtures/grep-output"
invoke_fingers
tmuxomatic send-keys "C-c"
tmuxomatic__end end_hook
| Morantron/tmux-fingers | test/benchmarks/basic_benchmark.sh | Shell | mit | 354 |
#!/bin/sh
# CYBERWATCH SAS - 2016
#
# Security fix for RHSA-2012:0734
#
# Security announcement date: 2012-06-13 20:26:37 UTC
# Script generation date: 2016-10-20 21:19:33 UTC
#
# Operating System: Red Hat 6
# Architecture: x86_64
#
# Vulnerable packages fix on version:
# - java-1.6.0-sun.x86_64:1.6.0.33-1jpp.1.el6_2
# - java-1.6.0-sun-demo.x86_64:1.6.0.33-1jpp.1.el6_2
# - java-1.6.0-sun-devel.x86_64:1.6.0.33-1jpp.1.el6_2
# - java-1.6.0-sun-jdbc.x86_64:1.6.0.33-1jpp.1.el6_2
# - java-1.6.0-sun-plugin.x86_64:1.6.0.33-1jpp.1.el6_2
# - java-1.6.0-sun-src.x86_64:1.6.0.33-1jpp.1.el6_2
# - java-1.6.0-sun.i686:1.6.0.33-1jpp.1.el6_2
# - java-1.6.0-sun-devel.i686:1.6.0.33-1jpp.1.el6_2
#
# Last versions recommended by security team:
# - java-1.6.0-sun.x86_64:1.6.0.131-1jpp.1.el6_8
# - java-1.6.0-sun-demo.x86_64:1.6.0.131-1jpp.1.el6_8
# - java-1.6.0-sun-devel.x86_64:1.6.0.131-1jpp.1.el6_8
# - java-1.6.0-sun-jdbc.x86_64:1.6.0.131-1jpp.1.el6_8
# - java-1.6.0-sun-plugin.x86_64:1.6.0.131-1jpp.1.el6_8
# - java-1.6.0-sun-src.x86_64:1.6.0.131-1jpp.1.el6_8
# - java-1.6.0-sun.i686:1.6.0.131-1jpp.1.el6_8
# - java-1.6.0-sun-devel.i686:1.6.0.131-1jpp.1.el6_8
#
# CVE List:
# - CVE-2012-0551
# - CVE-2012-1711
# - CVE-2012-1713
# - CVE-2012-1716
# - CVE-2012-1717
# - CVE-2012-1718
# - CVE-2012-1719
# - CVE-2012-1721
# - CVE-2012-1722
# - CVE-2012-1723
# - CVE-2012-1724
# - CVE-2012-1725
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo yum install java-1.6.0-sun.x86_64-1.6.0.131 -y
sudo yum install java-1.6.0-sun-demo.x86_64-1.6.0.131 -y
sudo yum install java-1.6.0-sun-devel.x86_64-1.6.0.131 -y
sudo yum install java-1.6.0-sun-jdbc.x86_64-1.6.0.131 -y
sudo yum install java-1.6.0-sun-plugin.x86_64-1.6.0.131 -y
sudo yum install java-1.6.0-sun-src.x86_64-1.6.0.131 -y
sudo yum install java-1.6.0-sun.i686-1.6.0.131 -y
sudo yum install java-1.6.0-sun-devel.i686-1.6.0.131 -y
| Cyberwatch/cbw-security-fixes | Red_Hat_6/x86_64/2012/RHSA-2012:0734.sh | Shell | mit | 2,029 |
#!/bin/bash
#
# env vars:
# - ELECBUILD_NOCACHE: if set, forces rebuild of docker image
# - ELECBUILD_COMMIT: if set, do a fresh clone and git checkout
set -e
PROJECT_ROOT="$(dirname "$(readlink -e "$0")")/../.."
PROJECT_ROOT_OR_FRESHCLONE_ROOT="$PROJECT_ROOT"
CONTRIB="$PROJECT_ROOT/contrib"
CONTRIB_WINE="$CONTRIB/build-wine"
. "$CONTRIB"/build_tools_util.sh
DOCKER_BUILD_FLAGS=""
if [ ! -z "$ELECBUILD_NOCACHE" ] ; then
info "ELECBUILD_NOCACHE is set. forcing rebuild of docker image."
DOCKER_BUILD_FLAGS="--pull --no-cache"
fi
info "building docker image."
docker build \
$DOCKER_BUILD_FLAGS \
-t electrum-wine-builder-img \
"$CONTRIB_WINE"
# maybe do fresh clone
if [ ! -z "$ELECBUILD_COMMIT" ] ; then
info "ELECBUILD_COMMIT=$ELECBUILD_COMMIT. doing fresh clone and git checkout."
FRESH_CLONE="$CONTRIB_WINE/fresh_clone/electrum-ltc" && \
rm -rf "$FRESH_CLONE" && \
umask 0022 && \
git clone "$PROJECT_ROOT" "$FRESH_CLONE" && \
cd "$FRESH_CLONE"
git checkout "$ELECBUILD_COMMIT"
PROJECT_ROOT_OR_FRESHCLONE_ROOT="$FRESH_CLONE"
else
info "not doing fresh clone."
fi
info "building binary..."
docker run -it \
--name electrum-wine-builder-cont \
-v "$PROJECT_ROOT_OR_FRESHCLONE_ROOT":/opt/wine64/drive_c/electrum-ltc \
--rm \
--workdir /opt/wine64/drive_c/electrum-ltc/contrib/build-wine \
electrum-wine-builder-img \
./make_win.sh
# make sure resulting binary location is independent of fresh_clone
if [ ! -z "$ELECBUILD_COMMIT" ] ; then
mkdir --parents "$PROJECT_ROOT/contrib/build-wine/dist/"
cp -f "$FRESH_CLONE/contrib/build-wine/dist"/*.exe "$PROJECT_ROOT/contrib/build-wine/dist/"
fi
| pooler/electrum-ltc | contrib/build-wine/build.sh | Shell | mit | 1,706 |
#!/bin/bash
# Use unofficial Bash Strict Mode
set -euo pipefail
IFS=$'\n\t'
# LOG
NC='\033[0m' # No Color
RED='\033[0;31m'
YELLOW='\033[0;33m'
GREEN='\033[0;32m'
function error() {
typeset message
message=$1
printf "${RED}[ERROR]${NC} - $message\n"
}
function warning() {
typeset message
message=$1
printf "${YELLOW}[WARNING]${NC} - $message\n"
}
function info() {
typeset message
message=$1
printf "${GREEN}[INFO]${NC} - $message\n"
}
# Homebrew install
function brewInstall() {
typeset appName
appName=$1
# check whether Homebrew is installed
if ! hash brew 2>/dev/null
then
echo "Homebrew isn't installed. Please install it first."
return 1
fi
if ! brew list $appName 1> /dev/null 2> /dev/null
then
info "start install $appName "
brew install $appName
else
info "$appName is already installed"
fi
return 0
}
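# Illustrative usage (the package name is only an example):
#   brewInstall wget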
# Homebrew upgrade
function brewUpgrade() {
typeset appName
appName=$1
# check whether Homebrew is installed
if ! hash brew 2>/dev/null
then
echo "Homebrew isn't installed. Please install it first."
return 1
fi
if brew list $appName 1> /dev/null 2> /dev/null
then
info "start upgrade $appName, make sure execute \"brew update\" first"
echo $(brew upgrade $appName)
else
warning "$appName is not installed"
fi
return 0
}
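# Illustrative usage (example package name):
#   brew update && brewUpgrade git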
# Register config in shell startup script
function registerInZshrc() {
typeset filePath
typeset comments
filePath=$1
comments=$2
if [ ! -e $filePath ]
then
warning "$filePath not exists"
return 0
fi
  if grep -qF "$filePath" ~/.zshrc
then
warning "$filePath is already registered"
return 0
fi
info "start register $filePath"
echo "" >> ~/.zshrc
echo "# $comments" >> ~/.zshrc
echo "source $filePath" >> ~/.zshrc
return 0
}
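# Illustrative usage (hypothetical file and comment, not part of this library):
#   registerInZshrc "$HOME/.config/aliases.zsh" "custom aliases"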
# create directory
function createDir() {
typeset dirPath
dirPath=$1
if [ ! -d $dirPath ]
then
info "create folder \"$dirPath\""
mkdir -p "$dirPath"
else
warning "folder \"$dirPath\" already exists"
fi
}
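# Illustrative usage (hypothetical path):
#   createDir "$HOME/workspace/go"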
# install golang.org/x/pkg
function installGoXPkg() {
typeset pkgName
typeset pkgURL
pkgName=$1
pkgURL=$2
pkgPath="$GOPATH/src/$pkgName"
createDir "$(dirname "$pkgPath")" # create parent directory of pkgPath
if [ -d "$pkgPath" ]
then
info "update $pkgName"
pushd "$pkgPath" 1 > /dev/null
git pull
    popd > /dev/null
else
info "download $pkgName"
git clone "$pkgURL" "$pkgPath" 2> /dev/null || info "$pkgName is already installed"
fi
}
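# Illustrative usage (a real golang.org/x package, shown here only as an example):
#   installGoXPkg "golang.org/x/tools" "https://github.com/golang/tools.git"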
| EthanCai/mac-os-scripts | lib/common.sh | Shell | mit | 2,760 |
#!/bin/zsh
#
# ____ _ _____ _ _
# | _ \ ___ | |_| ___(_) | ___ ___
# | | | |/ _ \| __| |_ | | |/ _ \/ __|
# | |_| | (_) | |_| _| | | | __/\__ \
# |____/ \___/ \__|_| |_|_|\___||___/
#
# DotFiles v0.2.447
# https://dotfiles.io
#
# Description: Mac OS X Dotfiles - Simply designed to fit your shell life.
#
# Custom Configurations
#
# Copyright (c) Sebastien Rousseau 2021. All rights reserved
# Licensed under the MIT license
#
# Load custom configurations
for config in $HOME/zsh/configurations/[^.#]*.zsh; do
source $config
done
| reedia/bash_profile | src/dotfiles/zsh/configurations.zsh | Shell | mit | 649 |
#!/bin/sh
set -e
echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
SWIFT_STDLIB_PATH="${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}"
install_framework()
{
if [ -r "${BUILT_PRODUCTS_DIR}/$1" ]; then
local source="${BUILT_PRODUCTS_DIR}/$1"
elif [ -r "${BUILT_PRODUCTS_DIR}/$(basename "$1")" ]; then
local source="${BUILT_PRODUCTS_DIR}/$(basename "$1")"
elif [ -r "$1" ]; then
local source="$1"
fi
local destination="${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
if [ -L "${source}" ]; then
echo "Symlinked..."
source="$(readlink "${source}")"
fi
  # use filter instead of exclude so missing patterns don't throw errors
echo "rsync -av --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${destination}\""
rsync -av --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${destination}"
local basename
basename="$(basename -s .framework "$1")"
binary="${destination}/${basename}.framework/${basename}"
if ! [ -r "$binary" ]; then
binary="${destination}/${basename}"
fi
# Strip invalid architectures so "fat" simulator / device frameworks work on device
if [[ "$(file "$binary")" == *"dynamically linked shared library"* ]]; then
strip_invalid_archs "$binary"
fi
# Resign the code if required by the build settings to avoid unstable apps
code_sign_if_enabled "${destination}/$(basename "$1")"
# Embed linked Swift runtime libraries. No longer necessary as of Xcode 7.
if [ "${XCODE_VERSION_MAJOR}" -lt 7 ]; then
local swift_runtime_libs
swift_runtime_libs=$(xcrun otool -LX "$binary" | grep --color=never @rpath/libswift | sed -E s/@rpath\\/\(.+dylib\).*/\\1/g | uniq -u && exit ${PIPESTATUS[0]})
for lib in $swift_runtime_libs; do
echo "rsync -auv \"${SWIFT_STDLIB_PATH}/${lib}\" \"${destination}\""
rsync -auv "${SWIFT_STDLIB_PATH}/${lib}" "${destination}"
code_sign_if_enabled "${destination}/${lib}"
done
fi
}
# Signs a framework with the provided identity
code_sign_if_enabled() {
if [ -n "${EXPANDED_CODE_SIGN_IDENTITY}" -a "${CODE_SIGNING_REQUIRED}" != "NO" -a "${CODE_SIGNING_ALLOWED}" != "NO" ]; then
    # Use the current code signing identity
echo "Code Signing $1 with Identity ${EXPANDED_CODE_SIGN_IDENTITY_NAME}"
local code_sign_cmd="/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} ${OTHER_CODE_SIGN_FLAGS} --preserve-metadata=identifier,entitlements '$1'"
if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
code_sign_cmd="$code_sign_cmd &"
fi
echo "$code_sign_cmd"
eval "$code_sign_cmd"
fi
}
# Strip invalid architectures
strip_invalid_archs() {
binary="$1"
# Get architectures for current file
archs="$(lipo -info "$binary" | rev | cut -d ':' -f1 | rev)"
stripped=""
for arch in $archs; do
if ! [[ "${VALID_ARCHS}" == *"$arch"* ]]; then
# Strip non-valid architectures in-place
lipo -remove "$arch" -output "$binary" "$binary" || exit 1
stripped="$stripped $arch"
fi
done
if [[ "$stripped" ]]; then
echo "Stripped $binary of architectures:$stripped"
fi
}
if [[ "$CONFIGURATION" == "Debug" ]]; then
install_framework "$BUILT_PRODUCTS_DIR/RPChatUI/RPChatUI.framework"
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
install_framework "$BUILT_PRODUCTS_DIR/RPChatUI/RPChatUI.framework"
fi
if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
wait
fi
| priyanka16/RPChatUI | Example/Pods/Target Support Files/Pods-RPChatUI_Example/Pods-RPChatUI_Example-frameworks.sh | Shell | mit | 3,707 |
#!/bin/sh
command -v npm >/dev/null 2>&1 || { echo >&2 "This script requires npm but it's not available in this context. Ending without running pre-commit hook."; exit 0; }
npm run lint &&
npm t
| MyFoodBag/eslint-config-mfb-node | pre-commit.sh | Shell | mit | 196 |
#!/bin/bash
# this should work no matter where it is called from:
# it prints this script's base directory,
# independent of the current working directory
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
echo "$DIR"
| mvendra/sandboxes | bash/scripts_own_dir.sh | Shell | mit | 237 |
#! /bin/bash
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
pull=off
source "${DIR}/lib.sh"
if [ $# -lt 1 ]; then
echo "You must supply a password for the ti user account."
exit 2
fi
PW=$1
| figshare/Total-Impact | deploy/test.sh | Shell | mit | 204
#!/bin/sh
node "$ASKPASS_MAIN" "$@"
| desktop/dugite | test/auth/ask-pass.sh | Shell | mit | 37 |
#!/bin/bash
clear
RED='\033[0;31m'
NC='\033[0m' # No Color
outname="sysinfo.out";
main="sysinfo.c"
gcccmd="gcc -Wall $main -o $outname"
rm $outname
printf "${RED}Build started...${NC}\n"
$gcccmd
printf "${RED}Build finished...${NC}\n"
printf "${RED}Running...${NC}\n"
./$outname
printf "${RED}Finished.${NC}\n"
| adrianlita/academic | sysinfo/build.sh | Shell | mit | 316 |
#!/usr/bin/env bash
set -euo pipefail
command -v port >/dev/null 2>&1 || { echo "I require port but it's not installed, or not found in PATH. Aborting." >&2; exit 1; }
sudo port -v sync &&
sudo port -v upgrade outdated &&
sudo port -v rev-upgrade
exit 0
| milamd/sh | macportsUpdate.sh | Shell | mit | 256
#!/usr/bin/env bash
# Define different RVMs
declare -a RVMS=(\
'ruby-1.9.3-p484@databox' \
'ruby-2.0.0-p353@databox' \
# Add others here
)
# Loop over RVMs and exec RSpec
for r in "${RVMS[@]}"; do
set -o verbose
echo "Testing $r"
rvm $r exec bundle install --quiet
rvm $r exec bundle exec rspec --fail-fast --format=progress
done
| sraka1/databox | test_rvms.sh | Shell | mit | 346 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for RHSA-2016:2142
#
# Security announcement date: 2016-11-02 16:58:44 UTC
# Script generation date: 2017-01-16 21:17:45 UTC
#
# Operating System: Red Hat 5
# Architecture: i386
#
# Vulnerable packages fix on version:
# - bind97.i386:9.7.0-21.P2.el5_11.9
# - bind97-chroot.i386:9.7.0-21.P2.el5_11.9
# - bind97-debuginfo.i386:9.7.0-21.P2.el5_11.9
# - bind97-devel.i386:9.7.0-21.P2.el5_11.9
# - bind97-libs.i386:9.7.0-21.P2.el5_11.9
# - bind97-utils.i386:9.7.0-21.P2.el5_11.9
#
# Last versions recommended by the security team:
# - bind97.i386:9.7.0-21.P2.el5_11.10
# - bind97-chroot.i386:9.7.0-21.P2.el5_11.10
# - bind97-debuginfo.i386:9.7.0-21.P2.el5_11.10
# - bind97-devel.i386:9.7.0-21.P2.el5_11.10
# - bind97-libs.i386:9.7.0-21.P2.el5_11.10
# - bind97-utils.i386:9.7.0-21.P2.el5_11.10
#
# CVE List:
# - CVE-2016-8864
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo yum install bind97.i386-9.7.0 -y
sudo yum install bind97-chroot.i386-9.7.0 -y
sudo yum install bind97-debuginfo.i386-9.7.0 -y
sudo yum install bind97-devel.i386-9.7.0 -y
sudo yum install bind97-libs.i386-9.7.0 -y
sudo yum install bind97-utils.i386-9.7.0 -y
| Cyberwatch/cbw-security-fixes | Red_Hat_5/i386/2016/RHSA-2016:2142.sh | Shell | mit | 1,298 |
#!/bin/bash
# clean existing vim files and symlinks
rm -f $HOME/.vimrc
rm -rf $HOME/.vim
rm -rf $PWD/src/.vim/bundle
rm -rf $PWD/src/.vim/.netrwhist
# clean existing .tmux.conf
rm -rf $HOME/.tmux.conf
# clean existing zsh related files
rm -rf $HOME/.zprezto
rm -rf $HOME/.zlogin $HOME/.zlogout $HOME/.zpreztorc $HOME/.zprofile $HOME/.zshenv $HOME/.zshrc
| renuvair/dotfiles | clean.sh | Shell | mit | 358 |
#!/usr/bin/env clrwrap
sqlsharp-pnet.exe
| jjenki11/blaze-chem-rendering | qca_designer/lib/ml-pnet-0.8.1/tools/SqlSharp/sqlsharp-run.sh | Shell | mit | 41 |
#!/bin/bash
set -e
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
"$DIR"/java-daemon/start-daemon.sh $@ | ltg-uic/wc-java-notifier-bot | src/main/java-daemon/bin/startup.sh | Shell | mit | 112 |
#!/usr/bin/env bash
#########################################################################
# Script Name: yad2ogg
# Script Version: 1.0.0
# Script Date: 12 Juli 2016
#########################################################################
#
# Based on the idea of dir2ogg
#
#########################################################################
# MIT License
#
# Copyright (c) 2016 Ingmar Delsink
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#########################################################################
# Explanation
# yad2ogg -i input_folder -o destination --copyfile "cover.jpg"
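# A fuller illustrative invocation (paths and values are examples only):
# yad2ogg --FLAC --WAV -i ~/Music -o ~/Music-ogg -j 4 -q 6 -c "cover.jpg" -w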
#########################################################################
# --- global parameters ---------------------------------------------
set -e # kill script if a command fails
set -o nounset # unset values give error
set -o pipefail # prevents errors in a pipeline from being masked
# --- include files -------------------------------------------------
SCRIPT_PATH="$(dirname $( realpath ${BASH_SOURCE[0]} ) )"
SCRIPT_PATH_LOCAL="${SCRIPT_PATH}"
LIBS_PATH="${SCRIPT_PATH}/libs"
LIBS_PATH_LOCAL="${SCRIPT_PATH}"
source "${LIBS_PATH}/b-log/b-log.sh" # logging
source "${LIBS_PATH}/queue.sh" # simple queue interface
source "${LIBS_PATH}/mutex.sh" # simple mutex interface
source "${LIBS_PATH}/processes.sh" # simple concurrent process management
source "${LIBS_PATH}/value-storage.sh" # global value storage interface
source "${LIBS_PATH}/find-files.sh" # find files
# --- global variables ----------------------------------------------
VERSION=1.0.0
APPNAME="yad2ogg"
function usage() {
# @description prints the short usage of the script
echo "Usage: ${APPNAME}.sh [options]"
echo ""
echo " -a --ALL Convert all supported file types"
echo " -c --copyfile file Copy files over from original directory to "
echo " destination directory eg. '*.cue or *.jpg'."
echo " -C --command The default convert command. (default: ffmpeg)"
echo " When ffmpeg is not available, this can be set to avconv"
echo " -f --filetypes type File types to convert eg. 'wav flac ...'"
echo " -f 'alac' --ALAC convert files of type alac"
echo " -f 'flac' --FLAC convert files of type flac"
echo " -f 'mp3' --MP3 convert files of type mp3"
echo " -f 'm4a' --M4A convert files of type m4a"
echo " -f 'ogg' --OGG convert files of type ogg"
echo " -f 'wav' --WAV convert files of type wav"
echo " -g --gui Use a simple UI instead of logging output to stdout. (dialog)"
echo " -h --help Show usage"
echo " -i --input dir Input/source directory (defaults to current directory)"
echo " -j --jobs n Number of concurrent convert jobs (default is 1)"
echo " -l --logfile file Log to a file"
echo " -m --metadata Don't keep metadata(tags) from the original files"
echo " -o --output dir Destination/output directory (defaults to input directory)"
echo " -p --parameters param Extra conversion parameters"
echo " -q --quality n Quality switch where n is a number (default 5.0)"
echo " -s --syslog param Log to syslog \"logger 'param' log-message\""
echo " For example: \"-s '-t my-awsome-tag'\" will result in:"
echo " \"logger -t my-awsome-tag log-message\""
echo " -v --verbose Add more verbosity"
echo " -V --version Displays the script version"
echo " -w --overwrite Overwrite existing files"
echo " -z --sync Synchronize the output folder to the input folder."
echo " If a file exists in the output folder but not in the "
echo " input folder, it will be removed."
echo " Extensions will be ignored so that a converted file"
echo " will 'match' the original file"
echo " -Z --sync-hidden Same as --sync but includes hidden files/folders"
echo ""
}
INPUT_DIR="./" # input directory
OUTPUT_DIR="${INPUT_DIR:-"./"}" # output directory
QUALITY="5" # the quality for the converter switch
# "Most users agree -q 5 achieves transparency, if the source is the original or lossless."
# taken from: http://wiki.hydrogenaud.io/index.php?title=Recommended_Ogg_Vorbis
PARAMETERS="" # optional parameters for the converter
JOBS=1 # number of concurrent jobs (default 1)
VERBOSITY=$LOG_LEVEL_NOTICE # set starting log level
LOG_FILE="" # file to log to (default empty, so disabled)
SYSLOG_PARAM="" # syslog parameters (default empty, so disabled)
COPY_FILES=() # files to copy over from the source directory
USE_GUI=false # use the GUI?
KEEP_METADATA=true # keep metadata(tags)
OVERWRITE_EXISTING=false # overwrite existing files
COUNTERPART_SYNC=false # synchronize
COUNTERPART_HIDDEN=false # include hidden files from counterpart check
DEFAULT_CONV_COMM="ffmpeg" # default convert command
FILETYPES=() # file types to convert
# file types supported
readonly SUPORTED_FILETYPES=(
# lossless
wav
flac
alac
# lossy
mp3
ogg
m4a
)
# location in supported list
readonly WAV_LIST=0
readonly FLAC_LIST=1
readonly ALAC_LIST=2
readonly MP3_LIST=3
readonly OGG_LIST=4
readonly M4A_LIST=5
# directories
readonly ROOT_DIR="/tmp"
readonly APP_DIR="${ROOT_DIR}/${APPNAME}"
readonly LOCKFILE_DIR="${APP_DIR}/lock"
readonly QUEUE_STORAGE="${APP_DIR}/queue"
readonly VARIABLE_STORAGE="${APP_DIR}/variable_storage"
# queues
readonly FILES_TO_PROCESS_QUEUE="files_to_process" # holds files to process
readonly FILES_TO_COPY_OVER_QUEUE="files_to_copy_over" # holds files to copy over
readonly CONVERTER_PROCESSES_QUEUE="converter_processes" # holds PID's of processes
readonly GUI_PROCESSES_QUEUE="converter_status_processes" # holds PID of the converter status
readonly GUI_NOTIFICATIONS_QUEUE="gui_notifications" # all notifications that need to be displayed in the UI
# mutex names
readonly MUTEX_READ_FILES_TO_PROCESS="get_convert_file"
# some error codes to use in the file
readonly ERR_NO_MORE_FILES="no more files"
readonly ERR_MISSING_PARAMETER="missing parameter"
readonly ERR_MUTEX_TIMEOUT="mutex timeout"
readonly ERR_TYPE_NOT_SUPORTED="type not supported"
# variable access names
readonly GUI_TITLE="gui_title"
readonly GUI_TOTAL_COUNT="gui_total_count"
readonly GUI_PART_COUNT="gui_part_count"
# set aliases
shopt -s expand_aliases
alias GUI_TITLE="value_set ${GUI_TITLE}"
alias GUI_NOTIFY="queue_add ${GUI_NOTIFICATIONS_QUEUE}"
alias GUI_NOTIFY_CLEAR="\
queue_add ${GUI_NOTIFICATIONS_QUEUE} ' '; \
queue_add ${GUI_NOTIFICATIONS_QUEUE} ' '; \
queue_add ${GUI_NOTIFICATIONS_QUEUE} ' '; \
queue_add ${GUI_NOTIFICATIONS_QUEUE} ' '; "
alias GUI_TOTAL_COUNT="value_set ${GUI_TOTAL_COUNT}"
alias GUI_PART_COUNT="value_set ${GUI_PART_COUNT}"
# --- options processing --------------------------------------------
if [ $# -eq 0 ] ; then # nothing past to the script
usage
exit 1;
fi
for arg in "$@"; do # transform long options to short ones
shift
case "$arg" in
"--help") set -- "$@" "-h" ;;
"--version") set -- "$@" "-V" ;;
"--verbose") set -- "$@" "-v" ;;
"--gui") set -- "$@" "-g" ;;
"--logfile") set -- "$@" "-l" ;;
"--syslog") set -- "$@" "-s" ;;
"--input") set -- "$@" "-i" ;;
"--output") set -- "$@" "-o" ;;
"--quality") set -- "$@" "-q" ;;
"--jobs") set -- "$@" "-j" ;;
"--copyfile") set -- "$@" "-c" ;;
"--sync") set -- "$@" "-z" ;;
"--sync-hidden") set -- "$@" "-Z" ;;
"--metadata") set -- "$@" "-m" ;;
"--overwrite") set -- "$@" "-w" ;;
"--command") set -- "$@" "-C" ;;
# filetypes
"--filetypes") set -- "$@" "-f" ;;
"--ALL") set -- "$@" "-a" ;;
# lossless
"--WAV") set -- "$@" "-f${SUPORTED_FILETYPES[$WAV_LIST]}" ;;
"--FLAC") set -- "$@" "-f${SUPORTED_FILETYPES[$FLAC_LIST]}" ;;
"--ALAC") set -- "$@" "-f${SUPORTED_FILETYPES[$ALAC_LIST]}" ;;
# lossy
"--MP3") set -- "$@" "-f${SUPORTED_FILETYPES[$MP3_LIST]}" ;;
"--OGG") set -- "$@" "-f${SUPORTED_FILETYPES[$OGG_LIST]}" ;;
"--M4A") set -- "$@" "-f${SUPORTED_FILETYPES[$M4A_LIST]}" ;;
*) set -- "$@" "$arg"
esac
done
# get options
while getopts "hVvgl:s:i:o:q:p:j:c:C:zZmwf:a" optname
do
case "$optname" in
"h")
usage
exit 0;
;;
"V")
echo "${APPNAME} v${VERSION}"
exit 0;
;;
"v")
VERBOSITY=$(($VERBOSITY+100)) # increment log level
;;
"g")
USE_GUI=true
;;
"l")
LOG_FILE="${OPTARG}"
;;
"s")
SYSLOG_PARAM="${OPTARG}"
;;
"i")
INPUT_DIR="${OPTARG}"
;;
"o")
OUTPUT_DIR="${OPTARG}"
;;
"q")
QUALITY="${OPTARG}"
;;
"p")
PARAMETERS="${OPTARG}"
;;
"j")
JOBS="${OPTARG}"
;;
"c")
COPY_FILES[${#COPY_FILES[@]}]="${OPTARG}"
;;
"C")
DEFAULT_CONV_COMM="${OPTARG}"
;;
"z")
COUNTERPART_SYNC=true
;;
"Z")
COUNTERPART_SYNC=true
COUNTERPART_HIDDEN=true
;;
"m")
KEEP_METADATA=true
;;
"w")
OVERWRITE_EXISTING=true
;;
"f")
FILETYPES[${#FILETYPES[@]}]="${OPTARG}"
;;
"a")
FILETYPES=${SUPORTED_FILETYPES[*]}
;;
*)
FATAL "unknown error while processing options"
exit 1;
;;
esac
done
shift "$((OPTIND-1))" # shift out all the already processed options
# --- start body ----------------------------------------------------
#############################
# error printing
#############################
function error() {
local parent_lineno="${1:-}"
local message="${2:-}"
local code="${3:-1}"
if [[ -n "$message" ]] ; then
ERROR "Error on or near line ${parent_lineno}: ${message}; exiting with status ${code}"
else
ERROR "Error on or near line ${parent_lineno}; exiting with status ${code}"
fi
exit "${code}"
}
#############################
# program finished
#############################
function finish {
# @description finish the program by cleaning up it's resources
# clean app directory, if fail don't care
rm -r "${APP_DIR}" || true
}
#############################
# conversion command
#############################
get_conversion_command() {
# @description returns a conversion command
# based on the file type.
# the reason for this setup is so that the optimal command(s)
# per file type can be selected
# @param $1 input file
# @param $2 output file
# @param $2 quality switch (integer)
# @param $3 extra parameters for the conversion
local file=${1:-}
local output_file=${2:-}
local quality=${3:-5}
local parameters=${4:-}
local file_type=""
local conversion_command=() # external accessible after call
conversion_output_dir="" # external accessible after call
if [ -z "${file}" ]; then
echo "${ERR_MISSING_PARAMETER}"
return 1 # empty file!
else
file_type=${file##*.} # set file type
fi
#output_file=${file/$INPUT_DIR/$OUTPUT_DIR} # change input for output dir
printf -v file "%q" "$file" # filter out special characters
printf -v output_file "%q" "$output_file" # filter out special characters
case $file_type in
${SUPORTED_FILETYPES[$WAV_LIST]} )
if [ "${KEEP_METADATA}" = true ] ; then
parameters+=' -map_metadata 0'
else
parameters+=' -map_metadata -1'
fi
conversion_command=("${DEFAULT_CONV_COMM}" -i "${file}" -acodec libvorbis -aq "${quality}" "${parameters}" "${output_file}")
;;
${SUPORTED_FILETYPES[$FLAC_LIST]} )
if [ "${KEEP_METADATA}" = true ] ; then
parameters+=' -map_metadata 0'
else
parameters+=' -map_metadata -1'
fi
conversion_command=("${DEFAULT_CONV_COMM}" -i "${file}" -acodec libvorbis -aq "${quality}" "${parameters}" "${output_file}")
;;
${SUPORTED_FILETYPES[$ALAC_LIST]} )
if [ "${KEEP_METADATA}" = true ] ; then
parameters+=' -map_metadata 0'
else
parameters+=' -map_metadata -1'
fi
conversion_command=("${DEFAULT_CONV_COMM}" -i "${file}" -acodec libvorbis -aq "${quality}" "${parameters}" "${output_file}")
;;
${SUPORTED_FILETYPES[$MP3_LIST]} )
if [ "${KEEP_METADATA}" = true ] ; then
parameters+=' -map_metadata 0'
else
parameters+=' -map_metadata -1'
fi
conversion_command=("${DEFAULT_CONV_COMM}" -i "${file}" -acodec libvorbis -aq "${quality}" "${parameters}" "${output_file}")
;;
${SUPORTED_FILETYPES[$OGG_LIST]} )
if [ "${KEEP_METADATA}" = true ] ; then
parameters+=' -map_metadata 0'
else
parameters+=' -map_metadata -1'
fi
conversion_command=("${DEFAULT_CONV_COMM}" -i "${file}" -acodec libvorbis -aq "${quality}" "${parameters}" "${output_file}")
;;
${SUPORTED_FILETYPES[$M4A_LIST]} )
if [ "${KEEP_METADATA}" = true ] ; then
parameters+=' -map_metadata 0'
else
parameters+=' -map_metadata -1'
fi
conversion_command=("${DEFAULT_CONV_COMM}" -i "${file}" -acodec libvorbis -aq "${quality}" "${parameters}" "${output_file}")
;;
*)
conversion_command=$ERR_TYPE_NOT_SUPORTED
;;
esac
echo "${conversion_command[@]}"
return 0
}
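# Illustrative call (hypothetical paths; quality 5, no extra parameters):
#   cmd=$(get_conversion_command "./in/track.flac" "./out/track.ogg" 5 "")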
#############################
# file to convert
#############################
function get_file_to_convert() {
# @description get a file from the queue
# This is using a mutex so that the queue is only read by one process
local filename=""
local timeout=5 #seconds
local retry_timeout=$(bc -l <<< "scale = 2; $timeout/10.0") || true
local retry_count=0
local current_timeout=0
while true; do # wait to get mutex, with timeout
if mutex_lock "${MUTEX_READ_FILES_TO_PROCESS}" ; then
filename=$(queue_read "${FILES_TO_PROCESS_QUEUE}")
if [ -z "${filename}" ]; then
filename=${ERR_NO_MORE_FILES}
fi
mutex_free "${MUTEX_READ_FILES_TO_PROCESS}" # free the mutex
break
else
current_timeout=$(bc -l <<< "scale = 2; $retry_timeout*$retry_count") || true
if [[ ${current_timeout%%.*} -gt $timeout ]]; then
echo ${ERR_MUTEX_TIMEOUT}
return 0
fi
((retry_count++)) || true
sleep $retry_timeout || true
fi
done
echo "${filename}" # return the filename
}
#############################
# converter process
#############################
function process_convert() {
# @description convert files from the queue
# - get a file from the queue
# - get a command based on the file
# - setup output directory
# - run command
# - check file
# - repeat when queue is empty
# * some other things to remember *
# * on INT signal, finish the conversion
# * check EVERYTHING, this script needs to run for HOURS!
local PROCESS_PID=$BASHPID
local PROCESS_PPID=$PPID
local convert_command=""
local file=""
local output_file=""
local file_directory=""
local file_output_directory=""
local err_ret_code=0
local err_ret_message=""
value_set "${PROCESS_PID}_TERMINATE" "false" # set default value
function terminate_process() {
local PROCESS_PID=${1:-}
value_set "${PROCESS_PID}_TERMINATE" "true" # set terminate variable
}
trap "terminate_process ${PROCESS_PID}" TERM INT # on INT, let the task finish and then exit
trap 'error ${LINENO}' ERR # on error, print error
DEBUG "conversion process with PID: $PROCESS_PID started"
GUI_TITLE "Converting files"
while true; do
# get file to convert
file=$(get_file_to_convert) || true # this can fail, just accept it
if [ -z "${file}" ]; then
file=${ERR_NO_MORE_FILES}
fi
if [ "$file" == "$ERR_NO_MORE_FILES" ]; then
INFO "no more files left to process"
break # stop process
elif [ "$file" == "$ERR_MUTEX_TIMEOUT" ]; then
NOTICE "$PROCESS_PID| mutex timeout" # retry
else
output_file=${file/$INPUT_DIR/$OUTPUT_DIR} # change input for output dir
output_file="${output_file%.*}.ogg" # replace extension
# get convert command
convert_command=$(get_conversion_command "${file}" "${output_file}" "${QUALITY}" "${PARAMETERS}") || true
if [ -z "${convert_command}" ]; then
WARN "got no command to run"
elif [ "${convert_command}" == "${ERR_MISSING_PARAMETER}" ]; then
WARN "missing parameters"
elif [ "${convert_command}" == "${ERR_TYPE_NOT_SUPORTED}" ]; then
ERROR "type not supported"
else
file_output_directory=${file/$INPUT_DIR/$OUTPUT_DIR} # change input for output dir
file_output_directory="${file_output_directory%/*}" # directory part of file
INFO "processing: $(basename "$file")"
GUI_NOTIFY "${file}"
GUI_PART_COUNT $(queue_size "${FILES_TO_PROCESS_QUEUE}")
# make directory
if [ ! -d "${file_output_directory}" ]; then
mkdir -p "${file_output_directory}" || true
fi
# check overwrite
if [ "${OVERWRITE_EXISTING}" = true ] ; then
convert_command="echo \"y\" | ${convert_command}"
else
convert_command="echo \"n\" | ${convert_command}"
fi
DEBUG "$PROCESS_PID| command: ${convert_command}"
convert_command+=" -loglevel error" # add log flag to command
# run command and catch error message
err_ret_message=$(eval "${convert_command}" 2>&1 ) || err_ret_code=$?
# check return code of process
if [ ! "${err_ret_code}" = 0 ] ; then
# command returned error
if [[ "${err_ret_message}" =~ (^File .* already exists. Exiting.$) ]] || \
[[ "${err_ret_message}" =~ (^File .* already exists. Overwrite \? \[y\/N] Not overwriting - exiting$) ]]; then
DEBUG "file already exists, skipping ${file}"
else
ERROR "error while processing: ${file}"
INFO "error message: ${err_ret_message}"
INFO "return code of command was: $err_ret_code"
# remove failed file
INFO "removing file because conversion failed: ${output_file}"
rm "${output_file}" || true
fi
err_ret_message=""
err_ret_code=0
fi
fi
fi
TERMINATE=$(value_get ${PROCESS_PID}_TERMINATE) || true
if [ "${TERMINATE}" = true ]; then
break
fi
done
DEBUG "process $PROCESS_PID stops now"
return 0
}
#############################
# Graphical User Interface
#############################
function process_gui() {
# @description this displays the UI in terminal or via an UI interface called 'dialog'
local PROCESS_PID=$BASHPID
local start_time=""
local end_time=""
local elapsed_time=""
local old_elapsed_time=""
local title=""
local sub_title=""
local totalcount=0
local partcount=0
local old_title=""
local old_totalcount=0
local old_partcount=0
local notification=""
local notifications=()
local message="" # mess to display
local old_message=""
local percentage=0
local gui_update="0.5" #sec
local term_update="10" #sec
local terminal_cols=0
local old_terminal_cols=0
local terminal_lines=0
local old_terminal_lines=0
local terminal_width=0
local terminal_height=0
export DIALOGRC="${SCRIPT_PATH_LOCAL}/.dialogrc" # export rc file
start_time=$(date +%s)
value_set "${PROCESS_PID}_TERMINATE" "false" # set default value
function terminate_process() {
DEBUG "set GUI term value"
local PROCESS_PID=${1:-}
value_set "${PROCESS_PID}_TERMINATE" "true" # set terminate variable
}
trap "terminate_process ${PROCESS_PID}" TERM INT # on TERM or INT, let the task finish and then exit
trap 'error ${LINENO}' ERR # on error, print error
DEBUG "start GUI"
while true; do
old_terminal_cols=$terminal_cols
terminal_cols=$(tput cols) || true
old_terminal_lines=$terminal_lines
terminal_lines=$(tput lines) || true
if [ "$terminal_lines" -ne "$old_terminal_lines" ]; then
terminal_height=$(((${terminal_lines:-200} / 10)*9)) # about 90%
fi
if [ "$terminal_cols" -ne "$old_terminal_cols" ]; then
terminal_width=$(((${terminal_cols:-200} / 10)*9)) # about 90%
fi
title=$(value_get "${GUI_TITLE}") || true
totalcount=$(value_get "${GUI_TOTAL_COUNT}") || true
partcount=$(value_get "${GUI_PART_COUNT}") || true
notification=$(queue_read "${GUI_NOTIFICATIONS_QUEUE}") || true
while [ ! -z "${notification}" ]; do
notifications[0]="${notifications[1]:-}"
notifications[1]="${notifications[2]:-}"
notifications[2]="${notifications[3]:-}"
notifications[3]="${notification}"
notification=$(queue_read "${GUI_NOTIFICATIONS_QUEUE}")
done
printf -v elapsed_time "%02d:%02d:%02d" \
$(($(( $(date +%s)-$start_time ))/3600)) \
$(($(( $(date +%s)-$start_time ))%3600/60)) \
$(($(( $(date +%s)-$start_time ))%60))
if [ "${totalcount}" == 0 ]; then
sub_title=""
percentage=0
elif [ -z "${partcount}" ] || [ -z "${totalcount}" ]; then
sub_title=""
percentage=0
else
sub_title="$partcount out of $totalcount left to process."
percentage=$(((totalcount-partcount)*100/totalcount))
fi
if [ ! "${title}" = "${old_title}" ] || \
[ ! "${message}" = "${old_message}" ] || \
[ ! "${totalcount}" = "${old_totalcount}" ] || \
[ ! "${partcount}" = "${old_partcount}" ] || \
[ ! "${elapsed_time}" = "${old_elapsed_time}" ]; then
# write to log every term_update sec
if [ "$((($(date +%s)-$start_time)%$term_update))" = "0" ] || [ "${USE_GUI}" = false ]; then
NOTICE "Elapsed time: $elapsed_time | ${percentage}% | ${title} | ${sub_title}"
fi
if [ "${USE_GUI}" = true ]; then
message="\Z4${sub_title}\Zn\n"
message+="\n"
message+="${notifications[0]:-}\n"
message+="${notifications[1]:-}\n"
message+="${notifications[2]:-}\n"
message+="${notifications[3]:-}\n"
message+="\n"
message+="\ZbElapsed time since start: $elapsed_time\Zn"
echo $percentage | dialog --colors --title "${title}" --gauge "${message}" ${terminal_height:-14} ${terminal_width:-100} 0
sleep $gui_update || true
else
sleep $term_update || true
fi
fi
TERMINATE=$(value_get ${PROCESS_PID}_TERMINATE) || true
if [ "${TERMINATE}" = true ]; then
end_time=$(date +%s)
printf -v elapsed_time "%02d:%02d:%02d" \
$(($(( $end_time-$start_time ))/3600)) \
$(($(( $end_time-$start_time ))%3600/60)) \
$(($(( $end_time-$start_time ))%60))
start_time=$(date -d@$start_time '+%m/%d/%Y %H:%M:%S') || true
end_time=$(date -d@$end_time '+%m/%d/%Y %H:%M:%S') || true
message="\nStart time: $start_time\nEnd time: $end_time\nTime taken: ${elapsed_time}"
if [ "${USE_GUI}" = true ]; then
echo 100 | dialog --title "${APPNAME} is now done" --gauge "${message}" ${terminal_height:-14} ${terminal_width:-100} 0
fi
NOTICE "${APPNAME} is now done"
NOTICE "Start time: $start_time"
NOTICE "End time: $end_time"
NOTICE "Time taken: ${elapsed_time}"
break
fi
old_title="${title}"
old_message="${message}"
old_partcount="${partcount}"
old_totalcount="${totalcount}"
old_elapsed_time="${elapsed_time}"
done
DEBUG "GUI stops now"
return 0
}
#############################
# copy over files
#############################
function copy_files_over() {
# @description copy over files
# @param $1 source
# @param $2 dest
# @param rest of the parameters filenames
local source=${1:-"./"}
local dest=${2:-"./"}
shift 2
local files=${@:-""}
local filename=""
local extension=""
local err_ret_code=0
local err_ret_message=""
    local copy_queue="files_to_copy_over"
GUI_TITLE "Copying files over"
GUI_NOTIFY_CLEAR
DEBUG "source: $source"
DEBUG "dest: $dest"
DEBUG "files: ${files[@]}"
for file in ${files[@]}; do
filename=$(basename "$file")
extension="${filename##*.}"
filename="${filename%.*}"
INFO "searching for the files to copy over"
GUI_NOTIFY "searching for file(s): ${file}"
find_files "${source}" "${filename}" "${extension}" "${copy_queue}"
GUI_TOTAL_COUNT $(queue_size ${copy_queue})
GUI_PART_COUNT 0
INFO "copying over files"
# process queue
file_to_process=$(queue_read ${copy_queue})
while [ ! -z "${file_to_process}" ]; do
DEBUG "copy file: ${file_to_process}"
GUI_NOTIFY "${file_to_process}"
GUI_PART_COUNT $(queue_size ${copy_queue})
file_output_directory=${file_to_process/$source/$dest} # change input for output dir
file_output_directory="${file_output_directory%/*}" # directory part of file
# make directory
if [ ! -d "${file_output_directory}" ]; then
mkdir -p "${file_output_directory}" || true
fi
output_file=${file_to_process/$source/$dest}
INFO "copying file: '${file_to_process}'"
DEBUG "to: '${output_file}'"
err_ret_message=$(cp "${file_to_process}" "${output_file}" 2>&1 ) || err_ret_code=$?
if [ ! "${err_ret_code}" = 0 ] ; then
if [ ! -z "${err_ret_message}" ]; then
ERROR "copy command returned message: ${err_ret_message}"
fi
ERROR "error while processing: ${file_to_process}"
DEBUG "return code of command is: ${err_ret_code}"
fi
file_to_process=$(queue_read ${copy_queue}) # read new file
done
done
}
#############################
# synchronize files
#############################
function fuzzy_counterpart_check() {
# @description check if a 'counterpart' file exists in another directory
# This will check if a folder has a counterpart file in another directory.
# The 'counterpart' file is a file which has the same name and path as the file it checks to.
# The fuzzy part is that the extension will not be checked.
# @param $1 from; the base folder
# @param $2 to; the folder to check
# @param $3 include hidden files/folders [true/false]
local base_directory=${1:-}
local check_directory=${2:-}
local hidden=${3:-}
local base_file=""
local check_file=""
local err_ret_code=0
local found_files=0
local find_param=""
local queue_check_file="fuzzy_counterpart_check"
# check directories
if [ -z "${base_directory}" ]; then
ERROR "the from/base directory parameter for the fuzzy counterpart check was not set"
return 1
fi
if [ -z "${check_directory}" ]; then
ERROR "the to/check directory parameter for the fuzzy counterpart check was not set"
return 1
fi
if [ ! -r "${base_directory}" ]; then # check base directory for read access
ERROR "the from/base directory for the fuzzy counterpart check cannot be read"
return 1
fi
if [ ! -w "${check_directory}" ]; then # check check directory for write access
ERROR "the to/check directory for the fuzzy counterpart check is not writable"
return 1
fi
# set hidden
if [ ! "${hidden}" = true ] ; then
find_param="*/\.*" # exclude hidden files/folders
fi
GUI_TITLE "Synchronize files"
GUI_TOTAL_COUNT 0
GUI_PART_COUNT 0
GUI_NOTIFY "looking for all the files in the output folder, this may take a while"
# find files of all types
find_files "${check_directory}" "*" "*" "${queue_check_file}" "${find_param}"
GUI_TOTAL_COUNT $(queue_size ${queue_check_file})
check_file=$(queue_read "${queue_check_file}")
while [ ! -z "${check_file}" ]; do
DEBUG "counterpart check: $check_file"
GUI_NOTIFY "${check_file}"
GUI_PART_COUNT $(queue_size "${queue_check_file}")
# - replace check dir with base dir
base_file="${check_file/$check_directory/$base_directory}"
base_file="${base_file%.*}" # remove extension
DEBUG "counterpart check to: ${base_file}"
# - check if exists based on filename, ignoring all extension (fuzzy part)
found_files=$(ls "${base_file}".* 2> /dev/null | wc -l) || true
DEBUG "found counterpart files: ${found_files}"
if [ "${found_files}" -eq 0 ] ; then
DEBUG "file ${check_file} has no counterpart"
INFO "removing ${check_file}"
rm "${check_file}" || true # remove check file
else
DEBUG "file ${check_file} has counterpart"
fi
found_files=0
check_file=$(queue_read "${queue_check_file}")
done
# remove empty directories
DEBUG "removing empty directories"
GUI_NOTIFY "removing empty directories"
find "${check_directory}" -type d -empty -delete
}
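# Illustrative call (hypothetical directories, hidden files excluded):
#   fuzzy_counterpart_check "./music-src" "./music-ogg" false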
function ctrl_c() {
# @description do things when the SIGINT is trapped
DEBUG "** Trapped CTRL-C"
INFO "requested termination"
processes_signal ${CONVERTER_PROCESSES_QUEUE} 'SIGINT'
kill -SIGTERM "${GUI_PID}" # stop GUI
wait || true # wait for all child processes to finish
exit 1
}
trap 'ctrl_c' INT
# --- main ----------------------------------------------------------
GUI_PID=0
# logger setup
B_LOG --stdout true
B_LOG --file "${LOG_FILE}" --file-prefix-enable --file-suffix-enable # log in a file
B_LOG --syslog "${SYSLOG_PARAM}" # log to syslog
B_LOG --log-level ${VERBOSITY} # set log level
# check folders
if [ ! -r "${INPUT_DIR}" ]; then # check input directory for read access
FATAL "the input directory cannot be read"
exit 1
fi
if [ ! -w "${OUTPUT_DIR}" ]; then # check output directory for write access
FATAL "the output directory is not writable"
exit 1
fi
# set traps
trap 'error ${LINENO}' ERR # on error, print error
trap finish EXIT # on exit, clean up resources
# set stdio output for GUI
if [ "${USE_GUI}" = true ]; then
B_LOG --stdout false
else
B_LOG --stdout true
fi
# set default GUI values
GUI_TITLE "${APPNAME} v${VERSION}"
GUI_NOTIFY_CLEAR
process_gui& # start the GUI
GUI_PID=$!
# looking for files
NOTICE "finding files and start conversion"
INFO "looking for files with the filetypes: ${FILETYPES[*]:-}"
GUI_TITLE "looking for files to convert"
GUI_NOTIFY_CLEAR
GUI_NOTIFY "this may take a while"
find_files "${INPUT_DIR}" "*" "${FILETYPES[*]:-}" "${FILES_TO_PROCESS_QUEUE}" # find the files needed for processing
GUI_TOTAL_COUNT "$(queue_size "${FILES_TO_PROCESS_QUEUE}")"
GUI_PART_COUNT 0
# converting the files
INFO "starting the conversion process(es)"
GUI_TITLE "starting the conversion process(es)"
GUI_NOTIFY_CLEAR
processes_start 'process_convert' "${JOBS}" "${CONVERTER_PROCESSES_QUEUE}" # start the conversion processes
wait $(queue_look_sl "${CONVERTER_PROCESSES_QUEUE}") || true # wait for converter processes
# copying over files
NOTICE "copying over files"
INFO "copying over the following files: ${COPY_FILES[@]:-}"
copy_files_over "${INPUT_DIR}" "${OUTPUT_DIR}" "${COPY_FILES[@]:-}"
NOTICE "done copying"
# syncing/counterpart checking files
if [ "${COUNTERPART_SYNC}" = true ] ; then
NOTICE "checking for counterpart files"
if [ "${COUNTERPART_HIDDEN}" = true ] ; then
fuzzy_counterpart_check "${INPUT_DIR}" "${OUTPUT_DIR}" true
else
fuzzy_counterpart_check "${INPUT_DIR}" "${OUTPUT_DIR}" false
fi
fi
# stop the program
NOTICE "${APPNAME} is now done"
kill -SIGTERM "${GUI_PID}" # stop GUI
wait || true
# --- done ----------------------------------------------------------
| idelsink/yad2ogg | yad2ogg.sh | Shell | mit | 35,300 |
#!/bin/sh
set -e
set -u
set -o pipefail
if [ -z ${UNLOCALIZED_RESOURCES_FOLDER_PATH+x} ]; then
# If UNLOCALIZED_RESOURCES_FOLDER_PATH is not set, then there's nowhere for us to copy
# resources to, so exit 0 (signalling the script phase was successful).
exit 0
fi
mkdir -p "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
RESOURCES_TO_COPY=${PODS_ROOT}/resources-to-copy-${TARGETNAME}.txt
> "$RESOURCES_TO_COPY"
XCASSET_FILES=()
# This protects against multiple targets copying the same framework dependency at the same time. The solution
# was originally proposed here: https://lists.samba.org/archive/rsync/2008-February/020158.html
RSYNC_PROTECT_TMP_FILES=(--filter "P .*.??????")
case "${TARGETED_DEVICE_FAMILY:-}" in
1,2)
TARGET_DEVICE_ARGS="--target-device ipad --target-device iphone"
;;
1)
TARGET_DEVICE_ARGS="--target-device iphone"
;;
2)
TARGET_DEVICE_ARGS="--target-device ipad"
;;
3)
TARGET_DEVICE_ARGS="--target-device tv"
;;
4)
TARGET_DEVICE_ARGS="--target-device watch"
;;
*)
TARGET_DEVICE_ARGS="--target-device mac"
;;
esac
install_resource()
{
if [[ "$1" = /* ]] ; then
RESOURCE_PATH="$1"
else
RESOURCE_PATH="${PODS_ROOT}/$1"
fi
if [[ ! -e "$RESOURCE_PATH" ]] ; then
cat << EOM
error: Resource "$RESOURCE_PATH" not found. Run 'pod install' to update the copy resources script.
EOM
exit 1
fi
case $RESOURCE_PATH in
*.storyboard)
echo "ibtool --reference-external-strings-file --errors --warnings --notices --minimum-deployment-target ${!DEPLOYMENT_TARGET_SETTING_NAME} --output-format human-readable-text --compile ${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$RESOURCE_PATH\" .storyboard`.storyboardc $RESOURCE_PATH --sdk ${SDKROOT} ${TARGET_DEVICE_ARGS}" || true
ibtool --reference-external-strings-file --errors --warnings --notices --minimum-deployment-target ${!DEPLOYMENT_TARGET_SETTING_NAME} --output-format human-readable-text --compile "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$RESOURCE_PATH\" .storyboard`.storyboardc" "$RESOURCE_PATH" --sdk "${SDKROOT}" ${TARGET_DEVICE_ARGS}
;;
*.xib)
echo "ibtool --reference-external-strings-file --errors --warnings --notices --minimum-deployment-target ${!DEPLOYMENT_TARGET_SETTING_NAME} --output-format human-readable-text --compile ${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$RESOURCE_PATH\" .xib`.nib $RESOURCE_PATH --sdk ${SDKROOT} ${TARGET_DEVICE_ARGS}" || true
ibtool --reference-external-strings-file --errors --warnings --notices --minimum-deployment-target ${!DEPLOYMENT_TARGET_SETTING_NAME} --output-format human-readable-text --compile "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$RESOURCE_PATH\" .xib`.nib" "$RESOURCE_PATH" --sdk "${SDKROOT}" ${TARGET_DEVICE_ARGS}
;;
*.framework)
echo "mkdir -p ${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}" || true
mkdir -p "${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" $RESOURCE_PATH ${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}" || true
rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" "$RESOURCE_PATH" "${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
;;
*.xcdatamodel)
echo "xcrun momc \"$RESOURCE_PATH\" \"${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$RESOURCE_PATH"`.mom\"" || true
xcrun momc "$RESOURCE_PATH" "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$RESOURCE_PATH" .xcdatamodel`.mom"
;;
*.xcdatamodeld)
echo "xcrun momc \"$RESOURCE_PATH\" \"${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$RESOURCE_PATH" .xcdatamodeld`.momd\"" || true
xcrun momc "$RESOURCE_PATH" "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$RESOURCE_PATH" .xcdatamodeld`.momd"
;;
*.xcmappingmodel)
echo "xcrun mapc \"$RESOURCE_PATH\" \"${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$RESOURCE_PATH" .xcmappingmodel`.cdm\"" || true
xcrun mapc "$RESOURCE_PATH" "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$RESOURCE_PATH" .xcmappingmodel`.cdm"
;;
*.xcassets)
ABSOLUTE_XCASSET_FILE="$RESOURCE_PATH"
XCASSET_FILES+=("$ABSOLUTE_XCASSET_FILE")
;;
*)
echo "$RESOURCE_PATH" || true
echo "$RESOURCE_PATH" >> "$RESOURCES_TO_COPY"
;;
esac
}
if [[ "$CONFIGURATION" == "Debug" ]]; then
install_resource "${PODS_ROOT}/../../Skopelos/Classes/Core/AppStateReactor.swift"
install_resource "${PODS_ROOT}/../../Skopelos/Classes/Core/CoreDataStack.swift"
install_resource "${PODS_ROOT}/../../Skopelos/Classes/Core/DALService.swift"
install_resource "${PODS_ROOT}/../../Skopelos/Classes/Extensions/NSManagedObject+Skopelos.swift"
install_resource "${PODS_ROOT}/../../Skopelos/Classes/Extensions/NSObject+Introspection.swift"
install_resource "${PODS_ROOT}/../../Skopelos/Classes/Extensions/String+Bool.swift"
install_resource "${PODS_ROOT}/../../Skopelos/Classes/Protocols/CommandModelProtocol.swift"
install_resource "${PODS_ROOT}/../../Skopelos/Classes/Protocols/CoreDataStackProtocol.swift"
install_resource "${PODS_ROOT}/../../Skopelos/Classes/Protocols/DALProtocol.swift"
install_resource "${PODS_ROOT}/../../Skopelos/Classes/Protocols/QueryModelProtocol.swift"
install_resource "${PODS_ROOT}/../../Skopelos/Classes/Skopelos.swift"
install_resource "${PODS_ROOT}/../../Skopelos/Classes/Core"
install_resource "${PODS_ROOT}/../../Skopelos/Classes/Extensions"
install_resource "${PODS_ROOT}/../../Skopelos/Classes/Protocols"
install_resource "${PODS_ROOT}/../../UnitTests/Assets/DataModel.xcdatamodeld"
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
install_resource "${PODS_ROOT}/../../Skopelos/Classes/Core/AppStateReactor.swift"
install_resource "${PODS_ROOT}/../../Skopelos/Classes/Core/CoreDataStack.swift"
install_resource "${PODS_ROOT}/../../Skopelos/Classes/Core/DALService.swift"
install_resource "${PODS_ROOT}/../../Skopelos/Classes/Extensions/NSManagedObject+Skopelos.swift"
install_resource "${PODS_ROOT}/../../Skopelos/Classes/Extensions/NSObject+Introspection.swift"
install_resource "${PODS_ROOT}/../../Skopelos/Classes/Extensions/String+Bool.swift"
install_resource "${PODS_ROOT}/../../Skopelos/Classes/Protocols/CommandModelProtocol.swift"
install_resource "${PODS_ROOT}/../../Skopelos/Classes/Protocols/CoreDataStackProtocol.swift"
install_resource "${PODS_ROOT}/../../Skopelos/Classes/Protocols/DALProtocol.swift"
install_resource "${PODS_ROOT}/../../Skopelos/Classes/Protocols/QueryModelProtocol.swift"
install_resource "${PODS_ROOT}/../../Skopelos/Classes/Skopelos.swift"
install_resource "${PODS_ROOT}/../../Skopelos/Classes/Core"
install_resource "${PODS_ROOT}/../../Skopelos/Classes/Extensions"
install_resource "${PODS_ROOT}/../../Skopelos/Classes/Protocols"
install_resource "${PODS_ROOT}/../../UnitTests/Assets/DataModel.xcdatamodeld"
fi
mkdir -p "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
rsync -avr --copy-links --no-relative --exclude '*/.svn/*' --files-from="$RESOURCES_TO_COPY" / "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
if [[ "${ACTION}" == "install" ]] && [[ "${SKIP_INSTALL}" == "NO" ]]; then
mkdir -p "${INSTALL_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
rsync -avr --copy-links --no-relative --exclude '*/.svn/*' --files-from="$RESOURCES_TO_COPY" / "${INSTALL_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
fi
rm -f "$RESOURCES_TO_COPY"
if [[ -n "${WRAPPER_EXTENSION}" ]] && [ "`xcrun --find actool`" ] && [ -n "${XCASSET_FILES:-}" ]
then
# Find all other xcassets (this unfortunately includes those of path pods and other targets).
OTHER_XCASSETS=$(find "$PWD" -iname "*.xcassets" -type d)
while read line; do
if [[ $line != "${PODS_ROOT}*" ]]; then
XCASSET_FILES+=("$line")
fi
done <<<"$OTHER_XCASSETS"
if [ -z ${ASSETCATALOG_COMPILER_APPICON_NAME+x} ]; then
printf "%s\0" "${XCASSET_FILES[@]}" | xargs -0 xcrun actool --output-format human-readable-text --notices --warnings --platform "${PLATFORM_NAME}" --minimum-deployment-target "${!DEPLOYMENT_TARGET_SETTING_NAME}" ${TARGET_DEVICE_ARGS} --compress-pngs --compile "${BUILT_PRODUCTS_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
else
printf "%s\0" "${XCASSET_FILES[@]}" | xargs -0 xcrun actool --output-format human-readable-text --notices --warnings --platform "${PLATFORM_NAME}" --minimum-deployment-target "${!DEPLOYMENT_TARGET_SETTING_NAME}" ${TARGET_DEVICE_ARGS} --compress-pngs --compile "${BUILT_PRODUCTS_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}" --app-icon "${ASSETCATALOG_COMPILER_APPICON_NAME}" --output-partial-info-plist "${TARGET_TEMP_DIR}/assetcatalog_generated_info_cocoapods.plist"
fi
fi
| albertodebortoli/Skopelos | Example/Pods/Target Support Files/Skopelos/Skopelos-Unit-Tests-resources.sh | Shell | mit | 8,913 |
#!/bin/bash
rm -f combined/*
g++ new_temp_test.cpp ../../src/functions.cpp ../../src/globals.cpp ../../src/classes.cpp -lgsl -lgslcblas -o new_temp_test.o
| homeslike/OpticalTweezer | test/new_temp_test/compile_new_temp_test.sh | Shell | mit | 154 |
/Applications/NaturalDocs/NaturalDocs -r -i hardware -o HTML docs -p docProject
| snhack/LogoBot | hardware/document.sh | Shell | mit | 80 |
#!/usr/bin/env bash
set -e
export DEBIAN_FRONTEND=noninteractive
export DOKKU_REPO=${DOKKU_REPO:-"https://github.com/h4ck4life/dokku.git"}
apt-get update
apt-get install -y git make curl
cd ~ && test -d dokku || git clone $DOKKU_REPO
cd dokku && test $DOKKU_BRANCH && git checkout origin/$DOKKU_BRANCH || true
make all
echo
echo "Be sure to upload a public key for your user:"
echo " cat ~/.ssh/id_rsa.pub | ssh root@$HOSTNAME \"gitreceive upload-key progrium\""
| h4ck4life/dokku | bootstrap.sh | Shell | mit | 447 |