code
stringlengths 2
1.05M
| repo_name
stringlengths 5
110
| path
stringlengths 3
922
| language
stringclasses 1
value | license
stringclasses 15
values | size
int64 2
1.05M
|
---|---|---|---|---|---|
#!/bin/bash
# Find Java
if [ "$JAVA_HOME" = "" ] ; then
JAVA="java -server"
else
JAVA="$JAVA_HOME/bin/java -server"
fi
PREFIX=$( echo `dirname $0`/.. )
LIB_DIR=$PREFIX/lib
# Set Java options
if [ "$JAVA_OPTIONS" = "" ] ; then
JAVA_OPTIONS=" \
-XX:+UseConcMarkSweepGC \
-d64"
fi
export BASE_DIR=$*
# Launch the application
cd $PREFIX
export PREFIX
export CLASSPATH=$( echo $LIB_DIR/*.jar . | sed 's/ /:/g')
exec $JAVA $JAVA_OPTIONS com.cloudata.keyvalue.KeyValueServer $*
|
justinsb/cloudata
|
cloudata-structured/src/main/bin/structured.sh
|
Shell
|
apache-2.0
| 496 |
#!/bin/bash
set -euo pipefail
if [[ -n "{{ openshift_gcp_ssh_private_key }}" ]]; then
# Create SSH key for GCE
if [ ! -f "{{ openshift_gcp_ssh_private_key }}" ]; then
ssh-keygen -t rsa -f "{{ openshift_gcp_ssh_private_key }}" -C gce-provision-cloud-user -N ''
ssh-add "{{ openshift_gcp_ssh_private_key }}" || true
fi
# Check if the public key is in the project metadata, and if not, add it there
if [ -f "{{ openshift_gcp_ssh_private_key }}.pub" ]; then
pub_file="{{ openshift_gcp_ssh_private_key }}.pub"
pub_key=$(cut -d ' ' -f 2 < "{{ openshift_gcp_ssh_private_key }}.pub")
else
keyfile="${HOME}/.ssh/google_compute_engine"
pub_file="${keyfile}.pub"
mkdir -p "${HOME}/.ssh"
cp "{{ openshift_gcp_ssh_private_key }}" "${keyfile}"
chmod 0600 "${keyfile}"
ssh-keygen -y -f "${keyfile}" > "${pub_file}"
pub_key=$(cut -d ' ' -f 2 < "${pub_file}")
fi
key_tmp_file='/tmp/ocp-gce-keys'
if ! gcloud --project "{{ openshift_gcp_project }}" compute project-info describe | grep -q "$pub_key"; then
if gcloud --project "{{ openshift_gcp_project }}" compute project-info describe | grep -q ssh-rsa; then
gcloud --project "{{ openshift_gcp_project }}" compute project-info describe | grep ssh-rsa | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//' -e 's/value: //' > "$key_tmp_file"
fi
echo -n 'cloud-user:' >> "$key_tmp_file"
cat "${pub_file}" >> "$key_tmp_file"
gcloud --project "{{ openshift_gcp_project }}" compute project-info add-metadata --metadata-from-file "sshKeys=${key_tmp_file}"
rm -f "$key_tmp_file"
fi
fi
metadata=""
if [[ -n "{{ openshift_gcp_startup_script_file }}" ]]; then
if [[ ! -f "{{ openshift_gcp_startup_script_file }}" ]]; then
echo "Startup script file missing at {{ openshift_gcp_startup_script_file }} from=$(pwd)"
exit 1
fi
metadata+="--metadata-from-file=startup-script={{ openshift_gcp_startup_script_file }}"
fi
if [[ -n "{{ openshift_gcp_user_data_file }}" ]]; then
if [[ ! -f "{{ openshift_gcp_user_data_file }}" ]]; then
echo "User data file missing at {{ openshift_gcp_user_data_file }}"
exit 1
fi
if [[ -n "${metadata}" ]]; then
metadata+=","
else
metadata="--metadata-from-file="
fi
metadata+="user-data={{ openshift_gcp_user_data_file }}"
fi
# Select image or image family
image="{{ openshift_gcp_image }}"
if ! gcloud --project "{{ openshift_gcp_project }}" compute images describe "${image}" &>/dev/null; then
if ! gcloud --project "{{ openshift_gcp_project }}" compute images describe-from-family "${image}" &>/dev/null; then
echo "No compute image or image-family found, create an image named '{{ openshift_gcp_image }}' to continue'"
exit 1
fi
image="family/${image}"
fi
### PROVISION THE INFRASTRUCTURE ###
dns_zone="{{ dns_managed_zone | default(openshift_gcp_prefix + 'managed-zone') }}"
# Check the DNS managed zone in Google Cloud DNS, create it if it doesn't exist and exit after printing NS servers
if ! gcloud --project "{{ openshift_gcp_project }}" dns managed-zones describe "${dns_zone}" &>/dev/null; then
echo "DNS zone '${dns_zone}' doesn't exist. Must be configured prior to running this script"
exit 1
fi
# Create network
if ! gcloud --project "{{ openshift_gcp_project }}" compute networks describe "{{ openshift_gcp_network_name }}" &>/dev/null; then
gcloud --project "{{ openshift_gcp_project }}" compute networks create "{{ openshift_gcp_network_name }}" --mode "auto"
else
echo "Network '{{ openshift_gcp_network_name }}' already exists"
fi
# Firewall rules in a form:
# ['name']='parameters for "gcloud compute firewall-rules create"'
# For all possible parameters see: gcloud compute firewall-rules create --help
range=""
if [[ -n "{{ openshift_node_port_range }}" ]]; then
range=",tcp:{{ openshift_node_port_range }},udp:{{ openshift_node_port_range }}"
fi
declare -A FW_RULES=(
['icmp']='--allow icmp'
['ssh-external']='--allow tcp:22'
['ssh-internal']='--allow tcp:22 --source-tags bastion'
['master-internal']="--allow tcp:2224,tcp:2379,tcp:2380,tcp:4001,udp:4789,udp:5404,udp:5405,tcp:8053,udp:8053,tcp:8444,tcp:10250,tcp:10255,udp:10255,tcp:24224,udp:24224 --source-tags ocp --target-tags ocp-master"
['master-external']="--allow tcp:80,tcp:443,tcp:1936,tcp:8080,tcp:8443${range} --target-tags ocp-master"
['node-internal']="--allow udp:4789,tcp:10250,tcp:10255,udp:10255 --source-tags ocp --target-tags ocp-node,ocp-infra-node"
['infra-node-internal']="--allow tcp:5000 --source-tags ocp --target-tags ocp-infra-node"
['infra-node-external']="--allow tcp:80,tcp:443,tcp:1936${range} --target-tags ocp-infra-node"
)
for rule in "${!FW_RULES[@]}"; do
( if ! gcloud --project "{{ openshift_gcp_project }}" compute firewall-rules describe "{{ openshift_gcp_prefix }}$rule" &>/dev/null; then
gcloud --project "{{ openshift_gcp_project }}" compute firewall-rules create "{{ openshift_gcp_prefix }}$rule" --network "{{ openshift_gcp_network_name }}" ${FW_RULES[$rule]}
else
echo "Firewall rule '{{ openshift_gcp_prefix }}${rule}' already exists"
fi ) &
done
# Master IP
( if ! gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}master-ssl-lb-ip" --global &>/dev/null; then
gcloud --project "{{ openshift_gcp_project }}" compute addresses create "{{ openshift_gcp_prefix }}master-ssl-lb-ip" --global
else
echo "IP '{{ openshift_gcp_prefix }}master-ssl-lb-ip' already exists"
fi ) &
# Internal master IP
( if ! gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}master-network-lb-ip" --region "{{ openshift_gcp_region }}" &>/dev/null; then
gcloud --project "{{ openshift_gcp_project }}" compute addresses create "{{ openshift_gcp_prefix }}master-network-lb-ip" --region "{{ openshift_gcp_region }}"
else
echo "IP '{{ openshift_gcp_prefix }}master-network-lb-ip' already exists"
fi ) &
# Router IP
( if ! gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}router-network-lb-ip" --region "{{ openshift_gcp_region }}" &>/dev/null; then
gcloud --project "{{ openshift_gcp_project }}" compute addresses create "{{ openshift_gcp_prefix }}router-network-lb-ip" --region "{{ openshift_gcp_region }}"
else
echo "IP '{{ openshift_gcp_prefix }}router-network-lb-ip' already exists"
fi ) &
{% for node_group in openshift_gcp_node_group_config %}
# configure {{ node_group.name }}
(
if ! gcloud --project "{{ openshift_gcp_project }}" compute instance-templates describe "{{ openshift_gcp_prefix }}instance-template-{{ node_group.name }}" &>/dev/null; then
gcloud --project "{{ openshift_gcp_project }}" compute instance-templates create "{{ openshift_gcp_prefix }}instance-template-{{ node_group.name }}" \
--machine-type "{{ node_group.machine_type }}" --network "{{ openshift_gcp_network_name }}" \
--tags "{{ openshift_gcp_prefix }}ocp,ocp,{{ 'ocp-bootstrap,' if (node_group.bootstrap | default(False)) else '' }}{{ node_group.tags }}" \
--boot-disk-size "{{ node_group.boot_disk_size }}" --boot-disk-type "pd-ssd" \
--scopes "logging-write,monitoring-write,useraccounts-ro,service-control,service-management,storage-ro,compute-rw" \
--image "{{ node_group.image | default('${image}') }}" ${metadata} \
--metadata "bootstrap={{ node_group.bootstrap | default(False) | bool | to_json }},cluster-id={{ openshift_gcp_prefix + openshift_gcp_clusterid }},node-group={{ node_group.name }}"
else
echo "Instance template '{{ openshift_gcp_prefix }}instance-template-{{ node_group.name }}' already exists"
fi
# Create instance group
if ! gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed describe "{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}" --zone "{{ openshift_gcp_zone }}" &>/dev/null; then
gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed create "{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}" \
--zone "{{ openshift_gcp_zone }}" --template "{{ openshift_gcp_prefix }}instance-template-{{ node_group.name }}" --size "{{ node_group.scale }}"
else
echo "Instance group '{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}' already exists"
fi
) &
{% endfor %}
for i in `jobs -p`; do wait $i; done
# Configure the master external LB rules
(
# Master health check
if ! gcloud --project "{{ openshift_gcp_project }}" compute health-checks describe "{{ openshift_gcp_prefix }}master-ssl-lb-health-check" &>/dev/null; then
gcloud --project "{{ openshift_gcp_project }}" compute health-checks create https "{{ openshift_gcp_prefix }}master-ssl-lb-health-check" --port "{{ internal_console_port }}" --request-path "/healthz"
else
echo "Health check '{{ openshift_gcp_prefix }}master-ssl-lb-health-check' already exists"
fi
gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed set-named-ports "{{ openshift_gcp_prefix }}ig-m" \
--zone "{{ openshift_gcp_zone }}" --named-ports "{{ openshift_gcp_prefix }}port-name-master:{{ internal_console_port }}"
# Master backend service
if ! gcloud --project "{{ openshift_gcp_project }}" compute backend-services describe "{{ openshift_gcp_prefix }}master-ssl-lb-backend" --global &>/dev/null; then
gcloud --project "{{ openshift_gcp_project }}" compute backend-services create "{{ openshift_gcp_prefix }}master-ssl-lb-backend" --health-checks "{{ openshift_gcp_prefix }}master-ssl-lb-health-check" --port-name "{{ openshift_gcp_prefix }}port-name-master" --protocol "TCP" --global --timeout="{{ openshift_gcp_master_lb_timeout }}"
gcloud --project "{{ openshift_gcp_project }}" compute backend-services add-backend "{{ openshift_gcp_prefix }}master-ssl-lb-backend" --instance-group "{{ openshift_gcp_prefix }}ig-m" --global --instance-group-zone "{{ openshift_gcp_zone }}"
else
echo "Backend service '{{ openshift_gcp_prefix }}master-ssl-lb-backend' already exists"
fi
# Master tcp proxy target
if ! gcloud --project "{{ openshift_gcp_project }}" compute target-tcp-proxies describe "{{ openshift_gcp_prefix }}master-ssl-lb-target" &>/dev/null; then
gcloud --project "{{ openshift_gcp_project }}" compute target-tcp-proxies create "{{ openshift_gcp_prefix }}master-ssl-lb-target" --backend-service "{{ openshift_gcp_prefix }}master-ssl-lb-backend"
else
echo "Proxy target '{{ openshift_gcp_prefix }}master-ssl-lb-target' already exists"
fi
# Master forwarding rule
if ! gcloud --project "{{ openshift_gcp_project }}" compute forwarding-rules describe "{{ openshift_gcp_prefix }}master-ssl-lb-rule" --global &>/dev/null; then
IP=$(gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}master-ssl-lb-ip" --global --format='value(address)')
gcloud --project "{{ openshift_gcp_project }}" compute forwarding-rules create "{{ openshift_gcp_prefix }}master-ssl-lb-rule" --address "$IP" --global --ports "{{ console_port }}" --target-tcp-proxy "{{ openshift_gcp_prefix }}master-ssl-lb-target"
else
echo "Forwarding rule '{{ openshift_gcp_prefix }}master-ssl-lb-rule' already exists"
fi
) &
# Configure the master internal LB rules
(
# Internal master health check
if ! gcloud --project "{{ openshift_gcp_project }}" compute http-health-checks describe "{{ openshift_gcp_prefix }}master-network-lb-health-check" &>/dev/null; then
gcloud --project "{{ openshift_gcp_project }}" compute http-health-checks create "{{ openshift_gcp_prefix }}master-network-lb-health-check" --port "8080" --request-path "/healthz"
else
echo "Health check '{{ openshift_gcp_prefix }}master-network-lb-health-check' already exists"
fi
# Internal master target pool
if ! gcloud --project "{{ openshift_gcp_project }}" compute target-pools describe "{{ openshift_gcp_prefix }}master-network-lb-pool" --region "{{ openshift_gcp_region }}" &>/dev/null; then
gcloud --project "{{ openshift_gcp_project }}" compute target-pools create "{{ openshift_gcp_prefix }}master-network-lb-pool" --http-health-check "{{ openshift_gcp_prefix }}master-network-lb-health-check" --region "{{ openshift_gcp_region }}"
else
echo "Target pool '{{ openshift_gcp_prefix }}master-network-lb-pool' already exists"
fi
# Internal master forwarding rule
if ! gcloud --project "{{ openshift_gcp_project }}" compute forwarding-rules describe "{{ openshift_gcp_prefix }}master-network-lb-rule" --region "{{ openshift_gcp_region }}" &>/dev/null; then
IP=$(gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}master-network-lb-ip" --region "{{ openshift_gcp_region }}" --format='value(address)')
gcloud --project "{{ openshift_gcp_project }}" compute forwarding-rules create "{{ openshift_gcp_prefix }}master-network-lb-rule" --address "$IP" --region "{{ openshift_gcp_region }}" --target-pool "{{ openshift_gcp_prefix }}master-network-lb-pool"
else
echo "Forwarding rule '{{ openshift_gcp_prefix }}master-network-lb-rule' already exists"
fi
) &
# Configure the infra node rules
(
# Router health check
if ! gcloud --project "{{ openshift_gcp_project }}" compute http-health-checks describe "{{ openshift_gcp_prefix }}router-network-lb-health-check" &>/dev/null; then
gcloud --project "{{ openshift_gcp_project }}" compute http-health-checks create "{{ openshift_gcp_prefix }}router-network-lb-health-check" --port "1936" --request-path "/healthz"
else
echo "Health check '{{ openshift_gcp_prefix }}router-network-lb-health-check' already exists"
fi
# Router target pool
if ! gcloud --project "{{ openshift_gcp_project }}" compute target-pools describe "{{ openshift_gcp_prefix }}router-network-lb-pool" --region "{{ openshift_gcp_region }}" &>/dev/null; then
gcloud --project "{{ openshift_gcp_project }}" compute target-pools create "{{ openshift_gcp_prefix }}router-network-lb-pool" --http-health-check "{{ openshift_gcp_prefix }}router-network-lb-health-check" --region "{{ openshift_gcp_region }}"
else
echo "Target pool '{{ openshift_gcp_prefix }}router-network-lb-pool' already exists"
fi
# Router forwarding rule
if ! gcloud --project "{{ openshift_gcp_project }}" compute forwarding-rules describe "{{ openshift_gcp_prefix }}router-network-lb-rule" --region "{{ openshift_gcp_region }}" &>/dev/null; then
IP=$(gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}router-network-lb-ip" --region "{{ openshift_gcp_region }}" --format='value(address)')
gcloud --project "{{ openshift_gcp_project }}" compute forwarding-rules create "{{ openshift_gcp_prefix }}router-network-lb-rule" --address "$IP" --region "{{ openshift_gcp_region }}" --target-pool "{{ openshift_gcp_prefix }}router-network-lb-pool"
else
echo "Forwarding rule '{{ openshift_gcp_prefix }}router-network-lb-rule' already exists"
fi
) &
for i in `jobs -p`; do wait $i; done
# set the target pools
(
if [[ "ig-m" == "{{ openshift_gcp_infra_network_instance_group }}" ]]; then
gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed set-target-pools "{{ openshift_gcp_prefix }}ig-m" --target-pools "{{ openshift_gcp_prefix }}master-network-lb-pool,{{ openshift_gcp_prefix }}router-network-lb-pool" --zone "{{ openshift_gcp_zone }}"
else
gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed set-target-pools "{{ openshift_gcp_prefix }}ig-m" --target-pools "{{ openshift_gcp_prefix }}master-network-lb-pool" --zone "{{ openshift_gcp_zone }}"
gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed set-target-pools "{{ openshift_gcp_prefix }}{{ openshift_gcp_infra_network_instance_group }}" --target-pools "{{ openshift_gcp_prefix }}router-network-lb-pool" --zone "{{ openshift_gcp_zone }}"
fi
) &
# configure DNS
(
# Retry DNS changes until they succeed since this may be a shared resource
while true; do
dns="${TMPDIR:-/tmp}/dns.yaml"
rm -f $dns
# DNS record for master lb
if ! gcloud --project "{{ openshift_gcp_project }}" dns record-sets list -z "${dns_zone}" --name "{{ openshift_master_cluster_public_hostname }}" 2>/dev/null | grep -q "{{ openshift_master_cluster_public_hostname }}"; then
IP=$(gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}master-ssl-lb-ip" --global --format='value(address)')
if [[ ! -f $dns ]]; then
gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns start -z "${dns_zone}"
fi
gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns add -z "${dns_zone}" --ttl 3600 --name "{{ openshift_master_cluster_public_hostname }}." --type A "$IP"
else
echo "DNS record for '{{ openshift_master_cluster_public_hostname }}' already exists"
fi
# DNS record for internal master lb
if ! gcloud --project "{{ openshift_gcp_project }}" dns record-sets list -z "${dns_zone}" --name "{{ openshift_master_cluster_hostname }}" 2>/dev/null | grep -q "{{ openshift_master_cluster_hostname }}"; then
IP=$(gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}master-network-lb-ip" --region "{{ openshift_gcp_region }}" --format='value(address)')
if [[ ! -f $dns ]]; then
gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns start -z "${dns_zone}"
fi
gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns add -z "${dns_zone}" --ttl 3600 --name "{{ openshift_master_cluster_hostname }}." --type A "$IP"
else
echo "DNS record for '{{ openshift_master_cluster_hostname }}' already exists"
fi
# DNS record for router lb
if ! gcloud --project "{{ openshift_gcp_project }}" dns record-sets list -z "${dns_zone}" --name "{{ wildcard_zone }}" 2>/dev/null | grep -q "{{ wildcard_zone }}"; then
IP=$(gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}router-network-lb-ip" --region "{{ openshift_gcp_region }}" --format='value(address)')
if [[ ! -f $dns ]]; then
gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns start -z "${dns_zone}"
fi
gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns add -z "${dns_zone}" --ttl 3600 --name "{{ wildcard_zone }}." --type A "$IP"
gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns add -z "${dns_zone}" --ttl 3600 --name "*.{{ wildcard_zone }}." --type CNAME "{{ wildcard_zone }}."
else
echo "DNS record for '{{ wildcard_zone }}' already exists"
fi
# Commit all DNS changes, retrying if preconditions are not met
if [[ -f $dns ]]; then
if ! out="$( gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns execute -z "${dns_zone}" 2>&1 )"; then
rc=$?
if [[ "${out}" == *"HTTPError 412: Precondition not met"* ]]; then
continue
fi
exit $rc
fi
fi
break
done
) &
# Create bucket for registry
(
if ! gsutil ls -p "{{ openshift_gcp_project }}" "gs://{{ openshift_gcp_registry_bucket_name }}" &>/dev/null; then
gsutil mb -p "{{ openshift_gcp_project }}" -l "{{ openshift_gcp_region }}" "gs://{{ openshift_gcp_registry_bucket_name }}"
else
echo "Bucket '{{ openshift_gcp_registry_bucket_name }}' already exists"
fi
) &
# wait until all node groups are stable
{% for node_group in openshift_gcp_node_group_config %}
{% if node_group.wait_for_stable | default(False) or not (node_group.bootstrap | default(False)) %}
# wait for stable {{ node_group.name }}
( gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed wait-until-stable "{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}" --zone "{{ openshift_gcp_zone }}" --timeout=600 ) &
{% else %}
# not waiting for {{ node_group.name }} due to bootstrapping
{% endif %}
{% endfor %}
for i in `jobs -p`; do wait $i; done
|
akubicharm/openshift-ansible
|
roles/openshift_gcp/templates/provision.j2.sh
|
Shell
|
apache-2.0
| 20,625 |
#!/usr/bin/env bash
if [ $# -le 3 ]; then
echo "test01_logimage_reuse.sh <avatarnode0Local> <avatarnode0NFS> <avatarnode1Local> <avatarnode1NFS>"
exit 1
fi
echo
HOSTLIST="${HADOOP_HOME}/conf/masters"
mun=0
for IP in `cat "$HOSTLIST"|sed "s/#.*$//;/^$/d"`; do
if [ $mun == 0 ] ;
then
avatarnode0IP=$IP
fi
if [ $mun == 1 ] ;
then
avatarnode1IP=$IP
fi
mun=`expr $mun + 1`
done
#avatarnode0IP=$1
avatarnode0Local=$1
avatarnode0NFS=$2
#avatarnode1IP=$4
avatarnode1Local=$3
avatarnode1NFS=$4
echo avatarnode0IP = $avatarnode0IP
echo avatarnode0Local = $avatarnode0Local
echo avatarnode0NFS = $avatarnode0NFS
echo avatarnode1IP = $avatarnode1IP
echo avatarnode1Local = $avatarnode1Local
echo avatarnode1NFS = $avatarnode1NFS
echo HADOOP_HOME = $HADOOP_HOME
echo
sleep 3
echo \******************************************
echo \* 1. delete editlog and fsimage \*
echo \******************************************
echo
sleep 3
ssh $avatarnode0IP rm -r -f $avatarnode0Local/*
ssh $avatarnode1IP rm -r -f $avatarnode1Local/*
ssh $avatarnode0IP rm -r -f $avatarnode0NFS/*
ssh $avatarnode1IP rm -r -f $avatarnode1NFS/*
#ssh $avatarnode0IP rm -r -f /home/wl826214/hadoop/local/*
#ssh $avatarnode1IP rm -r -f /home/wl826214/hadoop/local/*
#rm -r -f /home/wl826214/sharedir/share0/*
#rm -r -f /home/wl826214/sharedir/share1/*
echo
echo \******************************************
echo \* 2. format editlog and fsimage \*
echo \******************************************
echo
sleep 3
#ssh $avatarnode0IP $HADOOP_HOME/bin/hadoop org.apache.hadoop.hdfs.server.namenode.AvatarNode -zero -format
ssh $avatarnode0IP $HADOOP_HOME/bin/hadoop org.apache.hadoop.hdfs.server.namenode.AvatarNode -zero -format
sleep 3
#ssh $avatarnode1IP $HADOOP_HOME/bin/hadoop org.apache.hadoop.hdfs.server.namenode.AvatarNode -one -format
ssh $avatarnode1IP $HADOOP_HOME/bin/hadoop org.apache.hadoop.hdfs.server.namenode.AvatarNode -one -format
sleep 3
echo
echo \******************************************************************
echo \* 3. start avatarnode0 as primary and avatarnode1 as standby\*
echo \******************************************************************
echo
sleep 3
$HADOOP_HOME/bin/a-start-avatar-0p.sh
sleep 3
$HADOOP_HOME/bin/a-start-avatar-1s.sh
echo
echo \******************************************
echo \* 4. mkdir on avatarnode0 \*
echo \******************************************
echo
sleep 3
ssh $avatarnode0IP $HADOOP_HOME/bin/hadoop dfs -ls
sleep 3
ssh $avatarnode0IP $HADOOP_HOME/bin/hadoop dfs -mkdir 000
sleep 3
ssh $avatarnode0IP $HADOOP_HOME/bin/hadoop dfs -ls
sleep 3
echo
echo \******************************************
echo \* 5. stop avatarnode0 \*
echo \******************************************
echo
sleep 3
$HADOOP_HOME/bin/a-stop-avatar.sh -zero
sleep 3
echo
echo \**************************************************
echo \* 6. set avatarnode1 from standby to primary*
echo \**************************************************
echo
sleep 3
$HADOOP_HOME/bin/a-avatar-s-set-p.sh
sleep 3
echo
echo \******************************************
echo \* 7. mkdir on avatarnode1 \*
echo \******************************************
echo
sleep 3
ssh $avatarnode1IP $HADOOP_HOME/bin/hadoop dfs -ls
sleep 3
ssh $avatarnode1IP $HADOOP_HOME/bin/hadoop dfs -mkdir 111
sleep 3
ssh $avatarnode1IP $HADOOP_HOME/bin/hadoop dfs -ls
sleep 3
echo
echo \******************************************
echo \* 8. stop avatarnode1 \*
echo \******************************************
echo
sleep 3
$HADOOP_HOME/bin/a-stop-avatar.sh -one
sleep 3
echo
echo \******************************************
echo \* 9. copy editlog and image from \*
echo \* avatarnode1 to avatarnode0 \*
echo \******************************************
echo
sleep 3
ssh $avatarnode1IP scp -r $avatarnode1Local/* $avatarnode0IP:$avatarnode0Local/
sleep 3
ssh $avatarnode1IP scp -r $avatarnode1Local/* $avatarnode0IP:$avatarnode0NFS/
sleep 3
echo
echo
echo \******************************************
echo \* 10. restart avatarnode0 as primary\*
echo \******************************************
echo
sleep 3
ssh $avatarnode0IP $HADOOP_HOME/bin/hadoop org.apache.hadoop.hdfs.server.namenode.AvatarNode -zero
sleep 3
echo
echo \******************************************
echo \* 11. restart avatarnode1 as standby\*
echo \******************************************
echo
sleep 3
$HADOOP_HOME/bin/a-start-avatar-1s.sh
sleep 3
echo
echo \******************************************
echo \* 12. ls on avatarnode1 \*
echo \******************************************
echo
sleep 3
ssh $avatarnode1IP $HADOOP_HOME/bin/hadoop dfs -ls
sleep 3
echo
echo \******************************************
echo \* 13. stop avatarnode1 \*
echo \******************************************
echo
sleep 3
$HADOOP_HOME/bin/a-stop-avatar.sh -one
echo
echo \******************************************
echo \* there will be a error \*
echo \******************************************
echo
|
submergerock/avatar-hadoop
|
build/hadoop-0.20.1-dev/bin/test01_logimage_reuse.sh
|
Shell
|
apache-2.0
| 5,119 |
#!/bin/bash
##########################################################################
#Aqueduct - Compliance Remediation Content
#Copyright (C) 2011,2012
# Vincent C. Passaro ([email protected])
# Shannon Mitchell ([email protected])
#
#This program is free software; you can redistribute it and/or
#modify it under the terms of the GNU General Public License
#as published by the Free Software Foundation; either version 2
#of the License, or (at your option) any later version.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 51 Franklin Street, Fifth Floor,
#Boston, MA 02110-1301, USA.
##########################################################################
###################### Fotis Networks LLC ###############################
# By Vincent C. Passaro #
# Fotis Networks LLC #
# Vincent[.]Passaro[@]fotisnetworks[.]com #
# www.fotisnetworks.com #
###################### Fotis Networks LLC ###############################
#
# _____________________________________________________________________
# | Version | Change Information | Author | Date |
# |__________|_______________________|____________________|____________|
# | 1.0 | Initial Script | Vincent C. Passaro | 1-Aug-2012 |
# | | Creation | | |
# |__________|_______________________|____________________|____________|
#
#######################DISA INFORMATION##################################
# Group ID (Vulid): V-1029
# Group Title: GEN006160
# Rule ID: SV-37879r1_rule
# Severity: medium
# Rule Version (STIG-ID): GEN006160
# Rule Title: The /etc/smbpasswd file must be owned by root.
#
# Vulnerability Discussion: If the "smbpasswd" file is not owned by root,
# it may be maliciously accessed or modified, potentially resulting in the
# compromise of Samba accounts.
#
# Responsibility: System Administrator
# IAControls: ECLP-1
#
# Check Content:
#
# Check the ownership of the "smbpasswd" file.
# ls -l /etc/samba/passdb.tdb /etc/samba/secrets.tdb
# If the "smbpasswd" file is not owned by root, this is a finding.
#
# Fix Text:
#
# Use the chown command to configure the files maintained by smbpasswd.
# For instance:
# chown root /etc/samba/passdb.tdb /etc/samba/secrets.tdb
#######################DISA INFORMATION##################################
# Global Variables
PDI=GEN006160
# Start-Lockdown
|
quark-pat/CLIP
|
packages/aqueduct/aqueduct/compliance/Bash/STIG/rhel-5/dev/GEN006160.sh
|
Shell
|
apache-2.0
| 3,085 |
#!/bin/bash
# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
#
set -e
usage() {
echo "run_ctverif.sh install_dir"
exit 1
}
if [ "$#" -ne "1" ]; then
usage
fi
INSTALL_DIR=$1
export CTVERIF_DIR="${1}/verifying-constant-time"
SMACK_DIR="${1}/smack"
#Put the dependencies are on the path
# Disabling ShellCheck using https://github.com/koalaman/shellcheck/wiki/Directive
# Turn of Warning in one line as https://github.com/koalaman/shellcheck/wiki/SC1090
# shellcheck disable=SC1090
source "${INSTALL_DIR}/smack.environment"
export PATH="${SMACK_DIR}/bin:${SMACK_DIR}/build:${PATH}"
#Test that they are really there
which smack || echo "can't find smack"
which boogie || echo "can't find z3"
which llvm2bpl || echo "can't find llvm2bpl"
#copy the current version of the file to the test
cd "${BASE_S2N_DIR}/tests/ctverif"
cp "${BASE_S2N_DIR}/utils/s2n_safety.c" .
make clean
#run the test. We expect both to pass, and none to fail
FAILED=0
EXPECTED_PASS=2
EXPECTED_FAIL=0
make 2>&1 | ./count_success.pl $EXPECTED_PASS $EXPECTED_FAIL || FAILED=1
if [ $FAILED == 1 ];
then
printf "\\033[31;1mFAILED ctverif\\033[0m\\n"
exit -1
else
printf "\\033[32;1mPASSED ctverif\\033[0m\\n"
fi
|
jldodds/s2n
|
.travis/run_ctverif.sh
|
Shell
|
apache-2.0
| 1,712 |
#!/bin/bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
frakti::test::e2e() {
frakti::log::progress "build e2e test binary\n"
frakti::golang::build_binaries "k8s.io/frakti/test/e2e/e2e.test"
# Find the ginkgo binary build.
local ginkgo=$(frakti::util::find-binary "ginkgo")
local e2e_test=$(frakti::util::find-binary "e2e.test")
frakti::log::progress "run frakti e2e test case\n"
export PATH=$(dirname "${e2e_test}"):"${PATH}"
sudo "${ginkgo}" "${e2e_test}"
}
frakti::test::find_dirs() {
(
cd ${FRAKTI_ROOT}
find -L . -not \( \
\( \
-path './out/*' \
-o -path './test/e2e/*' \
-o -path './vendor/*' \
\) -prune \
\) -name '*_test.go' -print0 | xargs -0n1 dirname | sed 's|^\./||' | sort -u
)
}
|
resouer/frakti
|
hack/lib/test.sh
|
Shell
|
apache-2.0
| 1,321 |
#!/bin/bash -x
#
# Generated - do not edit!
#
# Macros
TOP=`pwd`
CND_PLATFORM=MinGW-Windows
CND_CONF=Release
CND_DISTDIR=dist
CND_BUILDDIR=build
CND_DLIB_EXT=dll
NBTMPDIR=${CND_BUILDDIR}/${CND_CONF}/${CND_PLATFORM}/tmp-packaging
TMPDIRNAME=tmp-packaging
OUTPUT_PATH=${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/libenet-1.3.11.a
OUTPUT_BASENAME=libenet-1.3.11.a
PACKAGE_TOP_DIR=enet-1.3.11/
# Functions
function checkReturnCode
{
rc=$?
if [ $rc != 0 ]
then
exit $rc
fi
}
function makeDirectory
# $1 directory path
# $2 permission (optional)
{
mkdir -p "$1"
checkReturnCode
if [ "$2" != "" ]
then
chmod $2 "$1"
checkReturnCode
fi
}
function copyFileToTmpDir
# $1 from-file path
# $2 to-file path
# $3 permission
{
cp "$1" "$2"
checkReturnCode
if [ "$3" != "" ]
then
chmod $3 "$2"
checkReturnCode
fi
}
# Setup
cd "${TOP}"
mkdir -p ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package
rm -rf ${NBTMPDIR}
mkdir -p ${NBTMPDIR}
# Copy files and create directories and links
cd "${TOP}"
makeDirectory "${NBTMPDIR}/enet-1.3.11/lib"
copyFileToTmpDir "${OUTPUT_PATH}" "${NBTMPDIR}/${PACKAGE_TOP_DIR}lib/${OUTPUT_BASENAME}" 0644
# Generate tar file
cd "${TOP}"
rm -f ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package/enet-1.3.11.tar
cd ${NBTMPDIR}
tar -vcf ../../../../${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package/enet-1.3.11.tar *
checkReturnCode
# Cleanup
cd "${TOP}"
rm -rf ${NBTMPDIR}
|
chazu/ASCIIWar
|
dep/pyenet/enet/nbproject/Package-Release.bash
|
Shell
|
bsd-3-clause
| 1,480 |
#!/usr/bin/env sh
set -e
echo ' Downloading selenium'
docker pull selenium/standalone-firefox:2.53.1
echo ' Running selenium'
docker run -d -p 4444:4444 --network=host selenium/standalone-firefox:2.53.1
|
nmacd85/drupal-nicoledawn
|
vendor/behat/mink-selenium2-driver/bin/run-selenium-remote.sh
|
Shell
|
gpl-2.0
| 210 |
#!/bin/bash
# we can use glob since the archive contains a single directory
pushd download/*/ > /dev/null
python3 index2ddg.py index-functions-cpp.xml ../../output.txt
popd > /dev/null
LC_ALL=C sort output.txt -o output.txt
|
rasikapohankar/zeroclickinfo-fathead
|
lib/fathead/cppreference_doc/parse.sh
|
Shell
|
apache-2.0
| 225 |
#!/usr/bin/env bash
# **install_prereqs.sh**
# Install system package prerequisites
#
# install_prereqs.sh [-f]
#
# -f Force an install run now
FORCE_PREREQ=0
while getopts ":f" opt; do
case $opt in
f)
FORCE_PREREQ=1
;;
esac
done
# If ``TOP_DIR`` is set we're being sourced rather than running stand-alone
# or in a sub-shell
if [[ -z "$TOP_DIR" ]]; then
# Keep track of the DevStack directory
TOP_DIR=$(cd $(dirname "$0")/.. && pwd)
# Import common functions
source $TOP_DIR/functions
# Determine what system we are running on. This provides ``os_VENDOR``,
# ``os_RELEASE``, ``os_PACKAGE``, ``os_CODENAME``
# and ``DISTRO``
GetDistro
# Needed to get ``ENABLED_SERVICES``
source $TOP_DIR/stackrc
# Prereq dirs are here
FILES=$TOP_DIR/files
fi
# Minimum wait time
PREREQ_RERUN_MARKER=${PREREQ_RERUN_MARKER:-$TOP_DIR/.prereqs}
PREREQ_RERUN_HOURS=${PREREQ_RERUN_HOURS:-2}
PREREQ_RERUN_SECONDS=$((60*60*$PREREQ_RERUN_HOURS))
NOW=$(date "+%s")
LAST_RUN=$(head -1 $PREREQ_RERUN_MARKER 2>/dev/null || echo "0")
DELTA=$(($NOW - $LAST_RUN))
if [[ $DELTA -lt $PREREQ_RERUN_SECONDS && -z "$FORCE_PREREQ" ]]; then
echo "Re-run time has not expired ($(($PREREQ_RERUN_SECONDS - $DELTA)) seconds remaining) "
echo "and FORCE_PREREQ not set; exiting..."
return 0
fi
# Make sure the proxy config is visible to sub-processes
export_proxy_variables
# Install Packages
# ================
# Install package requirements
PACKAGES=$(get_packages general,$ENABLED_SERVICES)
PACKAGES="$PACKAGES $(get_plugin_packages)"
if is_ubuntu && echo $PACKAGES | grep -q dkms ; then
# Ensure headers for the running kernel are installed for any DKMS builds
PACKAGES="$PACKAGES linux-headers-$(uname -r)"
fi
install_package $PACKAGES
if [[ -n "$SYSLOG" && "$SYSLOG" != "False" ]]; then
if is_ubuntu || is_fedora; then
install_package rsyslog-relp
elif is_suse; then
install_package rsyslog-module-relp
else
exit_distro_not_supported "rsyslog-relp installation"
fi
fi
if python3_enabled; then
install_python3
export PYTHON=$(which python${PYTHON3_VERSION} 2>/dev/null || which python3 2>/dev/null)
else
export PYTHON=$(which python 2>/dev/null)
fi
# Mark end of run
# ---------------
date "+%s" >$PREREQ_RERUN_MARKER
date >>$PREREQ_RERUN_MARKER
|
group-policy/devstack
|
tools/install_prereqs.sh
|
Shell
|
apache-2.0
| 2,396 |
#!/bin/bash
# This script tests the high level end-to-end functionality demonstrated
# as part of the examples/sample-app
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"
os::log::info "Starting containerized end-to-end test"
# cluster up no longer produces a cluster that run the e2e test. These use cases are already mostly covered
# in existing e2e suites. The image-registry related tests stand out as ones that may not have an equivalent.
|
liangxia/origin
|
hack/test-end-to-end-docker.sh
|
Shell
|
apache-2.0
| 449 |
#!/bin/bash
rm last-test-run.log
for filename in third_party/tests/*.cs; do
SHORTNAME=${filename##*/}
./compile-and-run-test.sh ${SHORTNAME%%.*} --quiet >> last-test-run.log 2>> last-test-run.log
if [ $? -ne 0 ]; then
echo -ne F
else
echo -ne .
fi
done
echo
echo Done.
cat last-test-run.log
|
uranium62/ilwasm
|
runtests.sh
|
Shell
|
apache-2.0
| 313 |
#!/bin/sh
type getarg >/dev/null 2>&1 || . /lib/dracut-lib.sh
# Kills rsyslogd
if [ -f /var/run/syslogd.pid ]; then
read pid < /var/run/syslogd.pid
kill $pid
kill -0 $pid && kill -9 $pid
else
warn "rsyslogd-stop: Could not find a pid for rsyslogd. Won't kill it."
fi
|
yuwata/dracut
|
modules.d/98syslog/rsyslogd-stop.sh
|
Shell
|
gpl-2.0
| 286 |
(../parentR1dcookie ikev2.record westnet--eastnet-ikev2 ../lib-parentI1psk//parentI1psk.pcap parentR1dcookie.pcap
echo TCPDUMP output
tcpdump -v -v -s 1600 -n -r parentR1dcookie.pcap) 2>&1 | sed -f ../lib-parentR1/sanity.sed
|
y-trudeau/openswan-patch-meraki
|
testing/lib/libpluto/lib-parentR1dcookie/testlist.sh
|
Shell
|
gpl-2.0
| 233 |
#!/bin/bash
docker build -t runtimejs docker
|
runtimejs/runtime
|
docker/docker-prepare.sh
|
Shell
|
apache-2.0
| 46 |
#!/usr/bin/env roundup
#
: ${RUNDECK_USER?"environment variable not set."}
: ${RUNDECK_PROJECT?"environment variable not set."}
: ${REMOTE_NODE?"environment variable not set."}
# Let's get started
# -----------------
# Helpers
# ------------
#. ./include.sh
# The Plan
# --------
describe "project: dispatch script remote node"
it_should_dispatch_script_remotely() {
# Run the script file on the remote node
bash -c "rd adhoc -p $RUNDECK_PROJECT -f -F $REMOTE_NODE -s /tests/rundeck/test-dispatch-script.sh | grep -v ^#" > test.output
# diff with expected
cat >expected.output <<END
This is test-dispatch-script.sh
On node $REMOTE_NODE $REMOTE_NODE
With tags: remote remote
With args:
END
set +e
diff expected.output test.output
result=$?
set -e
rm expected.output test.output #test2.output
if [ 0 != $result ] ; then
echo "FAIL: output differed from expected"
exit 1
fi
}
it_should_dispatch_script_utf8_remotely() {
# Run the script file on the remote node
bash -c "RD_DEBUG=3 rd adhoc -p $RUNDECK_PROJECT -f -F $REMOTE_NODE -s /tests/rundeck/test-dispatch-script-utf8.sh | grep -v ^#" > test2.output
bash -c "rd adhoc -p $RUNDECK_PROJECT -f -F $REMOTE_NODE -s /tests/rundeck/test-dispatch-script-utf8.sh | grep -v ^#" > test.output
# diff with expected
cat >expected.output <<END
This is test-dispatch-script-utf8.sh
UTF-8 Text: ไฝ ๅฅฝ
END
set +e
diff expected.output test.output
result=$?
set -e
rm expected.output test.output #test2.output
if [ 0 != $result ] ; then
echo "FAIL: output differed from expected"
cat test2.output
exit 1
fi
}
it_should_dispatch_script_remotely_dos_lineendings() {
# Run the script file on the remote node
bash -c "rd adhoc -p $RUNDECK_PROJECT -f -F $REMOTE_NODE -s /tests/rundeck/test-dispatch-script-dos.sh | grep -v ^#" > test.output
# diff with expected
cat >expected.output <<END
This is test-dispatch-script-dos.sh
On node $REMOTE_NODE $REMOTE_NODE
With tags: remote remote
With args:
END
set +e
diff expected.output test.output
result=$?
set -e
rm expected.output test.output #test2.output
if [ 0 != $result ] ; then
echo "FAIL: output differed from expected"
exit 1
fi
}
it_should_dispatch_script_remotely_with_args() {
bash -c "rd adhoc -p $RUNDECK_PROJECT -f -F '$REMOTE_NODE' -s /tests/rundeck/test-dispatch-script.sh -- arg1 arg2 | grep -v ^#"> test.output
# diff with expected
cat >expected.output <<END
This is test-dispatch-script.sh
On node $REMOTE_NODE $REMOTE_NODE
With tags: remote remote
With args: arg1 arg2
END
set +e
diff expected.output test.output
result=$?
set -e
rm expected.output test.output #test2.output
if [ 0 != $result ] ; then
echo "FAIL: output differed from expected"
exit 1
fi
}
it_should_dispatch_url_remotely() {
bash -c "rd adhoc -p $RUNDECK_PROJECT -f -F '$REMOTE_NODE' -u file:/tests/rundeck/test-dispatch-script.sh -- arg1 arg2 | grep -v ^#"> test.output
# diff with expected
cat >expected.output <<END
This is test-dispatch-script.sh
On node @node.name@ $REMOTE_NODE
With tags: @node.tags@ remote
With args: arg1 arg2
END
set +e
diff expected.output test.output
result=$?
set -e
rm expected.output test.output #test2.output
if [ 0 != $result ] ; then
echo "FAIL: output differed from expected"
exit 1
fi
}
it_should_dispatch_url_utf8_remotely() {
bash -c "rd adhoc -p $RUNDECK_PROJECT -f -F '$REMOTE_NODE' -u file:/tests/rundeck/test-dispatch-script-utf8.sh -- arg1 arg2 | grep -v ^#"> test.output
# diff with expected
cat >expected.output <<END
This is test-dispatch-script-utf8.sh
UTF-8 Text: ไฝ ๅฅฝ
END
set +e
diff expected.output test.output
result=$?
set -e
rm expected.output test.output #test2.output
if [ 0 != $result ] ; then
echo "FAIL: output differed from expected"
exit 1
fi
}
|
variacode/rundeck
|
test/docker/dockers/rundeck/tests/rundeck/should-dispatch-script-remotely-test.sh
|
Shell
|
apache-2.0
| 4,074 |
#!/bin/sh
test_description='CRLF conversion'
. ./test-lib.sh
has_cr() {
tr '\015' Q <"$1" | grep Q >/dev/null
}
test_expect_success setup '
git config core.autocrlf false &&
for w in Hello world how are you; do echo $w; done >LFonly &&
for w in I am very very fine thank you; do echo ${w}Q; done | q_to_cr >CRLFonly &&
for w in Oh here is a QNUL byte how alarming; do echo ${w}; done | q_to_nul >LFwithNUL &&
git add . &&
git commit -m initial &&
LFonly=$(git rev-parse HEAD:LFonly) &&
CRLFonly=$(git rev-parse HEAD:CRLFonly) &&
LFwithNUL=$(git rev-parse HEAD:LFwithNUL) &&
echo happy.
'
test_expect_success 'default settings cause no changes' '
rm -f .gitattributes tmp LFonly CRLFonly LFwithNUL &&
git read-tree --reset -u HEAD &&
! has_cr LFonly &&
has_cr CRLFonly &&
LFonlydiff=$(git diff LFonly) &&
CRLFonlydiff=$(git diff CRLFonly) &&
LFwithNULdiff=$(git diff LFwithNUL) &&
test -z "$LFonlydiff" -a -z "$CRLFonlydiff" -a -z "$LFwithNULdiff"
'
test_expect_success 'crlf=true causes a CRLF file to be normalized' '
# Backwards compatibility check
rm -f .gitattributes tmp LFonly CRLFonly LFwithNUL &&
echo "CRLFonly crlf" > .gitattributes &&
git read-tree --reset -u HEAD &&
# Note, "normalized" means that git will normalize it if added
has_cr CRLFonly &&
CRLFonlydiff=$(git diff CRLFonly) &&
test -n "$CRLFonlydiff"
'
test_expect_success 'text=true causes a CRLF file to be normalized' '
rm -f .gitattributes tmp LFonly CRLFonly LFwithNUL &&
echo "CRLFonly text" > .gitattributes &&
git read-tree --reset -u HEAD &&
# Note, "normalized" means that git will normalize it if added
has_cr CRLFonly &&
CRLFonlydiff=$(git diff CRLFonly) &&
test -n "$CRLFonlydiff"
'
test_expect_success 'eol=crlf gives a normalized file CRLFs with autocrlf=false' '
rm -f .gitattributes tmp LFonly CRLFonly LFwithNUL &&
git config core.autocrlf false &&
echo "LFonly eol=crlf" > .gitattributes &&
git read-tree --reset -u HEAD &&
has_cr LFonly &&
LFonlydiff=$(git diff LFonly) &&
test -z "$LFonlydiff"
'
test_expect_success 'eol=crlf gives a normalized file CRLFs with autocrlf=input' '
rm -f .gitattributes tmp LFonly CRLFonly LFwithNUL &&
git config core.autocrlf input &&
echo "LFonly eol=crlf" > .gitattributes &&
git read-tree --reset -u HEAD &&
has_cr LFonly &&
LFonlydiff=$(git diff LFonly) &&
test -z "$LFonlydiff"
'
test_expect_success 'eol=lf gives a normalized file LFs with autocrlf=true' '
rm -f .gitattributes tmp LFonly CRLFonly LFwithNUL &&
git config core.autocrlf true &&
echo "LFonly eol=lf" > .gitattributes &&
git read-tree --reset -u HEAD &&
! has_cr LFonly &&
LFonlydiff=$(git diff LFonly) &&
test -z "$LFonlydiff"
'
test_expect_success 'autocrlf=true does not normalize CRLF files' '
rm -f .gitattributes tmp LFonly CRLFonly LFwithNUL &&
git config core.autocrlf true &&
git read-tree --reset -u HEAD &&
has_cr LFonly &&
has_cr CRLFonly &&
LFonlydiff=$(git diff LFonly) &&
CRLFonlydiff=$(git diff CRLFonly) &&
LFwithNULdiff=$(git diff LFwithNUL) &&
test -z "$LFonlydiff" -a -z "$CRLFonlydiff" -a -z "$LFwithNULdiff"
'
test_expect_success 'text=auto, autocrlf=true does not normalize CRLF files' '
rm -f .gitattributes tmp LFonly CRLFonly LFwithNUL &&
git config core.autocrlf true &&
echo "* text=auto" > .gitattributes &&
git read-tree --reset -u HEAD &&
has_cr LFonly &&
has_cr CRLFonly &&
LFonlydiff=$(git diff LFonly) &&
CRLFonlydiff=$(git diff CRLFonly) &&
LFwithNULdiff=$(git diff LFwithNUL) &&
test -z "$LFonlydiff" -a -z "$CRLFonlydiff" -a -z "$LFwithNULdiff"
'
test_expect_success 'text=auto, autocrlf=true does not normalize binary files' '
rm -f .gitattributes tmp LFonly CRLFonly LFwithNUL &&
git config core.autocrlf true &&
echo "* text=auto" > .gitattributes &&
git read-tree --reset -u HEAD &&
! has_cr LFwithNUL &&
LFwithNULdiff=$(git diff LFwithNUL) &&
test -z "$LFwithNULdiff"
'
test_expect_success 'eol=crlf _does_ normalize binary files' '
rm -f .gitattributes tmp LFonly CRLFonly LFwithNUL &&
echo "LFwithNUL eol=crlf" > .gitattributes &&
git read-tree --reset -u HEAD &&
has_cr LFwithNUL &&
LFwithNULdiff=$(git diff LFwithNUL) &&
test -z "$LFwithNULdiff"
'
test_done
|
johnkeeping/git
|
t/t0025-crlf-auto.sh
|
Shell
|
gpl-2.0
| 4,225 |
#!/usr/bin/env bash
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Usage: bump_e2e_image.sh
set -o errexit
set -o nounset
set -o pipefail
TREE="$(git rev-parse --show-toplevel)"
bazel run //experiment/image-bumper -- --image-regex gcr.io/k8s-staging-test-infra/kubekins-e2e "${TREE}/experiment/generate_tests.py" "${TREE}/experiment/test_config.yaml" "${TREE}/config/prow/config.yaml"
find "${TREE}/config/jobs/" . -name "*.yaml" | xargs bazel run //experiment/image-bumper -- --image-regex gcr.io/k8s-staging-test-infra/kubekins-e2e
bazel run //experiment:generate_tests -- \
"--yaml-config-path=${TREE}/experiment/test_config.yaml" \
"--output-dir=${TREE}/config/jobs/kubernetes/generated/"
git commit -am "Bump gcr.io/k8s-staging-test-infra/kubekins-e2e (using generate_tests and manual)"
|
michelle192837/test-infra
|
experiment/bump_e2e_image.sh
|
Shell
|
apache-2.0
| 1,344 |
#!/usr/bin/env bash
###########################################################################
# check_spelling.sh
# ---------------------
# Date : December 2016
# Copyright : (C) 2016 by Denis Rouzaud
# Email : [email protected]
###########################################################################
# #
# This program is free software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation; either version 2 of the License, or #
# (at your option) any later version. #
# #
###########################################################################
# -r: deactivate interactive mode to fix errors
# optional argument: list of files to be checked
# temporarly display all commands to debug issues in TRAVIS
# if [[ $TRAVIS =~ true ]]; then
# set -x
# fi
# extensions or files that should be excluded from file list if :% is appended in the spelling.dat file
EXCLUDE_SCRIPT_LIST='(\.(xml|svg|sip|t2t|pl|sh|qgs|badquote|cmake(\.in)?)|^(debian/copyright|cmake_templates/.*|INSTALL|NEWS|tests/testdata/labeling/README.rst|tests/testdata/font/QGIS-Vera/COPYRIGHT.TXT|doc/(news|INSTALL)\.html))$'
DIR=$(git rev-parse --show-toplevel)/scripts/spell_check
AGIGNORE=${DIR}/.agignore
# GNU prefix command for mac os support (gsed, gsplit)
GP=
if [[ "$OSTYPE" =~ darwin* ]]; then
GP=g
fi
# ARGUMENTS
INTERACTIVE=$( tty -s && echo YES || echo NO)
DEBUG=NO
OUTPUTLOG=""
while getopts ":rdl:" opt; do
case $opt in
r)
INTERACTIVE=NO
;;
d)
DEBUG=YES
;;
l)
OUTPUTLOG=$OPTARG
;;
\?)
echo "Invalid option: -$OPTARG" >&2
exit 1
;;
esac
done
shift $(expr $OPTIND - 1)
if [ $# -ne 0 ]; then
EXCLUDE=$(${GP}sed -e 's/\s*#.*$//' -e '/^\s*$/d' $AGIGNORE | tr '\n' '|' | ${GP}sed -e 's/|$//')
INPUTFILES=$(echo $@ | tr -s '[[:blank:]]' '\n' | ${GP}egrep -iv "$EXCLUDE" | tr '\n' ' ' )
if [[ -z $INPUTFILES ]]; then
exit 0
fi
echo "Running spell check on files: $INPUTFILES"
else
INPUTFILES="."
fi
# regex to find escape string
SPELLOKRX='(#\s*spellok|<!--\s*#\s*spellok\s*-->)'
# split into several files to avoid too long regexes
SPLIT=8
${GP}split --number=l/$SPLIT --numeric-suffixes --suffix-length=2 --additional-suffix=~ ${DIR}/spelling.dat spelling
# global replace variables (dictionary)
declare -A GLOBREP_ALLFILES=()
declare -A GLOBREP_CURRENTFILE=()
declare -A GLOBREP_IGNORE=()
ERRORFOUND=NO
for I in $(seq -f '%02g' 0 $(($SPLIT-1)) ) ; do
( [[ "$INTERACTIVE" =~ YES ]] || [[ "$TRAVIS" =~ true ]] ) && printf "Progress: %d/%d\r" $(( I + 1 )) $SPLIT
SPELLFILE=spelling$I~
${GP}sed -i '/^#/d' $SPELLFILE
# if correction contains an uppercase letter and is the same as the error character wise, this means that the error is searched as a full word and case sensitive (not incorporated in a bigger one)
CASEMATCH_FIXCASE=$(${GP}sed -rn '/^(\w+):\1(:\*)?$/Ip' $SPELLFILE | ${GP}sed -r 's/^(\w+):\1(:\*)?$/(\\b|_)\1(\\b|_)/I')
REMAINS=$( ${GP}sed -r '/^(\w+):\1(:\*)?$/Id' $SPELLFILE)
# for error or correction containing any non letter character (space, apostrophe) search is full word and case insensitive
IGNORECASE_FIXSPECIALCHAR=$(echo "$REMAINS" | perl -ne " print if /^(\w*[ '.โฆ])*\w*:\w*(?(1)|[ '.โฆ])/" | ${GP}sed -r 's/(^[.]+?):.*?(:[*%])?$/(\\b|_|^| )\1(\\b|_|$| )/g' | ${GP}sed -r 's/\./\\./g' | ${GP}sed -r 's/^(\w.*?):.*?(:[*%])?$/(\\b|_)\1(\\b|_)/' )
REMAINS=$( echo "$REMAINS" | perl -ne " print if not /^(\w*[ '.โฆ])*\w*:\w*(?(1)|[ '.โฆ])/")
# This will try to look for misspelling within larger words.
# Condition is hard to explain in words.
# You can test it here: https://regex101.com/r/7kznVA/14
# adding :* in spelling.dat willextra words that should not be checked in longer words ca
# remove those in spelling.dat ending with :*
# following can be checked in longer words case insensitively
IGNORECASE_INWORD=$(echo "$REMAINS" | perl -ne 'print if /^(\w)(\w)(\w)\w*(\w)(\w)(\w):(?:(?!\2\3\w|\w\1\2).)\w*?(?:(?!\5\6\w|\w\4\5)\w\w\w)$/' | cut -d: -f1 )
REMAINS=$( echo "$REMAINS" | perl -ne 'print if not /^(\w)(\w)(\w)\w*(\w)(\w)(\w):(?:(?!\2\3\w|\w\1\2).)\w*?(?:(?!\5\6\w|\w\4\5)\w\w\w)$/' | cut -d: -f1 )
# Trying with the rest as whole words case insensitively
IGNORECASE_WHOLEWORD=$(echo "$REMAINS" | ${GP}sed -r 's/^(.+)$/(\\b|_)\1(\\b|_)/')
# or in camel case, case sensitively for word of at least 4 chars
MATCHCASE_INWORD=$(echo "$REMAINS" | ${GP}sed -r '/^.{,3}$/d' | ${GP}sed -r 's/^(\w)(.*)/(\\b|_)(\l\1\L\2_|\u\1\U\2_|\u\1\L\2\U[_A-Z0-9])|\L[_a-z0-9]\u\1\L\2(\\b|\U[_A-Z0-9])|\L[_a-z0-9]\u\1\U\2\L(\\b|[_a-z0-9])/' )
if [[ "$DEBUG" =~ YES ]]; then
echo "*** FIX CASE (case sensitive) ***"
echo "$CASEMATCH_FIXCASE"
echo "*** SPECIAL CHAR (case insensitive) ***"
echo "$IGNORECASE_FIXSPECIALCHAR"
echo "*** IN WORD (case insensitive) ***"
echo "$IGNORECASE_INWORD"
echo "*** WHOLE WORDS (case insensitive) ***"
echo "$IGNORECASE_WHOLEWORD"
echo "*** IN WORD CAMELCASE (case sensitive) **"
echo "$MATCHCASE_INWORD"
echo "*****"
fi
CASEMATCH_FIXCASE=$( echo "$CASEMATCH_FIXCASE" | ${GP}sed -r '/^\s*$/d' | tr '\n' '\|' | ${GP}sed -r 's/\|$//')
IGNORECASE_FIXSPECIALCHAR=$(echo "$IGNORECASE_FIXSPECIALCHAR" | ${GP}sed -r '/^\s*$/d' | tr '\n' '\|' | ${GP}sed -r 's/\|$//')
IGNORECASE_INWORD=$( echo "$IGNORECASE_INWORD" | ${GP}sed -r '/^\s*$/d' | tr '\n' '\|' | ${GP}sed -r 's/\|$//')
IGNORECASE_WHOLEWORD=$( echo "$IGNORECASE_WHOLEWORD" | ${GP}sed -r '/^\s*$/d' | tr '\n' '\|' | ${GP}sed -r 's/\|$//')
MATCHCASE_INWORD=$( echo "$MATCHCASE_INWORD" | ${GP}sed -r '/^\s*$/d' | tr '\n' '\|' | ${GP}sed -r 's/\|$//')
IGNORECASE=$(echo "(${IGNORECASE_FIXSPECIALCHAR}|${IGNORECASE_INWORD}|${IGNORECASE_WHOLEWORD})" | ${GP}sed -r 's/\(\|/(/' | ${GP}sed -r 's/\|\|/|/g' | ${GP}sed -r 's/\|\)/)/' | ${GP}sed -r 's/^\(\)$//')
CASEMATCH=$(echo "(${CASEMATCH_FIXCASE}|${MATCHCASE_INWORD})" | ${GP}sed -r 's/\(\|/(/' |${GP}sed -r 's/\|\|/|/g' | ${GP}sed -r 's/\|\)/)/' | ${GP}sed -r 's/^\(\)$//')
RUN_IGNORECASE=OFF
RUN_CASEMATCH=OFF
if [[ ! -z "${IGNORECASE}" ]]; then
RUN_IGNORECASE=ON
fi
if [[ ! -z "${CASEMATCH}" ]]; then
RUN_CASEMATCH=ON
fi
IGNORECASE=$IGNORECASE'(?!.*'"${SPELLOKRX}"')'
CASEMATCH=$CASEMATCH'(?!.*'"${SPELLOKRX}"')'
FILE=$INPUTFILES # init with input files (if ag is run with single file, file path is not written in output)
while read -u 3 -r LINE; do
echo -e "$LINE"
NOCOLOR=$(echo "$LINE" | ${GP}sed -r 's/\x1B\[([0-9]{1,2}(;[0-9]{1,2})?)?[m|K]//g')
if [[ "$NOCOLOR" =~ ^[[:alnum:]][[:alnum:]\/\._-]+$ ]]; then
FILE=$NOCOLOR
GLOBREP_CURRENTFILE=()
fi
if [[ "$NOCOLOR" =~ ^[0-9]+: ]]; then
if [[ -z $FILE ]]; then
echo "*** error: no file"
exit 1
fi
NUMBER=$(echo "$NOCOLOR" | cut -d: -f1)
ERRORLINE=$(echo "$NOCOLOR" | cut -d: -f2)
ERROR=$(echo "$LINE" | ${GP}sed -r 's/^.*?\x1B\[30;43m(.*?)\x1B\[0m.*?$/\1/')
PREVCHAR=$(echo "$LINE" | cut -d: -f2- | ${GP}sed -r 's/^(.*?)\x1B\[30;43m.*?\x1B\[0m.*?$/\1/' | ${GP}sed -r 's/\x1B\[([0-9]{1,2}(;[0-9]{1,2})?)?[m|K]//g' | tail -c 2)
NEXTCHAR=$(echo "$LINE" | cut -d: -f2- | ${GP}sed -r 's/^.*?\x1B\[30;43m.*?\x1B\[0m(.*?)$/\1/' | ${GP}sed -r 's/\x1B\[([0-9]{1,2}(;[0-9]{1,2})?)?[m|K]//g' | head -c 1)
if [[ "$DEBUG" =~ YES ]]; then
echo "prev char: $PREVCHAR"
echo "next char: $NEXTCHAR"
fi
ERRORNOCOLOR=$(echo "$ERROR" | ${GP}sed -r 's/\x1B\[([0-9]{1,2}(;[0-9]{1,2})?)?[m|K]//g')
if [[ "$ERRORNOCOLOR" =~ ^[[:digit:]]+: ]]; then
echo "*** error: could not find error in $LINE" >&2
else
# if the error is not in IGNORECASE_INWORD, then it matched previous and next character (needs to remove them)
# also make error small case and escape special chars: () |
ERRORSMALLCASE=$(echo ${ERRORNOCOLOR,,} |${GP}sed -r 's/\(/\\(/g' | ${GP}sed -r 's/\)/\\)/g' | ${GP}sed -r 's/\|/\\|/g' )
if [[ ! "${ERRORSMALLCASE}" =~ $IGNORECASE_INWORD ]]; then
if [[ -n $(ag --nonumbers --case-sensitive "^${ERRORSMALLCASE:1:-1}${ERRORSMALLCASE: -1}?:" scripts/spell_check/spelling.dat) ]]; then
PREVCHAR=${ERROR::1}
# remove first character
ERRORSMALLCASE=${ERRORSMALLCASE#?}
ERROR=${ERROR#?}
fi
if [[ -n $(ag --nonumbers --case-sensitive "^${ERRORSMALLCASE::-1}:" scripts/spell_check/spelling.dat) ]]; then
NEXTCHAR=${ERROR:${#ERROR}-1:1}
# remove last character
ERRORSMALLCASE=${ERRORSMALLCASE::-1}
ERROR=${ERROR::-1}
fi
fi
ERRORSMALLCASE=$(${GP}sed -r 's/\./\\./g' <<< $ERRORSMALLCASE)
# get correction from spelling.dat
CORRECTION=$(ag --nonumbers --case-sensitive "^${ERRORSMALLCASE}:" ${DIR}/spelling.dat | cut -d: -f2)
# exclude script files
if [[ "$(ag --nonumbers --case-sensitive "^${ERRORSMALLCASE}:" ${DIR}/spelling.dat | cut -d: -f3)" =~ "%" ]]; then
if [[ "$FILE" =~ $EXCLUDE_SCRIPT_LIST ]]; then
echo "skipping script file for $(${GP}sed -r 's/\\//g' <<< $ERRORSMALLCASE)"
continue
fi
fi
if [[ -z "$CORRECTION" ]]; then
CORRECTION=$(perl -e "use strict; use warnings; while(<>) { chop; my(\$a,\$b) = split /:/; \$a = qr(\$a); if( my @matches = '${ERRORSMALLCASE}' =~ /^\$a\$/i ) { print sprintf(\$b, @matches); last; }}" ${DIR}/spelling.dat )
# exclude script files
if [[ "$(ag --nonumbers --case-sensitive ":${CORRECTION}" ${DIR}/spelling.dat | cut -d: -f3)" =~ "%" ]]; then
if [[ "$FILE" =~ $EXCLUDE_SCRIPT_LIST ]]; then
echo "skipping script file for $(${GP}sed -r 's/\\//g' <<< $ERRORSMALLCASE)"
continue
fi
fi
fi
ERRORFOUND=YES
if [[ -z "$CORRECTION" ]]; then
echo "could not find correction for $ERROR" >&2
else
# Match case
MATCHCASE="$ERROR:$CORRECTION"
CORRECTIONCASE=$(echo "$MATCHCASE" | ${GP}sed -r 's/([A-Z]+):(.*)/\1:\U\2/; s/([A-Z][a-z]+):([a-z])/\1:\U\2\L/' | cut -d: -f2)
if [[ -n $OUTPUTLOG ]]; then
echo "$FILE $NUMBER $ERROR $CORRECTIONCASE" >> $OUTPUTLOG
fi
if [[ "$INTERACTIVE" =~ YES ]]; then
# Skip global replace
if [[ -n ${GLOBREP_ALLFILES["$ERROR"]} ]]; then
echo -e "replace \x1B[33m$ERROR\x1B[0m by \x1B[33m$CORRECTIONCASE\x1B[0m in \x1B[33m$FILE\x1B[0m"
${GP}sed -i -r "/${SPELLOKRX}/! s/${PREVCHAR}${ERROR}${NEXTCHAR}/${PREVCHAR}$CORRECTIONCASE${NEXTCHAR}/g" $FILE
continue
elif [[ ( -n ${GLOBREP_CURRENTFILE["$ERROR"]} ) || ( -n ${GLOBREP_IGNORE["$ERROR"]} ) ]]; then
echo "skipping occurrence"
continue
else
# escape string
SPELLOKSTR='//#spellok'
if [[ "$FILE" =~ \.(txt|html|htm|dox)$ ]]; then
SPELLOKSTR='<!--#spellok-->'
elif [[ "$FILE" =~ \.(h|cpp|sip)$ ]] && [[ "$ERRORLINE" =~ ^\s*(\/*\|\/\/) ]]; then
# line is already commented
SPELLOKSTR='#spellok'
elif [[ "$FILE" =~ \.(py|pl|sh|cmake(\.in)?)$ ]]; then
SPELLOKSTR='#spellok'
fi
SPELLOKSTR_ESC=$(echo "$SPELLOKSTR" | ${GP}sed -r 's/\//\\\//g')
# Display menu
echo "***"
echo -e "Error found: \x1B[31m$ERROR\x1B[0m"
echo -e " r) \x1B[4mr\x1B[0meplace by \x1B[33m$CORRECTIONCASE\x1B[0m at line $NUMBER"
echo -e " f) replace all occurrences by \x1B[33m$CORRECTIONCASE\x1B[0m in current \x1B[4mf\x1B[0mile"
echo -e " a) replace all occurrences by \x1B[33m$CORRECTIONCASE\x1B[0m in \x1B[4ma\x1B[0mll files"
echo -e " p) a\x1B[4mp\x1B[0mpend \x1B[33m$SPELLOKSTR\x1B[0m at the end of the line $NUMBER to avoid spell check on this line"
echo -e " t) \x1B[4mt\x1B[0mype your own correction"
echo -e " c) skip and \x1B[4mc\x1B[0montinue"
echo -e " o) skip all \x1B[4mo\x1B[0mccurences and continue"
echo -e " e) \x1B[4me\x1B[0mxit"
TOREPLACE=$(${GP}sed -r 's/([.\[/\]])/\\\1/g' <<< "${PREVCHAR}${ERROR}${NEXTCHAR}")
PREVCHAR=$(${GP}sed -r 's/\//\\\//g' <<< "${PREVCHAR}")
NEXTCHAR=$(${GP}sed -r 's/\//\\\//g' <<< "${NEXTCHAR}")
if [[ "$DEBUG" =~ YES ]]; then
echo "__${PREVCHAR}__${ERROR}__${NEXTCHAR}__"
echo "${NUMBER}s/$TOREPLACE/${PREVCHAR}$CORRECTIONCASE${NEXTCHAR}/g"
fi
while read -n 1 n; do
echo ""
case $n in
r)
echo -e "replacing \x1B[33m$ERROR\x1B[0m by \x1B[33m$CORRECTIONCASE\x1B[0m in \x1B[33m$FILE\x1B[0m at line \x1B[33m$NUMBER\x1B[0m"
${GP}sed -i "${NUMBER}s/$TOREPLACE/${PREVCHAR}$CORRECTIONCASE${NEXTCHAR}/g" $FILE
break
;;
f)
GLOBREP_CURRENTFILE+=(["$ERROR"]=1)
echo -e "replacing \x1B[33m$ERROR\x1B[0m by \x1B[33m$CORRECTIONCASE\x1B[0m in \x1B[33m$FILE\x1B[0m"
${GP}sed -i -r "/${SPELLOKRX}/! s/$TOREPLACE/${PREVCHAR}$CORRECTIONCASE${NEXTCHAR}/g" $FILE
break
;;
a)
GLOBREP_CURRENTFILE+=(["$ERROR"]=1)
GLOBREP_ALLFILES+=(["$ERROR"]=1)
echo -e "replace \x1B[33m$ERROR\x1B[0m by \x1B[33m$CORRECTIONCASE\x1B[0m in \x1B[33m$FILE\x1B[0m"
${GP}sed -i -r "/${SPELLOKRX}/! s/$TOREPLACE/${PREVCHAR}$CORRECTIONCASE${NEXTCHAR}/g" $FILE
break
;;
p)
echo -e "appending \x1B[33m$SPELLOKSTR\x1B[0m to \x1B[33m$FILE\x1B[0m at line \x1B[33m$NUMBER\x1B[0m"
${GP}sed -i "${NUMBER}s/\$/ $SPELLOKSTR_ESC/" $FILE
break
;;
t)
echo "Enter the correction: "
read CORRECTION
MATCHCASE="$ERROR:$CORRECTION"
CORRECTIONCASE=$(echo "$MATCHCASE" | ${GP}sed -r 's/([A-Z]+):(.*)/\1:\U\2/; s/([A-Z][a-z]+):([a-z])/\1:\U\2\L/' | cut -d: -f2)
echo -e "replacing \x1B[33m$ERROR\x1B[0m by \x1B[33m$CORRECTIONCASE\x1B[0m in \x1B[33m$FILE\x1B[0m at line \x1B[33m$NUMBER\x1B[0m"
${GP}sed -i "${NUMBER}s/$TOREPLACE/${PREVCHAR}$CORRECTIONCASE${NEXTCHAR}/g" $FILE
break
;;
c)
break
;;
o)
GLOBREP_IGNORE+=(["$ERROR"]=1)
break
;;
e)
exit 1
;;
*) invalid option;;
esac
done
fi
fi
fi
fi
if [[ "$NOCOLOR" =~ ^\s*$ ]]; then
FILE=""
fi
fi
done 3< <(
[[ "$RUN_IGNORECASE" == "ON" ]] && unbuffer ag --noaffinity --all-text --nopager --color-match "30;43" --numbers --nomultiline --ignore-case -p $AGIGNORE "${IGNORECASE}" $INPUTFILES ;
[[ "$RUN_CASEMATCH" == "ON" ]] && unbuffer ag --noaffinity --all-text --nopager --color-match "30;43" --numbers --nomultiline --case-sensitive -p $AGIGNORE "${CASEMATCH}" $INPUTFILES
)
rm -f $SPELLFILE
done
( [[ "$INTERACTIVE" =~ YES ]] || [[ "$TRAVIS" =~ true ]] ) && echo
if [[ "$ERRORFOUND" =~ YES ]]; then
echo -e "\x1B[1msome errors have been found.\x1B[0m" >&2
exit 1
else
exit 0
fi
|
stevenmizuno/QGIS
|
scripts/spell_check/check_spelling.sh
|
Shell
|
gpl-2.0
| 16,417 |
#!/bin/bash
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
function enable-accounting() {
mkdir -p /etc/systemd/system.conf.d/
cat <<EOF >/etc/systemd/system.conf.d/kubernetes-accounting.conf
[Manager]
DefaultCPUAccounting=yes
DefaultMemoryAccounting=yes
EOF
systemctl daemon-reload
}
function prepare-package-manager() {
echo "Prepare package manager"
# Useful if a mirror is broken or slow
if [ -z "$CUSTOM_FEDORA_REPOSITORY_URL" ]; then
echo "fastestmirror=True" >> /etc/dnf/dnf.conf
else
# remove trailing slash from URL if it's present
CUSTOM_FEDORA_REPOSITORY_URL="${CUSTOM_FEDORA_REPOSITORY_URL%/}"
sed -i -e "/^metalink=/d" /etc/yum.repos.d/*.repo
sed -i -e "s@^#baseurl=http://download.fedoraproject.org/pub/fedora@baseurl=$CUSTOM_FEDORA_REPOSITORY_URL@" /etc/yum.repos.d/*.repo
fi
}
function add-volume-support() {
echo "Adding nfs volume support"
# we need nfs-utils to support volumes
dnf install -y nfs-utils
}
function write-salt-config() {
local role="$1"
# Update salt configuration
mkdir -p /etc/salt/minion.d
mkdir -p /srv/salt-overlay/pillar
cat <<EOF >/srv/salt-overlay/pillar/cluster-params.sls
service_cluster_ip_range: '$(echo "$SERVICE_CLUSTER_IP_RANGE" | sed -e "s/'/''/g")'
cert_ip: '$(echo "$MASTER_IP" | sed -e "s/'/''/g")'
enable_cluster_monitoring: '$(echo "$ENABLE_CLUSTER_MONITORING" | sed -e "s/'/''/g")'
enable_cluster_logging: '$(echo "$ENABLE_CLUSTER_LOGGING" | sed -e "s/'/''/g")'
enable_cluster_ui: '$(echo "$ENABLE_CLUSTER_UI" | sed -e "s/'/''/g")'
enable_node_logging: '$(echo "$ENABLE_NODE_LOGGING" | sed -e "s/'/''/g")'
logging_destination: '$(echo "$LOGGING_DESTINATION" | sed -e "s/'/''/g")'
elasticsearch_replicas: '$(echo "$ELASTICSEARCH_LOGGING_REPLICAS" | sed -e "s/'/''/g")'
enable_cluster_dns: '$(echo "$ENABLE_CLUSTER_DNS" | sed -e "s/'/''/g")'
dns_replicas: '$(echo "$DNS_REPLICAS" | sed -e "s/'/''/g")'
dns_server: '$(echo "$DNS_SERVER_IP" | sed -e "s/'/''/g")'
dns_domain: '$(echo "$DNS_DOMAIN" | sed -e "s/'/''/g")'
federations_domain_map: ''
instance_prefix: '$(echo "$INSTANCE_PREFIX" | sed -e "s/'/''/g")'
admission_control: '$(echo "$ADMISSION_CONTROL" | sed -e "s/'/''/g")'
enable_cpu_cfs_quota: '$(echo "$ENABLE_CPU_CFS_QUOTA" | sed -e "s/'/''/g")'
network_provider: '$(echo "$NETWORK_PROVIDER" | sed -e "s/'/''/g")'
cluster_cidr: '$(echo "$CLUSTER_IP_RANGE" | sed -e "s/'/''/g")'
opencontrail_tag: '$(echo "$OPENCONTRAIL_TAG" | sed -e "s/'/''/g")'
opencontrail_kubernetes_tag: '$(echo "$OPENCONTRAIL_KUBERNETES_TAG" | sed -e "s/'/''/g")'
opencontrail_public_subnet: '$(echo "$OPENCONTRAIL_PUBLIC_SUBNET" | sed -e "s/'/''/g")'
e2e_storage_test_environment: '$(echo "$E2E_STORAGE_TEST_ENVIRONMENT" | sed -e "s/'/''/g")'
EOF
if [ -n "${EVICTION_HARD:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
eviction_hard: '$(echo "${EVICTION_HARD}" | sed -e "s/'/''/g")'
EOF
fi
cat <<EOF >/etc/salt/minion.d/log-level-debug.conf
log_level: warning
log_level_logfile: warning
EOF
cat <<EOF >/etc/salt/minion.d/grains.conf
grains:
node_ip: '$(echo "$MASTER_IP" | sed -e "s/'/''/g")'
publicAddressOverride: '$(echo "$MASTER_IP" | sed -e "s/'/''/g")'
network_mode: openvswitch
networkInterfaceName: '$(echo "$NETWORK_IF_NAME" | sed -e "s/'/''/g")'
api_servers: '$(echo "$MASTER_IP" | sed -e "s/'/''/g")'
cloud: vagrant
roles:
- $role
runtime_config: '$(echo "$RUNTIME_CONFIG" | sed -e "s/'/''/g")'
docker_opts: '$(echo "$DOCKER_OPTS" | sed -e "s/'/''/g")'
master_extra_sans: '$(echo "$MASTER_EXTRA_SANS" | sed -e "s/'/''/g")'
keep_host_etcd: true
kube_user: '$(echo "$KUBE_USER" | sed -e "s/'/''/g")'
EOF
}
function release_not_found() {
echo "It looks as if you don't have a compiled version of Kubernetes. If you" >&2
echo "are running from a clone of the git repo, please run 'make quick-release'." >&2
echo "Note that this requires having Docker installed. If you are running " >&2
echo "from a release tarball, something is wrong. Look at " >&2
echo "http://kubernetes.io/ for information on how to contact the development team for help." >&2
exit 1
}
function install-salt() {
server_binary_tar="/vagrant/server/kubernetes-server-linux-amd64.tar.gz"
if [[ ! -f "$server_binary_tar" ]]; then
server_binary_tar="/vagrant/_output/release-tars/kubernetes-server-linux-amd64.tar.gz"
fi
if [[ ! -f "$server_binary_tar" ]]; then
release_not_found
fi
salt_tar="/vagrant/server/kubernetes-salt.tar.gz"
if [[ ! -f "$salt_tar" ]]; then
salt_tar="/vagrant/_output/release-tars/kubernetes-salt.tar.gz"
fi
if [[ ! -f "$salt_tar" ]]; then
release_not_found
fi
echo "Running release install script"
rm -rf /kube-install
mkdir -p /kube-install
pushd /kube-install
tar xzf "$salt_tar"
cp "$server_binary_tar" .
./kubernetes/saltbase/install.sh "${server_binary_tar##*/}"
popd
if ! which salt-call >/dev/null 2>&1; then
# Install salt from official repositories.
# Need to enable testing-repos to get version of salt with fix for dnf-core-plugins
dnf config-manager --set-enabled updates-testing
dnf install -y salt-minion
# Fedora >= 23 includes salt packages but the bootstrap is
# creating configuration for a (non-existent) salt repo anyway.
# Remove the invalid repo to prevent dnf from warning about it on
# every update. Assume this problem is specific to Fedora 23 and
# will fixed by the time another version of Fedora lands.
local fedora_version=$(grep 'VERSION_ID' /etc/os-release | sed 's+VERSION_ID=++')
if [[ "${fedora_version}" = '23' ]]; then
local repo_file='/etc/yum.repos.d/saltstack-salt-fedora-23.repo'
if [[ -f "${repo_file}" ]]; then
rm "${repo_file}"
fi
fi
fi
}
function run-salt() {
echo " Now waiting for the Salt provisioning process to complete on this machine."
echo " This can take some time based on your network, disk, and cpu speed."
salt-call --local state.highstate
}
function create-salt-kubelet-auth() {
local -r kubelet_kubeconfig_folder="/srv/salt-overlay/salt/kubelet"
mkdir -p "${kubelet_kubeconfig_folder}"
(umask 077;
cat > "${kubelet_kubeconfig_folder}/kubeconfig" << EOF
apiVersion: v1
kind: Config
clusters:
- cluster:
insecure-skip-tls-verify: true
name: local
contexts:
- context:
cluster: local
user: kubelet
name: service-account-context
current-context: service-account-context
users:
- name: kubelet
user:
token: ${KUBELET_TOKEN}
EOF
)
}
function create-salt-kubeproxy-auth() {
kube_proxy_kubeconfig_folder="/srv/salt-overlay/salt/kube-proxy"
mkdir -p "${kube_proxy_kubeconfig_folder}"
(umask 077;
cat > "${kube_proxy_kubeconfig_folder}/kubeconfig" << EOF
apiVersion: v1
kind: Config
clusters:
- cluster:
insecure-skip-tls-verify: true
name: local
contexts:
- context:
cluster: local
user: kube-proxy
name: service-account-context
current-context: service-account-context
users:
- name: kube-proxy
user:
token: ${KUBE_PROXY_TOKEN}
EOF
)
}
|
DongyiYang/kubernetes
|
cluster/vagrant/provision-utils.sh
|
Shell
|
apache-2.0
| 7,623 |
#! /usr/bin/env bash
PWD_BACKUP=${PWD}
SCRIPT_DIR=`dirname "${PWD}/$0"`
cd "${SCRIPT_DIR}/.." || exit 1
function fail() { cd "${PWD_BACKUP}" ; exit 1; }
####################################################################
kwrite \
doc/configure.ac \
include/uriparser/UriBase.h \
configure.ac \
ChangeLog \
&
####################################################################
cd "${PWD_BACKUP}" || fail
exit 0
|
dodikk/uriparser-ios
|
scripts/edit_version.sh
|
Shell
|
bsd-3-clause
| 420 |
#!/bin/sh
# SUMMARY: Check that the ca-certificates package works
# LABELS:
# REPEAT:
set -e
# Source libraries. Uncomment if needed/defined
#. "${RT_LIB}"
. "${RT_PROJECT_ROOT}/_lib/lib.sh"
NAME=ca-certificates
clean_up() {
rm -rf ${NAME}-*
}
trap clean_up EXIT
# Test code goes here
linuxkit build -format kernel+initrd -name "${NAME}" test.yml
RESULT="$(linuxkit run $NAME)"
echo "${RESULT}"
echo "${RESULT}" | grep -q "suite PASSED"
exit 0
|
deitch/linuxkit
|
test/cases/040_packages/003_ca-certificates/test.sh
|
Shell
|
apache-2.0
| 450 |
#!/bin/bash
set -eu
if [ "${DOCKER_HOST-}" = "" -a "$(uname)" = "Darwin" ]; then
if ! type -P "boot2docker" >& /dev/null; then
echo "boot2docker not found!"
exit 1
fi
echo "boot2docker shellinit # initializing DOCKER_* env variables"
eval $(boot2docker shellinit 2>/dev/null)
fi
# Verify that Docker is installed.
DOCKER="docker"
if [[ ! $(type -P "$DOCKER") ]]; then
echo "Docker executable not found!"
echo "Installation instructions at https://docs.docker.com/installation/"
exit 1
fi
# Verify docker is reachable.
OUT=$(($DOCKER images > /dev/null) 2>&1) || (
echo "Docker is not reachable. Is the Docker daemon running?"
echo "'docker images': $OUT"
exit 1
)
|
mbonaci/cockroach
|
build/init-docker.sh
|
Shell
|
apache-2.0
| 697 |
#!/bin/bash
# ----------------------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# ----------------------------------------------------------------------------
#
# This script is for cleaning the host machine where one or more of the Stratos servers are run.
# ----------------------------------------------------------------------------
source "./conf/setup.conf"
if [ "$UID" -ne "0" ]; then
echo ; echo " You must be root to run $0. (Try running 'sudo bash' first.)" ; echo
exit 69
fi
function help {
echo ""
echo "Clean the host machine where one or more of the Stratos2 servers are run."
echo "usage:"
echo "clean.sh -u <mysql username> -p <mysql password>"
echo ""
}
while getopts u:p: opts
do
case $opts in
u)
mysql_user=${OPTARG}
;;
p)
mysql_pass=${OPTARG}
;;
*)
help
#exit 1
;;
esac
done
function helpclean {
echo ""
echo "Enter DB credentials if you need to clear Stratos DB"
echo "usage:"
echo "clean.sh -u <mysql username> -p <mysql password>"
echo ""
}
function clean_validate {
if [ -z $stratos_path ]; then
echo "stratos_path is not set"
exit 1
fi
if [ -z $log_path ]; then
echo "log_path is not set"
exit 1
fi
}
clean_validate
if [[ ( -n $mysql_user && -n $mysql_pass ) ]]; then
read -p "Please confirm that you want to remove stratos databases, servers and logs [y/n] " answer
if [[ $answer != y ]] ; then
exit 1
fi
fi
echo 'Stopping Carbon java processes'
#killing carbon processes
for pid in $(ps aux | grep "[o]rg.wso2.carbon.bootstrap.Bootstrap" | awk '{print $2}')
do
echo "killing Carbon process $pid"
kill $pid
done
#killing activemq
for pid in $(ps aux | grep "[a]pache-activemq" | awk '{print $2}')
do
echo "killing ActiveMQ $pid"
kill $pid
done
echo 'Waiting for applications to exit'
sleep 15
if [[ ( -n $mysql_user && -n $mysql_pass ) ]]; then
echo 'Removing userstore database'
mysql -u $mysql_user -p$mysql_pass -e "DROP DATABASE IF EXISTS $userstore_db_schema;"
fi
if [[ -d $stratos_path/scripts ]]; then
echo 'Removing scripts'
rm -rf $stratos_path/scripts
fi
if [[ -d $stratos_path ]]; then
echo 'Removing Stratos'
rm -rf $stratos_path/*
fi
echo 'Removing logs'
rm -rf $log_path/*
|
asankasanjaya/stratos
|
tools/stratos-installer/clean.sh
|
Shell
|
apache-2.0
| 3,151 |
#! /bin/sh
# Test suite for exclude.
# Copyright (C) 2009-2011 Free Software Foundation, Inc.
# This file is part of the GNUlib Library.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
TMP=excltmp.$$
LIST=flist.$$
ERR=0
# Test anchored
cat > $LIST <<EOT
foo*
bar
Baz
EOT
cat > $TMP <<EOT
bar: 1
foo/bar: 0
EOT
./test-exclude$EXEEXT -anchored $LIST -- bar foo/bar |
tr -d '\015' |
diff -c $TMP - || ERR=1
rm -f $TMP $LIST
exit $ERR
|
sigma-random/asuswrt-merlin
|
release/src/router/coreutils-8.13/gnulib-tests/test-exclude6.sh
|
Shell
|
gpl-2.0
| 1,027 |
#!/bin/bash
# this script runs continuous test using CMake's testing mechanisms and submits the results
# to the dashboard.
# use GNU screen to start this script from ssh sessions and then detach the session.
if [ -n "$1" ]
then
WAKEUPAT="$1" # time to wake up every day in HH:MM in UTC
else
WAKEUPAT="02:55" # time to wake up every day in HH:MM in UTC
fi
if [ -z "$DISPLAY" ]
then
# if $DISPLAY is not set (e.g. on a VM or server), ROOT sents a warning to stderr e.g. when loading Marlin libraries; this might fail tests
echo " Variable \$DISPLAY not set, set to 'localhost:0' (otherwise tests might fail due to ROOT error message)"
export DISPLAY=localhost:0
fi
EUDAQDIR="$(readlink -f $(dirname $0)/../../..)"
BUILDDIR="$(readlink -f $(dirname $0)/../../../build)"
if [ ! -f "${EUDAQDIR}/main/include/eudaq/Documentation.hh" -o ! -d "$BUILDDIR" ]
then
echo " ERROR: Could not identify EUDAQ source and/or build directory!";
exit;
fi
echo " Using EUDAQ source directory: $EUDAQDIR"
echo " Using EUDAQ build directory: $BUILDDIR"
cd $EUDAQDIR
if (( $? )); then
{
echo " Could not change into EUDAQ source directory!";
exit;
}
fi;
# setup done!
echo " It's $(date --utc +%H:%M), and I'm waiting for my time to wake up ($WAKEUPAT UTC)... "
# infinite loop
while :; do
now="$(date --utc +%H:%M)"
if [[ "$now" = "$WAKEUPAT" ]]; then
echo " it's $now, time to wake up!"
echo " .. running nightly checks .."
ctest -V -S "$EUDAQDIR/etc/tests/nightly/nightly.cmake" -D CTEST_CMAKE_GENERATOR="Unix Makefiles" -D WITH_MEMCHECK=ON -D CTEST_SOURCE_DIRECTORY="$EUDAQDIR" -D CTEST_BINARY_DIRECTORY="$BUILDDIR"
echo " .. my job is done done for now, going to sleep on $(date) .. "
sleep 59
fi
sleep 30
done
|
arummler/eudaq
|
legacy/etc/tests/nightly/run_nightly.sh
|
Shell
|
lgpl-3.0
| 1,784 |
#!/bin/bash
make
make install PREFIX=$PREFIX
|
phac-nml/bioconda-recipes
|
recipes/adapterremoval/build.sh
|
Shell
|
mit
| 46 |
#!/bin/bash -e
#
# Copyright 2016 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Test whether Desugar runs static initializers of interfaces.
if grep "THIS STRING IS NOT EXPECTED TO APPEAR IN THE OUTPUT OF DESUGAR!!!" "${1}" ; then
exit 1
else
exit 0
fi
|
twitter-forks/bazel
|
src/test/java/com/google/devtools/build/android/desugar/static_initializer_of_functional_interface_should_not_execute.sh
|
Shell
|
apache-2.0
| 799 |
#!/usr/bin/env bash
# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
run_daemonset_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing kubectl(v1:daemonsets)"
### Create a rolling update DaemonSet
# Pre-condition: no DaemonSet exists
kube::test::get_object_assert daemonsets "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl apply -f hack/testdata/rollingupdate-daemonset.yaml "${kube_flags[@]}"
# Template Generation should be 1
kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field}}}" '1'
kubectl apply -f hack/testdata/rollingupdate-daemonset.yaml "${kube_flags[@]}"
# Template Generation should stay 1
kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field}}}" '1'
# Test set commands
kubectl set image daemonsets/bind "${kube_flags[@]}" *=k8s.gcr.io/pause:test-cmd
kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field}}}" '2'
kubectl set env daemonsets/bind "${kube_flags[@]}" foo=bar
kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field}}}" '3'
kubectl set resources daemonsets/bind "${kube_flags[@]}" --limits=cpu=200m,memory=512Mi
kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field}}}" '4'
# Clean up
kubectl delete -f hack/testdata/rollingupdate-daemonset.yaml "${kube_flags[@]}"
set +o nounset
set +o errexit
}
run_daemonset_history_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing kubectl(v1:daemonsets, v1:controllerrevisions)"
### Test rolling back a DaemonSet
# Pre-condition: no DaemonSet or its pods exists
kube::test::get_object_assert daemonsets "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
# Create a DaemonSet (revision 1)
kubectl apply -f hack/testdata/rollingupdate-daemonset.yaml --record "${kube_flags[@]}"
kube::test::wait_object_assert controllerrevisions "{{range.items}}{{$annotations_field}}:{{end}}" ".*rollingupdate-daemonset.yaml --record.*"
# Rollback to revision 1 - should be no-op
kubectl rollout undo daemonset --to-revision=1 "${kube_flags[@]}"
kube::test::get_object_assert daemonset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_PAUSE_V2}:"
kube::test::get_object_assert daemonset "{{range.items}}{{$container_len}}{{end}}" "1"
# Update the DaemonSet (revision 2)
kubectl apply -f hack/testdata/rollingupdate-daemonset-rv2.yaml --record "${kube_flags[@]}"
kube::test::wait_object_assert daemonset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DAEMONSET_R2}:"
kube::test::wait_object_assert daemonset "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_DAEMONSET_R2_2}:"
kube::test::get_object_assert daemonset "{{range.items}}{{$container_len}}{{end}}" "2"
kube::test::wait_object_assert controllerrevisions "{{range.items}}{{$annotations_field}}:{{end}}" ".*rollingupdate-daemonset-rv2.yaml --record.*"
# Rollback to revision 1 with dry-run - should be no-op
kubectl rollout undo daemonset --dry-run=true "${kube_flags[@]}"
kube::test::get_object_assert daemonset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DAEMONSET_R2}:"
kube::test::get_object_assert daemonset "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_DAEMONSET_R2_2}:"
kube::test::get_object_assert daemonset "{{range.items}}{{$container_len}}{{end}}" "2"
# Rollback to revision 1
kubectl rollout undo daemonset --to-revision=1 "${kube_flags[@]}"
kube::test::wait_object_assert daemonset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_PAUSE_V2}:"
kube::test::get_object_assert daemonset "{{range.items}}{{$container_len}}{{end}}" "1"
# Rollback to revision 1000000 - should fail
output_message=$(! kubectl rollout undo daemonset --to-revision=1000000 "${kube_flags[@]}" 2>&1)
kube::test::if_has_string "${output_message}" "unable to find specified revision"
kube::test::get_object_assert daemonset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_PAUSE_V2}:"
kube::test::get_object_assert daemonset "{{range.items}}{{$container_len}}{{end}}" "1"
# Rollback to last revision
kubectl rollout undo daemonset "${kube_flags[@]}"
kube::test::wait_object_assert daemonset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DAEMONSET_R2}:"
kube::test::wait_object_assert daemonset "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_DAEMONSET_R2_2}:"
kube::test::get_object_assert daemonset "{{range.items}}{{$container_len}}{{end}}" "2"
# Clean up
kubectl delete -f hack/testdata/rollingupdate-daemonset.yaml "${kube_flags[@]}"
set +o nounset
set +o errexit
}
run_kubectl_apply_deployments_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing kubectl apply deployments"
## kubectl apply should propagate user defined null values
# Pre-Condition: no Deployments, ReplicaSets, Pods exist
kube::test::get_object_assert deployments "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert replicasets "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# apply base deployment
kubectl apply -f hack/testdata/null-propagation/deployment-l1.yaml "${kube_flags[@]}"
# check right deployment exists
kube::test::get_object_assert 'deployments my-depl' "{{${id_field}}}" 'my-depl'
# check right labels exists
kube::test::get_object_assert 'deployments my-depl' "{{.spec.template.metadata.labels.l1}}" 'l1'
kube::test::get_object_assert 'deployments my-depl' "{{.spec.selector.matchLabels.l1}}" 'l1'
kube::test::get_object_assert 'deployments my-depl' "{{.metadata.labels.l1}}" 'l1'
# apply new deployment with new template labels
kubectl apply -f hack/testdata/null-propagation/deployment-l2.yaml "${kube_flags[@]}"
# check right labels exists
kube::test::get_object_assert 'deployments my-depl' "{{.spec.template.metadata.labels.l1}}" '<no value>'
kube::test::get_object_assert 'deployments my-depl' "{{.spec.selector.matchLabels.l1}}" '<no value>'
kube::test::get_object_assert 'deployments my-depl' "{{.metadata.labels.l1}}" '<no value>'
kube::test::get_object_assert 'deployments my-depl' "{{.spec.template.metadata.labels.l2}}" 'l2'
kube::test::get_object_assert 'deployments my-depl' "{{.spec.selector.matchLabels.l2}}" 'l2'
kube::test::get_object_assert 'deployments my-depl' "{{.metadata.labels.l2}}" 'l2'
# cleanup
# need to explicitly remove replicasets and pods because we changed the deployment selector and orphaned things
kubectl delete deployments,rs,pods --all --cascade=false --grace-period=0
# Post-Condition: no Deployments, ReplicaSets, Pods exist
kube::test::wait_object_assert deployments "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::wait_object_assert replicasets "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# kubectl apply deployment --overwrite=true --force=true
# Pre-Condition: no deployment exists
kube::test::get_object_assert deployments "{{range.items}}{{$id_field}}:{{end}}" ''
# apply deployment nginx
kubectl apply -f hack/testdata/deployment-label-change1.yaml "${kube_flags[@]}"
# check right deployment exists
kube::test::get_object_assert 'deployment nginx' "{{${id_field}}}" 'nginx'
# apply deployment with new labels and a conflicting resourceVersion
output_message=$(! kubectl apply -f hack/testdata/deployment-label-change2.yaml 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'Error from server (Conflict)'
# apply deployment with --force and --overwrite will succeed
kubectl apply -f hack/testdata/deployment-label-change2.yaml --overwrite=true --force=true --grace-period=10
# check the changed deployment
output_message=$(kubectl apply view-last-applied deploy/nginx -o json 2>&1 "${kube_flags[@]}" |grep nginx2)
kube::test::if_has_string "${output_message}" '"name": "nginx2"'
# applying a resource (with --force) that is both conflicting and invalid will
# cause the server to only return a "Conflict" error when we attempt to patch.
# This means that we will delete the existing resource after receiving 5 conflict
# errors in a row from the server, and will attempt to create the modified
# resource that we are passing to "apply". Since the modified resource is also
# invalid, we will receive an invalid error when we attempt to create it, after
# having deleted the old resource. Ensure that when this case is reached, the
# old resource is restored once again, and the validation error is printed.
output_message=$(! kubectl apply -f hack/testdata/deployment-label-change3.yaml --force 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'Invalid value'
# Ensure that the old object has been restored
kube::test::get_object_assert 'deployment nginx' "{{${template_labels}}}" 'nginx2'
# cleanup
kubectl delete deployments --all --grace-period=10
set +o nounset
set +o errexit
}
run_deployment_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing deployments"
# Test kubectl create deployment (using default - old generator)
kubectl create deployment test-nginx-extensions --image=k8s.gcr.io/nginx:test-cmd
# Post-Condition: Deployment "nginx" is created.
kube::test::get_object_assert 'deploy test-nginx-extensions' "{{$container_name_field}}" 'nginx'
# and old generator was used, iow. old defaults are applied
output_message=$(kubectl get deployment.apps/test-nginx-extensions -o jsonpath='{.spec.revisionHistoryLimit}')
kube::test::if_has_not_string "${output_message}" '2'
# Ensure we can interact with deployments through extensions and apps endpoints
output_message=$(kubectl get deployment.extensions -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'extensions/v1beta1'
output_message=$(kubectl get deployment.apps -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'apps/v1'
# Clean up
kubectl delete deployment test-nginx-extensions "${kube_flags[@]}"
# Test kubectl create deployment
kubectl create deployment test-nginx-apps --image=k8s.gcr.io/nginx:test-cmd --generator=deployment-basic/apps.v1beta1
# Post-Condition: Deployment "nginx" is created.
kube::test::get_object_assert 'deploy test-nginx-apps' "{{$container_name_field}}" 'nginx'
# and new generator was used, iow. new defaults are applied
output_message=$(kubectl get deployment/test-nginx-apps -o jsonpath='{.spec.revisionHistoryLimit}')
kube::test::if_has_string "${output_message}" '2'
# Ensure we can interact with deployments through extensions and apps endpoints
output_message=$(kubectl get deployment.extensions -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'extensions/v1beta1'
output_message=$(kubectl get deployment.apps -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'apps/v1'
# Describe command (resource only) should print detailed information
kube::test::describe_resource_assert rs "Name:" "Pod Template:" "Labels:" "Selector:" "Controlled By" "Replicas:" "Pods Status:" "Volumes:"
# Describe command (resource only) should print detailed information
kube::test::describe_resource_assert pods "Name:" "Image:" "Node:" "Labels:" "Status:" "Controlled By"
# Clean up
kubectl delete deployment test-nginx-apps "${kube_flags[@]}"
### Test kubectl create deployment with image and command
# Pre-Condition: No deployment exists.
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create deployment nginx-with-command --image=k8s.gcr.io/nginx:test-cmd -- /bin/sleep infinity
# Post-Condition: Deployment "nginx" is created.
kube::test::get_object_assert 'deploy nginx-with-command' "{{$container_name_field}}" 'nginx'
# Clean up
kubectl delete deployment nginx-with-command "${kube_flags[@]}"
### Test kubectl create deployment should not fail validation
# Pre-Condition: No deployment exists.
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f hack/testdata/deployment-with-UnixUserID.yaml "${kube_flags[@]}"
# Post-Condition: Deployment "deployment-with-unixuserid" is created.
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'deployment-with-unixuserid:'
# Clean up
kubectl delete deployment deployment-with-unixuserid "${kube_flags[@]}"
### Test cascading deletion
## Test that rs is deleted when deployment is deleted.
# Pre-condition: no deployment exists
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
# Create deployment
kubectl create -f test/fixtures/doc-yaml/user-guide/deployment.yaml "${kube_flags[@]}"
# Wait for rs to come up.
kube::test::wait_object_assert rs "{{range.items}}{{$rs_replicas_field}}{{end}}" '3'
# Deleting the deployment should delete the rs.
kubectl delete deployment nginx-deployment "${kube_flags[@]}"
kube::test::wait_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
## Test that rs is not deleted when deployment is deleted with cascade set to false.
# Pre-condition: no deployment and rs exist
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
# Create deployment
kubectl create deployment nginx-deployment --image=k8s.gcr.io/nginx:test-cmd
# Wait for rs to come up.
kube::test::wait_object_assert rs "{{range.items}}{{$rs_replicas_field}}{{end}}" '1'
# Delete the deployment with cascade set to false.
kubectl delete deployment nginx-deployment "${kube_flags[@]}" --cascade=false
# Wait for the deployment to be deleted and then verify that rs is not
# deleted.
kube::test::wait_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert rs "{{range.items}}{{$rs_replicas_field}}{{end}}" '1'
# Cleanup
# Find the name of the rs to be deleted.
output_message=$(kubectl get rs "${kube_flags[@]}" -o template --template={{range.items}}{{$id_field}}{{end}})
kubectl delete rs ${output_message} "${kube_flags[@]}"
### Auto scale deployment
# Pre-condition: no deployment exists
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f test/fixtures/doc-yaml/user-guide/deployment.yaml "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx-deployment:'
# autoscale 2~3 pods, no CPU utilization specified
kubectl-with-retry autoscale deployment nginx-deployment "${kube_flags[@]}" --min=2 --max=3
kube::test::get_object_assert 'hpa nginx-deployment' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '2 3 80'
# Clean up
# Note that we should delete hpa first, otherwise it may fight with the deployment reaper.
kubectl delete hpa nginx-deployment "${kube_flags[@]}"
kubectl delete deployment.apps nginx-deployment "${kube_flags[@]}"
### Rollback a deployment
# Pre-condition: no deployment exists
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
# Create a deployment (revision 1)
kubectl create -f hack/testdata/deployment-revision1.yaml "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx:'
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
# Rollback to revision 1 - should be no-op
kubectl rollout undo deployment nginx --to-revision=1 "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
# Update the deployment (revision 2)
kubectl apply -f hack/testdata/deployment-revision2.yaml "${kube_flags[@]}"
kube::test::get_object_assert deployment.apps "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
# Rollback to revision 1 with dry-run - should be no-op
kubectl rollout undo deployment nginx --dry-run=true "${kube_flags[@]}" | grep "test-cmd"
kube::test::get_object_assert deployment.apps "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
# Rollback to revision 1
kubectl rollout undo deployment nginx --to-revision=1 "${kube_flags[@]}"
sleep 1
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
# Rollback to revision 1000000 - should be no-op
! kubectl rollout undo deployment nginx --to-revision=1000000 "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
# Rollback to last revision
kubectl rollout undo deployment nginx "${kube_flags[@]}"
sleep 1
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
# Pause the deployment
kubectl-with-retry rollout pause deployment nginx "${kube_flags[@]}"
# A paused deployment cannot be rolled back
! kubectl rollout undo deployment nginx "${kube_flags[@]}"
# Resume the deployment
kubectl-with-retry rollout resume deployment nginx "${kube_flags[@]}"
# The resumed deployment can now be rolled back
kubectl rollout undo deployment nginx "${kube_flags[@]}"
# Check that the new replica set has all old revisions stored in an annotation
newrs="$(kubectl describe deployment nginx | grep NewReplicaSet | awk '{print $2}')"
kubectl get rs "${newrs}" -o yaml | grep "deployment.kubernetes.io/revision-history: 1,3"
# Check that trying to watch the status of a superseded revision returns an error
! kubectl rollout status deployment/nginx --revision=3
cat hack/testdata/deployment-revision1.yaml | ${SED} "s/name: nginx$/name: nginx2/" | kubectl create -f - "${kube_flags[@]}"
# Deletion of both deployments should not be blocked
kubectl delete deployment nginx2 "${kube_flags[@]}"
# Clean up
kubectl delete deployment nginx "${kube_flags[@]}"
### Set image of a deployment
# Pre-condition: no deployment exists
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
# Create a deployment
kubectl create -f hack/testdata/deployment-multicontainer.yaml "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx-deployment:'
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PERL}:"
# Set the deployment's image
kubectl set image deployment nginx-deployment nginx="${IMAGE_DEPLOYMENT_R2}" "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PERL}:"
# Set non-existing container should fail
! kubectl set image deployment nginx-deployment redis=redis "${kube_flags[@]}"
# Set image of deployments without specifying name
kubectl set image deployments --all nginx="${IMAGE_DEPLOYMENT_R1}" "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PERL}:"
# Set image of a deployment specified by file
kubectl set image -f hack/testdata/deployment-multicontainer.yaml nginx="${IMAGE_DEPLOYMENT_R2}" "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PERL}:"
# Set image of a local file without talking to the server
kubectl set image -f hack/testdata/deployment-multicontainer.yaml nginx="${IMAGE_DEPLOYMENT_R1}" "${kube_flags[@]}" --local -o yaml
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PERL}:"
# Set image of all containers of the deployment
kubectl set image deployment nginx-deployment "*"="${IMAGE_DEPLOYMENT_R1}" "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
# Set image of all containners of the deployment again when image not change
kubectl set image deployment nginx-deployment "*"="${IMAGE_DEPLOYMENT_R1}" "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
# Clean up
kubectl delete deployment nginx-deployment "${kube_flags[@]}"
### Set env of a deployment
# Pre-condition: no deployment exists
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
# Create a deployment
kubectl create -f hack/testdata/deployment-multicontainer.yaml "${kube_flags[@]}"
kubectl create -f hack/testdata/configmap.yaml "${kube_flags[@]}"
kubectl create -f hack/testdata/secret.yaml "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx-deployment:'
#configmap is special here due to controller will create kube-root-ca.crt for each namespace automatically
kube::test::get_object_assert 'configmaps/test-set-env-config' "{{$id_field}}" 'test-set-env-config'
kube::test::get_object_assert secret "{{range.items}}{{$id_field}}:{{end}}" 'test-set-env-secret:'
# Set env of deployments by configmap from keys
kubectl set env deployment nginx-deployment --keys=key-2 --from=configmap/test-set-env-config "${kube_flags[@]}"
# Assert correct value in deployment env
kube::test::get_object_assert 'deploy nginx-deployment' "{{ (index (index .spec.template.spec.containers 0).env 0).name}}" 'KEY_2'
# Assert single value in deployment env
kube::test::get_object_assert 'deploy nginx-deployment' "{{ len (index .spec.template.spec.containers 0).env }}" '1'
# Set env of deployments by configmap
kubectl set env deployment nginx-deployment --from=configmap/test-set-env-config "${kube_flags[@]}"
# Assert all values in deployment env
kube::test::get_object_assert 'deploy nginx-deployment' "{{ len (index .spec.template.spec.containers 0).env }}" '2'
# Set env of deployments for all container
kubectl set env deployment nginx-deployment env=prod "${kube_flags[@]}"
# Set env of deployments for specific container
kubectl set env deployment nginx-deployment superenv=superprod -c=nginx "${kube_flags[@]}"
# Set env of deployments by secret from keys
kubectl set env deployment nginx-deployment --keys=username --from=secret/test-set-env-secret "${kube_flags[@]}"
# Set env of deployments by secret
kubectl set env deployment nginx-deployment --from=secret/test-set-env-secret "${kube_flags[@]}"
# Remove specific env of deployment
kubectl set env deployment nginx-deployment env-
# Clean up
kubectl delete deployment nginx-deployment "${kube_flags[@]}"
kubectl delete configmap test-set-env-config "${kube_flags[@]}"
kubectl delete secret test-set-env-secret "${kube_flags[@]}"
set +o nounset
set +o errexit
}
run_statefulset_history_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing kubectl(v1:statefulsets, v1:controllerrevisions)"
### Test rolling back a StatefulSet
# Pre-condition: no statefulset or its pods exists
kube::test::get_object_assert statefulset "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
# Create a StatefulSet (revision 1)
kubectl apply -f hack/testdata/rollingupdate-statefulset.yaml --record "${kube_flags[@]}"
kube::test::wait_object_assert controllerrevisions "{{range.items}}{{$annotations_field}}:{{end}}" ".*rollingupdate-statefulset.yaml --record.*"
# Rollback to revision 1 - should be no-op
kubectl rollout undo statefulset --to-revision=1 "${kube_flags[@]}"
kube::test::get_object_assert statefulset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_STATEFULSET_R1}:"
kube::test::get_object_assert statefulset "{{range.items}}{{$container_len}}{{end}}" "1"
# Update the statefulset (revision 2)
kubectl apply -f hack/testdata/rollingupdate-statefulset-rv2.yaml --record "${kube_flags[@]}"
kube::test::wait_object_assert statefulset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_STATEFULSET_R2}:"
kube::test::wait_object_assert statefulset "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PAUSE_V2}:"
kube::test::get_object_assert statefulset "{{range.items}}{{$container_len}}{{end}}" "2"
kube::test::wait_object_assert controllerrevisions "{{range.items}}{{$annotations_field}}:{{end}}" ".*rollingupdate-statefulset-rv2.yaml --record.*"
# Rollback to revision 1 with dry-run - should be no-op
kubectl rollout undo statefulset --dry-run=true "${kube_flags[@]}"
kube::test::get_object_assert statefulset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_STATEFULSET_R2}:"
kube::test::get_object_assert statefulset "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PAUSE_V2}:"
kube::test::get_object_assert statefulset "{{range.items}}{{$container_len}}{{end}}" "2"
# Rollback to revision 1
kubectl rollout undo statefulset --to-revision=1 "${kube_flags[@]}"
kube::test::wait_object_assert statefulset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_STATEFULSET_R1}:"
kube::test::get_object_assert statefulset "{{range.items}}{{$container_len}}{{end}}" "1"
# Rollback to revision 1000000 - should fail
output_message=$(! kubectl rollout undo statefulset --to-revision=1000000 "${kube_flags[@]}" 2>&1)
kube::test::if_has_string "${output_message}" "unable to find specified revision"
kube::test::get_object_assert statefulset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_STATEFULSET_R1}:"
kube::test::get_object_assert statefulset "{{range.items}}{{$container_len}}{{end}}" "1"
# Rollback to last revision
kubectl rollout undo statefulset "${kube_flags[@]}"
kube::test::wait_object_assert statefulset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_STATEFULSET_R2}:"
kube::test::wait_object_assert statefulset "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PAUSE_V2}:"
kube::test::get_object_assert statefulset "{{range.items}}{{$container_len}}{{end}}" "2"
# Clean up - delete newest configuration
kubectl delete -f hack/testdata/rollingupdate-statefulset-rv2.yaml "${kube_flags[@]}"
# Post-condition: no pods from statefulset controller
wait-for-pods-with-label "app=nginx-statefulset" ""
set +o nounset
set +o errexit
}
run_stateful_set_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing kubectl(v1:statefulsets)"
### Create and stop statefulset, make sure it doesn't leak pods
# Pre-condition: no statefulset exists
kube::test::get_object_assert statefulset "{{range.items}}{{$id_field}}:{{end}}" ''
# Command: create statefulset
kubectl create -f hack/testdata/rollingupdate-statefulset.yaml "${kube_flags[@]}"
### Scale statefulset test with current-replicas and replicas
# Pre-condition: 0 replicas
kube::test::get_object_assert 'statefulset nginx' "{{$statefulset_replicas_field}}" '0'
kube::test::wait_object_assert 'statefulset nginx' "{{$statefulset_observed_generation}}" '1'
# Command: Scale up
kubectl scale --current-replicas=0 --replicas=1 statefulset nginx "${kube_flags[@]}"
# Post-condition: 1 replica, named nginx-0
kube::test::get_object_assert 'statefulset nginx' "{{$statefulset_replicas_field}}" '1'
kube::test::wait_object_assert 'statefulset nginx' "{{$statefulset_observed_generation}}" '2'
# Typically we'd wait and confirm that N>1 replicas are up, but this framework
# doesn't start the scheduler, so pet-0 will block all others.
# TODO: test robust scaling in an e2e.
wait-for-pods-with-label "app=nginx-statefulset" "nginx-0"
### Clean up
kubectl delete -f hack/testdata/rollingupdate-statefulset.yaml "${kube_flags[@]}"
# Post-condition: no pods from statefulset controller
wait-for-pods-with-label "app=nginx-statefulset" ""
set +o nounset
set +o errexit
}
run_rs_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing kubectl(v1:replicasets)"
### Create and stop a replica set, make sure it doesn't leak pods
# Pre-condition: no replica set exists
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}"
kube::log::status "Deleting rs"
kubectl delete rs frontend "${kube_flags[@]}"
# Post-condition: no pods from frontend replica set
kube::test::wait_object_assert 'pods -l "tier=frontend"' "{{range.items}}{{$id_field}}:{{end}}" ''
### Create and then delete a replica set with cascade=false, make sure it doesn't delete pods.
# Pre-condition: no replica set exists
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
#TODO(mortent): Remove this workaround when ReplicaSet bug described in issue #69376 is fixed
local replicaset_name="frontend-no-cascade"
sed -r 's/^(\s*)(name\s*:\s*frontend\s*$)/\1name: '"${replicaset_name}"'/' hack/testdata/frontend-replicaset.yaml | kubectl create "${kube_flags[@]}" -f -
# wait for all 3 pods to be set up
kube::test::wait_object_assert 'pods -l "tier=frontend"' "{{range.items}}{{$pod_container_name_field}}:{{end}}" 'php-redis:php-redis:php-redis:'
kube::log::status "Deleting rs"
kubectl delete rs "${replicaset_name}" "${kube_flags[@]}" --cascade=false
# Wait for the rs to be deleted.
kube::test::wait_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
# Post-condition: All 3 pods still remain from frontend replica set
kube::test::get_object_assert 'pods -l "tier=frontend"' "{{range.items}}{{$pod_container_name_field}}:{{end}}" 'php-redis:php-redis:php-redis:'
# Cleanup
kubectl delete pods -l "tier=frontend" "${kube_flags[@]}"
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
### Create replica set frontend from YAML
# Pre-condition: no replica set exists
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}"
# Post-condition: frontend replica set is created
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
# Describe command should print detailed information
kube::test::describe_object_assert rs 'frontend' "Name:" "Pod Template:" "Labels:" "Selector:" "Replicas:" "Pods Status:" "Volumes:"
# Describe command should print events information by default
kube::test::describe_object_events_assert rs 'frontend'
# Describe command should not print events information when show-events=false
kube::test::describe_object_events_assert rs 'frontend' false
# Describe command should print events information when show-events=true
kube::test::describe_object_events_assert rs 'frontend' true
# Describe command (resource only) should print detailed information
kube::test::describe_resource_assert rs "Name:" "Pod Template:" "Labels:" "Selector:" "Replicas:" "Pods Status:" "Volumes:"
# Describe command should print events information by default
kube::test::describe_resource_events_assert rs
# Describe command should not print events information when show-events=false
kube::test::describe_resource_events_assert rs false
# Describe command should print events information when show-events=true
kube::test::describe_resource_events_assert rs true
# Describe command (resource only) should print detailed information
kube::test::describe_resource_assert pods "Name:" "Image:" "Node:" "Labels:" "Status:" "Controlled By"
### Scale replica set frontend with current-replicas and replicas
# Pre-condition: 3 replicas
kube::test::get_object_assert 'rs frontend' "{{$rs_replicas_field}}" '3'
# Command
kubectl scale --current-replicas=3 --replicas=2 replicasets frontend "${kube_flags[@]}"
# Post-condition: 2 replicas
kube::test::get_object_assert 'rs frontend' "{{$rs_replicas_field}}" '2'
# Set up three deploy, two deploy have same label
kubectl create -f hack/testdata/scale-deploy-1.yaml "${kube_flags[@]}"
kubectl create -f hack/testdata/scale-deploy-2.yaml "${kube_flags[@]}"
kubectl create -f hack/testdata/scale-deploy-3.yaml "${kube_flags[@]}"
kube::test::get_object_assert 'deploy scale-1' "{{.spec.replicas}}" '1'
kube::test::get_object_assert 'deploy scale-2' "{{.spec.replicas}}" '1'
kube::test::get_object_assert 'deploy scale-3' "{{.spec.replicas}}" '1'
# Test kubectl scale --selector
kubectl scale deploy --replicas=2 -l run=hello
kube::test::get_object_assert 'deploy scale-1' "{{.spec.replicas}}" '2'
kube::test::get_object_assert 'deploy scale-2' "{{.spec.replicas}}" '2'
kube::test::get_object_assert 'deploy scale-3' "{{.spec.replicas}}" '1'
# Test kubectl scale --all
kubectl scale deploy --replicas=3 --all
kube::test::get_object_assert 'deploy scale-1' "{{.spec.replicas}}" '3'
kube::test::get_object_assert 'deploy scale-2' "{{.spec.replicas}}" '3'
kube::test::get_object_assert 'deploy scale-3' "{{.spec.replicas}}" '3'
# Clean-up
kubectl delete rs frontend "${kube_flags[@]}"
kubectl delete deploy scale-1 scale-2 scale-3 "${kube_flags[@]}"
### Expose replica set as service
kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}"
# Pre-condition: 3 replicas
kube::test::get_object_assert 'rs frontend' "{{$rs_replicas_field}}" '3'
# Command
kubectl expose rs frontend --port=80 "${kube_flags[@]}"
# Post-condition: service exists and the port is unnamed
kube::test::get_object_assert 'service frontend' "{{$port_name}} {{$port_field}}" '<no value> 80'
# Create a service using service/v1 generator
kubectl expose rs frontend --port=80 --name=frontend-2 --generator=service/v1 "${kube_flags[@]}"
# Post-condition: service exists and the port is named default.
kube::test::get_object_assert 'service frontend-2' "{{$port_name}} {{$port_field}}" 'default 80'
# Cleanup services
kubectl delete service frontend{,-2} "${kube_flags[@]}"
# Test set commands
# Pre-condition: frontend replica set exists at generation 1
kube::test::get_object_assert 'rs frontend' "{{${generation_field}}}" '1'
kubectl set image rs/frontend "${kube_flags[@]}" *=k8s.gcr.io/pause:test-cmd
kube::test::get_object_assert 'rs frontend' "{{${generation_field}}}" '2'
kubectl set env rs/frontend "${kube_flags[@]}" foo=bar
kube::test::get_object_assert 'rs frontend' "{{${generation_field}}}" '3'
kubectl set resources rs/frontend "${kube_flags[@]}" --limits=cpu=200m,memory=512Mi
kube::test::get_object_assert 'rs frontend' "{{${generation_field}}}" '4'
### Delete replica set with id
# Pre-condition: frontend replica set exists
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
# Command
kubectl delete rs frontend "${kube_flags[@]}"
# Post-condition: no replica set exists
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
### Create two replica sets
# Pre-condition: no replica set exists
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}"
kubectl create -f hack/testdata/redis-slave-replicaset.yaml "${kube_flags[@]}"
# Post-condition: frontend and redis-slave
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" 'frontend:redis-slave:'
### Delete multiple replica sets at once
# Pre-condition: frontend and redis-slave
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" 'frontend:redis-slave:'
# Command
kubectl delete rs frontend redis-slave "${kube_flags[@]}" # delete multiple replica sets at once
# Post-condition: no replica set exists
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
if kube::test::if_supports_resource "${horizontalpodautoscalers}" ; then
### Auto scale replica set
# Pre-condition: no replica set exists
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}"
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
# autoscale 1~2 pods, CPU utilization 70%, replica set specified by file
kubectl autoscale -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}" --max=2 --cpu-percent=70
kube::test::get_object_assert 'hpa frontend' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '1 2 70'
kubectl delete hpa frontend "${kube_flags[@]}"
# autoscale 2~3 pods, no CPU utilization specified, replica set specified by name
kubectl autoscale rs frontend "${kube_flags[@]}" --min=2 --max=3
kube::test::get_object_assert 'hpa frontend' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '2 3 80'
kubectl delete hpa frontend "${kube_flags[@]}"
# autoscale without specifying --max should fail
! kubectl autoscale rs frontend "${kube_flags[@]}"
# Clean up
kubectl delete rs frontend "${kube_flags[@]}"
fi
set +o nounset
set +o errexit
}
|
Stackdriver/heapster
|
vendor/k8s.io/kubernetes/test/cmd/apps.sh
|
Shell
|
apache-2.0
| 38,841 |
#!/usr/bin/env bash
${IDRIS:-idris} $@ defaultLogger.idr -o default -p effects
./default
${IDRIS:-idris} $@ categoryLogger.idr -o category -p effects
./category
rm -f default category *.ibc
|
kojiromike/Idris-dev
|
test/effects005/run.sh
|
Shell
|
bsd-3-clause
| 190 |
#!/bin/sh
#
# inventory.sh
# $Id: inventory.sh,v 1.6 2003/11/21 12:48:56 djm Exp $
#
# Originally written by Ben Lindstrom, modified by Darren Tucker to use perl
# This file is placed into the public domain.
#
# This will produce an AIX package inventory file, which looks like:
#
# /usr/local/bin:
# class=apply,inventory,openssh
# owner=root
# group=system
# mode=755
# type=DIRECTORY
# /usr/local/bin/slogin:
# class=apply,inventory,openssh
# owner=root
# group=system
# mode=777
# type=SYMLINK
# target=ssh
# /usr/local/share/Ssh.bin:
# class=apply,inventory,openssh
# owner=root
# group=system
# mode=644
# type=FILE
# size=VOLATILE
# checksum=VOLATILE
find . ! -name . -print | perl -ne '{
chomp;
if ( -l $_ ) {
($dev,$ino,$mod,$nl,$uid,$gid,$rdev,$sz,$at,$mt,$ct,$bsz,$blk)=lstat;
} else {
($dev,$ino,$mod,$nl,$uid,$gid,$rdev,$sz,$at,$mt,$ct,$bsz,$blk)=stat;
}
# Start to display inventory information
$name = $_;
$name =~ s|^.||; # Strip leading dot from path
print "$name:\n";
print "\tclass=apply,inventory,openssh\n";
print "\towner=root\n";
print "\tgroup=system\n";
printf "\tmode=%lo\n", $mod & 07777; # Mask perm bits
if ( -l $_ ) {
# Entry is SymLink
print "\ttype=SYMLINK\n";
printf "\ttarget=%s\n", readlink($_);
} elsif ( -f $_ ) {
# Entry is File
print "\ttype=FILE\n";
print "\tsize=$sz\n";
print "\tchecksum=VOLATILE\n";
} elsif ( -d $_ ) {
# Entry is Directory
print "\ttype=DIRECTORY\n";
}
}'
|
linusyang/barrelfish
|
usr/openssh/src/contrib/aix/inventory.sh
|
Shell
|
mit
| 1,621 |
#!/bin/sh
# FILE: wordpress2blogger.sh
# PURPOSE: Shell script for executing the command-line use of the Wordpress
# to Blogger conversion
# REQUIRES: Python installed and executable in the PATH list
#
# USAGE: wordpress2blogger.sh <wordpress_export_file>
#
# AUTHOR: JJ Lueck ([email protected])
PROJ_DIR=`dirname $0`/..
PYTHONPATH=${PROJ_DIR}/lib python ${PROJ_DIR}/src/wordpress2blogger/wp2b.py $*
|
mirastu/google-blog-converters-appengine
|
bin/wordpress2blogger.sh
|
Shell
|
apache-2.0
| 422 |
#!/bin/bash
# File: runtestset.sh
# Description: Script to run tesseract on a single UNLV set.
# Author: Ray Smith
# Created: Wed Jun 13 10:13:01 PDT 2007
#
# (C) Copyright 2007, Google Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
if [ $# -ne 1 -a $# -ne 2 ]
then
echo "Usage:$0 pagesfile [-zoning]"
exit 1
fi
if [ ! -d api ]
then
echo "Run $0 from the tesseract-ocr root directory!"
exit 1
fi
if [ ! -r api/tesseract ]
then
if [ ! -r tesseract.exe ]
then
echo "Please build tesseract before running $0"
exit 1
else
tess="./tesseract.exe"
fi
else
tess="time -f %U -o times.txt api/tesseract"
export TESSDATA_PREFIX=$PWD/
fi
pages=$1
imdir=${pages%/pages}
setname=${imdir##*/}
if [ $# -eq 2 -a "$2" = "-zoning" ]
then
config=unlv.auto
resdir=testing/results/zoning.$setname
else
config=unlv
resdir=testing/results/$setname
fi
echo -e "Testing on set $setname in directory $imdir to $resdir\n"
mkdir -p $resdir
rm -f testing/reports/$setname.times
while read page dir
do
# A pages file may be a list of files with subdirs or maybe just
# a plain list of files so accomodate both.
if [ "$dir" ]
then
srcdir="$imdir/$dir"
else
srcdir="$imdir"
fi
# echo "$srcdir/$page.tif"
$tess $srcdir/$page.tif $resdir/$page -psm 6 $config 2>&1 |grep -v "OCR Engine"
if [ -r times.txt ]
then
read t <times.txt
echo "$page $t" >>testing/reports/$setname.times
echo -e "\033M$page $t"
if [ "$t" = "Command terminated by signal 2" ]
then
exit 0
fi
fi
done <$pages
|
malcolmgreaves/tesseract-ocr
|
testing/runtestset.sh
|
Shell
|
apache-2.0
| 2,058 |
#!/bin/bash
# Copyright 2014 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Create the overlay files for the salt tree. We create these in a separate
# place so that we can blow away the rest of the salt configs on a kube-push and
# re-apply these.
mkdir -p /srv/salt-overlay/pillar
cat <<EOF >/srv/salt-overlay/pillar/cluster-params.sls
instance_prefix: '$(echo "$INSTANCE_PREFIX" | sed -e "s/'/''/g")'
node_instance_prefix: $NODE_INSTANCE_PREFIX
service_cluster_ip_range: $SERVICE_CLUSTER_IP_RANGE
enable_cluster_monitoring: $ENABLE_CLUSTER_MONITORING
enable_cluster_logging: $ENABLE_CLUSTER_LOGGING
enable_cluster_ui: $ENABLE_CLUSTER_UI
enable_node_logging: $ENABLE_NODE_LOGGING
logging_destination: $LOGGING_DESTINATION
elasticsearch_replicas: $ELASTICSEARCH_LOGGING_REPLICAS
enable_cluster_dns: $ENABLE_CLUSTER_DNS
dns_replicas: $DNS_REPLICAS
dns_server: $DNS_SERVER_IP
dns_domain: $DNS_DOMAIN
e2e_storage_test_environment: $E2E_STORAGE_TEST_ENVIRONMENT
EOF
mkdir -p /srv/salt-overlay/salt/nginx
echo $MASTER_HTPASSWD > /srv/salt-overlay/salt/nginx/htpasswd
|
rwehner/kubernetes
|
cluster/vsphere/templates/create-dynamic-salt-files.sh
|
Shell
|
apache-2.0
| 1,614 |
SCRIPT_NAME=aout
OUTPUT_FORMAT="a.out-sunos-big"
TEXT_START_ADDR=0x2020
TARGET_PAGE_SIZE=0x2000
ALIGNMENT=8
ARCH=sparc
TEMPLATE_NAME=sunos
|
jlspyaozhongkai/Uter
|
third_party_backup/binutils-2.25/ld/emulparams/sun4.sh
|
Shell
|
gpl-3.0
| 139 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for USN-2671-1
#
# Security announcement date: 2015-07-09 00:00:00 UTC
# Script generation date: 2017-01-01 21:04:40 UTC
#
# Operating System: Ubuntu 14.04 LTS
# Architecture: x86_64
#
# Vulnerable packages fix on version:
# - python-django:1.6.1-2ubuntu0.9
#
# Last versions recommanded by security team:
# - python-django:1.6.1-2ubuntu0.16
#
# CVE List:
# - CVE-2015-5143
# - CVE-2015-5144
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo apt-get install --only-upgrade python-django=1.6.1-2ubuntu0.16 -y
|
Cyberwatch/cbw-security-fixes
|
Ubuntu_14.04_LTS/x86_64/2015/USN-2671-1.sh
|
Shell
|
mit
| 659 |
#!/bin/bash
docker run -d --net host --restart on-failure:3 -v /opt/todomvc:/var/www/html -p 80:80 nginx
|
wscoble/docker-520-meetup
|
frontend/scripts/start.sh
|
Shell
|
mit
| 105 |
#!/bin/bash
set -o errexit
set -o nounset
. libbash.sh
show_releases ()
{
curl -s https://ftp.gnu.org/gnu/emacs/ |
egrep -o '\.tar\.(gz|xz)">emacs-.+</a>' |
egrep -o 'emacs-[0-9]+\.[0-9]' |
uniq |
sed 's/emacs-//'
}
ensure_dependencies ()
{
# libm17n-dev
# libmotif-dev
# --with-x-toolkit=motif
# --with-x-toolkit=gtk2
# libgtk2.0-dev
test -f /etc/debian_version &&
sudo apt-get install \
autoconf \
automake \
build-essential \
libdbus-1-dev \
libgif-dev \
libgnutls28-dev \
libjpeg-dev \
libncurses5-dev \
libotf-dev \
libpng-dev \
librsvg2-dev \
libtiff-dev \
libtool \
libxaw7-dev \
libxml2-dev \
libxpm-dev \
texinfo \
xaw3dg-dev \
xorg-dev
}
unpack ()
{
local _version="${1:-$(show_releases | tail -n 1)}"
cd $(mktemp -d)
curl -L "https://ftp.gnu.org/gnu/emacs/emacs-${_version}.tar.xz" |
tar xJf /dev/stdin
cd "emacs-${_version}"
echo "$PWD"
}
configure ()
{
# ./autogen.sh
./configure \
--enable-link-time-optimization \
--prefix="${HOME}/opt/emacs" \
--with-mailutils \
--with-x-toolkit=lucid
}
compile ()
{
# make -j $(nproc) bootstrap
make -j $(nproc)
}
install ()
{
make install
cd "${HOME}/bin"
ln -s ../opt/emacs/bin/emacs ./
ln -s ../opt/emacs/bin/emacsclient ./
}
entrypoint ()
{
local _command="${1:-}"
shift 1
case "$_command" in
unpack)
unpack "$@"
;;
configure)
configure
;;
compile)
compile
;;
install)
install
;;
all)
unpack
configure
compile
install
;;
show-releases)
show_releases
;;
*)
print_usage
;;
esac
}
entrypoint "$@"
|
grafoo/utl
|
emacs-setup.sh
|
Shell
|
mit
| 2,147 |
wget http://www.fatihakkus.com/yazane/skinimages/button_10.png
wget http://www.fatihakkus.com/yazane/skinimages/button_10__o.png
wget http://www.fatihakkus.com/yazane/skinimages/button_11.png
wget http://www.fatihakkus.com/yazane/skinimages/button_12.png
wget http://www.fatihakkus.com/yazane/skinimages/button_16.png
wget http://www.fatihakkus.com/yazane/skinimages/button_16__o.png
wget http://www.fatihakkus.com/yazane/skinimages/button_2.png
wget http://www.fatihakkus.com/yazane/skinimages/button_21.png
wget http://www.fatihakkus.com/yazane/skinimages/button_4.png
wget http://www.fatihakkus.com/yazane/skinimages/button_5.png
wget http://www.fatihakkus.com/yazane/skinimages/button_6.png
wget http://www.fatihakkus.com/yazane/skinimages/button_7.png
wget http://www.fatihakkus.com/yazane/skinimages/button_8.png
wget http://www.fatihakkus.com/yazane/skinimages/button_9.png
wget http://www.fatihakkus.com/yazane/skinimages/image_1.png
wget http://www.fatihakkus.com/yazane/skinimages/image_13.png
wget http://www.fatihakkus.com/yazane/skinimages/image_17.png
wget http://www.fatihakkus.com/yazane/skinimages/image_19.png
wget http://www.fatihakkus.com/yazane/skinimages/image_3.png
|
ldanielswakman/yazane
|
assets/pano2vr/skinimages/download.sh
|
Shell
|
mit
| 1,189 |
#!/bin/bash
command_exists () {
command -v "$1" >/dev/null 2>&1 ;
}
gem_name="flowdock"
if command_exists $gem_name ; then
echo " (i) $gem_name already installed"
exit 0
else
echo " (i) $gem_name NOT yet installed, attempting install..."
fi
STARTTIME=$(date +%s)
which_ruby="$(which ruby)"
osx_system_ruby_pth="/usr/bin/ruby"
brew_ruby_pth="/usr/local/bin/ruby"
echo
echo " (i) Which ruby: $which_ruby"
echo " (i) Ruby version: $(ruby --version)"
echo
set -e
if [[ "$which_ruby" == "$osx_system_ruby_pth" ]] ; then
echo " -> using system ruby - requires sudo"
echo '$' sudo gem install ${gem_name} --no-document
sudo gem install ${gem_name} --no-document
elif [[ "$which_ruby" == "$brew_ruby_pth" ]] ; then
echo " -> using brew ($brew_ruby_pth) ruby"
echo '$' gem install ${gem_name} --no-document
gem install ${gem_name} --no-document
elif command_exists rvm ; then
echo " -> installing with RVM"
echo '$' gem install ${gem_name} --no-document
gem install ${gem_name} --no-document
elif command_exists rbenv ; then
echo " -> installing with rbenv"
echo '$' gem install ${gem_name} --no-document
gem install ${gem_name} --no-document
echo '$' rbenv rehash
rbenv rehash
else
echo " [!] Failed to install: no ruby is available!"
exit 1
fi
ENDTIME=$(date +%s)
echo
echo " (i) Setup took $(($ENDTIME - $STARTTIME)) seconds to complete"
echo
|
scottrhoyt/bitrise-flowdock-step
|
_setup.sh
|
Shell
|
mit
| 1,366 |
#!/bin/bash
{ [ -f .build/init.sh ] && . .build/init.sh; } || true;
{ [ -f ../.build/init.sh ] && . ../.build/init.sh; } || true;
buildreport || exit
builddocker_init_ver asterisk
vers=${1:-"asterisk full"}
builddocker_vers $vers
|
wenerme/dockerfiles
|
asterisk/build.sh
|
Shell
|
mit
| 231 |
#!/bin/bash
set -o xtrace
scripts/bootstrap-roles.sh
scripts/bootstrap-inventory.sh
scripts/bootstrap-bamboo.sh
|
pantarei/ansible-playbook-bamboo
|
scripts/bootstrap-aio.sh
|
Shell
|
mit
| 114 |
nohup java -Dconfig_path=../config/ -Xms128m -Xmx512m -jar ../saas-price-consumer.jar >/dev/null &
|
liyong299/normal
|
java/other/saas-price/consumer/src/main/startup.sh
|
Shell
|
mit
| 98 |
#!/bin/sh
set -eo pipefail -o nounset
wget --quiet -O blekhman_ad.tsv https://raw.githubusercontent.com/macarthur-lab/gene_lists/master/lists/blekhman_ad.tsv
genome=https://raw.githubusercontent.com/gogetdata/ggd-recipes/master/genomes/Homo_sapiens/GRCh38/GRCh38.genome
grch38_gtf="$(ggd get-files grch38-gene-features-ensembl-v1 -p 'grch38-gene-features-ensembl-v1.gtf.gz')"
cat << EOF > parse_gtf_by_gene.py
"""
Get a list of genome coordinates for a list of ad genes
"""
import sys
import io
import gzip
gtf_file = sys.argv[1] ## A gtf file with CDS features
ad_gene_file = sys.argv[2] ## A single column tsv file for ad genes
outfile = sys.argv[3] ## File to write to
## Get a set of gene symbols
ad_gene_set = {}
with io.open(ad_gene_file, "rt", encoding = "utf-8") as ad:
ad_gene_set = set(x.strip() for x in ad)
## Parse the gtf file
fh = gzip.open(gtf_file, "rt", encoding = "utf-8") if gtf_file.endswith(".gz") else io.open(ad_gene_file, "rt", encoding = "utf-8")
ad_gene_dict = dict()
header = []
for line in fh:
if line[0] == "#":
header = line.strip().split("\t")
continue
line_dict = dict(zip(header,line.strip().split("\t")))
line_dict.update({x.strip().replace("\"","").split(" ")[0]:x.strip().replace("\"","").split(" ")[1] for x in line_dict["attribute"].strip().split(";")[:-1]})
## If the current gene is in the ad gene set
if line_dict["gene_name"] in ad_gene_set:
if line_dict["gene_name"] not in ad_gene_dict:
ad_gene_dict[line_dict["gene_name"]] = []
## If CDS or stop_codon feature, add feature info to ad_gene_dict
if line_dict["feature"] == "CDS" or line_dict["feature"] == "stop_codon":
## Change 1 based start to zero based start
ad_gene_dict[line_dict["gene_name"]].append([str(line_dict["#chrom"]),
str(int(line_dict["start"]) - 1),
str(line_dict["end"]),
str(line_dict["strand"]),
str(line_dict["gene_id"]),
str(line_dict["gene_name"]),
str(line_dict["transcript_id"]),
str(line_dict["gene_biotype"])
])
fh.close()
## Write dict out
with open(outfile, "w") as o:
for gene, coor in ad_gene_dict.items():
for line in coor:
o.write("\t".join(line) + "\n")
EOF
python parse_gtf_by_gene.py $grch38_gtf blekhman_ad.tsv unflattened_ad_genes.bed
cat << EOF > sort_columns.py
"""
sort the transcript id column
sort and get a unique list of the gene column
"""
import sys
for line in sys.stdin.readlines():
line_list = line.strip().split("\t")
## Sort column 4 - 8 and get a uniqe list
line_list[3] = ",".join(sorted(list(set(line_list[3].strip().split(",")))))
line_list[4] = ",".join(sorted(list(set(line_list[4].strip().split(",")))))
line_list[5] = ",".join(sorted(list(set(line_list[5].strip().split(",")))))
line_list[6] = ",".join(sorted(list(set(line_list[6].strip().split(",")))))
line_list[7] = ",".join(sorted(list(set(line_list[7].strip().split(",")))))
## Print to stdout
print("\t".join(line_list))
EOF
## Merge and sort ad genes with coordinates
gsort unflattened_ad_genes.bed $genome \
| bedtools merge -i - -c 4,5,6,7,8 -o collapse \
| awk -v OFS="\t" 'BEGIN { print "#chrom\tstart\tend\tstrand\tgene_ids\tgene_symbols\ttranscript_ids\tgene_biotypes" } {print $0}' \
| python sort_columns.py \
| gsort /dev/stdin $genome \
| bgzip -c > grch38-autosomal-dominant-genes-blekhman-v1.bed.gz
tabix grch38-autosomal-dominant-genes-blekhman-v1.bed.gz
wget --quiet https://raw.githubusercontent.com/gogetdata/ggd-recipes/master/genomes/Homo_sapiens/GRCh38/GRCh38.genome
## Get ad gene complement coordinates
sed "1d" GRCh38.genome \
| bedtools complement -i <(zgrep -v "#" grch38-autosomal-dominant-genes-blekhman-v1.bed.gz) -g /dev/stdin \
| gsort /dev/stdin $genome \
| awk -v OFS="\t" 'BEGIN {print "#chrom\tstart\tend"} {print $1,$2,$3}' \
| bgzip -c > grch38-autosomal-dominant-genes-blekhman-v1.compliment.bed.gz
tabix grch38-autosomal-dominant-genes-blekhman-v1.compliment.bed.gz
rm GRCh38.genome
rm blekhman_ad.tsv
rm unflattened_ad_genes.bed
rm parse_gtf_by_gene.py
rm sort_columns.py
|
gogetdata/ggd-recipes
|
recipes/genomics/Homo_sapiens/GRCh38/grch38-autosomal-dominant-genes-blekhman-v1/recipe.sh
|
Shell
|
mit
| 4,633 |
#!/usr/bin/env bash
# -----------------------------------------------------------------------------
# PURPOSE
# This script will setup eris and all of its dependencies. It is primarily meant
# for running on cloud providers as a setup script.
# Specifically the script will install:
# * nodejs (useful for middleware)
# * go+git (useful for quick updates of the eris tool)
# * eris
# The script assumes that it will be ran by a root user or a user with sudo
# privileges on the node. Note that it does not currently check that it has
# elevate privileges on the node.
# Note that the script, by default, will **not** install Docker which is a
# **required** dependency for Eris. If, however, the environment variable
# $INSTALL_DOCKER is not blank, then the script will install docker via the
# easy docker installation. If this makes you paranoid then you should
# manually install docker **before** running this script.
# Note that the script also assumes that the user will be a bash user.
# -----------------------------------------------------------------------------
# LICENSE
# The MIT License (MIT)
# Copyright (c) 2016-Present Eris Industries, Ltd.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
# -----------------------------------------------------------------------------
# REQUIREMENTS
# Ubuntu
# Docker (**unless** INSTALL_DOCKER is not blank)
# -----------------------------------------------------------------------------
# USAGE
# setup.sh [USER] [SERVICESTOSTART] [CHAINSTOSTART]
# -----------------------------------------------------------------------------
# Set defaults
erisUser=$1
if [[ "$erisUser" == "" ]]
then
erisUser=$USER
fi
if [[ "$erisUser" == "root" ]]
then
userHome=/root
else
userHome=/home/$erisUser
fi
services=( $(echo $2 | tr "," "\n") )
chains=( $(echo $3 | tr "," "\n") )
toStart=( "${services[@]}" "${chains[@]}" )
# -----------------------------------------------------------------------------
# Defaults
GOVERSION="1.5"
NODEVERSION="4"
# -----------------------------------------------------------------------------
# Install dependencies
echo "Hello there! I'm the marmot that installs Eris."
echo
echo
echo "Grabbing necessary dependencies"
export DEBIAN_FRONTEND=noninteractive
curl -sSL https://deb.nodesource.com/setup_"$NODEVERSION".x | sudo -E bash - &>/dev/null
sudo apt-get install -y jq gcc git build-essential nodejs &>/dev/null
curl -sSL https://storage.googleapis.com/golang/go"$GOVERSION".linux-amd64.tar.gz | sudo tar -C /usr/local -xzf - &>/dev/null
if [ -n "$INSTALL_DOCKER" ]
then
curl -sSL https://get.docker.com/ | sudo -E bash - &>/dev/null
fi
sudo usermod -a -G docker $erisUser &>/dev/null
echo "Dependencies Installed."
echo
echo
# -----------------------------------------------------------------------------
# Getting chains
echo "Getting Chain managers"
curl -sSL -o $userHome/simplechain.sh https://raw.githubusercontent.com/eris-ltd/common/master/cloud/chains/simplechain.sh
chmod +x $userHome/*.sh
chown $erisUser:$erisUser $userHome/*.sh
echo "Chain managers acquired."
echo
echo
# -----------------------------------------------------------------------------
# Install eris
sudo -u "$erisUser" -i env START="`printf ",%s" "${toStart[@]}"`" bash <<'EOF'
start=( $(echo $START | tr "," "\n") )
echo "Setting up Go for the user"
mkdir --parents $HOME/go
export GOPATH=$HOME/go
export PATH=$HOME/go/bin:/usr/local/go/bin:$PATH
echo "export GOROOT=/usr/local/go" >> $HOME/.bashrc
echo "export GOPATH=$HOME/go" >> $HOME/.bashrc
echo "export PATH=$HOME/go/bin:/usr/local/go/bin:$PATH" >> $HOME/.bashrc
echo "Finished Setting up Go."
echo
echo
echo "Version Information"
echo
go version
echo
docker version
echo
echo
echo "Building eris."
go get github.com/eris-ltd/eris-cli/cmd/eris
echo
echo
echo "Initializing eris."
export ERIS_PULL_APPROVE="true"
export ERIS_MIGRATE_APPROVE="true"
echo "export ERIS_PULL_APPROVE=\"true\"" >> $HOME/.bashrc
echo "export ERIS_MIGRATE_APPROVE=\"true\"" >> $HOME/.bashrc
eris init --yes 2>/dev/null
echo
echo
echo "Starting Services and Chains: ${start[@]}"
echo
if [ ${#start[@]} -eq 0 ]
then
echo "No services or chains selected"
else
for x in "${start[@]}"
do
if [ -f "$HOME/$x".sh ]
then
echo "Turning on Chain: $x"
$HOME/$x.sh
else
echo "Turning on Service: $x"
eris services start $x
fi
done
fi
EOF
echo
echo "Finished starting services and chains."
# -------------------------------------------------------------------------------
# Cleanup
rm $userHome/*.sh
echo
echo
echo "Eris Installed!"
|
AFDudley/afd-common
|
cloud/setup/setup.sh
|
Shell
|
mit
| 5,596 |
#!/bin/bash
echo ' '
echo 'Installer of REXYGEN runtime components'
echo ' for the Raspberry Pi minicomputer '
echo '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
echo ' '
ROOT=$(dirname $(readlink -f $0))
if [ "$EUID" -ne 0 ]
then echo "Please run this script as root"
exit
fi
# Install core modules
bash $ROOT/script-core
# Final touch
bash $ROOT/script-plain
# Done
|
rexcontrols/rex-install-rpi
|
install-rex.sh
|
Shell
|
mit
| 377 |
#!/bin/bash
cd "$( dirname "$0" )"
cd ../
./cleanup.sh
./setup_env.sh
cd ../src/
python setup_module.py bdist_egg
mv dist/module-0.1-py2.7.egg module.egg
spark-submit --master yarn-cluster \
--num-executors 9 \
--driver-memory 7g \
--executor-memory 7g \
--executor-cores 3 \
--py-files module.egg \
glm_parser.py -s 8 --hadoop config/default.config
spark-submit --master yarn-cluster \
--num-executors 9 \
--driver-memory 7g \
--executor-memory 7g \
--executor-cores 3 \
--py-files module.egg \
glm_parser.py -s 8 --hadoop --learner=perceptron config/default.config
spark-submit --master yarn-cluster \
--num-executors 9 \
--driver-memory 7g \
--executor-memory 7g \
--executor-cores 3 \
--py-files module.egg \
glm_parser.py -s 8 --hadoop --tagger-w-vector=Daten/pos-tagger-vector/fv_Iter_5.db config/default.config
|
sfu-natlang/glm-parser
|
scripts/tests/run_default_yarn.sh
|
Shell
|
mit
| 955 |
#!/bin/sh -e
rm -rf build
script/check.sh --ci-mode
script/measure.sh --ci-mode
script/test.sh --ci-mode
g++ -std=c++11 src/boilerplate.cpp -o build/bp
#script/cpp/build.sh
if [ "${1}" = --run ]; then
build/bp
fi
#SYSTEM=$(uname)
#
# TODO: Needs polish.
#if [ "${SYSTEM}" = Linux ]; then
# script/debian/package.sh
# script/docker/build.sh
#fi
|
FunTimeCoding/cpp-skeleton
|
script/build.sh
|
Shell
|
mit
| 357 |
#!/bin/bash
vmd hexamer_ag_clean_out.pdb -e hexamer_ag_clean.tcl
|
tabinks/frankenzyme.com
|
projects/apc113179/hexamer_ag_clean_out/hexamer_ag_clean_VMD.sh
|
Shell
|
mit
| 65 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for DSA-3160-1
#
# Security announcement date: 2015-02-11 00:00:00 UTC
# Script generation date: 2017-01-01 21:07:14 UTC
#
# Operating System: Debian 7 (Wheezy)
# Architecture: armv7l
#
# Vulnerable packages fix on version:
# - xorg-server:2:1.12.4-6+deb7u6
# - xserver-xorg-core:2:1.12.4-6+deb7u6
# - xserver-xorg-dev:2:1.12.4-6+deb7u6
# - xdmx:2:1.12.4-6+deb7u6
# - xdmx-tools:2:1.12.4-6+deb7u6
# - xnest:2:1.12.4-6+deb7u6
# - xvfb:2:1.12.4-6+deb7u6
# - xserver-xephyr:2:1.12.4-6+deb7u6
# - xserver-xfbdev:2:1.12.4-6+deb7u6
# - xserver-xorg-core-dbg:2:1.12.4-6+deb7u6
# - xserver-common:2:1.12.4-6+deb7u6
#
# Last versions recommanded by security team:
# - xorg-server:2:1.12.4-6+deb7u6
# - xserver-xorg-core:2:1.12.4-6+deb7u6
# - xserver-xorg-dev:2:1.12.4-6+deb7u6
# - xdmx:2:1.12.4-6+deb7u6
# - xdmx-tools:2:1.12.4-6+deb7u6
# - xnest:2:1.12.4-6+deb7u6
# - xvfb:2:1.12.4-6+deb7u6
# - xserver-xephyr:2:1.12.4-6+deb7u6
# - xserver-xfbdev:2:1.12.4-6+deb7u6
# - xserver-xorg-core-dbg:2:1.12.4-6+deb7u6
# - xserver-common:2:1.12.4-6+deb7u6
#
# CVE List:
# - CVE-2015-0255
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo apt-get install --only-upgrade xorg-server=2:1.12.4-6+deb7u6 -y
sudo apt-get install --only-upgrade xserver-xorg-core=2:1.12.4-6+deb7u6 -y
sudo apt-get install --only-upgrade xserver-xorg-dev=2:1.12.4-6+deb7u6 -y
sudo apt-get install --only-upgrade xdmx=2:1.12.4-6+deb7u6 -y
sudo apt-get install --only-upgrade xdmx-tools=2:1.12.4-6+deb7u6 -y
sudo apt-get install --only-upgrade xnest=2:1.12.4-6+deb7u6 -y
sudo apt-get install --only-upgrade xvfb=2:1.12.4-6+deb7u6 -y
sudo apt-get install --only-upgrade xserver-xephyr=2:1.12.4-6+deb7u6 -y
sudo apt-get install --only-upgrade xserver-xfbdev=2:1.12.4-6+deb7u6 -y
sudo apt-get install --only-upgrade xserver-xorg-core-dbg=2:1.12.4-6+deb7u6 -y
sudo apt-get install --only-upgrade xserver-common=2:1.12.4-6+deb7u6 -y
|
Cyberwatch/cbw-security-fixes
|
Debian_7_(Wheezy)/armv7l/2015/DSA-3160-1.sh
|
Shell
|
mit
| 2,072 |
#/bin/bash
sudo ufw disable
sudo apt-get update
sudo apt-get upgrade
sudo apt-get -y install ant build-essential ant-optional default-jdk python \
valgrind ntp ccache git-arch git-completion git-core git-svn git-doc \
git-email python-httplib2 python-setuptools python-dev apt-show-versions
sudo apt-get -y -q install nodejs
sudo apt-get -y -q install npm
sudo apt-get -y -q install git
wget https://bootstrap.pypa.io/get-pip.py
sudo python get-pip.py
pip install requests
git clone https://github.com/VoltDB/voltdb.git
cd voltdb && ant
sudo mkdir /opt/voltdb
sudo chown vagrant /opt/voltdb
sudo cp voltdb/bin /opt/voltdb -rf
sudo cp voltdb/doc /opt/voltdb -rf
cp voltdb/examples /opt/voltdb -rf
cp voltdb/lib /opt/voltdb -rf
cp voltdb/tools /opt/voltdb -rf
cp voltdb/voltdb /opt/voltdb -rf
cp voltdb/version.txt /opt/voltdb
PATH=$PATH:/opt/voltdb/bin
echo 'PATH=$PATH:/opt/voltdb/bin' > .bashrc
git clone https://github.com/eimink/vsaa-server-nodejs.git
cd vsaa-server-nodejs
npm install restify-oauth2
npm install voltjs underscore
npm install node-uuid
sudo npm install sharedmemory
voltdb create -B
dbdriver="voltdb"
dbmodule="voltdb"
perl -pi -e "s/db_driver.*,/db_driver : \"$dbmodule\",/" config.js
perl -pi -e "s/listen.*,/listen : 8000,/" config.js
echo "Volt NodeJS uses 8000 port"
echo "Change config.js to have your voltdb configuration."
echo "nodejs server.js"
|
eimink/vsaa-server-nodejs
|
vagrant/voltdb/deploy.sh
|
Shell
|
mit
| 1,385 |
#!/usr/bin/env bash
ab -k -n 400 -c 10 -g baseline.dat -H "Accept-Encoding: gzip" http://127.0.0.1/
|
kmacrow/Shellac
|
benchmarks/run-baseline.sh
|
Shell
|
mit
| 102 |
#!/bin/bash
echo Removing temp files
rm -f ./src/*~
rm -f ./test/*~
rm -f ./*~
echo Staging changes
git add -A
echo Printing status
git status
|
FelixMaxwell/GPIOLib
|
stage.sh
|
Shell
|
mit
| 145 |
#!/bin/sh
mkdir portraits landscapes 2>/dev/null
for f in "$@"; do
[ ! -f "$f" ] && continue
# TODO: Filter out any non-image files
# Maybe: exifautotran to normalise jpeg rotation?
# ImageMagick to get orientation. Slow:
#orientation=$(identify -format '%[fx:(h>w)]' "$f")
# file + shell to get orientation. Much faster:
test $(file -L "$f" | sed -E 's/^.*(, ([0-9]+) ?x ?([0-9]+)|, height=([0-9]+),.*, width=([0-9]+)).*$/\2\5 -gt \3\4/')
orientation=$?
if [ $orientation -eq 1 ]; then
echo " portrait: $f"
#mv "$f" portraits
ln "$f" portraits
else
echo "landscape: $f"
#mv "$f" landscapes
ln "$f" landscapes
fi
done
|
DarkStarSword/junk
|
sort_images_by_orientation.sh
|
Shell
|
mit
| 647 |
#!/bin/bash
container=colourchallenge
# Docker exec
docker exec $container sh -c "node /srv/app/lib/cli add"
|
StudioLE/node-colour-challenge
|
docker-add.sh
|
Shell
|
mit
| 111 |
rexdep --pattern "." --start "("
|
itchyny/rexdep
|
test/invalid_regexp/start.sh
|
Shell
|
mit
| 33 |
#!/bin/sh
if [ -d /mnt/MD0_ROOT ] || [ -d /mnt/HDA_ROOT ]; then
/opt/farm/ext/backup/fs/dirs-qnap.sh
else
/opt/farm/ext/backup/fs/dirs-linux.sh
fi
|
serverfarmer/sf-backup
|
fs/detect.sh
|
Shell
|
mit
| 150 |
# ------------------------------------------------------------------------------
# Description
# -----------
#
# Completion script for git-extras (http://github.com/tj/git-extras).
#
# This depends on and reuses some of the internals of the _git completion
# function that ships with zsh itself. It will not work with the _git that ships
# with git.
#
# ------------------------------------------------------------------------------
# Authors
# -------
#
# * Alexis GRIMALDI (https://github.com/agrimaldi)
# * spacewander (https://github.com/spacewander)
#
# ------------------------------------------------------------------------------
# Inspirations
# -----------
#
# * git-extras (http://github.com/tj/git-extras)
# * git-flow-completion (http://github.com/bobthecow/git-flow-completion)
#
# ------------------------------------------------------------------------------
# Internal functions
# These are a lot like their __git_* equivalents inside _git
__gitex_command_successful () {
if (( ${#*:#0} > 0 )); then
_message 'not a git repository'
return 1
fi
return 0
}
__gitex_commits() {
declare -A commits
git log --oneline -15 | sed 's/\([[:alnum:]]\{7\}\) /\1:/' | while read commit
do
hash=$(echo $commit | cut -d':' -f1)
commits[$hash]="$commit"
done
local ret=1
_describe -t commits commit commits && ret=0
}
__gitex_remote_names() {
local expl
declare -a remote_names
remote_names=(${(f)"$(_call_program remotes git remote 2>/dev/null)"})
__git_command_successful || return
_wanted remote-names expl remote-name compadd $* - $remote_names
}
__gitex_tag_names() {
local expl
declare -a tag_names
tag_names=(${${(f)"$(_call_program tags git for-each-ref --format='"%(refname)"' refs/tags 2>/dev/null)"}#refs/tags/})
__git_command_successful || return
_wanted tag-names expl tag-name compadd $* - $tag_names
}
__gitex_branch_names() {
local expl
declare -a branch_names
branch_names=(${${(f)"$(_call_program branchrefs git for-each-ref --format='"%(refname)"' refs/heads 2>/dev/null)"}#refs/heads/})
__git_command_successful || return
_wanted branch-names expl branch-name compadd $* - $branch_names
}
__gitex_specific_branch_names() {
local expl
declare -a branch_names
branch_names=(${${(f)"$(_call_program branchrefs git for-each-ref --format='"%(refname)"' refs/heads/"$1" 2>/dev/null)"}#refs/heads/$1/})
__git_command_successful || return
_wanted branch-names expl branch-name compadd - $branch_names
}
__gitex_chore_branch_names() {
__gitex_specific_branch_names 'chore'
}
__gitex_feature_branch_names() {
__gitex_specific_branch_names 'feature'
}
__gitex_refactor_branch_names() {
__gitex_specific_branch_names 'refactor'
}
__gitex_bug_branch_names() {
__gitex_specific_branch_names 'bug'
}
__gitex_submodule_names() {
local expl
declare -a submodule_names
submodule_names=(${(f)"$(_call_program branchrefs git submodule status | awk '{print $2}')"}) # '
__git_command_successful || return
_wanted submodule-names expl submodule-name compadd $* - $submodule_names
}
__gitex_author_names() {
local expl
declare -a author_names
author_names=(${(f)"$(_call_program branchrefs git log --format='%aN' | sort -u)"})
__git_command_successful || return
_wanted author-names expl author-name compadd $* - $author_names
}
# subcommands
_git-authors() {
_arguments -C \
'(--list -l)'{--list,-l}'[show authors]' \
'--no-email[without email]' \
}
_git-bug() {
local curcontext=$curcontext state line ret=1
declare -A opt_args
_arguments -C \
': :->command' \
'*:: :->option-or-argument' && ret=0
case $state in
(command)
declare -a commands
commands=(
'finish:merge bug into the current branch'
)
_describe -t commands command commands && ret=0
;;
(option-or-argument)
curcontext=${curcontext%:*}-$line[1]:
case $line[1] in
(finish)
_arguments -C \
':branch-name:__gitex_bug_branch_names'
;;
-r|--remote )
_arguments -C \
':remote-name:__gitex_remote_names'
;;
esac
return 0
esac
_arguments \
'(--remote -r)'{--remote,-r}'[setup remote tracking branch]'
}
_git-changelog() {
_arguments \
'(-l --list)'{-l,--list}'[list commits]' \
}
_git-chore() {
local curcontext=$curcontext state line ret=1
declare -A opt_args
_arguments -C \
': :->command' \
'*:: :->option-or-argument' && ret=0
case $state in
(command)
declare -a commands
commands=(
'finish:merge and delete the chore branch'
)
_describe -t commands command commands && ret=0
;;
(option-or-argument)
curcontext=${curcontext%:*}-$line[1]:
case $line[1] in
(finish)
_arguments -C \
':branch-name:__gitex_chore_branch_names'
;;
-r|--remote )
_arguments -C \
':remote-name:__gitex_remote_names'
;;
esac
return 0
esac
_arguments \
'(--remote -r)'{--remote,-r}'[setup remote tracking branch]'
}
_git-contrib() {
_arguments \
':author:__gitex_author_names'
}
_git-count() {
_arguments \
'--all[detailed commit count]'
}
_git-create-branch() {
local curcontext=$curcontext state line
_arguments -C \
': :->command' \
'*:: :->option-or-argument'
case "$state" in
(command)
_arguments \
'(--remote -r)'{--remote,-r}'[setup remote tracking branch]'
;;
(option-or-argument)
curcontext=${curcontext%:*}-$line[1]:
case $line[1] in
-r|--remote )
_arguments -C \
':remote-name:__gitex_remote_names'
;;
esac
esac
}
_git-delete-branch() {
_arguments \
':branch-name:__gitex_branch_names'
}
_git-delete-submodule() {
_arguments \
':submodule-name:__gitex_submodule_names'
}
_git-delete-tag() {
_arguments \
':tag-name:__gitex_tag_names'
}
_git-effort() {
_arguments \
'--above[ignore file with less than x commits]'
}
_git-extras() {
local curcontext=$curcontext state line ret=1
declare -A opt_args
_arguments -C \
': :->command' \
'*:: :->option-or-argument' && ret=0
case $state in
(command)
declare -a commands
commands=(
'update:update git-extras'
)
_describe -t commands command commands && ret=0
;;
esac
_arguments \
'(-v --version)'{-v,--version}'[show current version]'
}
_git-feature() {
local curcontext=$curcontext state line ret=1
declare -A opt_args
_arguments -C \
': :->command' \
'*:: :->option-or-argument' && ret=0
case $state in
(command)
declare -a commands
commands=(
'finish:merge feature into the current branch'
)
_describe -t commands command commands && ret=0
;;
(option-or-argument)
curcontext=${curcontext%:*}-$line[1]:
case $line[1] in
(finish)
_arguments -C \
':branch-name:__gitex_feature_branch_names'
;;
-r|--remote )
_arguments -C \
':remote-name:__gitex_remote_names'
;;
esac
return 0
esac
_arguments \
'(--remote -r)'{--remote,-r}'[setup remote tracking branch]'
}
_git-graft() {
_arguments \
':src-branch-name:__gitex_branch_names' \
':dest-branch-name:__gitex_branch_names'
}
_git-guilt() {
_arguments -C \
'(--email -e)'{--email,-e}'[display author emails instead of names]' \
'(--ignore-whitespace -w)'{--ignore-whitespace,-w}'[ignore whitespace only changes]' \
'(--debug -d)'{--debug,-d}'[output debug information]' \
'-h[output usage information]'
}
_git-ignore() {
_arguments -C \
'(--local -l)'{--local,-l}'[show local gitignore]' \
'(--global -g)'{--global,-g}'[show global gitignore]' \
'(--private -p)'{--private,-p}'[show repo gitignore]'
}
_git-ignore() {
_arguments -C \
'(--append -a)'{--append,-a}'[append .gitignore]' \
'(--replace -r)'{--replace,-r}'[replace .gitignore]' \
'(--list-in-table -l)'{--list-in-table,-l}'[print available types in table format]' \
'(--list-alphabetically -L)'{--list-alphabetically,-L}'[print available types in alphabetical order]' \
'(--search -s)'{--search,-s}'[search word in available types]'
}
_git-merge-into() {
_arguments '--ff-only[merge only fast-forward]'
_arguments \
':src:__gitex_branch_names' \
':dest:__gitex_branch_names'
}
_git-missing() {
_arguments \
':first-branch-name:__gitex_branch_names' \
':second-branch-name:__gitex_branch_names'
}
_git-refactor() {
local curcontext=$curcontext state line ret=1
declare -A opt_args
_arguments -C \
': :->command' \
'*:: :->option-or-argument' && ret=0
case $state in
(command)
declare -a commands
commands=(
'finish:merge refactor into the current branch'
)
_describe -t commands command commands && ret=0
;;
(option-or-argument)
curcontext=${curcontext%:*}-$line[1]:
case $line[1] in
(finish)
_arguments -C \
':branch-name:__gitex_refactor_branch_names'
;;
-r|--remote )
_arguments -C \
':remote-name:__gitex_remote_names'
;;
esac
return 0
esac
_arguments \
'(--remote -r)'{--remote,-r}'[setup remote tracking branch]'
}
_git-squash() {
_arguments \
':branch-name:__gitex_branch_names'
}
_git-stamp() {
_arguments -C \
'(--replace -r)'{--replace,-r}'[replace stamps with same id]'
}
_git-standup() {
_arguments -C \
'-a[Specify the author of commits. Use "all" to specify all authors.]' \
'-d[Show history since N days ago]' \
'-D[Specify the date format displayed in commit history]' \
'-f[Fetch commits before showing history]' \
'-g[Display GPG signed info]' \
'-h[Display help message]' \
'-L[Enable the inclusion of symbolic links]' \
'-m[The depth of recursive directory search]'
}
_git-summary() {
_arguments '--line[summarize with lines rather than commits]'
__gitex_commits
}
_git-undo(){
_arguments -C \
'(--soft -s)'{--soft,-s}'[only rolls back the commit but changes remain un-staged]' \
'(--hard -h)'{--hard,-h}'[wipes your commit(s)]'
}
zstyle -g existing_user_commands ':completion:*:*:git:*' user-commands
zstyle ':completion:*:*:git:*' user-commands $existing_user_commands \
alias:'define, search and show aliases' \
archive-file:'export the current head of the git repository to an archive' \
authors:'generate authors report' \
back:'undo and stage latest commits' \
bug:'create bug branch' \
bulk:'run bulk commands' \
changelog:'generate a changelog report' \
chore:'create chore branch' \
clear-soft:'soft clean up a repository' \
clear:'rigorously clean up a repository' \
commits-since:'show commit logs since some date' \
contrib:'show user contributions' \
count:'show commit count' \
create-branch:'create branches' \
delete-branch:'delete branches' \
delete-merged-branches:'delete merged branches' \
delete-submodule:'delete submodules' \
delete-tag:'delete tags' \
delta:'lists changed files' \
effort:'show effort statistics on file(s)' \
extras:'awesome git utilities' \
feature:'create/merge feature branch' \
force-clone:'overwrite local repositories with clone' \
fork:'fork a repo on github' \
fresh-branch:'create fresh branches' \
gh-pages:'create the github pages branch' \
graft:'merge and destroy a given branch' \
guilt:'calculate change between two revisions' \
ignore-io:'get sample gitignore file' \
ignore:'add .gitignore patterns' \
info:'returns information on current repository' \
local-commits:'list local commits' \
lock:'lock a file excluded from version control' \
locked:'ls files that have been locked' \
merge-into:'merge one branch into another' \
merge-repo:'merge two repo histories' \
missing:'show commits missing from another branch' \
mr:'checks out a merge request locally' \
obliterate:'rewrite past commits to remove some files' \
pr:'checks out a pull request locally' \
psykorebase:'rebase a branch with a merge commit' \
pull-request:'create pull request to GitHub project' \
reauthor:'replace the author and/or committer identities in commits and tags' \
rebase-patch:'rebases a patch' \
refactor:'create refactor branch' \
release:'commit, tag and push changes to the repository' \
rename-branch:'rename a branch' \
rename-tag:'rename a tag' \
rename-remote:'rename a remote' \
repl:'git read-eval-print-loop' \
reset-file:'reset one file' \
root:'show path of root' \
scp:'copy files to ssh compatible `git-remote`' \
sed:'replace patterns in git-controlled files' \
setup:'set up a git repository' \
show-merged-branches:'show merged branches' \
show-tree:'show branch tree of commit history' \
show-unmerged-branches:'show unmerged branches' \
squash:'import changes from a branch' \
stamp:'stamp the last commit message' \
standup:'recall the commit history' \
summary:'show repository summary' \
sync:'sync local branch with remote branch' \
touch:'touch and add file to the index' \
undo:'remove latest commits' \
unlock:'unlock a file excluded from version control'
|
hickey/git-extras
|
etc/git-extras-completion.zsh
|
Shell
|
mit
| 14,700 |
#!/usr/bin/env bash
# Test if Homebrew is not installed.
if test ! $(which brew); then
info "Installing Homebrew..."
/usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)" </dev/null || true
fi
info "Installing Homebrew packages..."
|
erikverstegen/dotfiles
|
install/darwin/homebrew.sh
|
Shell
|
mit
| 288 |
#!/bin/bash
/elasticsearch/bin/elasticsearch "$@"
|
yaronr/elasticsearch
|
entrypoint.sh
|
Shell
|
mit
| 51 |
#!/bin/sh
install_target=`pwd`/workspace
set -e
# install linear-cgi
cd deps/linear-fcgi
./bootstrap
./configure --prefix=${install_target} --enable-silent-rules
make clean all install
|
linear-rpc/linear-js
|
test/makecgi.sh
|
Shell
|
mit
| 188 |
#!/bin/bash
KEYWORDS_WALES="Wales"
KEYWORDS_CARDIFF="Cardiff"
KEYWORDS_WALES_EXCLUDE="$KEYWORDS_CARDIFF|Jim(|bo|my)(| )Wales|New(| )South(| )Wales"
KEYWORDS_WALES_ALL="$KEYWORDS_WALES|$KEYWORDS_CARDIFF"
if [ "$1" == "" ]; #Normal operation
then
debug_start "Wales"
WALES=$(egrep -i "$KEYWORDS_WALES" "$NEWPAGES" | egrep -iv "$KEYWORDS_WALES_EXCLUDE")
CARDIFF=$(egrep -i "$KEYWORDS_CARDIFF" "$NEWPAGES")
categorize "WALES" "Wales"
categorize "CARDIFF" "Cardiff"
debug_end "Wales"
fi
|
MW-autocat-script/MW-autocat-script
|
catscripts/Government/Countries/United_Kingdom/Wales/Wales.sh
|
Shell
|
mit
| 500 |
# https://github.com/tadija/AEDotFiles
# xcode.sh
alias xcopen="xcfile() { local workspace=\$(find *.xcworkspace 2>/dev/null | head -1); local project=\$(find *.xcodeproj 2>/dev/null | head -1); \${workspace:-\${project}} ; }; open -a Xcode \"${xcfile}\""
alias xcwhich="xcode-select --print-path"
alias cartbs="carthage bootstrap --platform iOS --no-use-binaries"
alias cartup="carthage update --platform iOS --no-use-binaries"
alias cartcc="rm -rf ~/Library/Caches/org.carthage.CarthageKit"
alias codesigndoc="bash -l -c '$(curl -sfL https://raw.githubusercontent.com/bitrise-tools/codesigndoc/master/_scripts/install_wrap.sh)'"
alias fl="bundle exec fastlane"
alias flup="bundle update fastlane"
alias alphacheck="sips -g all"
alias alphadisable="mogrify -alpha off"
alias xcr="spx && sleep 1 && xed ."
# usage: $ simrec recording.mp4
simrec() {
xcrun simctl io booted recordVideo "$1"
}
# usage: $ simurl http://apple.com
simurl() {
xcrun simctl openurl booted "$1"
}
# usage: $ xcswitch /Applications/Xcode-beta.app
xcswitch() {
sudo xcode-select --switch "$1"
}
|
tadija/AEDotFiles
|
plugins/xcode.sh
|
Shell
|
mit
| 1,078 |
#!/bin/bash
set -e
DATA_DIR=${DATA_DIR:=/apache-archiva/data}
function urlParse(){
# extract the protocol
local proto="$(echo $1 | grep :// | sed -e's,^\(.*://\).*,\1,g')"
# remove the protocol -- updated
url=$(echo $1 | sed -e s,$proto,,g)
# extract the user (if any)
user="$(echo $url | grep @ | cut -d@ -f1)"
# extract the host -- updated
host=$(echo $url | sed -e s,$user@,,g | cut -d/ -f1)
# by request - try to extract the port
port="$(echo $host | sed -e 's,^.*:,:,g' -e 's,.*:\([0-9]*\).*,\1,g' -e 's,[^0-9],,g')"
host=${host/:$port}
echo "$host $port"
}
hostAndPort=(`urlParse ${http_proxy}`)
proxyId=""
remoteDownloadNetworkProxyId=""
networkProxy=""
if [ ${#hostAndPort[@]} -ge 1 ]
then
remoteDownloadNetworkProxyId='<remoteDownloadNetworkProxyId\>proxy<\/remoteDownloadNetworkProxyId>'
proxyId='<proxyId>proxy<\/proxyId>'
networkProxy=" \
<networkProxy> \
<id>proxy<\/id> \
<protocol>https<\/protocol> \
<host>${hostAndPort[0]}<\/host> \
<port>${hostAndPort[1]}<\/port> \
<username\/> \
<password\/> \
<useNtml>false<\/useNtml> \
<\/networkProxy>"
fi
cat ${DATA_DIR}/archiva.xml | \
sed "s/{{remoteDownloadNetworkProxyId}}/${remoteDownloadNetworkProxyId}/g" | \
sed "s/{{proxyId}}/${proxyId}/g" | \
sed "s/{{networkProxy}}/${networkProxy}/g" > /tmp/archiva.xml
mv /tmp/archiva.xml ${DATA_DIR}/archiva.xml
|
kawasima/dockerfiles
|
archiva-proxy/init.sh
|
Shell
|
mit
| 1,483 |
#!/bin/bash
. config.sh
. app/colors.sh
# run as root only
if [[ $EUID -ne 0 ]] ; then
run_error "This script must be run with root access\e[49m"
exit 1
fi
[ $# -eq 0 ] && { run_error "Usage: openssl <version>"; exit; }
if [ -z ${ROOT+x} ]; then show_red "Error" "ROOT system variable is not set! Check config.sh"; exit 1; fi
if [ -z ${CACHE+x} ]; then show_red "Error" "CACHE system variable is not set! Check config.sh"; exit 1; fi
if [ -z ${BUILD+x} ]; then show_red "Error" "BUILD system variable is not set! Check config.sh"; exit 1; fi
# Set: vars
MAIN_DIR="openssl"
WORKDIR="${BUILD}${MAIN_DIR}/"
CACHEDIR="${CACHE}${MAIN_DIR}/"
FILENAME="${MAIN_DIR}-${1}.tar.gz"
# Clear: current install
rm -Rf ${WORKDIR} && mkdir -p ${WORKDIR}
# Workspace
show_blue "Install" "${MAIN_DIR} from source"
# Run
run_install "${MAIN_DIR}-${1}:: required by NGINX Gzip module for headers compression"
if [ ! -s "${CACHE}${FILENAME}" ] ; then
run_download "${FILENAME}"
wget -O ${CACHE}${FILENAME} wget http://www.openssl.org/source/${FILENAME} &> /dev/null
else
show_yellow "Cache" "found ${FILENAME}. Using from cache"
fi
cd ${WORKDIR}
if [ ! -s "${CACHE}${FILENAME}" ] ; then
rm -Rf ${WORKDIR}
run_error "${CACHE}${FILENAME} not found"
exit 1
else
cd ${WORKDIR}
show_blue_bg "Unpack" "${MAIN_DIR}-${1}.tar.gz"
tar -xzf "${CACHE}${FILENAME}" -C ${WORKDIR}
cp -PR ${WORKDIR}${MAIN_DIR}-${1}/* ${WORKDIR}
./config --prefix=/usr --openssldir=/etc/ssl --libdir=lib shared zlib-dynamic # darwin64-x86_64-cc
make depend && make && make test && make install
run_ok
fi
|
gp187/nginx-builder
|
app/installers/openssl.sh
|
Shell
|
mit
| 1,684 |
function powerline_precmd() {
export PS1="$(powerline $? --shell zsh)"
}
function install_powerline_precmd() {
for s in "${precmd_functions[@]}"; do
if [ "$s" = "powerline_precmd" ]; then
return
fi
done
precmd_functions+=(powerline_precmd)
}
install_powerline_precmd
|
fromonesrc/dotfiles
|
zsh/powerline.zsh
|
Shell
|
mit
| 291 |
# THis is for correction part
# $1 : new temp.
# $2 : new pressure
# Example: ./kofke.sh Tr0.94 32.87335142
text_slurm="mpirun -np 48 /home/hjung52/gromacs5.1.2/bin/gmx_mpi mdrun -pin on -v >& log"
if [ ! -d "$1/gas-c" ]; then
mkdir -p $1/{gas,liq}-c
fi
for PHASE in gas liq
do
cd $1/$PHASE-c
cp ../$PHASE-p/confout.gro conf.gro
cp ../$PHASE-p/grompp.mdp ./
head -6 ../$PHASE-p/$1-$PHASE.slurm > $1-$PHASE.slurm
workdir=$(pwd)
echo "cd ${workdir}" >> $1-$PHASE.slurm
echo "${text_slurm}" >> $1-$PHASE.slurm
echo "exit 0" >> $1-$PHASE.slurm
sed -i '/ref-p =/c\ref-p = '"$2"' ;' grompp.mdp
if [ "$PHASE" = "liq" ]
then
gmx_mpi grompp -p ../../../FF.Argon/RNP/topol-4k.top
else
gmx_mpi grompp -p ../../../FF.Argon/White/topol-4k.top
fi
qsub $1-$PHASE.slurm
cd ../..
done
|
jht0664/Utility_python_gromacs
|
kofke/kofke-corr.sh
|
Shell
|
mit
| 821 |
#!/bin/bash
apt-get update
apt-get install -y update-notifier-common
# Preseed apt-cache settings
cat <<EOF | debconf-set-selections
apt-cacher apt-cacher/mode select daemon
EOF
apt-get install -y apt-cacher apache2
grep -q "^allowed_hosts" /etc/apt-cacher/apt-cacher.conf || echo "allowed_hosts = *" >> /etc/apt-cacher/apt-cacher.conf
grep -q "^package_files_regexp" /etc/apt-cacher/apt-cacher.conf || echo "package_files_regexp = (?:^[-+.a-z0-9]+[_-](?:\d:)?[-+.~a-zA-Z0-9]*(?:[_-]?(?:[-a-z0-9])*\.(?:u|d)?deb|\.dsc|\.tar(?:\.gz|\.bz2|\.xz)|\.diff\.gz)|\.rpm|index\.db-.+\.gz|\.jigdo|\.template)$" >> /etc/apt-cacher/apt-cacher.conf
service apt-cacher restart
service apache2 restart
echo 'Acquire::http::Proxy "http://10.5.5.5:3142";' > /etc/apt/apt.conf.d/01proxy
apt-get install -y update-notifier-common
apt-get dist-upgrade -y
apt-get autoremove -y
if [ -e /var/run/reboot-required ]; then
echo "Shutting down... Wait a moment, then run 'vagrant up'."
shutdown -h now
fi
|
johnmarkschofield/vagrant-apt-cacher
|
setup.bash
|
Shell
|
mit
| 988 |
#!/bin/bash
# Simple script to list version numbers of critical development tools
export LC_ALL=C
bash --version | head -n1 | cut -d" " -f2-4
echo "/bin/sh -> `readlink -f /bin/sh`"
echo -n "Binutils: "; ld --version | head -n1 | cut -d" " -f3-
bison --version | head -n1
if [ -h /usr/bin/yacc ];
then
echo "/usr/bin/yacc -> `readlink -f /usr/bin/yacc`";
elif [ -x /usr/bin/yacc ];
then
echo yacc is `/usr/bin/yacc --version | head -n1`
else
echo "yacc not found"
fi
bzip2 --version 2>&1 < /dev/null | head -n1 | cut -d" " -f1,6-
echo -n "Coreutils: "; chown --version | head -n1 | cut -d")" -f2
diff --version | head -n1
find --version | head -n1
gawk --version | head -n1
if [ -h /usr/bin/awk ];
then
echo "/usr/bin/awk -> `readlink -f /usr/bin/awk`";
elif [ -x /usr/bin/awk ];
then
echo awk is `/usr/bin/awk --version | head -n1`
else
echo "awk not found"
fi
gcc --version | head -n1
g++ --version | head -n1
ldd --version | head -n1 | cut -d" " -f2- # glibc version
grep --version | head -n1
gzip --version | head -n1
cat /proc/version
m4 --version | head -n1
make --version | head -n1
patch --version | head -n1
echo Perl `perl -V:version`
sed --version | head -n1
tar --version | head -n1
makeinfo --version | head -n1
xz --version | head -n1
echo 'int main(){}' > dummy.c && g++ -o dummy dummy.c
if [ -x dummy ]
then
echo "g++ compilation OK";
else
echo "g++ compilation failed";
fi
rm -f dummy.c dummy
|
Rocla/lfs-7.8
|
script-root_version-check.sh
|
Shell
|
mit
| 1,440 |
#! /bin/sh
gconftool-2 --load ~/.dotfiles/config-export/guake/apps-guake.xml
gconftool-2 --load ~/.dotfiles/config-export/guake/schemas-apps-guake.xml
|
firemound/dotfiles
|
scripts/setup/guake_restore.sh
|
Shell
|
mit
| 153 |
#!/bin/bash
#
# Print stats for an interface at a regular interval.
#
# cat /proc/net/dev | grep p3p1
#
# Expected format:
# Inter-| Receive | Transmit
# face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed
# p3p1: 62532782890571 45637642358 0 676564 31 0 0 7623825 23685063868011 19807410698 0 0 0 0 0 0
#
# $1: - interface name
# $2 - bytes received
# $3 - pkts received
# $10 - pkts transmitted
# $11 - pkts transmitted
#
bold() { ansi 1 "$@"; }
ansi() { echo -e "\e[${1}m${*:2}\e[0m"; }
dfile=/proc/net/dev
ifname=p3p1
psecs=3
psecs=1
astring0=`cat ${dfile} | grep ${ifname} | awk '{print "rxb0="$2"; rxp0="$3"; txb0="$10"; txp0="$11"; secs0="systime()}'`
eval ${astring0}
echo "# t0 recv_bytes = "${rxb0}" @ "${secs0}" seconds since 1970-01-01 00:00:00 UTC"
echo "# t0 recv_pkts = "${rxp0}" @ "${secs0}" seconds since 1970-01-01 00:00:00 UTC"
echo "# t0 tmit_bytes = "${txb0}" @ "${secs0}" seconds since 1970-01-01 00:00:00 UTC"
echo "# t0 tmit_pkts = "${txp0}" @ "${secs0}" seconds since 1970-01-01 00:00:00 UTC"
echo "# Pausing for "${psecs}" seconds."
sleep ${psecs}
astring1=`cat ${dfile} | grep ${ifname} | awk '{print "rxb1="$2"; rxp1="$3"; txb1="$10"; txp1="$11"; secs1="systime()}'`
eval ${astring1}
echo "# t1 recv_bytes = "${rxb1}" @ "${secs1}" seconds since 1970-01-01 00:00:00 UTC."
echo "# t1 recv_pkts = "${rxp1}" @ "${secs1}" seconds since 1970-01-01 00:00:00 UTC."
echo "# t1 tmit_bytes = "${txb1}" @ "${secs1}" seconds since 1970-01-01 00:00:00 UTC."
echo "# t1 tmit_pkts = "${txp1}" @ "${secs1}" seconds since 1970-01-01 00:00:00 UTC."
drxb=`echo $rxb1" - "$rxb0 | bc -l`
drxp=`echo $rxp1" - "$rxp0 | bc -l`
dtxb=`echo $txb1" - "$txb0 | bc -l`
dtxp=`echo $txp1" - "$txp0 | bc -l`
brrxMiB=`echo ${drxb}"/("${psecs}"*1.*1024.*1024.)" | bc -l `; brrxMiB=`printf "%-10.3f" ${brrxMiB}`
prrxPPS=`echo ${drxp}"/("${psecs}"*1.)" | bc -l `; prrxPPS=`printf "%-10.1f" ${prrxPPS}`
bpprx=`echo ${drxb}"/"${drxp} | bc -l`;bpprx=`printf "%.1f" ${bpprx}`
brtxMiB=`echo ${dtxb}"/("${psecs}"*1.*1024.*1024.)" | bc -l `; brtxMiB=`printf "%-10.3f" ${brtxMiB}`
prtxPPS=`echo ${dtxp}"/("${psecs}"*1.)" | bc -l `; prtxPPS=`printf "%-10.1f" ${prtxPPS}`
bpptx=`echo ${dtxb}"/"${dtxp} | bc -l`;bpptx=`printf "%.1f" ${bpptx}`
echo ${ifname}","${psecs}"s," \
$(bold 'RX')','${drxb}"b,"${drxp}"p,"$(bold ${brrxMiB}"MiB/s ")","${prrxPPS}"p/s,"${bpprx}"b/p," \
$(bold 'TX')','${dtxb}"b,"${dtxp}"p,"$(bold ${brtxMiB}"MiB/s ")","${prtxPPS}"p/s,"${bpptx}"b/p" \
${secs0}","${secs1}
|
christophernhill/nese
|
network/file_transfer_performance/measurement/if_stats.sh
|
Shell
|
mit
| 2,687 |
#!/usr/bin/env sh
# hide desktop
defaults write com.apple.finder CreateDesktop -bool false
killall Finder
|
bendrucker/dotfiles
|
macos/finder.sh
|
Shell
|
mit
| 108 |
#!/bin/bash
set -euxo pipefail
export DEBIAN_FRONTEND=noninteractive
apt-get update || :
apt-get install -y etckeeper git
sed -i -e 's/^#VCS="git"/VCS="git"/' -e 's/^VCS="bzr"/#VCS="bzr"/' -e 's/^GIT_COMMIT_OPTIONS=""/GIT_COMMIT_OPTIONS="-v"/' /etc/etckeeper/etckeeper.conf
etckeeper init 'Initial commit'
etckeeper commit 'Setup etckeeper' || :
|
znz/ruby-test-vagrant
|
provision-etckeeper.sh
|
Shell
|
mit
| 346 |
#!/bin/bash
## PARSER PACKAGE 02 ##
#
# This package is tailored towards longer simulations.
# It will grab the following plots from the data:
#
# - activeVehicleCount
# - activeRoadsideUnitCount
# - coveredCells
# - meanSignal
# - meanSaturation
# - signalToSaturation
# - coverageDistribution
#
# Run from the location where the 'simulations' folder is present.
set -e
if [ -z "$1" ]; then
echo "Error: Please specify a directory with simulations."
exit 1
fi
SIMDIR=$1
VISDIR=plots
SIMDESCR=description.txt
SCRIPTDIR=$(dirname $0)
PACKAGENAME=package02_limit
TEXSUBDIR=tex
PACKAGEDIR=${SIMDIR}/${VISDIR}/${PACKAGENAME}
LOGFILE=${PACKAGEDIR}/${PACKAGENAME}.log
declare -a PARSERS=(
"activeVehicleCount full"
"activeRoadsideUnitCount_limit full"
"coveredCells full"
"meanSignal_limit full"
"meanSaturation_limit full"
"signalToSaturation_limit full"
"singles/horizontalCoverageDistribution 3000"
)
# Check for the presence of a simulation folder
if [ ! -d ${SIMDIR} ]; then
echo "Error: Simulations folder not present."
exit 1
fi
# Ensure we're working with pdflatex version 3
if [[ ! $(pdflatex --version) =~ "pdfTeX 3" ]]; then
echo "Error: pdflatex version 3 is required."
exit 1
fi
# Check for previous plotfolders and offer to wipe them
PLOTFOLDERS=$(find ${SIMDIR} -type d -name ${VISDIR} | wc -l | tr -d ' ')
if [ ${PLOTFOLDERS} -ne 0 ]; then
if [ "$2" = "--overwrite" ]; then
echo "Erasing existing visualization folders..."
find ${SIMDIR} -type d -name ${VISDIR} -exec rm -rf {} +
else
echo "Error: A folder with previous plots exists."
echo "Add '--overwrite' as the second argument to erase it."
exit 1
fi
fi
# Plotting directory
if [ -d ${PACKAGEDIR} ]; then
echo "Folder with previous plots exists, move it before proceeding."
exit 1
fi
mkdir -p ${PACKAGEDIR}
# Run this package's parsers
printf "Running parsers..."
for PARSER in "${PARSERS[@]}"
do
PARSERCOMPONENTS=(${PARSER})
printf "\n\t* ${PARSERCOMPONENTS[0]}"
printf "\n### Running ${SCRIPTDIR}/${PARSERCOMPONENTS[0]}.sh\n" >> ${LOGFILE}
${SCRIPTDIR}/${PARSERCOMPONENTS[0]}.sh ${SIMDIR} ${PARSERCOMPONENTS[1]} >> ${LOGFILE} 2>&1
done
printf "\n"
printf "Gathering plots... "
# Create TeX dir and figures subdir, 1-shot
mkdir -p ${PACKAGEDIR}/${TEXSUBDIR}/figures
# Gather generated plots
printf "\n### Moving plot PDF files\n" >> ${LOGFILE}
find ${SIMDIR}/${VISDIR} -not \( -path ${PACKAGEDIR} -prune \) -type f -iname '*.pdf' -exec cp {} ${PACKAGEDIR}/${TEXSUBDIR}/figures/ \; >> ${LOGFILE} 2>&1
# Copy TeX scaffold over
cp ${SCRIPTDIR}/${PACKAGENAME}.tex ${PACKAGEDIR}/${TEXSUBDIR}
# Copy simulation description, if present
if [ -f ${SIMDIR}/${SIMDESCR} ]; then
cp -f ${SIMDIR}/${SIMDESCR} ${PACKAGEDIR}/${TEXSUBDIR}
fi
# Compile TeX
printf "\n### Running pdflatex\n" >> ${LOGFILE}
( cd ${PACKAGEDIR}/${TEXSUBDIR} ; pdflatex -interaction=nonstopmode -file-line-error -recorder ${PACKAGENAME}.tex ) >> ${LOGFILE} 2>&1
# Copy PDF down
#find ${PACKAGEDIR} -type f -name '${PACKAGENAME}.pdf' -exec cp {} ${SIMDIR}/${VISDIR}/ \;
printf "done.\n"
|
abreis/swift-gissumo
|
scripts/parsers/package02_limit.sh
|
Shell
|
mit
| 3,061 |
# Detect & load version managers
() {
typeset -a managers
# Detect nvm
# nvm recommends git checkout not brew
export NVM_DIR=${NVM_DIR:-$HOME/.nvm}
[[ -e $NVM_DIR/nvm.sh ]] && {
managers+=(nvm)
function init-nvm {
local cmd
cmd='source $NVM_DIR/nvm.sh'
# avoid calling `nvm use` again
(( ${+NVM_BIN} )) && cmd+=' --no-use'
eval "$cmd"
}
}
# Detect pyenv, both by brew or git
(( ${+commands[pyenv]} )) && {
managers+=(pyenv)
function init-pyenv {
integer has_virtualenv
typeset -a pyenv_commands
pyenv_commands=($(pyenv commands))
[[ ${pyenv_commands[(r)virtualenv]} == virtualenv ]] \
&& ((has_virtualenv = 1))
if (( ${+PYENV_SHELL} )); then
eval "$(pyenv init - --no-rehash zsh)"
else
eval "$(pyenv init - zsh)"
fi
if (( has_virtualenv )); then
# see https://github.com/pyenv/pyenv-virtualenv#activate-virtualenv
# eval "$(pyenv virtualenv-init - zsh)"
function virtualenv-init {
eval "$(pyenv virtualenv-init - zsh)"
unfunction virtualenv-init
}
fi
}
}
# Detect rbenv, both by brew or git
(( ${+commands[rbenv]} )) && {
managers+=(rbenv)
function init-rbenv {
if (( ${+RBENV_SHELL} )); then
eval "$(rbenv init - --no-rehash zsh)"
else
eval "$(rbenv init - zsh)"
fi
}
}
# set default value if nmk_version_managers is unset
(( ! ${+nmk_version_managers} )) && {
typeset -ga nmk_version_managers
nmk_version_managers=($managers)
}
local manager
for manager in $nmk_version_managers; do
case $manager in
nvm ) init-nvm; unfunction init-nvm ;;
pyenv ) init-pyenv; unfunction init-pyenv ;;
rbenv ) init-rbenv; unfunction init-rbenv ;;
esac
done
}
|
nuimk/nmk
|
zsh/src/zshrc/50-version-managers.zsh
|
Shell
|
mit
| 2,143 |
#!/bin/sh
set -e
set -u
set -o pipefail
function on_error {
echo "$(realpath -mq "${0}"):$1: error: Unexpected failure"
}
trap 'on_error $LINENO' ERR
if [ -z ${FRAMEWORKS_FOLDER_PATH+x} ]; then
# If FRAMEWORKS_FOLDER_PATH is not set, then there's nowhere for us to copy
# frameworks to, so exit 0 (signalling the script phase was successful).
exit 0
fi
echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
COCOAPODS_PARALLEL_CODE_SIGN="${COCOAPODS_PARALLEL_CODE_SIGN:-false}"
SWIFT_STDLIB_PATH="${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}"
# Used as a return value for each invocation of `strip_invalid_archs` function.
STRIP_BINARY_RETVAL=0
# This protects against multiple targets copying the same framework dependency at the same time. The solution
# was originally proposed here: https://lists.samba.org/archive/rsync/2008-February/020158.html
RSYNC_PROTECT_TMP_FILES=(--filter "P .*.??????")
# Copies and strips a vendored framework
install_framework()
{
if [ -r "${BUILT_PRODUCTS_DIR}/$1" ]; then
local source="${BUILT_PRODUCTS_DIR}/$1"
elif [ -r "${BUILT_PRODUCTS_DIR}/$(basename "$1")" ]; then
local source="${BUILT_PRODUCTS_DIR}/$(basename "$1")"
elif [ -r "$1" ]; then
local source="$1"
fi
local destination="${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
if [ -L "${source}" ]; then
echo "Symlinked..."
source="$(readlink "${source}")"
fi
# Use filter instead of exclude so missing patterns don't throw errors.
echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${destination}\""
rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${destination}"
local basename
basename="$(basename -s .framework "$1")"
binary="${destination}/${basename}.framework/${basename}"
if ! [ -r "$binary" ]; then
binary="${destination}/${basename}"
elif [ -L "${binary}" ]; then
echo "Destination binary is symlinked..."
dirname="$(dirname "${binary}")"
binary="${dirname}/$(readlink "${binary}")"
fi
# Strip invalid architectures so "fat" simulator / device frameworks work on device
if [[ "$(file "$binary")" == *"dynamically linked shared library"* ]]; then
strip_invalid_archs "$binary"
fi
# Resign the code if required by the build settings to avoid unstable apps
code_sign_if_enabled "${destination}/$(basename "$1")"
# Embed linked Swift runtime libraries. No longer necessary as of Xcode 7.
if [ "${XCODE_VERSION_MAJOR}" -lt 7 ]; then
local swift_runtime_libs
swift_runtime_libs=$(xcrun otool -LX "$binary" | grep --color=never @rpath/libswift | sed -E s/@rpath\\/\(.+dylib\).*/\\1/g | uniq -u)
for lib in $swift_runtime_libs; do
echo "rsync -auv \"${SWIFT_STDLIB_PATH}/${lib}\" \"${destination}\""
rsync -auv "${SWIFT_STDLIB_PATH}/${lib}" "${destination}"
code_sign_if_enabled "${destination}/${lib}"
done
fi
}
# Copies and strips a vendored dSYM
install_dsym() {
local source="$1"
if [ -r "$source" ]; then
# Copy the dSYM into a the targets temp dir.
echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${DERIVED_FILES_DIR}\""
rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${DERIVED_FILES_DIR}"
local basename
basename="$(basename -s .framework.dSYM "$source")"
binary="${DERIVED_FILES_DIR}/${basename}.framework.dSYM/Contents/Resources/DWARF/${basename}"
# Strip invalid architectures so "fat" simulator / device frameworks work on device
if [[ "$(file "$binary")" == *"Mach-O "*"dSYM companion"* ]]; then
strip_invalid_archs "$binary"
fi
if [[ $STRIP_BINARY_RETVAL == 1 ]]; then
# Move the stripped file into its final destination.
echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${DERIVED_FILES_DIR}/${basename}.framework.dSYM\" \"${DWARF_DSYM_FOLDER_PATH}\""
rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${DERIVED_FILES_DIR}/${basename}.framework.dSYM" "${DWARF_DSYM_FOLDER_PATH}"
else
# The dSYM was not stripped at all, in this case touch a fake folder so the input/output paths from Xcode do not reexecute this script because the file is missing.
touch "${DWARF_DSYM_FOLDER_PATH}/${basename}.framework.dSYM"
fi
fi
}
# Copies the bcsymbolmap files of a vendored framework
install_bcsymbolmap() {
local bcsymbolmap_path="$1"
local destination="${BUILT_PRODUCTS_DIR}"
echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${bcsymbolmap_path}" "${destination}""
rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${bcsymbolmap_path}" "${destination}"
}
# Signs a framework with the provided identity
code_sign_if_enabled() {
if [ -n "${EXPANDED_CODE_SIGN_IDENTITY:-}" -a "${CODE_SIGNING_REQUIRED:-}" != "NO" -a "${CODE_SIGNING_ALLOWED}" != "NO" ]; then
# Use the current code_sign_identity
echo "Code Signing $1 with Identity ${EXPANDED_CODE_SIGN_IDENTITY_NAME}"
local code_sign_cmd="/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} ${OTHER_CODE_SIGN_FLAGS:-} --preserve-metadata=identifier,entitlements '$1'"
if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
code_sign_cmd="$code_sign_cmd &"
fi
echo "$code_sign_cmd"
eval "$code_sign_cmd"
fi
}
# Strip invalid architectures
strip_invalid_archs() {
binary="$1"
# Get architectures for current target binary
binary_archs="$(lipo -info "$binary" | rev | cut -d ':' -f1 | awk '{$1=$1;print}' | rev)"
# Intersect them with the architectures we are building for
intersected_archs="$(echo ${ARCHS[@]} ${binary_archs[@]} | tr ' ' '\n' | sort | uniq -d)"
# If there are no archs supported by this binary then warn the user
if [[ -z "$intersected_archs" ]]; then
echo "warning: [CP] Vendored binary '$binary' contains architectures ($binary_archs) none of which match the current build architectures ($ARCHS)."
STRIP_BINARY_RETVAL=0
return
fi
stripped=""
for arch in $binary_archs; do
if ! [[ "${ARCHS}" == *"$arch"* ]]; then
# Strip non-valid architectures in-place
lipo -remove "$arch" -output "$binary" "$binary"
stripped="$stripped $arch"
fi
done
if [[ "$stripped" ]]; then
echo "Stripped $binary of architectures:$stripped"
fi
STRIP_BINARY_RETVAL=1
}
if [[ "$CONFIGURATION" == "Debug" ]]; then
install_framework "${BUILT_PRODUCTS_DIR}/Chirp/Chirp.framework"
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
install_framework "${BUILT_PRODUCTS_DIR}/Chirp/Chirp.framework"
fi
if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
wait
fi
|
trifl/Chirp
|
Example/Pods/Target Support Files/Pods-Chirp_Example/Pods-Chirp_Example-frameworks.sh
|
Shell
|
mit
| 7,861 |
# Derived from https://hub.docker.com/r/picoded/ubuntu-openjdk-8-jdk/dockerfile
apt-get update && \
apt-get install -y openjdk-8-jdk && \
apt-get install -y ant && \
apt-get clean && \
rm -rf /var/lib/apt/lists/* && \
rm -rf /var/cache/oracle-jdk8-installer;
apt-get update && \
apt-get install -y ca-certificates && \
apt-get install -y ca-certificates-java && \
apt-get clean && \
update-ca-certificates -f && \
rm -rf /var/lib/apt/lists/* && \
rm -rf /var/cache/oracle-jdk8-installer;
apt-get update --fix-missing && \
apt-get install -y apt-transport-https && \
apt-get install -y bzip2 && \
apt-get install -y curl && \
apt-get install -y git && \
apt-get install -y parallel && \
apt-get install -y software-properties-common && \
apt-get install -y wget && \
apt-key adv --keyserver keyserver.ubuntu.com --recv-keys E298A3A825C0D65DFD57CBB651716619E084DAB9 && \
add-apt-repository 'deb https://cloud.r-project.org/bin/linux/ubuntu bionic-cran35/' && \
apt-get update && \
ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone && \
apt-get -y --allow-unauthenticated --no-install-recommends install r-base r-base-dev libcurl4-openssl-dev libssl-dev libxml2-dev && \
find /usr/local/lib/R/site-library/ -depth -wholename '*/html' -exec rm -r "{}" \; && \
find /usr/local/lib/R/site-library/ -depth -wholename '*/data' -exec rm -r "{}" \; && \
find /usr/local/lib/R/site-library/ -depth -wholename '*/doc' -exec rm -r "{}" \; && \
find /usr/local/lib/R/site-library/ -depth -wholename '*/tests' -exec rm -r "{}" \; && \
find /usr/local/lib/R/site-library/ -depth -wholename '*/examples' -exec rm -r "{}" \; && \
find /usr/local/lib/R/site-library/ -depth -wholename '*/help' -exec rm -r "{}" \; && \
find /usr/local/lib/R/site-library/ -depth -wholename '*/www' -exec rm -r "{}" \; && \
find /usr/local/lib/R/site-library/ -depth -wholename '*/www-dir' -exec rm -r "{}" \; && \
find /usr/local/lib/R/site-library/ -depth -wholename '*/staticdocs' -exec rm -r "{}" \; && \
find /usr/local/lib/R/site-library/ -depth -wholename '*/demo' -exec rm -r "{}" \; && \
find /usr/lib/R/library/ -depth -wholename '*/html' -exec rm -r "{}" \; && \
find /usr/lib/R/library/ -depth -wholename '*/data' -exec rm -r "{}" \; && \
find /usr/lib/R/library/ -depth -wholename '*/doc' -exec rm -r "{}" \; && \
find /usr/lib/R/library/ -depth -wholename '*/tests' -exec rm -r "{}" \; && \
find /usr/lib/R/library/ -depth -wholename '*/examples' -exec rm -r "{}" \; && \
find /usr/lib/R/library/ -depth -wholename '*/help' -exec rm -r "{}" \; && \
find /usr/lib/R/library/ -depth -wholename '*/www' -exec rm -r "{}" \; && \
find /usr/lib/R/library/ -depth -wholename '*/www-dir' -exec rm -r "{}" \; && \
find /usr/lib/R/library/ -depth -wholename '*/staticdocs' -exec rm -r "{}" \; && \
find /usr/lib/R/library/ -depth -wholename '*/demo' -exec rm -r "{}" \; && \
rm -rf /usr/local/lib/R/site-library/BH && \
rm -rf /usr/share/mime /usr/share/tcltk && \
rm -rf /usr/share/tcltk /usr/share/man && \
rm -rf /usr/share/doc /usr/share/locale /usr/share/perl5 && \
apt-get -y autoremove && \
apt-get clean
# Update to R 4.1.1:
# apt-get update -qq && \
# apt install --no-install-recommends software-properties-common dirmngr && \
# wget -qO- https://cloud.r-project.org/bin/linux/ubuntu/marutter_pubkey.asc | tee -a /etc/apt/trusted.gpg.d/cran_ubuntu_key.asc && \
# add-apt-repository "deb https://cloud.r-project.org/bin/linux/ubuntu $(lsb_release -cs)-cran40/" && \
|
srp33/ShinyLearner_Environment
|
install_debian_packages.sh
|
Shell
|
mit
| 3,575 |
#!/bin/bash
# Tell internal unit to reboot via the assigned GPIO pin
logfile="reboot_internal.log"
reboot_pin=26 # GPIO pin to monitor
# Make sure output is off for a minute (in case it was still on for any reason)
# The internal unit has to see the input low at least once or it will not reboot
# This is to prevent a constant reboot loop if the input goes bad and gets stuck high
printf "%s\t%s\n" "$(date '+%Y%m%d %T')" "Requesting reboot of internal unit via GPIO" >> $logfile
raspi-gpio set $reboot_pin op dl # dl - Drive low
sleep 65
# Set pin high for 6 minutes (internal unit should reboot after 5)
raspi-gpio set $reboot_pin op dh # dh - Drive high
sleep 365
# Turn pin off when complete
raspi-gpio set $reboot_pin op dl # dl - Drive low
|
thephez/data-diode
|
scripts/reboot_internal.sh
|
Shell
|
mit
| 753 |
#!/bin/zsh
# https://github.com/denysdovhan/spaceship-prompt/blob/master/docs/Troubleshooting.md#what-is-the-i-before-prompt-character-
export SPACESHIP_VI_MODE_SHOW=false
export SPACESHIP_CHAR_SUFFIX=" "
export SPACESHIP_CHAR_SYMBOL=ยป
# SPACESHIP_CHAR_SYMBOL=โ
# SPACESHIP_CHAR_SYMBOL=โฏ
# SPACESHIP_CHAR_SYMBOL=ฮป
# REPORTTIME is a nifty feature of zsh. If you set it to a non-negative value, then every time, any command you run that takes longer than the value you set it to in seconds,
# zsh will print usage statistics afterwards as if you had run the command prefixed with time.
# Report CPU usage for commands running longer than 10 seconds
export REPORTTIME=10
|
radum/dotfiles
|
zsh/prompt.zsh
|
Shell
|
mit
| 675 |
#!/bin/bash
#On Tool Labs
PYTHONPATH=/shared/pywikipedia/core:/shared/pywikipedia/core/externals/httplib2:/shared/pywikipedia/core/scripts
# Wikinewses: Ar, Bg, Bs, Ca, Cs, El, En, Eo, Fa, Fi, He, Ja, Ko, No, Pt, Ro, Ru, Sq, Sr, Sv, Ta, Tr, Uk, Zh are all global bot allowed
# As of: August 6th, 2015
# This is meant to run on cron with companion script
if [ $1 == "ar" ] || [ $1 == "bg" ] || [ $1 == "bs" ] || [ $1 == "ca" ] || [ $1 == "cs" ] || [ $1 == "el" ] || [ $1 == "en" ] || [ $1 == "eo" ] || [ $1 == "fa" ] || [ $1 == "fi" ] || [ $1 == "he" ] || [ $1 == "ja" ] || [ $1 == "ko" ] || [ $1 == "no" ] || [ $1 == "pt" ] || [ $1 == "ro" ] || [ $1 == "ru" ] || [ $1 == "sq" ] || [ $1 == "sr" ] || [ $1 == "sv" ] || [ $1 == "ta" ] || [ $1 == "tr" ] || [ $1 == "uk" ] || [ $1 == "zh" ] ; then
# Try a full scan first
python /shared/pywikipedia/core/scripts/redirect.py -fullscan do -family:wikinews -always -lang:$1 -namespace:14
python /shared/pywikipedia/core/scripts/redirect.py -fullscan do -family:wikinews -always -lang:$1 -namespace:12
python /shared/pywikipedia/core/scripts/redirect.py -fullscan do -family:wikinews -always -lang:$1 -namespace:11
python /shared/pywikipedia/core/scripts/redirect.py -fullscan do -family:wikinews -always -lang:$1 -namespace:10
python /shared/pywikipedia/core/scripts/redirect.py -fullscan do -family:wikinews -always -lang:$1 -namespace:9
python /shared/pywikipedia/core/scripts/redirect.py -fullscan do -family:wikinews -always -lang:$1 -namespace:8
python /shared/pywikipedia/core/scripts/redirect.py -fullscan do -family:wikinews -always -lang:$1 -namespace:5
python /shared/pywikipedia/core/scripts/redirect.py -fullscan do -family:wikinews -always -lang:$1 -namespace:4
python /shared/pywikipedia/core/scripts/redirect.py -fullscan do -family:wikinews -always -lang:$1 -namespace:3
python /shared/pywikipedia/core/scripts/redirect.py -fullscan do -family:wikinews -always -lang:$1 -namespace:2
python /shared/pywikipedia/core/scripts/redirect.py -fullscan do -family:wikinews -always -lang:$1 -namespace:1
python /shared/pywikipedia/core/scripts/redirect.py -fullscan do -family:wikinews -always -lang:$1 -namespace:0
# Pull from special page, in case full scan fails
python /shared/pywikipedia/core/scripts/redirect.py do -family:wikinews -always -lang:$1 -namespace:14
python /shared/pywikipedia/core/scripts/redirect.py do -family:wikinews -always -lang:$1 -namespace:12
python /shared/pywikipedia/core/scripts/redirect.py do -family:wikinews -always -lang:$1 -namespace:11
python /shared/pywikipedia/core/scripts/redirect.py do -family:wikinews -always -lang:$1 -namespace:10
python /shared/pywikipedia/core/scripts/redirect.py do -family:wikinews -always -lang:$1 -namespace:9
python /shared/pywikipedia/core/scripts/redirect.py do -family:wikinews -always -lang:$1 -namespace:8
python /shared/pywikipedia/core/scripts/redirect.py do -family:wikinews -always -lang:$1 -namespace:5
python /shared/pywikipedia/core/scripts/redirect.py do -family:wikinews -always -lang:$1 -namespace:4
python /shared/pywikipedia/core/scripts/redirect.py do -family:wikinews -always -lang:$1 -namespace:3
python /shared/pywikipedia/core/scripts/redirect.py do -family:wikinews -always -lang:$1 -namespace:2
python /shared/pywikipedia/core/scripts/redirect.py do -family:wikinews -always -lang:$1 -namespace:1
python /shared/pywikipedia/core/scripts/redirect.py do -family:wikinews -always -lang:$1 -namespace:0
fi
if [ $1 == "ar" ] || [ $1 == "bg" ] || [ $1 == "bs" ] || [ $1 == "cs" ] || [ $1 == "el" ] || [ $1 == "en" ] || [ $1 == "fa" ] || [ $1 == "fi" ] || [ $1 == "he" ] || [ $1 == "ja" ] || [ $1 == "ko" ] || [ $1 == "no" ] || [ $1 == "pt" ] || [ $1 == "ro" ] || [ $1 == "ru" ] || [ $1 == "sr" ] || [ $1 == "sv" ] || [ $1 == "tr" ] || [ $1 == "uk" ] || [ $1 == "zh" ] ; then
# Try a full scan first
python /shared/pywikipedia/core/scripts/redirect.py -fullscan do -family:wikinews -always -lang:$1 -namespace:6
# Pull from special page, in case full scan fails
python /shared/pywikipedia/core/scripts/redirect.py do -family:wikinews -always -lang:$1 -namespace:6
fi
if [ $1 == "bg" ] || [ $1 == "en" ] || [ $1 == "fi" ] || [ $1 == "he" ] || [ $1 == "ja" ] || [ $1 == "pt" ] || [ $1 == "ru" ] || [ $1 == "sv" ] || [ $1 == "zh" ] ; then
# Try a full scan first
python /shared/pywikipedia/core/scripts/redirect.py -fullscan do -family:wikinews -always -lang:$1 -namespace:7
# Pull from special page, in case full scan fails
python /shared/pywikipedia/core/scripts/redirect.py do -family:wikinews -always -lang:$1 -namespace:7
fi
if [ $1 == "cs" ] || [ $1 == "en" ] || [ $1 == "fi" ] || [ $1 == "he" ] || [ $1 == "pt" ] || [ $1 == "ru" ] || [ $1 == "zh" ] ; then
# Try a full scan first
python /shared/pywikipedia/core/scripts/redirect.py -fullscan do -family:wikinews -always -lang:$1 -namespace:13
# Pull from special page, in case full scan fails
python /shared/pywikipedia/core/scripts/redirect.py do -family:wikinews -always -lang:$1 -namespace:13
fi
if [ $1 == "ar" ] || [ $1 == "ca" ] || [ $1 == "cs" ] || [ $1 == "el" ] || [ $1 == "en" ] || [ $1 == "eo" ] || [ $1 == "fa" ] || [ $1 == "fi" ] || [ $1 == "he" ] || [ $1 == "ja" ] || [ $1 == "ko" ] || [ $1 == "no" ] || [ $1 == "pt" ] || [ $1 == "ro" ] || [ $1 == "ru" ] || [ $1 == "sr" ] || [ $1 == "sv" ] || [ $1 == "ta" ] || [ $1 == "tr" ] || [ $1 == "uk" ] || [ $1 == "zh" ] ; then
# Try a full scan first
python /shared/pywikipedia/core/scripts/redirect.py -fullscan do -family:wikinews -always -lang:$1 -namespace:15
# Pull from special page, in case full scan fails
python /shared/pywikipedia/core/scripts/redirect.py do -family:wikinews -always -lang:$1 -namespace:15
fi
if [ $1 == "en" ] ; then
# Try a full scan first
python /shared/pywikipedia/core/scripts/redirect.py -fullscan do -family:wikinews -always -lang:$1 -namespace:90
python /shared/pywikipedia/core/scripts/redirect.py -fullscan do -family:wikinews -always -lang:$1 -namespace:92
# Pull from special page, in case full scan fails
python /shared/pywikipedia/core/scripts/redirect.py do -family:wikinews -always -lang:$1 -namespace:90
python /shared/pywikipedia/core/scripts/redirect.py do -family:wikinews -always -lang:$1 -namespace:92
fi
if [ $1 == "ar" ] || [ $1 == "ca" ] || [ $1 == "en" ] || [ $1 == "fa" ] || [ $1 == "he" ] || [ $1 == "ja" ] || [ $1 == "no" ] || [ $1 == "pt" ] || [ $1 == "ru" ] || [ $1 == "sq" ] || [ $1 == "ta" ] || [ $1 == "tr" ] || [ $1 == "zh" ] ; then
# Try a full scan first
python /shared/pywikipedia/core/scripts/redirect.py -fullscan do -family:wikinews -always -lang:$1 -namespace:100
# Pull from special page, in case full scan fails
python /shared/pywikipedia/core/scripts/redirect.py do -family:wikinews -always -lang:$1 -namespace:100
fi
if [ $1 == "ar" ] || [ $1 == "en" ] || [ $1 == "fa" ] || [ $1 == "he" ] || [ $1 == "ja" ] || [ $1 == "no" ] || [ $1 == "pt" ] || [ $1 == "ru" ] || [ $1 == "ta" ] || [ $1 == "tr" ] || [ $1 == "zh" ] ; then
# Try a full scan first
python /shared/pywikipedia/core/scripts/redirect.py -fullscan do -family:wikinews -always -lang:$1 -namespace:101
# Pull from special page, in case full scan fails
python /shared/pywikipedia/core/scripts/redirect.py do -family:wikinews -always -lang:$1 -namespace:101
fi
if [ $1 == "ar" ] || [ $1 == "bg" ] || [ $1 == "ca" ] || [ $1 == "el" ] || [ $1 == "en" ] || [ $1 == "fa" ] || [ $1 == "pt" ] || [ $1 == "ru" ] || [ $1 == "sq" ] || [ $1 == "sr" ] || [ $1 == "uk" ] ; then
# Try a full scan first
python /shared/pywikipedia/core/scripts/redirect.py -fullscan do -family:wikinews -always -lang:$1 -namespace:102
# Pull from special page, in case full scan fails
python /shared/pywikipedia/core/scripts/redirect.py do -family:wikinews -always -lang:$1 -namespace:102
fi
if [ $1 == "pt" ] ; then
# Try a full scan first
python /shared/pywikipedia/core/scripts/redirect.py -fullscan do -family:wikinews -always -lang:$1 -namespace:103
# Pull from special page, in case full scan fails
python /shared/pywikipedia/core/scripts/redirect.py do -family:wikinews -always -lang:$1 -namespace:103
fi
if [ $1 == "uk" ] ; then
# Try a full scan first
python /shared/pywikipedia/core/scripts/redirect.py -fullscan do -family:wikinews -always -lang:$1 -namespace:104
# Pull from special page, in case full scan fails
python /shared/pywikipedia/core/scripts/redirect.py do -family:wikinews -always -lang:$1 -namespace:104
fi
if [ $1 == "no" ] ; then
# Try a full scan first
python /shared/pywikipedia/core/scripts/redirect.py -fullscan do -family:wikinews -always -lang:$1 -namespace:106
python /shared/pywikipedia/core/scripts/redirect.py -fullscan do -family:wikinews -always -lang:$1 -namespace:107
# Pull from special page, in case full scan fails
python /shared/pywikipedia/core/scripts/redirect.py do -family:wikinews -always -lang:$1 -namespace:106
python /shared/pywikipedia/core/scripts/redirect.py do -family:wikinews -always -lang:$1 -namespace:107
fi
if [ $1 == "ja" ] ; then
# Try a full scan first
python /shared/pywikipedia/core/scripts/redirect.py -fullscan do -family:wikinews -always -lang:$1 -namespace:108
python /shared/pywikipedia/core/scripts/redirect.py -fullscan do -family:wikinews -always -lang:$1 -namespace:109
# Pull from special page, in case full scan fails
python /shared/pywikipedia/core/scripts/redirect.py do -family:wikinews -always -lang:$1 -namespace:108
python /shared/pywikipedia/core/scripts/redirect.py do -family:wikinews -always -lang:$1 -namespace:109
fi
if [ $1 == "ca" ] || [ $1 == "cs" ] || [ $1 == "en" ] || [ $1 == "fa" ] || [ $1 == "ro" ] || [ $1 == "ru" ] || [ $1 == "uk" ] || [ $1 == "zh" ] ; then
# Try a full scan first
python /shared/pywikipedia/core/scripts/redirect.py -fullscan do -family:wikinews -always -lang:$1 -namespace:828
# Pull from special page, in case full scan fails
python /shared/pywikipedia/core/scripts/redirect.py do -family:wikinews -always -lang:$1 -namespace:828
fi
if [ $1 == "cs" ] || [ $1 == "uk" ] ; then
# Try a full scan first
python /shared/pywikipedia/core/scripts/redirect.py -fullscan do -family:wikinews -always -lang:$1 -namespace:829
# Pull from special page, in case full scan fails
python /shared/pywikipedia/core/scripts/redirect.py do -family:wikinews -always -lang:$1 -namespace:829
fi
|
Avicennasis/AvicBot
|
redirects/avicbotrdnews.sh
|
Shell
|
mit
| 10,256 |
#!/usr/bin/bash
help () {
echo -e "\n usage: ${0} <xml-schema-file> <saxon-jar-file> <xsl-file>\n"
echo -e " * output may be directed to a file when running stand-alone\n"
}
XML_SCHEMA_DEFAULT=""
XML_TRANSFORMER_DEFAULT="/opt/saxonica/SaxonEE9-7-0-18J/saxon9ee.jar"
XSL_DEFAULT="./lib/extract-xml-schema-content.xsl.sef"
if [ ${#} -eq 3 ] ; then
XML_SCHEMA_ARG=${1}
XML_TRANSFORMER_ARG=${2}
XSL_ARG=${3}
if [ -n "${XML_SCHEMA_ARG}" ]; then
XML_SCHEMA=${XML_SCHEMA_ARG};
else
XML_SCHEMA=${XML_SCHEMA_DEFAULT};
fi
if [ -n "${XML_TRANSFORMER_ARG}" ]; then
XML_TRANSFORMER=${XML_TRANSFORMER_ARG};
else
XML_TRANSFORMER=${XML_TRANSFORMER_DEFAULT};
fi
if [ -n "${XSL_ARG}" ]; then
XSL=${XSL_ARG};
else
XSL=${XSL_DEFAULT};
fi
if [ -f ${XML_SCHEMA} ] ; then
if [ -f ${XML_TRANSFORMER} ] ; then
java -jar "${XML_TRANSFORMER}" -xsl:"${XSL}" -s:"${XML_SCHEMA}"
else
echo -e "\n file does not exist: ${XML_TRANSFORMER}\n"
fi
else
echo -e "\n file does not exist: ${XML_SCHEMA}\n"
fi
else
help
fi
|
gmoyanollc/xml-schema-content-extractor
|
bin/extract-content.sh
|
Shell
|
mit
| 1,080 |
#!/bin/bash
# bundle name
bundlename="SecureBox.tar.gz"
# target dir
dirname="SecureBox/distrib"
mkdir $dirname
cd ..
find SecureBox/src -name "*.py" | xargs tar -cvzf "$dirname/$bundlename" SecureBox/README.md SecureBox/LICENSE SecureBox/requirements.txt SecureBox/install_macosx.sh
|
americanpezza/SecureBox
|
create_bundle.sh
|
Shell
|
mit
| 288 |
#!/usr/bin/env bash
curl -s "https://get.sdkman.io" | bash
source "$HOME/.sdkman/bin/sdkman-init.sh"
|
andue/dotfiles
|
sdkman.sh
|
Shell
|
mit
| 102 |
#!/bin/bash
VERSION=0.0.10
BASE_PATH=$(cd `dirname $0`; cd ..; pwd)
POM=$BASE_PATH/pom.xml
POM_VERSION=`awk '/<version>[^<]+<\/version>/{gsub(/<version>|<\/version>/,"",$1);print $1;exit;}' $POM`
PARAM=$1
AUTO_VERSION=false
PROJECT_ID=
PROJECT=
TOKEN=
TARGET_FILE=
URL_UPLOAD=
URL_RELEASE=
function usage() {
echo "Yegoo Co.ltd"
echo "Deploy version $VERSION"
echo "Usage: deploy [version(x.x.x.x)]"
if [ "$1" != "" ]; then
echo -e $1
fi
exit 1
}
function newVersionFromPom() {
LEFT_VERSION=$1
OV=${LEFT_VERSION%%.*}
LEFT_VERSION=${LEFT_VERSION#*$OV.}
TV=${LEFT_VERSION%%.*}
LEFT_VERSION=${LEFT_VERSION#*$TV.}
SV=${LEFT_VERSION%%.*}
LEFT_VERSION=${LEFT_VERSION#*$SV.}
FVT=${LEFT_VERSION%%.*}
FV=${FVT%-*}
echo $OV.$TV.$SV.$(($FV+1))-SNAPSHOT
}
BANNER='
_____ _
| __ \ | |
| | | | ___ _ __ | | ___ _ _
| | | |/ _ \ _ \| |/ _ \| | | |
| |__| | __/ |_) | | (_) | |_| |
|_____/ \___| .__/|_|\___/ \__, |
| | __/ |
|_| |___/
'
echo -e "$BANNER"
if [ "$PARAM" == "rollback" ]; then
echo "Rollback "
mvn versions:revert -f $POM
exit 0
fi
if [ "$PARAM" == "" ]; then
PARAM=$POM_VERSION
AUTO_VERSION=true
fi
if [[ "$PARAM" =~ ^[0-9]+.[0-9]+.[0-9]+.[0-9]+(-SNAPSHOT)?$ ]]; then
OLD_VERSION=${PARAM%-*}
NEW_VERSION=`newVersionFromPom $OLD_VERSION`
read -p "Are you sure to continue (release version $OLD_VERSION, new version $NEW_VERSION)? [Y/N]" yesOrNo
case $yesOrNo in
[yY]*)
echo "Update release version $OLD_VERSION"
mvn versions:set -DnewVersion=$OLD_VERSION -f $POM
mvn package -DskipTests -f $POM
if [[ $? -eq '0' ]]; then
ret=$(curl -s --request POST --header "PRIVATE-TOKEN: $TOKEN" --form "file=@$TARGET_FILE" $URL_UPLOAD)
markdown1=${ret#*markdown\":\"}
msg=${markdown1:0:-2}
git tag -a v$OLD_VERSION -m "$OLD_VERSION"
git push origin v$OLD_VERSION
if [[ $? -eq '0' ]]; then
git tag -d v$OLD_VERSION
echo "Update next version $NEW_VERSION"
mvn versions:set -DnewVersion=$NEW_VERSION -f $POM
mvn versions:commit -f $POM
else
mvn versions:revert -f $POM
fi
data="# Release v$OLD_VERSION
$msg"
ret=$(curl -s --request POST --header "PRIVATE-TOKEN: $TOKEN" --data "description=$data" $URL_RELEASE)
else
mvn versions:revert -f $POM
fi
;;
[nN]*)
echo "exit"
exit
;;
*)
echo "Just enter Y or N, please."
exit
;;
esac
else
if $AUTO_VERSION; then
usage "pom version error, you must init pom version x.x.x.x\nex: deploy.sh 1.0.0.1"
else
usage "input version error"
fi
fi
|
ihuanglei/scripts
|
shell/deploy-java-gitlab.sh
|
Shell
|
mit
| 3,089 |
#!/usr/bin/env bash
# Copyright:: Copyright (c) 2016 Been Kyung-yoon (http://www.php79.com/)
# License:: The MIT License (MIT)
STACK_ROOT=$( dirname $( cd "$( dirname "$0" )" && pwd ) )
source "${STACK_ROOT}/includes/function.inc.sh"
title "SELinux ๋ฅผ ๋นํ์ฑํํฉ๋๋ค."
sed -i 's/^SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
# exit status code ๋ฅผ 1๋ก ๋ฐํํ์ฌ, ์ค์น๊ฐ ์ค๋จ๋๋ฏ๋ก ๊ฐ์ ๋ก 0์ผ๋ก ๋ฐํ
/usr/sbin/setenforce 0
exit 0
|
php79/stack
|
scripts/selinux-disable.sh
|
Shell
|
mit
| 474 |
#!/bin/bash
echo "CI: " $CI "TRAVIS: " $TRAVIS "TRAVIS_TAG: " $TRAVIS_TAG
echo "settting up"
./setup.sh
echo "testing"
go test
echo "golang build"
go build -o golangweb .
echo "building docker image"
VERSION="$1"
if [ -z "$1" ]
then
VERSION=0.0.0
fi
echo "version: " ${VERSION}
./build-image.sh "${VERSION}"
if [ -z "$TRAVIS" ]; then
TAG="${VERSION}"
IMAGE=tmoaaz/golangweb:${TAG}
echo "running container image:" ${IMAGE}
docker run -it --rm --name golangweb --publish 8080:8080 ${IMAGE}
echo "deleting image"
docker rmi ${IMAGE}
fi
echo "DONE"
|
tmoaaz/docker-kubernetes-demo
|
build.sh
|
Shell
|
mit
| 567 |
#!/bin/sh
exec /sbin/setuser root /usr/sbin/php5-fpm 1>>/var/log/php5-fpm.log 2>&1
|
dronemill/harmony-api
|
dockerfile_fs/etc/service/php5-fpm.sh
|
Shell
|
mit
| 84 |
#!/bin/sh
# Intended for x86_64 Linux build environment
# with native g++ compiler installed (64-bit Linux Mint 18 tested)
# works on Ubuntu MATE 18.04/20.04 LTS (binaries aren't backward compatible)
make -f simd_make_x64.mk clean
make -f core_make_x64.mk clean
# RooT demo compilation requires Xext development library in addition to g++
cd ../root
make -f RooT_make_x64.mk clean
cd ../test
|
VectorChief/QuadRay-engine
|
test/clean_linux.sh
|
Shell
|
mit
| 400 |
#!/usr/bin/env bash
sudo -E apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 7F0CEB10
echo 'deb http://downloads-distro.mongodb.org/repo/ubuntu-upstart dist 10gen' | sudo -E tee /etc/apt/sources.list.d/mongodb.list
sudo -E apt-get update
sudo -E apt-get install mongodb-org
|
unixfreak0037/mwzoo
|
mwzoo/ubuntu_install_mongodb.sh
|
Shell
|
mit
| 287 |
#!/bin/bash
set -ev
node -v
if [ "$TRAVIS_PULL_REQUEST" != "false" ]; then
echo "Skipping deploy; just doing a build."
exit 0
fi
# Deployment to firebase for master branch
if [ "$TRAVIS_BRANCH" == "master" ]; then
echo "Deploying to firebase in case of master too"
firebase deploy --token "$FIREBASE_TOKEN"
# Purge the cache if the site is successfully deployed to production
# We don't control other domains so we leave the purging details on them
node --harmony_async_await --harmony ./scripts/purger/index.js
# deploying to Github Pages the master version, in case we need it any time
node ./scripts/deploy-ghpages.js
fi
# Deploy each branch to staging, a mirror of master will be deployed on surge,
# cos it's already being deployed on firebase too
# Create an automated staging environment on one website, where we can select
# all the staging environments
surge --domain "hannan-fascinations-""$TRAVIS_BRANCH"".surge.sh" --project ./_site
|
abdulhannanali/fascinations-of-hannan
|
scripts/deploy.sh
|
Shell
|
mit
| 992 |
#!/bin/bash
# reports the distribution of lowest coverage in exons
# each exon is characterized by a bp with the lowest coverage
# distribution of theses values for
# input is dcoverage
echo "target,gene,min_coverage"
cat $1 | awk -F "\t" '{print $1"-"$2"-"$3","$4","$2+$5","$6}' | \
awk -F ',' '
BEGIN{
prev_target=0;
prev_pos=0;
prev_cov=0;
prev_gene=0;
prev_bases_below20=0;
}
{
target=$1
if (prev_target != target){
if (prev_target !=0){
print prev_target","prev_gene","prev_cov;
}
prev_target=target;
prev_cov=$4;
prev_gene=$2;
}
if ($4 < prev_cov){
prev_cov = $4
}
}
END{
print prev_target","prev_gene","prev_cov;
}'
|
naumenko-sa/bioscripts
|
bam/bam.coverage.per_exon.sh
|
Shell
|
mit
| 709 |
#!/bin/sh
# the repository name must exist in the environment
if [ -z $REPOSITORY ]; then
echo 'Missing repository name...exiting...'
exit 1
fi
# extract sources and prepare for build
tar -xf gmp-6.1.2.tar.xz
cd gmp-6.1.2
mkdir build && cd build
PREFIX=/usr/local
../configure \
--prefix=$PREFIX \
--enable-cxx=yes \
CFLAGS='-O3 -s' \
CXXFLAGS='-O3 -s -Wl,-rpath,/usr/local/amd64-linux-musl/lib64,-rpath-link,/usr/local/amd64-linux-musl/lib64'
# Calculates the optimal job count
JOBS=$(getThreadCount)
# build and install
make -j $JOBS && make -j $JOBS check && make install
# make this image a package
package
|
MetaBarj0/scripts
|
docker/share/images/gmp/build-sources.sh
|
Shell
|
mit
| 630 |
#!/bin/bash
# TO Show all Databases that will be used by backup.sh script for backup
showdb(){
influx -host "$INFLUX_HOST" -port 8086 -execute 'SHOW DATABASES'
}
DATABASES=$(showdb)
echo "$DATABASES" | sed -e 's/[\r]//g' | sed -e 's/^.\{26\}//' | sed 's/ /:/g'
|
mcci-catena/docker-ttn-dashboard
|
influxdb/showdb.sh
|
Shell
|
mit
| 265 |
cd test
rm -rf testdata
php sumr-gentestdata.php
php ../sumr.php testdata > sum_testdata2.txt
diff sum_testdata.txt sum_testdata2.txt
|
kzokojima/sumr-php
|
test.sh
|
Shell
|
mit
| 134 |
#!/bin/sh
docker run -i --volumes-from data -t hnakamur/createrepo
|
hnakamur/centos7-dockerfiles
|
createrepo/run.sh
|
Shell
|
mit
| 67 |
#! /bin/bash
source ./scripts/setenv.sh
set -ex
# Rewriting java.*
echo Generating rewritten apache library
java $JAVA_OPTS ui.RewriteAllSources --bodies -j -classpath -j $LIBCP -m rewriteinfo.txt -d rewrittenSources $JAVASTAR
echo Going to directory containing rewritten apache
cd rewrittenSources/$ANALYZEDLIBS_BASENAME/jdk1.6.0_06_src
echo Compiling generated sources
mkdir -p build
javac -d build `pathlist_recursive.py '*.java' java/util`
echo Going back to original directory
cd ../../..
|
jgaltidor/VarJ
|
src/VarJFrontend/scripts/rewrite_compile_java_bodies.sh
|
Shell
|
mit
| 497 |
#! /bin/bash
DIR=$(readlink -enq "$(dirname $0)/../ext/")
shopt -s nullglob
export NO_INTERACTION=1
export REPORT_EXIT_STATUS=1
make -C "$DIR" test
for i in $DIR/tests/*.log; do
echo "====== $i ======";
cat "$i";
done
[ -n "$(echo $DIR/tests/*.log)" ] && exit 1
exit 0
|
sjinks/psr3logger
|
.travis/run_php_tests.sh
|
Shell
|
mit
| 273 |
if $(grc &>/dev/null)
then
# alias ls="grc ls"
alias ping="grc ping"
alias traceroute="grc traceroute"
alias configure="grc configure"
alias gcc="grc gcc"
alias make="grc make"
alias netstat="grc netstat"
alias diff="grc diff"
alias wdiff="grc wdiff"
alias last="grc last"
alias mount="grc mount"
alias mtr="grc mtr"
alias ps="grc ps"
alias dig="grc dig"
# alias ifconfig="grc ifconfig"
fi
if [[ `uname` == 'Darwin' ]]
then
alias emacs="subl -w"
alias emasc="subl -w"
# alias cat="colorize"
# alias less="colorize"
else
alias emacs="emacs -nw"
alias emcas="emacs -nw"
alias emasc="emacs -nw"
fi
|
stiang/dotfiles
|
system/aliases.zsh
|
Shell
|
mit
| 641 |
#!/bin/sh
COLLECTORDEMO_TESTS=
export COLLECTORDEMO_TESTS
COLLECTORDEMO_HOME="$(dirname $0)"
. "${COLLECTORDEMO_HOME}"/etc/common
cd "${COLLECTORDEMO_HOME}"
TARGET="$@"
TARGET="${TARGET:-develop}"
"${COLLECTORDEMO_BIN}"/python.sh setup.py -q ${TARGET}
[ $? != 0 ] && echo "ERROR!!!" && exit 1
"${COLLECTORDEMO_BIN}"/django-migrate.sh
"${COLLECTORDEMO_BIN}"/django-load.sh
exit 0
# Local Variables:
# indent-tabs-mode: nil
# End:
# vim: ai et sw=4 ts=4
|
rentalita/django-collectordemo
|
build.sh
|
Shell
|
mit
| 460 |
mkdir -p tmp/pids
nohup bundle exec rake environment resque:work QUEUE=post_receive,mailer,system_hook RAILS_ENV=production PIDFILE=tmp/pids/resque_worker.pid > ./log/resque.log &
|
RaymondChou/Gitlab_Chinese
|
resque.sh
|
Shell
|
mit
| 181 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.