#!/bin/tcsh 

# a start at a generic run script for the mpi version.  this should probably
# end up in the shell scripts directory - but it is here for now.  nsc.

# this code shamelessly stolen from tim.
#=============================================================================
# This block of directives constitutes the preamble for the LSF queuing system
# LSF is used on the IBM   Linux cluster 'lightning'
# LSF is used on the IMAGe Linux cluster 'coral'
# LSF is used on the IBM   'bluevista'
# The queues on lightning and bluevista are supposed to be similar.
#
# the normal way to submit to the queue is:    bsub < runme_filter
#
# an explanation of the most common directives follows:
# -J Job name (master script job.csh presumes filter_server.xxxx.log)
# -o STDOUT filename
# -e STDERR filename
# -P      account
# -q queue    cheapest == [standby, economy, (regular,debug), premium] == $$$$
# -n number of processors  (really)
# -W hh:mm  execution time (must be specified on some hosts)
#=============================================================================
#BSUB -J filter
#BSUB -o filter.%J.log
#BSUB -q standby
#BSUB -n 4
#BSUB -W 00:30
#
#
##=============================================================================
## This block of directives constitutes the preamble for the PBS queuing system
## PBS is used on the CGD   Linux cluster 'bangkok'
## PBS is used on the CGD   Linux cluster 'calgary'
## 
## the normal way to submit to the queue is:    qsub runme_filter
## 
## an explanation of the most common directives follows:
## -N     Job name
## -r n   Declare job non-rerunable
## -e <arg>  filename for standard error
## -o <arg>  filename for standard out 
## -q <arg>   Queue name (small, medium, long, verylong)
## -l nodes=xx:ppn=2   requests BOTH processors on the node. On both bangkok 
##                     and calgary, there is no way to 'share' the processors
##                     on the node with another job, so you might as well use
##                     them both.  (ppn == Processors Per Node)
##=============================================================================
#PBS -N filter
#PBS -r n
#PBS -e filter.err
#PBS -o filter.log
#PBS -q medium
#PBS -l nodes=4:ppn=2

# A common strategy is to check for the existence of environment variables
# that each queuing system sets for its own jobs.  That tells us which
# queuing system we are running under, so 'queue-independent' variables
# can be set for use in the remainder of the script; a small illustrative
# sketch follows.
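# For example, the working directory could be captured in a single variable
# no matter which system submitted the job.  (This is a sketch only; the
# name RUNDIR is illustrative and is not used elsewhere in this script.)
#   if ($?LS_SUBCWD)      set RUNDIR = $LS_SUBCWD
#   if ($?PBS_O_WORKDIR)  set RUNDIR = $PBS_O_WORKDIR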

if ($?LS_SUBCWD) then

   # LSF has a list of processors already in a variable (LSB_HOSTS)
   # submit this script with:  bsub < runme_filter
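   # for reference, the allocated hosts could be echoed into the job log
   # (an optional, illustrative line; LSB_HOSTS is set by LSF):
   #   echo "allocated hosts: $LSB_HOSTS"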

   mpirun.lsf ./filter
   

else if ($?PBS_O_WORKDIR) then

   # PBS has the list of processors in a file; the file's name is in $PBS_NODEFILE
   # submit this script with:  qsub runme_filter
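   # note: PBS starts batch jobs in your home directory.  if filter and its
   # input files live where 'qsub' was run, cd back there first, e.g.
   #   cd $PBS_O_WORKDIR
   # some MPI installs also want the node list passed explicitly, e.g.
   #   mpirun -np 8 -machinefile $PBS_NODEFILE ./filter
   # (8 == 4 nodes x 2 processors per node, matching the directive above;
   #  these lines are illustrative -- adjust them for your site.)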

   mpirun ./filter

else if ($?NODEFILE) then

   # no batch system; use mpirun but supply a node list file.
   # to get into this section you must have first set NODEFILE
   # to the name of a file and filled it with the local node names:
   #  setenv NODEFILE  ~/nodelist
   #  echo "node7:2" > $NODEFILE
   #  echo "node5:2" >> $NODEFILE
   #  echo "node3:2" >> $NODEFILE
   #  echo "node1:2" >> $NODEFILE
   # then just run this script:  ./runme_filter
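   # note: the example file above provides 8 slots (4 nodes x 2 processes
   # each); adjust the -np value below to match how many you want to use.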

   mpirun -np 4 -nolocal -machinefile $NODEFILE ./filter

else

   # interactive - assumes you are either using 'lam-mpi' and have already
   # run 'lamboot' once to start the lam server, or are using mpich and
   # 'mpd' is already running.  then run this script:  ./runme_filter
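   # illustrative startup commands (adjust for your MPI installation):
   #   lamboot             # LAM/MPI: start the lam run-time daemons
   #   mpd &               # MPICH: start the mpd process manager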

   mpirun -np 4 ./filter

endif

