I'm having problems at Birmingham similar to those Mark saw at Durham with the
installation: release 2.1.8, same symptoms relating to fstab.
cp: cannot create regular file /root//var/obj/conf/fstab/fstab.hda: no
such file or directory
...
LCFG object fstab: Missing fstab template for hda
(/var/obj/conf/fstab/fstab.hda)
FAILED
I'll attach my site config in case it's the cause of the problems.
Lawrie.
--
Tel: 0121 414 4621 Fax: 0121 414 6709 Email: [log in to unmask]
/*
site-cfg.h
==================================================
EXAMPLE SITE SPECIFIC CONFIGURATION
*/
/* Set the root password for all nodes controlled by the server.
If you wish to set a different password for a particular node, set
this value in the machine configuration file.
To generate a value for this field, do the following:
openssl passwd
and then put the printed value below.
*/
+auth.rootpwd -secret-
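/* For illustration only (not part of the original template): generating
   the hash on any machine with openssl installed looks roughly like this;
   the string printed below is a placeholder, not a real hash.
       $ openssl passwd
       Password:
       Verifying - Password:
       ab01cd23ef45g
   The printed string is the value of auth.rootpwd above (redacted here
   as -secret-). */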
/* SOURCE TREE LOCATIONS --------------------------------------------------
------------------------------------------------------------------------- */
/* Define the root locations of the Globus tree and the EDG tree. These
are used in many configuration files and for setting the ld.so.conf
libraries. NOTE: the underscore at the end of the define. Used to avoid
confusion with the GLOBUS_LOCATION and EDG_LOCATION tags in configuration
files. */
#define GLOBUS_LOCATION_ /opt/globus
#define EDG_LOCATION_ /opt/edg
#define EDG_LOCATION_VAR_ EDG_LOCATION_/var
#define EDG_LOCATION_TMP_ /tmp
/* COMMON GRID DEFINITIONS ------------------------------------------------
--------------------------------------------------------------------------- */
/* This is a space-separated list of the subject names of all of the grid's
trusted brokers. Each subject name MUST be enclosed in double quotes.
This is used by the MyProxy server to recognize from which brokers to
allow proxy renewal. */
#define GRID_TRUSTED_BROKERS "/O=Grid/O=CERN/OU=cern.ch/CN=host/lxshare0403.cern.ch"
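/* For illustration only: a site trusting more than one broker would list
   all subject names in a single space-separated value, each DN in double
   quotes. The second DN below is a hypothetical example, not a real broker.
#define GRID_TRUSTED_BROKERS "/O=Grid/O=CERN/OU=cern.ch/CN=host/lxshare0403.cern.ch" "/O=Grid/O=Example/OU=example.org/CN=host/rb.example.org"
*/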
/* COMMON SITE DEFINITIONS ------------------------------------------------
--------------------------------------------------------------------------- */
/* CE AND SE HOST NAMES. These are defined here because they are used in
some of the site definitions. */
/* ComputingElement hostname */
#define CE_HOSTNAME epcf36.ph.bham.ac.uk
/* StorageElement hostname */
#define SE_HOSTNAME epcf37.ph.bham.ac.uk
#define SITE_LOCALDOMAIN ph.bham.ac.uk
#define SITE_MAILROOT [log in to unmask]
#define SITE_GATEWAYS 147.188.46.1
/* Allowed networks (useful for tcpwrappers) */
#define SITE_ALLOWED_NETWORKS 127.0.0.1, 147.188.46., 147.188.47.
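/* For illustration only (assuming the list above is fed to tcpwrappers
   via /etc/hosts.allow): a trailing dot matches a whole network prefix,
   so the value would allow, e.g.
       ALL: 127.0.0.1, 147.188.46., 147.188.47.
*/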
#define SITE_NAMESERVERS 147.188.128.2 147.188.128.102
/* The netmask, network and broadcast should be set explicitly. */
/* Change the (CERN) example values below. */
#define SITE_NETMASK 255.255.255.0
#define SITE_NETWORK 147.188.46.0
#define SITE_BROADCAST 147.188.46.255
/* NTP server (domain and hostname) */
#define SITE_NTP_HOSTNAME ntp0.bham.ac.uk ntp1.bham.ac.uk
/* The time zone */
#define SITE_TIMEZONE Europe/London
/* Site name. */
#define SITE_NAME_ SITE_LOCALDOMAIN
/* Site EDG version */
#define SITE_EDG_VERSION v2_1_8
/* Site installation date year month day time */
#define SITE_INSTALLATION_DATE_ 20040114093000Z
/* Site distinguished name. */
#define SITE_DN_ \"dc=ph, dc=bham, dc=ac, dc=uk, o=Grid\"
#define SITE_LCFG_SERVER epac2.ph.bham.ac.uk
/* A GridFTP server is not normally needed on the gatekeeper and so is
not started by default. Defining the macro below will start a
gridftp server. */
/* #define CE_RUNS_GRIDFTP */
/* Set this macro if your poolaccount home directories are on an
   external server. This avoids setting up the poolaccounts and home
   directories on the CE. */
/* #define CE_USES_EXTERNAL_HOMEDIRS */
/* NFS access control is done via host names. The gridmapdir and home
areas are typically exported from the CE (gatekeeper) and the SE
cache area is exported from the SE.
The definitions below are appropriate for sites with one SE and one
CE. Sites with multiple SEs or CEs will have to modify the
individual configuration files (as well as the definitions below).
The actual NFS options lists are correct if the *HOSTS values can
be expressed as a single wildcarded value. If this is not the
case, then the ACLs will have to be specified manually. */
#define SITE_CE_HOSTS CE_HOSTNAME
#define SITE_SE_HOSTS SE_HOSTNAME
#define SITE_WN_HOSTS epcf3[345].ph.bham.ac.uk
#define SITE_NFS_ACL_FROM_CE SITE_SE_HOSTS(rw,no_root_squash) SITE_WN_HOSTS(rw,no_root_squash)
#define SITE_NFS_ACL_FROM_SE SITE_CE_HOSTS(rw,no_root_squash) SITE_WN_HOSTS(rw,no_root_squash)
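/* For illustration only: with the host values above, the gridmapdir
   export from the CE would end up in /etc/exports looking roughly like
   this (using the SITE_DEF_GRIDMAPDIR path defined further down):
       /share/grid-security/gridmapdir epcf37.ph.bham.ac.uk(rw,no_root_squash) epcf3[345].ph.bham.ac.uk(rw,no_root_squash)
*/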
/* The default configuration of MDS is that there is a GRIS running on
each functional node (CE, SE). There is a single site-level GIIS
running by default on the CE. This site-level GIIS then registers with
the top-level GIIS for the production or development testbed. The
details are handled via the globuscfg configuration object. */
/* Usually use a name like nikhefpro or nikhefdev for the production
or development testbeds. */
#define SITE_GIIS bham
#define SITE_GIIS_HOSTNAME CE_HOSTNAME
/* These point to the next highest level in the MDS hierarchy. Ask to
find out the parameters for this. At time of tagging these were:
edgdev on lxshare0372.cern.ch for DEVELOPMENT Testbed
edgpro on lxshare0373.cern.ch for PRODUCTION (Application) Testbed
but DO ask to be sure.*/
#define TOP_GIIS none
#define TOP_GIIS_HOSTNAME none.cern.ch
#define SITE_GIIS_ON_CE 1
/* LSL Check ??? */
#define COUNTRY_GIIS_ON_CE 1
/* Usually the R-GMA server and fabric monitoring are put on the same
node. Change the lines below if this is not the case for your
site. */
#define SITE_MON_HOST epcf38.ph.bham.ac.uk
/* You MUST use the quotes and space for the value. If you use a
comma, PBS job submissions will fail! */
#define SITE_GLOBUS_TCP_RANGE "50000 52000"
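/* For illustration only: the value above typically ends up exported as
   the GLOBUS_TCP_PORT_RANGE environment variable, i.e.
       GLOBUS_TCP_PORT_RANGE="50000 52000"
   which is why the quotes and the space (rather than a comma) matter. */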
/* The location of the site GOUT daemon. (Usually on MON box.) */
#define SITE_GOUT_HOST SITE_MON_HOST
#define SITE_GOUT_PORT 2169
/* SITE BDII -------------------------------------------------------
--------------------------------------------------------------------------- */
/* The information concerning the BDII associated with the resource
broker. If you run an RB at your site you should also run a
BDII. The password is the result of running slappasswd on the plain
text password. The quotes are important. */
/* LSL Check ???? */
#define SITE_BDII_HOST bdii.example.org
#define SITE_BDII_PORT 2170
#define SITE_BDII_PASSWD \"{SSHA}919X1lDSoORGrx1dJXdve7KUsmER0704\"
#define SITE_BDII_PASSWD_PLAIN something
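/* For illustration only: the hashed value is produced with slappasswd
   (from the openldap-servers package), e.g.
       $ slappasswd -s something
       {SSHA}919X1lDSoORGrx1dJXdve7KUsmER0704
   Real output will differ because SSHA uses a random salt; the hash
   shown is simply the one already quoted in SITE_BDII_PASSWD above. */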
/* SITE R-GMA -------------------------------------------------------
--------------------------------------------------------------------------- */
#define GRID_RGMA_INFO_CATALOG gppic06.gridpp.rl.ac.uk
#define SITE_RGMA_SERVER SITE_MON_HOST
/* Uncomment this line if a GOUT server is desired. You only need
one if you are running an RB at your site and want to attach it to
RGMA. */
/* #define RGMA_RUN_GOUT */
/* Put in here the fully qualified name of your site's network
monitoring machine. If you have no such machine then use the
default below. */
#define NETMON_HOST SITE_MON_HOST
/* SITE MONITORING --------------------------------------------------
--------------------------------------------------------------------------- */
/* Comment this out if site fabric monitoring is not desired. */
/* You must also modify the BASE-rpm file to enable or disable fabric monitoring. */
#define SITE_FABRIC_MON_SERVER SITE_MON_HOST
/* COMMON DEFAULT VALUES --------------------------------------------------
--------------------------------------------------------------------------- */
/* This defines the default location for the host certificates. If
this is different for your site define the new value here. If you
need to change it for the CE or SE separately, see below. */
#define SITE_DEF_GRIDSEC_ROOT /etc/grid-security
#define SITE_DEF_HOST_CERT SITE_DEF_GRIDSEC_ROOT/hostcert.pem
#define SITE_DEF_HOST_KEY SITE_DEF_GRIDSEC_ROOT/hostkey.pem
#define SITE_DEF_GRIDMAP SITE_DEF_GRIDSEC_ROOT/grid-mapfile
#define SITE_DEF_GRIDMAPDIR /share/grid-security/gridmapdir/
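/* For illustration only: with the default paths above, /etc/grid-security
   on each node is normally expected to contain at least
       hostcert.pem   (world-readable, e.g. mode 0644, owned by root)
       hostkey.pem    (readable by root only, e.g. mode 0400)
       grid-mapfile
   plus the CA certificates directory configured elsewhere. */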
/* LSL commented as these are not in the RAL example
#define SITE_DEF_CERTDIR SITE_DEF_GRIDSEC_ROOT/certificates/
#define SITE_DEF_VOMSDIR SITE_DEF_GRIDSEC_ROOT/vomsdir/
#define SITE_DEF_WEBSERVICES_CERT SITE_DEF_GRIDSEC_ROOT/tomcatcert.pem
#define SITE_DEF_WEBSERVICES_KEY SITE_DEF_GRIDSEC_ROOT/tomcatkey.pem
LSL */
/* RLS PARAMETERS --------------------------------------------------------
--------------------------------------------------------------------------- */
#define RLS_LRC_ALICE_USER edglrc_alice
#define RLS_LRC_ATLAS_USER edglrc_atlas
#define RLS_LRC_CMS_USER edglrc_cms
#define RLS_LRC_LHCB_USER edglrc_lhcb
#define RLS_LRC_DZERO_USER edglrc_dzero
#define RLS_LRC_BIOM_USER edglrc_biom
#define RLS_LRC_EOBS_USER edglrc_eobs
#define RLS_LRC_WPSIX_USER edglrc_wpsix
#define RLS_LRC_ITEAM_USER edglrc_iteam
#define RLS_LRC_TUTOR_USER edglrc_tutor
#define RLS_LRC_BABAR_USER edglrc_babar
#define RLS_LRC_DTEAM_USER edglrc_dteam
/* These passwords are local to your site; choose anything you'd like. */
#define RLS_LRC_ALICE_PASSWORD
#define RLS_LRC_ATLAS_PASSWORD
#define RLS_LRC_CMS_PASSWORD
#define RLS_LRC_LHCB_PASSWORD
#define RLS_LRC_DZERO_PASSWORD
#define RLS_LRC_BIOM_PASSWORD
#define RLS_LRC_EOBS_PASSWORD
#define RLS_LRC_WPSIX_PASSWORD
#define RLS_LRC_ITEAM_PASSWORD
#define RLS_LRC_TUTOR_PASSWORD
#define RLS_LRC_BABAR_PASSWORD
#define RLS_LRC_DTEAM_PASSWORD
#define RLS_RMC_ALICE_USER edgrmc_alice
#define RLS_RMC_ATLAS_USER edgrmc_atlas
#define RLS_RMC_CMS_USER edgrmc_cms
#define RLS_RMC_LHCB_USER edgrmc_lhcb
#define RLS_RMC_DZERO_USER edgrmc_dzero
#define RLS_RMC_BIOM_USER edgrmc_biom
#define RLS_RMC_EOBS_USER edgrmc_eobs
#define RLS_RMC_WPSIX_USER edgrmc_wpsix
#define RLS_RMC_ITEAM_USER edgrmc_iteam
#define RLS_RMC_TUTOR_USER edgrmc_tutor
#define RLS_RMC_BABAR_USER edgrmc_babar
#define RLS_RMC_LCG_USER edgrmc_lcg
/* These passwords are local to your site; choose anything you'd like. */
#define RLS_RMC_ALICE_PASSWORD -secret-
#define RLS_RMC_ATLAS_PASSWORD -secret-
#define RLS_RMC_CMS_PASSWORD -secret-
#define RLS_RMC_LHCB_PASSWORD -secret-
#define RLS_RMC_DZERO_PASSWORD -secret-
#define RLS_RMC_BIOM_PASSWORD -secret-
#define RLS_RMC_EOBS_PASSWORD -secret-
#define RLS_RMC_WPSIX_PASSWORD -secret-
#define RLS_RMC_ITEAM_PASSWORD -secret-
#define RLS_RMC_TUTOR_PASSWORD -secret-
#define RLS_RMC_BABAR_PASSWORD -secret-
#define RLS_RMC_DTEAM_PASSWORD -secret-
#define RLS_EMAIL SITE_MAILROOT
/* This contains a comma-separated list of storage elements on the
site. If you have more than one, you will have to change this value
from the default. */
#define RLS_LRC_SE_HOSTS SE_HOSTNAME
/* NOTE: There is 1 RLS per VO. To set the VO supported by a particular
RLS server, the RLS-cfg.h file must be edited. Sorry. */
/* DATA MGT PARAMETERS FOR SEVERAL NODE TYPES ----------------------------
--------------------------------------------------------------------------- */
/* These variables define which VOs your site supports. At least one
   must be defined; 50 pool accounts will be created for each defined VO.
   The file /opt/edg/etc/mkgridmap.conf on your CE and SE must also be
   modified accordingly (see the illustrative entry after the VO list
   below).
*/
#define SE_VO_ALICE
#define SE_VO_ATLAS
#define SE_VO_CMS
#define SE_VO_LHCB
#define SE_VO_DZERO
#define SE_VO_BIOM
#define SE_VO_EOBS
#define SE_VO_WPSIX
#define SE_VO_ITEAM
#define SE_VO_TUTOR
#define SE_VO_BABAR
/* DTEAM VO must be commented */
/* #define SE_VO_DTEAM */
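/* For illustration only: the mkgridmap.conf entries mentioned above map
   a VO membership server to a local pool-account prefix; a typical line
   has the form (the LDAP URL below is a hypothetical example)
       group ldap://grid-vo.example.org/ou=testbed1,o=alice,dc=eu-datagrid,dc=org .alice
*/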
/* These cause a central LRC catalog for the given VO to be
configured. Only uncomment a line here if you are running the
official LRC for a virtual organization. */
/* #define LRC_VO_ALICE */
/* #define LRC_VO_ATLAS */
/* #define LRC_VO_CMS */
/* #define LRC_VO_LHCB */
/* #define LRC_VO_DZERO */
/* #define LRC_VO_BIOM */
/* #define LRC_VO_EOBS */
/* #define LRC_VO_WPSIX */
/* #define LRC_VO_ITEAM */
/* #define LRC_VO_TUTOR */
/* #define LRC_VO_BABAR */
/* COMPUTING ELEMENT DEFINITIONS ------------------------------------------
--------------------------------------------------------------------------- */
/* ComputingElement hostname. CE_HOSTNAME is DEFINED ABOVE. */
/* Define the batch system used for the CE. Only ONE in std. config! */
#define CE_LRMS_PBS
/* #define CE_LRMS_LSF */
/* #define CE_LRMS_CONDOR */
/* Setup variables for different batch systems. */
/* With PBS the WP4 resource management may also be used. */
#ifdef CE_LRMS_PBS
#define CE_LRMS_NAME pbs
#define CE_USE_RTCS 0
#endif
/* LSF requires manual configuration. */
#ifdef CE_LRMS_LSF
#define CE_LRMS_NAME lsf
#endif
/* Condor requires manual configuration. */
#ifdef CE_LRMS_CONDOR
#define CE_LRMS_NAME condor
#endif
/* This defines the worker nodes for the site. (Used only by GRM at
   the moment, so it is only relevant if the CE_RUN_GRM line below is
   uncommented.) */
/* #define CE_WORKER_NODES wn01.example.org wn02.example.org */
/* If the batch commands are not in the system default path, then you
must set the following with the necessary paths. */
/* #define CE_LRMS_PATHS /some/path /some/additional/path */
/* Full path of the certificate */
#define CE_CERT_PATH SITE_DEF_HOST_CERT
/* Full path of the secret key */
#define CE_SECKEY_PATH SITE_DEF_HOST_KEY
/* System administrator e-mail */
#define CE_SYSADMIN SITE_MAILROOT
/* Local queue names. This is a space-separated list of queue names. */
#define CE_QUEUES S M L
/* UGLY HACK TO GET GATEKEEPERS PUBLISHED (up to CE_QUEUE10 can be used) */
#define CE_QUEUE01 S
#define CE_QUEUE02 M
#define CE_QUEUE03 L
/* #define CE_QUEUE04 infinite */
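/* For illustration only: if the S, M and L queues ever have to be
   created by hand on the PBS server (rather than by the LCFG
   configuration), the qmgr commands would look roughly like
       qmgr -c "create queue S queue_type=execution"
       qmgr -c "set queue S enabled = true"
       qmgr -c "set queue S started = true"
   repeated for M and L. */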
/* Mount point(s) of the SE(s) close to this CE */
#define CE_CLOSE_SE_MOUNTPOINT /flatfiles/SE00
/* CPU model */
#define CE_IP_PROCESSMODEL P4
/* CPU vendor */
#define CE_IP_PROCESSVENDOR intel
/* CPU speed */
#define CE_IP_PROCESSSPEED 2000
/* CE Operating System */
#define CE_IP_OS Redhat
/* CE Operating System Release */
#define CE_IP_OS_RELEASE 7.3
/* CE InformationProviders: MinPhysMemory */
#define CE_IP_MINPHYSMEM 512
/* CE InformationProviders: MinVirtMemory */
#define CE_IP_MINVIRTMEM 1024
/* CE InformationProviders: SMPSize (number of cpus in an SMP box) */
#define CE_IP_SMPSIZE 2
/* CE InformationProviders: for some example SpecInt values see
   http://www.specbench.org/osg/cpu2000/results/cint2000.html */
/* CE InformationProviders: SpecInt 2000 */
#define CE_IP_SI00 756
/* CE InformationProviders: SpecFloat 2000 */
#define CE_IP_SF00 764
/* CE InformationProviders: OutboundIP */
#define CE_IP_OUTBOUNDIP TRUE
/* CE InformationProviders: InboundIP */
#define CE_IP_INBOUNDIP FALSE
/* CE InformationProviders: RunTimeEnvironment
Add or delete appropriate tags.
EDG-TEST should be defined for your CE; it indicates that your site
is running but hasn't yet been certified. Change this to
EDG-CERTIFIED once your site has been tested by the ITeam. */
#define CE_IP_RUNTIMEENV EDG-TEST /* EDG-CERTIFIED */ CMS-1.1.0 ATLAS-3.2.1 ALICE-3.07.01 LHCb-1.1.1 IDL-5.4 CMSIM-125 ALICE-3.09.05 ALIEN-1.29.9 /* MSS-AVAILABLE */ POVRAY-3.1 DEMTOOLS RAL-PRO
/* The mountpoint on the CE of the SE exported area via NFS */
#define CE_MOUNTPOINT_SE_AREA CE_CLOSE_SE_MOUNTPOINT
/* This will set up the daemons necessary to run GRM on your site.
   GRM is a package which allows users to monitor their applications.
   To enable it, uncomment the following line. This will also add a
   tag to the run-time environment list. */
/* #define CE_RUN_GRM */
/* Set this to 1 if you want to include the old MDS information providers. */
/* These are not necessary but may be included if desired. */
#define CE_USE_MDS_INFO 0
/* STORAGE ELEMENT DEFINITIONS --------------------------------------------
--------------------------------------------------------------------------- */
/* StorageElement hostname. SE_HOSTNAME is DEFINED ABOVE. */
/* Full path of the certificate */
#define SE_CERT_PATH SITE_DEF_HOST_CERT
/* Full path of the secret key */
#define SE_SECKEY_PATH SITE_DEF_HOST_KEY
/* Disk cache area for SE. */
#define SE_FILE_CACHE_AREA /flatfiles/SE00
/* When configuring the SE there are two parameters:
     1) SE_MSS      - values: disk, rfio or ads
     2) SE_MSS_PATH - values: "/bigdisk"              (for SE type disk)
                              "/castor/cern.ch/grid"  (for SE type rfio)
                              ""                      (for SE type ads)
   SE_MSS_PATH is the path to which files will be moved.
*/
/* By default a disk SE. */
#define SE_MSS disk
#define SE_MSS_PATH /bigdisk
/*
   Choose between the WP5 SE and a plain GridFTP SE:
       edg-se   for the WP5 SE
       disk     for a GridFTP-only SE
*/
#define SE_NAME edg-se
/* Define which protocols/ports are supported by the SE.
   Comment out any line not needed. (For the FILE protocol only the
   macro needs to be defined; its value is ignored.) */
#define SE_PROTOCOL_GRIDFTP_PORT 2811
#define SE_PROTOCOL_RFIO_PORT 3147
#define SE_PROTOCOL_FILE_PORT
/* Set this to 1 if you want to include the old MDS information providers. */
/* These are not necessary but may be included if desired. */
#define SE_USE_MDS_INFO 0
/* WORKER NODE DEFINITIONS ------------------------------------------------
--------------------------------------------------------------------------- */
/* The mountpoint on the WN of the SE area exported via NFS. It should
   be the same as the one used for the SE area on the CE. */
#define WN_MOUNTPOINT_SE_AREA CE_MOUNTPOINT_SE_AREA
/* USER INTERFACE DEFINITIONS ---------------------------------------------
--------------------------------------------------------------------------- */
/* Resource broker */
#define UI_RESBROKER rb.example.org
/* Logging and Bookkeeping URL */
#define UI_LOGBOOK https://rb.example.org:7846
/* AFS CLIENT CONFIGURATION -----------------------------------------------
--------------------------------------------------------------------------- */
/* NOTE: AFS is not officially supported by EDG. Configurations
   involving AFS are not tested by EDG and there is no guarantee that
   such configurations work. The rpm list and associated configuration
   are maintained on a best-effort basis. */
#define AFS_CELL your_cell
/* LITE INSTALLATION SUPPORT ---------------------------------------------
----------------------------------------------------------------------- */
/* By default EDG recommends the full (non-LITE) installation. */
/* If you set LITE to 1, DON'T FORGET to update the BASE-rpm file. */
#ifndef LITE
#define LITE 0
#endif
/* AUTOFS SUPPORT ---------------------------------------------------------
--------------------------------------------------------------------------*/
/* By default autofs is not used; it can be enabled by defining AUTOFS to 1. */
#ifndef AUTOFS
#define AUTOFS 0
#endif
/* COMMON USER ACCOUNTS ---------------------------------------------------
--------------------------------------------------------------------------*/
/* Check that the defaults do not conflict with any site-specific users
or groups. If you are using the pooled accounts, edit the group and
user IDs separately in that file. (The default has VO groups in the
2000 range and VO users in the 1000 range.) */
/* Account for running information system daemons. */
#define USER_UID_EDGINFO 999
#define USER_GID_EDGINFO 999
/* Account for running workload mgt daemons. */
#define USER_UID_EDGUSER 995
#define USER_GID_EDGUSER 995
/* Account for running tomcat4 daemon. */
#define USER_UID_TOMCAT4 91
#define USER_GID_TOMCAT4 91
/* Account for running storage element daemon. */
#define USER_UID_SE 997
#define USER_GID_SE 997
/* Account for running MySQL daemon. */
#define USER_UID_MYSQL 998
#define USER_GID_MYSQL 998
/* Account for running http (apache) daemon. */
#define USER_UID_APACHE 996
#define USER_GID_APACHE 996
/* Privileged account for RFIO. */
#define USER_UID_STAGE 994
#define USER_GID_STAGE 994
/* Resource management system. Maui and the RTCS share a group. */
#define USER_UID_MAUI 972
#define USER_UID_RTCS 971
#define USER_GID_RMS 97
/* Account for running VOMS software */
/* LSL commented as these are not in the RAL example
#define USER_UID_VOMS 993
#define USER_GID_VOMS 993
LSL */