[root@mallarme]# qmgr -c 'print server'
#
# Create queues and set their attributes.
#
#
# Create and define queue dteam
#
create queue dteam
set queue dteam queue_type = Execution
set queue dteam resources_max.cput = 48:00:00
set queue dteam resources_max.walltime = 72:00:00
set queue dteam acl_group_enable = True
set queue dteam acl_groups = dteam
set queue dteam acl_groups += dteam
set queue dteam acl_groups += dteam
set queue dteam acl_groups += dteam
set queue dteam acl_groups += dteam
set queue dteam acl_groups += dteam
set queue dteam acl_groups += dteam
set queue dteam acl_groups += dteam
set queue dteam acl_groups += dteam
set queue dteam acl_groups += dteam
set queue dteam acl_groups += dteam
set queue dteam acl_groups += dteam
set queue dteam acl_groups += dteam
set queue dteam acl_groups += dteam
set queue dteam acl_groups += dteam
set queue dteam acl_groups += dteam
set queue dteam acl_groups += dteam
set queue dteam acl_groups += dteam
set queue dteam acl_groups += dteam
set queue dteam enabled = True
set queue dteam started = True
#
# Create and define queue lhcb
#
create queue lhcb
set queue lhcb queue_type = Execution
set queue lhcb resources_max.cput = 48:00:00
set queue lhcb resources_max.walltime = 72:00:00
set queue lhcb acl_group_enable = True
set queue lhcb acl_groups = lhcb
set queue lhcb acl_groups += lhcb
set queue lhcb acl_groups += lhcb
set queue lhcb acl_groups += lhcb
set queue lhcb acl_groups += lhcb
set queue lhcb acl_groups += lhcb
set queue lhcb acl_groups += lhcb
set queue lhcb acl_groups += lhcb
set queue lhcb acl_groups += lhcb
set queue lhcb acl_groups += lhcb
set queue lhcb acl_groups += lhcb
set queue lhcb acl_groups += lhcb
set queue lhcb acl_groups += lhcb
set queue lhcb acl_groups += lhcb
set queue lhcb acl_groups += lhcb
set queue lhcb acl_groups += lhcb
set queue lhcb acl_groups += lhcb
set queue lhcb acl_groups += lhcb
set queue lhcb acl_groups += lhcb
set queue lhcb enabled = True
set queue lhcb started = True
#
# Create and define queue biomed
#
create queue biomed
set queue biomed queue_type = Execution
set queue biomed resources_max.cput = 48:00:00
set queue biomed resources_max.walltime = 72:00:00
set queue biomed acl_group_enable = True
set queue biomed acl_groups = biomed
set queue biomed acl_groups += biomed
set queue biomed acl_groups += biomed
set queue biomed acl_groups += biomed
set queue biomed acl_groups += biomed
set queue biomed acl_groups += biomed
set queue biomed acl_groups += biomed
set queue biomed acl_groups += biomed
set queue biomed acl_groups += biomed
set queue biomed acl_groups += biomed
set queue biomed acl_groups += biomed
set queue biomed acl_groups += biomed
set queue biomed acl_groups += biomed
set queue biomed acl_groups += biomed
set queue biomed acl_groups += biomed
set queue biomed acl_groups += biomed
set queue biomed acl_groups += biomed
set queue biomed acl_groups += biomed
set queue biomed acl_groups += biomed
set queue biomed enabled = True
set queue biomed started = True
#
# Create and define queue geant4
#
create queue geant4
set queue geant4 queue_type = Execution
set queue geant4 resources_max.cput = 48:00:00
set queue geant4 resources_max.walltime = 72:00:00
set queue geant4 acl_group_enable = True
set queue geant4 acl_groups = geant4
set queue geant4 acl_groups += geant4
set queue geant4 acl_groups += geant4
set queue geant4 acl_groups += geant4
set queue geant4 acl_groups += geant4
set queue geant4 acl_groups += geant4
set queue geant4 acl_groups += geant4
set queue geant4 acl_groups += geant4
set queue geant4 acl_groups += geant4
set queue geant4 acl_groups += geant4
set queue geant4 acl_groups += geant4
set queue geant4 enabled = True
set queue geant4 started = True
#
# Set server attributes.
#
set server scheduling = True
set server acl_host_enable = False
set server managers = [log in to unmask]
set server operators = [log in to unmask]
set server default_queue = dteam
set server log_events = 511
set server mail_from = adm
set server query_other_jobs = True
set server scheduler_iteration = 600
set server node_ping_rate = 300
set server node_check_rate = 600
set server default_node = lcgpro
set server node_pack = False
D "please, help me with geant4" G
Jeff Templon wrote:
> Hi
>
> If you do a
>
> qmgr -c 'print server'
>
> what does it say about the acl_groups on the queues?
>
> JT
>
> David Garcia Aristegui wrote:
>
>> I think i have discovered the error
>> more test.sh
>> #!/bin/sh
>> echo hola
>>
>> VO biomed
>> [root@mallarme biomedsgm]# su - biomedsgm
>> [biomedsgm@mallarme biomedsgm]$ qsub -q biomed test.sh
>> 9881.mallarme.cnb.uam.es
>>
>> VO geant4
>> [root@mallarme biomedsgm]# su - geantsgm
>> [geantsgm@mallarme geantsgm]$ qsub -q geant4 test.sh
>> qsub: Unauthorized Request
>>
>> How can I fix this? I mean, I want to allow the geant4 pool account
>> to use the geant4 queue.
>>
>> [root@mallarme var]# cd /opt/edg/var/info/
>> [root@mallarme info]# ls -l
>> total 16
>> drwxr-xr-x 2 biomedsgm biomed 4096 Oct 18 16:50 biomed
>> drwxr-xr-x 2 dteamsgm dteam 4096 Nov 26 23:06 dteam
>> drwxr-xr-x 2 geantsgm geant 4096 Nov 25 15:36 geant4
>> drwxr-xr-x 2 lhcbsgm lhcb 4096 Oct 18 16:50 lhcb
>>
>> [root@mallarme info]# cd /var/spool/pbs/server_priv/queues
>> [root@mallarme queues]# ls -l
>> total 16
>> -rw------- 1 root root 791 Dec 7 12:32 biomed
>> -rw------- 1 root root 711 Dec 7 12:32 dteam
>> -rw------- 1 root root 711 Dec 7 12:32 geant4
>> -rw------- 1 root root 794 Dec 7 12:32 lhcb
>>
>> [root@mallarme queues]# cd /var/spool/pbs/server_priv/acl_groups
>> [root@mallarme acl_groups]# ls -l
>> total 16
>> -rw------- 1 root root 133 Dec 7 12:32 biomed
>> -rw------- 1 root root 114 Dec 7 12:32 dteam
>> -rw------- 1 root root 77 Dec 7 12:32 geant4
>> -rw------- 1 root root 95 Dec 7 12:32 lhcb
>>
>> Everything seems to be correct. Any ideas?
>> Thanks.
>>
>> Dan Schrager wrote:
>>
>>> Hi David,
>>>
>>> Let me tell you how I have recently added VO geant4 to WEIZMANN-LCG2:
>>>
>>> 1)add geant4 to the VOS line in yaim def. file
>>> VOS="atlas alice lhcb cms dteam sixt zeus see geant4"
>>>
>>> 2)add this section in the yaim def. file:
>>> VO_GEANT4_SW_DIR=$VO_SW_DIR/geant4
>>> VO_GEANT4_DEFAULT_SE=$SE_HOST
>>> VO_GEANT4_SGM="x"
>>> VO_GEANT4_VOMS_SERVERS="vomss://lcg-voms.cern.ch:8443/voms/geant4?/geant4"
>>>
>>> VO_GEANT4_STORAGE_DIR=$CE_CLOSE_SE1_ACCESS_POINT/geant4
>>> VO_GEANT4_QUEUES="geant4"
>>>
>>> the "x" is a trick bought from ROLLOUT, needed.
>>>
>>> 3)add to users.conf a section like that:
>>> 30000:geant4sgm:2088:geant4:geant4:sgm:
>>> 30001:geant4001:2088:geant4:geant4::
>>> 30002:geant4002:2088:geant4:geant4::
>>> ...
>>> 30050:geant4050:2088:geant4:geant4::
>>>
>>> 2088 is the new group number I chose for geant4; 30000-30050 is my
>>> chosen range of geant4 uids.
>>>
>>> 4) Redo the configuration step of yaim for all nodes.(use scripts...)
>>>
>>> Regards,
>>> Dan
>>>
>>>
>>> David Garcia Aristegui wrote:
>>>
>>>> Yes, we are a YAIM compliant site. Maui was restarted, and we have
>>>> the same error.
>>>> The lcgpbs.rvf is
>>>> [root@mallarme root]# more
>>>> /opt/globus/share/globus_gram_job_manager/lcgpbs.rvf
>>>> Attribute: email_address
>>>> Description: "Set the email address to receive notifications. See the
>>>> email_on_abort, email_on_execution, and email_on_termination
>>>> attributes."
>>>> ValidWhen: GLOBUS_GRAM_JOB_SUBMIT
>>>> (...)
>>>> Attribute: queue
>>>> Values: dteam lhcb biomed geant4
>>>>
>>>> Ideas? Thank you very much.
>>>>
>>>> Steve Traylen wrote:
>>>>
>>>>> On Fri, Dec 09, 2005 at 11:56:50AM +0100 or thereabouts, David
>>>>> Garcia Aristegui wrote:
>>>>>
>>>>>
>>>>>> In my site, with a LCG2.6 Torque CE
>>>>>>
>>>>>
>>>>>
>>>>>
>>>>>
>>>>>
>>>>> Hi David,
>>>>>
>>>>>
>>>>>
>>>>>> [root@mallarme maui]# qstat -Q
>>>>>> Queue Max Tot Ena Str Que Run Hld Wat Trn Ext Type
>>>>>> ---------------- --- --- --- --- --- --- --- --- --- --- ----------
>>>>>> dteam 0 0 yes yes 0 0 0 0 0 0 Execution
>>>>>> lhcb 0 1 yes yes 0 1 0 0 0 0 Execution
>>>>>> biomed 0 0 yes yes 0 0 0 0 0 0 Execution
>>>>>> geant4 0 0 yes yes 0 0 0 0 0 0 Execution
>>>>>>
>>>>>> But the geant4 jobs never run in a WN, but we have no problems at
>>>>>> all with the other VO jobs (dteam, lhcb, biomed)
>>>>>>
>>>>>
>>>>>
>>>>>
>>>>>
>>>>>
>>>>> If you add a queue you should probably restart maui. But also the
>>>>> file
>>>>> in /opt/globus/share/globus_gram_job_manager/lcgpbs.rvf
>>>>> must contain the new queue, which is created by
>>>>> /opt/globus/sbin/globus-initialization.sh, which is called by YAIM
>>>>> if you are using that.
>>>>>
>>>>> Steve
>>>>>
>>>>>
>>>>>> *************************************************************
>>>>>> BOOKKEEPING INFORMATION:
>>>>>>
>>>>>> Status info for the Job :
>>>>>> https://gdrb03.cern.ch:9000/L9m-YZ5oaOIilL-jra_0Ww
>>>>>> Current Status: Aborted Status Reason: Job RetryCount
>>>>>> (3) hit
>>>>>> Destination:
>>>>>> mallarme.cnb.uam.es:2119/jobmanager-lcgpbs-geant4
>>>>>> reached on: Wed Dec 7 23:20:09 2005
>>>>>> *************************************************************
>>>>>>
>>>>>>
>>>>>> What should i check to fix this?
>>>>>>
>>>>>> Thanks.
>>>>>>
>>>>>
>>>>>
>>>>>
>>>>>
>>>>>
>>>>>
>>>>>
>>>>
>>>> +++++++++++++++++++++++++++++++++++++++++++
>>>> This Mail Was Scanned By Mail-seCure System
>>>> at the Tel-Aviv University CC.
>>>
>>>
>>>
>>>
>>>
>
>
|