PGI User Forum

Assertion failed
jand
Posted: Mon Oct 21, 2013 5:48 pm    Post subject: Assertion failed

Hi,

I have a problem with a derived type: when I change one of the arrays in the type to allocatable, I get the following error:

Assertion failed in file src/mpid/common/datatype/dataloop/segment.c at line 816: 0
internal ABORT - process 3

I can't see why there should be a problem in the code:

Code:

program struct
USE MPI
IMPLICIT NONE
INTEGER,PARAMETER :: IB = 4,RP=KIND(0.0)
INTEGER,PARAMETER :: NFIELD = 30
INTEGER,PARAMETER :: NPVPERT = 5,NSTN = 30,NARFP = 1,NPV = 5, NVMX = 20
INTEGER,PARAMETER :: NDEP = 25, NRAN = 60, NDAT = 6592, src = 0
INTEGER(KIND=IB) :: ifield,rank,ierr,numtasks,isource1,isource2,tag1
INTEGER(KIND=IB) :: oldtypes(NFIELD), blockcounts(NFIELD)
INTEGER(KIND=MPI_ADDRESS_KIND) :: offsets(NFIELD)
INTEGER(KIND=IB) :: iextent,rextent,dextent
integer :: n, status(mpi_status_size)
  INTEGER :: objtype      !! Name of objtype for MPI sending
  TYPE :: objstruc
    REAL(KIND=RP),DIMENSION(NVMX,NPV)            :: voro
    REAL(KIND=RP),DIMENSION(NDEP,NRAN)           :: ffsl1
    REAL(KIND=RP),DIMENSION(NDEP,NRAN)           :: ffsl2
    REAL(KIND=RP),DIMENSION(NDEP,NRAN)           :: ffsr 
    REAL(KIND=RP),DIMENSION(NDEP,NRAN)     :: ffdelay   
    REAL(KIND=RP),DIMENSION(NSTN)                :: sdpar
    REAL(KIND=RP),DIMENSION(NARFP*NSTN)          :: arpar
    INTEGER(KIND=IB),DIMENSION(NARFP*NSTN)       :: idxar 
    REAL(KIND=RP),DIMENSION(NPVPERT-2)           :: g       
    REAL(KIND=RP),DIMENSION(NPVPERT-2)           :: gp     
    REAL(KIND=RP),DIMENSION(NPVPERT-2,NPVPERT-2) :: Chat 
    REAL(KIND=RP),DIMENSION(NPVPERT-2,NPVPERT-2) :: Chati
    REAL(KIND=RP)                                :: detChat
    REAL(KIND=RP)                                :: beta
    INTEGER(KIND=IB)                             :: k       
    REAL(KIND=RP)                                :: sd       
    REAL(KIND=RP)                                :: logL   
    REAL(KIND=RP)                                :: logPr 
    REAL(KIND=RP)                                :: lognorm
    INTEGER(KIND=IB)                             :: ipropose_bd  = 0
    INTEGER(KIND=IB)                             :: iaccept_bd   = 0
    INTEGER(KIND=IB),DIMENSION(NPV)              :: iproposevoro = 0
    INTEGER(KIND=IB),DIMENSION(NPV)              :: iacceptvoro  = 0
    REAL(KIND=RP),   DIMENSION(NPV)              :: pertsd       = 0._RP
    REAL(KIND=RP)                                :: tcmp
    REAL(KIND=RP),DIMENSION(NDAT) :: dobs 
    REAL(KIND=RP),DIMENSION(NDAT) :: drep 
    REAL(KIND=RP),DIMENSION(NDAT) :: dres 
    !REAL(KIND=RP),DIMENSION(NDAT)            :: dar   
    REAL(KIND=RP),DIMENSION(:),ALLOCATABLE    :: dar
    INTEGER(KIND=IB),DIMENSION(:),ALLOCATABLE :: NTSMP
  END TYPE objstruc

TYPE (objstruc),DIMENSION(2) :: obj

ALLOCATE( obj(1)%dar(NDAT) )
ALLOCATE( obj(2)%dar(NDAT) )

call mpi_init(ierr)
call mpi_comm_rank(mpi_comm_world, rank, ierr)
call mpi_comm_size(mpi_comm_world, numtasks, ierr)

!  Need to first figure offset by getting size of MPI_REAL etc
call MPI_TYPE_EXTENT(MPI_INTEGER, iextent, ierr)
call MPI_TYPE_EXTENT(MPI_REAL, rextent, ierr)
call MPI_TYPE_EXTENT(MPI_DOUBLE_PRECISION, dextent, ierr)

blockcounts = (/ NVMX*NPV,  NDEP*NRAN, NDEP*NRAN, NDEP*NRAN, NDEP*NRAN, &
                 NSTN, NARFP*NSTN, NARFP*NSTN, &
                 NPVPERT-2, NPVPERT-2, (NPVPERT-2)**2, (NPVPERT-2)**2, &
                 1, 1, 1, 1, 1, 1, 1, &
                 1, 1, NPV, NPV, NPV, 1, &
                 NDAT, NDAT, NDAT, NDAT, NSTN /)
oldtypes(1:7)    = MPI_REAL
oldtypes(8)      = MPI_INTEGER
oldtypes(9:14)   = MPI_REAL
oldtypes(15)     = MPI_INTEGER
oldtypes(16:19)  = MPI_REAL
oldtypes(20:23)  = MPI_INTEGER
oldtypes(24:25)  = MPI_REAL
oldtypes(26:29)  = MPI_REAL
oldtypes(30)     = MPI_INTEGER
offsets(1) = 0
IF(oldtypes(1) == MPI_REAL)THEN
  offsets(2) = rextent * blockcounts(1)
ELSEIF(oldtypes(1) == MPI_INTEGER)THEN
  offsets(2) = iextent * blockcounts(1)
ENDIF
DO ifield = 3,NFIELD
  IF(oldtypes(ifield-1) == MPI_REAL)THEN
    offsets(ifield) = offsets(ifield-1) + rextent*blockcounts(ifield-1)
  ELSEIF(oldtypes(ifield-1) == MPI_INTEGER)THEN
    offsets(ifield) = offsets(ifield-1) + iextent*blockcounts(ifield-1)
  ENDIF
ENDDO
call MPI_TYPE_CREATE_STRUCT( NFIELD, blockcounts, offsets, oldtypes, objtype, ierr)
call MPI_TYPE_COMMIT(objtype, ierr)


IF(rank==src)THEN

  PRINT*,rank,'receiving'
  CALL MPI_RECV(obj(1), 1, objtype, MPI_ANY_SOURCE,MPI_ANY_TAG, MPI_COMM_WORLD, status, ierr )
  isource1 = status(MPI_SOURCE)  !! This saves the slave id for the following communication
  PRINT*,rank,'received from',isource1
  CALL MPI_RECV(obj(2), 1, objtype, MPI_ANY_SOURCE,MPI_ANY_TAG, MPI_COMM_WORLD, status, ierr )
  isource2 = status(MPI_SOURCE)  !! This saves the slave id for the following communication
  PRINT*,rank,'receiving from',isource2
  CALL MPI_SEND(obj(1), 1,objtype, isource1, rank, MPI_COMM_WORLD, ierr)
  PRINT*, 'SENDING'
  CALL MPI_SEND(obj(2), 1,objtype, isource2, rank, MPI_COMM_WORLD, ierr)
  PRINT*, 'DONE SENDING'

ELSE

  PRINT*,rank,'sending'
  tag1 = 1
  CALL MPI_SEND(obj, 1,objtype, src, tag1, MPI_COMM_WORLD, ierr)
  PRINT*,rank,'receiving'
  CALL MPI_RECV(obj, 1,objtype, src, MPI_ANY_TAG, MPI_COMM_WORLD, status, ierr )

ENDIF

call mpi_finalize(ierr)

end


Any help would be greatly appreciated.

Jan
mkcolg (The Portland Group Inc.)
Posted: Tue Oct 22, 2013 11:47 am

Hi Jan,

The assertion looks like it's coming from your MPI library. Which version of MPI are you using? Is it the MPICH2 that we ship with the PGI CDK?

Thanks,
Mat
jand
Posted: Tue Oct 22, 2013 11:54 am

Hi Mat,

Thanks for your reply.

I am on OS X 10.8 and installed MPICH2 1.5 myself (so it's not the version shipped with PGI; I wanted to use the same MPI we run on our cluster).

It's strange: I sometimes get segmentation faults as well when I make certain arrays in that structure allocatable, yet the code runs fine when I make a different array allocatable. I can't spot the problem in the code.

Thanks, Jan
mkcolg (The Portland Group Inc.)
Posted: Fri Oct 25, 2013 9:27 am

Hi Jan,

When I ran your program on Linux, I got a seg fault that appeared to be related to how obj was being passed. Due to time constraints, I handed this off to another dev tech, who was able to get the example working by registering the addresses of the obj members in the offsets array.

He was also puzzled that the second section sends and receives "obj" rather than "obj(1)" and "obj(2)", so we updated the two sections to mirror each other.

Hope this helps,
Mat


Code:
% cat uf_10_22_13d.F90
program struct
 USE MPI
 IMPLICIT NONE
 INTEGER,PARAMETER :: IB = 4,RP=KIND(0.0)
 INTEGER,PARAMETER :: NFIELD = 30
 INTEGER,PARAMETER :: NPVPERT = 5,NSTN = 30,NARFP = 1,NPV = 5, NVMX = 20
 INTEGER,PARAMETER :: NDEP = 25, NRAN = 60, NDAT = 6592, src = 0
 INTEGER(KIND=IB) :: ifield,rank,ierr,numtasks,isource1,isource2,tag1
 INTEGER(KIND=IB) :: oldtypes(NFIELD), blockcounts(NFIELD)
 INTEGER(KIND=MPI_ADDRESS_KIND) :: offsets(NFIELD)
 INTEGER(8) :: iextent, rextent, dextent, i

 integer :: n, status(mpi_status_size)
   INTEGER :: objtype1,objtype2      !! Name of objtype for MPI sending
   TYPE :: objstruc
     REAL(KIND=RP),DIMENSION(NVMX,NPV)            :: voro
     REAL(KIND=RP),DIMENSION(NDEP,NRAN)           :: ffsl1
     REAL(KIND=RP),DIMENSION(NDEP,NRAN)           :: ffsl2
     REAL(KIND=RP),DIMENSION(NDEP,NRAN)           :: ffsr
     REAL(KIND=RP),DIMENSION(NDEP,NRAN)     :: ffdelay
     REAL(KIND=RP),DIMENSION(NSTN)                :: sdpar
     REAL(KIND=RP),DIMENSION(NARFP*NSTN)          :: arpar
     INTEGER(KIND=IB),DIMENSION(NARFP*NSTN)       :: idxar
     REAL(KIND=RP),DIMENSION(NPVPERT-2)           :: g
     REAL(KIND=RP),DIMENSION(NPVPERT-2)           :: gp
     REAL(KIND=RP),DIMENSION(NPVPERT-2,NPVPERT-2) :: Chat
     REAL(KIND=RP),DIMENSION(NPVPERT-2,NPVPERT-2) :: Chati
     REAL(KIND=RP)                                :: detChat
     REAL(KIND=RP)                                :: beta
     INTEGER(KIND=IB)                             :: k
     REAL(KIND=RP)                                :: sd
     REAL(KIND=RP)                                :: logL
     REAL(KIND=RP)                                :: logPr
     REAL(KIND=RP)                                :: lognorm
     INTEGER(KIND=IB)                             :: ipropose_bd  = 0
     INTEGER(KIND=IB)                             :: iaccept_bd   = 0
     INTEGER(KIND=IB),DIMENSION(NPV)              :: iproposevoro = 0
     INTEGER(KIND=IB),DIMENSION(NPV)              :: iacceptvoro  = 0
     REAL(KIND=RP),   DIMENSION(NPV)              :: pertsd  = 0._RP
     REAL(KIND=RP)                                :: tcmp
     REAL(KIND=RP),DIMENSION(NDAT) :: dobs
     REAL(KIND=RP),DIMENSION(NDAT) :: drep
     REAL(KIND=RP),DIMENSION(NDAT) :: dres
     !REAL(KIND=RP),DIMENSION(NDAT)            :: dar
     REAL(KIND=RP),DIMENSION(:),ALLOCATABLE    :: dar
     INTEGER(KIND=IB),DIMENSION(:),ALLOCATABLE :: NTSMP
   END TYPE objstruc

 TYPE (objstruc),DIMENSION(2) :: obj

 ALLOCATE( obj(1)%dar(NDAT) )
 ALLOCATE( obj(2)%dar(NDAT) )
 ALLOCATE( obj(1)%NTSMP(NSTN))
 ALLOCATE( obj(2)%NTSMP(NSTN) )

 call mpi_init(ierr)
 call mpi_comm_rank(mpi_comm_world, rank, ierr)
 call mpi_comm_size(mpi_comm_world, numtasks, ierr)

 !  Need to first figure offset by getting size of MPI_REAL etc
 call MPI_TYPE_EXTENT(MPI_INTEGER, iextent, ierr)
 call MPI_TYPE_EXTENT(MPI_REAL, rextent, ierr)
 call MPI_TYPE_EXTENT(MPI_DOUBLE_PRECISION, dextent, ierr)

 blockcounts = (/ NVMX*NPV,  NDEP*NRAN, NDEP*NRAN, NDEP*NRAN, NDEP*NRAN, &
                  NSTN, NARFP*NSTN, NARFP*NSTN, &
                  NPVPERT-2, NPVPERT-2, (NPVPERT-2)**2, (NPVPERT-2)**2, &
                  1, 1, 1, 1, 1, 1, 1, &
                  1, 1, NPV, NPV, NPV, 1, &
                  NDAT, NDAT, NDAT, NDAT, NSTN /)
 oldtypes(1:7)    = MPI_REAL
 oldtypes(8)      = MPI_INTEGER
 oldtypes(9:14)   = MPI_REAL
 oldtypes(15)     = MPI_INTEGER
 oldtypes(16:19)  = MPI_REAL
 oldtypes(20:23)  = MPI_INTEGER
 oldtypes(24:25)  = MPI_REAL
 oldtypes(26:29)  = MPI_REAL
 oldtypes(30)     = MPI_INTEGER

 offsets(1) = 0
 IF(oldtypes(1) == MPI_REAL)THEN
   offsets(2) = rextent * blockcounts(1)
 ELSEIF(oldtypes(1) == MPI_INTEGER)THEN
   offsets(2) = iextent * blockcounts(1)
 ENDIF
 DO ifield = 3,NFIELD
   IF(oldtypes(ifield-1) == MPI_REAL)THEN
     offsets(ifield) = offsets(ifield-1) + rextent*blockcounts(ifield-1)
   ELSEIF(oldtypes(ifield-1) == MPI_INTEGER)THEN
     offsets(ifield) = offsets(ifield-1) + iextent*blockcounts(ifield-1)
   ENDIF
 ENDDO

if(rank==src) PRINT*,'OFFSETS',offsets

call mpi_get_address(obj(2)%voro,offsets(1),ierr)
call mpi_get_address(obj(2)%ffsl1,offsets(2),ierr)
call mpi_get_address(obj(2)%ffsl2,offsets(3),ierr)
call mpi_get_address(obj(2)%ffsr,offsets(4),ierr)
call mpi_get_address(obj(2)%ffdelay,offsets(5),ierr)
call mpi_get_address(obj(2)%sdpar,offsets(6),ierr)
call mpi_get_address(obj(2)%arpar,offsets(7),ierr)
call mpi_get_address(obj(2)%idxar,offsets(8),ierr)
call mpi_get_address(obj(2)%g,offsets(9),ierr)
call mpi_get_address(obj(2)%gp,offsets(10),ierr)
call mpi_get_address(obj(2)%Chat,offsets(11),ierr)
call mpi_get_address(obj(2)%Chati,offsets(12),ierr)
call mpi_get_address(obj(2)%detChat,offsets(13),ierr)
call mpi_get_address(obj(2)%beta,offsets(14),ierr)
call mpi_get_address(obj(2)%k,offsets(15),ierr)
call mpi_get_address(obj(2)%sd,offsets(16),ierr)
call mpi_get_address(obj(2)%logL,offsets(17),ierr)
call mpi_get_address(obj(2)%logPr,offsets(18),ierr)
call mpi_get_address(obj(2)%lognorm,offsets(19),ierr)
call mpi_get_address(obj(2)%ipropose_bd,offsets(20),ierr)
call mpi_get_address(obj(2)%iaccept_bd,offsets(21),ierr)
call mpi_get_address(obj(2)%iproposevoro,offsets(22),ierr)
call mpi_get_address(obj(2)%iacceptvoro,offsets(23),ierr)
call mpi_get_address(obj(2)%pertsd,offsets(24),ierr)
call mpi_get_address(obj(2)%tcmp,offsets(25),ierr)
call mpi_get_address(obj(2)%dobs,offsets(26),ierr)
call mpi_get_address(obj(2)%drep,offsets(27),ierr)
call mpi_get_address(obj(2)%dres,offsets(28),ierr)
call mpi_get_address(obj(2)%dar,offsets(29),ierr)
call mpi_get_address(obj(2)%NTSMP,offsets(30),ierr)

do i=2,size(offsets)
  offsets(i) = offsets(i) - offsets(1)
enddo
offsets(1) = 0

 if(rank==src) PRINT*,'OFFSETS',offsets

 call MPI_TYPE_CREATE_STRUCT( NFIELD, blockcounts, offsets, oldtypes, &
objtype2, ierr)
 call MPI_TYPE_COMMIT(objtype2, ierr)


call mpi_get_address(obj(1)%voro,offsets(1),ierr)
call mpi_get_address(obj(1)%ffsl1,offsets(2),ierr)
call mpi_get_address(obj(1)%ffsl2,offsets(3),ierr)
call mpi_get_address(obj(1)%ffsr,offsets(4),ierr)
call mpi_get_address(obj(1)%ffdelay,offsets(5),ierr)
call mpi_get_address(obj(1)%sdpar,offsets(6),ierr)
call mpi_get_address(obj(1)%arpar,offsets(7),ierr)
call mpi_get_address(obj(1)%idxar,offsets(8),ierr)
call mpi_get_address(obj(1)%g,offsets(9),ierr)
call mpi_get_address(obj(1)%gp,offsets(10),ierr)
call mpi_get_address(obj(1)%Chat,offsets(11),ierr)
call mpi_get_address(obj(1)%Chati,offsets(12),ierr)
call mpi_get_address(obj(1)%detChat,offsets(13),ierr)
call mpi_get_address(obj(1)%beta,offsets(14),ierr)
call mpi_get_address(obj(1)%k,offsets(15),ierr)
call mpi_get_address(obj(1)%sd,offsets(16),ierr)
call mpi_get_address(obj(1)%logL,offsets(17),ierr)
call mpi_get_address(obj(1)%logPr,offsets(18),ierr)
call mpi_get_address(obj(1)%lognorm,offsets(19),ierr)
call mpi_get_address(obj(1)%ipropose_bd,offsets(20),ierr)
call mpi_get_address(obj(1)%iaccept_bd,offsets(21),ierr)
call mpi_get_address(obj(1)%iproposevoro,offsets(22),ierr)
call mpi_get_address(obj(1)%iacceptvoro,offsets(23),ierr)
call mpi_get_address(obj(1)%pertsd,offsets(24),ierr)
call mpi_get_address(obj(1)%tcmp,offsets(25),ierr)
call mpi_get_address(obj(1)%dobs,offsets(26),ierr)
call mpi_get_address(obj(1)%drep,offsets(27),ierr)
call mpi_get_address(obj(1)%dres,offsets(28),ierr)
call mpi_get_address(obj(1)%dar,offsets(29),ierr)
call mpi_get_address(obj(1)%NTSMP,offsets(30),ierr)

do i=2,size(offsets)
  offsets(i) = offsets(i) - offsets(1)
enddo
offsets(1) = 0

 if(rank==src) PRINT*,'OFFSETS',offsets

 call MPI_TYPE_CREATE_STRUCT( NFIELD, blockcounts, offsets, oldtypes, &
objtype1, ierr)
 call MPI_TYPE_COMMIT(objtype1, ierr)

!PRINT*,'MPI_REAL',MPI_REAL
!PRINT*,'MPI_INTEGER',MPI_INTEGER
!PRINT*,'OFFSETS',offsets
!PRINT*,'blockcounts',blockcounts
!PRINT*,'oldtypes',oldtypes


 IF(rank.eq.src)THEN

   PRINT*,rank,'if receiving'
   CALL MPI_RECV(obj(1), 1, objtype1, MPI_ANY_SOURCE,MPI_ANY_TAG,&
   MPI_COMM_WORLD, status, ierr )
   isource1 = status(MPI_SOURCE)  !! This saves the slave id for the following communication
   PRINT*,rank,'if received from 1',isource1
   CALL MPI_RECV(obj(2), 1, objtype2, MPI_ANY_SOURCE,MPI_ANY_TAG,&
   MPI_COMM_WORLD, status, ierr )
   isource2 = status(MPI_SOURCE)  !! This saves the slave id for the following communication
   PRINT*,rank,'if receiving from 2',isource2
   CALL MPI_SEND(obj(1), 1,objtype1, isource1, rank, MPI_COMM_WORLD,&
   ierr)
   PRINT*, 'SENDING'
   CALL MPI_SEND(obj(2), 1,objtype2, isource2, rank, MPI_COMM_WORLD, &
   ierr)
   PRINT*, 'DONE SENDING'

 ELSE

   PRINT*,rank,'else sending'
   tag1 = 1
   CALL MPI_SEND(obj(1), 1,objtype1, src, tag1, MPI_COMM_WORLD, ierr)
   CALL MPI_SEND(obj(2), 1,objtype2, src, tag1, MPI_COMM_WORLD, ierr)
   PRINT*,rank,'else receiving'
   CALL MPI_RECV(obj(1), 1,objtype1, src, MPI_ANY_TAG, MPI_COMM_WORLD,&
   status, ierr )
   CALL MPI_RECV(obj(2), 1,objtype2, src, MPI_ANY_TAG, MPI_COMM_WORLD,&
   status, ierr )
   PRINT*, 'DONE RECEIVING rank',rank

 ENDIF

 call mpi_finalize(ierr)

 end

% pgf90 -Mmpi=mpich2 uf_10_22_13d.F90
% mpirun -np 2 a.out
 OFFSETS            1 else sending
                        0                      400
                     6400                    12400                    18400
                    24400                    24520                    24640
                    24760                    24772                    24784
                    24820                    24856                    24860
                    24864                    24868                    24872
                    24876                    24880                    24884
                    24888                    24892                    24912
                    24932                    24952                    24956
                    51324                    77692                   104060
                   130428
 OFFSETS                        0                      400
                     6400                    12400                    18400
                    24400                    24520                    24640
                    24760                    24772                    24784
                    24820                    24856                    24860
                    24864                    24868                    24872
                    24876                    24880                    24884
                    24888                    24892                    24912
                    24932                    24952                    24956
                    51324                    77692                   701712
                   728288
 OFFSETS                        0                      400
                     6400                    12400                    18400
                    24400                    24520                    24640
                    24760                    24772                    24784
                    24820                    24856                    24860
                    24864                    24868                    24872
                    24876                    24880                    24884
                    24888                    24892                    24912
                    24932                    24952                    24956
                    51324                    77692                   779536
                   832368
            0 if receiving
            0 if received from 1            1
            1 else receiving
            0 if receiving from 2            1
 SENDING
 DONE SENDING
 DONE RECEIVING rank            1
David Norton
Posted: Wed Oct 30, 2013 3:57 pm

The "offsets" array needs to have the real addresses in it - and since the last members are allocatable, they won't necessarily be in memory that is contiguous with the rest of the data structure. You can see by what is printed out the differences in addresses.

Note that because of how you have the sends and receives set up, the code will only run successfully with 3 MPI tasks.
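
One way to make that requirement explicit is a guard right after MPI_COMM_SIZE (a hypothetical addition, not in the original program; it reuses the program's numtasks, rank, src and ierr variables):

Code:

!! Hypothetical guard: the master/two-slave exchange above assumes
!! exactly 3 MPI tasks, so fail fast otherwise.
IF (numtasks /= 3) THEN
  IF (rank == src) PRINT*, 'This example needs exactly 3 MPI tasks, got', numtasks
  CALL MPI_ABORT(MPI_COMM_WORLD, 1, ierr)
ENDIF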