Problem at compiling kernel 2.4.18 with lvm 1.0.7

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 






--- the forwarded message follows ---
--- Begin Message --- Hi LVM friends,


I have a problem with the following scenario:


- I use SuSE linux 7.1 (I know, this might be a bit old, but I follow the rule "never touch a running system") with kernel 2.4.16 and lvm 1.0.7

- Since SuSE rolled out the latest kernel for 7.1 and I would like to keep my OS up to date I installed the 2.4.18 kernel source from SuSE on my machine

- I executed exactly the steps written in INSTALL and PATCH/README. Everything was ok

- Then I configured the rest of the kernel and tried to compile. But after a while, when it was compiling the LVM stuff, it stopped compiling with the following message:

gcc -D__KERNEL__ -I/usr/src/linux-2.4.18.SuSE/include -Wall -Wstrict-prototypes -Wno-trigraphs -O2 -fno-strict-aliasing -fno-common -fomit-frame-pointer -pipe -mpreferred-stack-boundary=2 -march=i686 -DKBUILD_BASENAME=lvm -c -o lvm.o lvm.c
gcc -D__KERNEL__ -I/usr/src/linux-2.4.18.SuSE/include -Wall -Wstrict-prototypes -Wno-trigraphs -O2 -fno-strict-aliasing -fno-common -fomit-frame-pointer -pipe -mpreferred-stack-boundary=2 -march=i686 -DKBUILD_BASENAME=lvm_snap -c -o lvm-snap.o lvm-snap.c
lvm-snap.c: In function `lvm_snapshot_COW':
lvm-snap.c:365: structure has no member named `blocks'
make[3]: *** [lvm-snap.o] Error 1
make[3]: Leaving directory `/usr/src/linux-2.4.18.SuSE/drivers/md'
make[2]: *** [first_rule] Error 2
make[2]: Leaving directory `/usr/src/linux-2.4.18.SuSE/drivers/md'
make[1]: *** [_subdir_md] Error 2
make[1]: Leaving directory `/usr/src/linux-2.4.18.SuSE/drivers'
make: *** [_dir_drivers] Error 2


Afterwards I looked into the patch file, and into lvm-snap.c at the given position, line 365. There is the reference to the "blocks" member of the iobuf structure. This iobuf structure is defined in the /usr/src/linux/include/linux/iobuf.h header file. But this header file is completely different in the source of the 2.4.18 kernel than in the source of the 2.4.16 kernel, where the patch and compile were ok.

Do you have any idea what I can do to get it compiled?

I attach the lvm patch file generated by make and the iobuf.h file for both versions 2.4.16 and 2.4.18.


Hope to hear from you soon, and thanks in advance



Krisztian
/*
 * iobuf.h
 *
 * Defines the structures used to track abstract kernel-space io buffers.
 *
 */

#ifndef __LINUX_IOBUF_H
#define __LINUX_IOBUF_H

#include <linux/mm.h>
#include <linux/init.h>
#include <linux/wait.h>
#include <asm/atomic.h>

/*
 * The kiobuf structure describes a physical set of pages reserved
 * locked for IO.  The reference counts on each page will have been
 * incremented, and the flags field will indicate whether or not we have
 * pre-locked all of the pages for IO.
 *
 * kiobufs may be passed in arrays to form a kiovec, but we must
 * preserve the property that no page is present more than once over the
 * entire iovec.
 */

#define KIO_MAX_ATOMIC_IO	512 /* in kb */
#define KIO_STATIC_PAGES	(KIO_MAX_ATOMIC_IO / (PAGE_SIZE >> 10) + 1)
#define KIO_MAX_SECTORS		(KIO_MAX_ATOMIC_IO * 2)

/* The main kiobuf struct used for all our IO! */

/*
 * 2.4.16-style kiobuf: the bh/blocks arrays are embedded directly in
 * the struct, sized for the largest atomic IO.  LVM 1.0.7's
 * lvm-snap.c references iobuf->blocks, so it compiles against this
 * layout.  Field order/layout must not change (in-kernel ABI).
 */
struct kiobuf 
{
	int		nr_pages;	/* Pages actually referenced */
	int		array_len;	/* Space in the allocated lists */
	int		offset;		/* Offset to start of valid data */
	int		length;		/* Number of valid bytes of data */

	/* Keep separate track of the physical addresses and page
	 * structs involved.  If we do IO to a memory-mapped device
	 * region, there won't necessarily be page structs defined for
	 * every address. */

	struct page **	maplist;

	unsigned int	locked : 1;	/* If set, pages have been locked */
	
	/* Always embed enough struct pages for atomic IO */
	struct page *	map_array[KIO_STATIC_PAGES];
	struct buffer_head * bh[KIO_MAX_SECTORS];	/* buffer heads used to drive the IO */
	unsigned long blocks[KIO_MAX_SECTORS];		/* per-sector device block numbers */

	/* Dynamic state for IO completion: */
	atomic_t	io_count;	/* IOs still in progress */
	int		errno;		/* Status of completed IO */
	void		(*end_io) (struct kiobuf *); /* Completion callback */
	wait_queue_head_t wait_queue;
};


/* mm/memory.c */

int	map_user_kiobuf(int rw, struct kiobuf *, unsigned long va, size_t len);
void	unmap_kiobuf(struct kiobuf *iobuf);
int	lock_kiovec(int nr, struct kiobuf *iovec[], int wait);
int	unlock_kiovec(int nr, struct kiobuf *iovec[]);
void	mark_dirty_kiobuf(struct kiobuf *iobuf, int bytes);

/* fs/iobuf.c */

void	end_kio_request(struct kiobuf *, int);
void	simple_wakeup_kiobuf(struct kiobuf *);
int	alloc_kiovec(int nr, struct kiobuf **);
void	free_kiovec(int nr, struct kiobuf **);
int	expand_kiobuf(struct kiobuf *, int);
void	kiobuf_wait_for_io(struct kiobuf *);
extern int alloc_kiobuf_bhs(struct kiobuf *);
extern void free_kiobuf_bhs(struct kiobuf *);

/* fs/buffer.c */

int	brw_kiovec(int rw, int nr, struct kiobuf *iovec[], 
		   kdev_t dev, unsigned long b[], int size);

#endif /* __LINUX_IOBUF_H */
/*
 * iobuf.h
 *
 * Defines the structures used to track abstract kernel-space io buffers.
 *
 */

#ifndef __LINUX_IOBUF_H
#define __LINUX_IOBUF_H

#include <linux/mm.h>
#include <linux/init.h>
#include <linux/wait.h>
#include <asm/atomic.h>

/*
 * The kiobuf structure describes a physical set of pages reserved
 * locked for IO.  The reference counts on each page will have been
 * incremented, and the flags field will indicate whether or not we have
 * pre-locked all of the pages for IO.
 *
 * kiobufs may be passed in arrays to form a kiovec, but we must
 * preserve the property that no page is present more than once over the
 * entire iovec.
 */

#define KIO_MAX_ATOMIC_IO	512 /* in kb */
#define KIO_STATIC_PAGES	(KIO_MAX_ATOMIC_IO / (PAGE_SIZE >> 10) + 1)
#define KIO_MAX_SECTORS		(KIO_MAX_ATOMIC_IO * 2)

#define RAWIO_BLOCKSIZE		4096
#define RAWIO_BLOCKMASK		(RAWIO_BLOCKSIZE-1)

/* The main kiobuf struct used for all our IO! */

/*
 * SuSE 2.4.18 variant of the kiobuf: the embedded map_array/bh/blocks
 * arrays of the stock 2.4.16 header are gone; buffer heads and block
 * numbers are now reached through the kio_bh/kio_blocks pointers
 * (presumably allocated by alloc_kiobuf_bhs() -- confirm in fs/iobuf.c),
 * and a 'dovary' bit was added for variable-size IO.  Code such as
 * LVM's lvm-snap.c that still dereferences iobuf->blocks fails to
 * compile against this layout ("structure has no member named
 * `blocks'").  Field order/layout must not change (in-kernel ABI).
 */
struct kiobuf 
{
	int		nr_pages;	/* Pages actually referenced */
	int		array_len;	/* Space in the allocated lists */
	int		offset;		/* Offset to start of valid data */
	int		length;		/* Number of valid bytes of data */

	/* Keep separate track of the physical addresses and page
	 * structs involved.  If we do IO to a memory-mapped device
	 * region, there won't necessarily be page structs defined for
	 * every address. */

	struct page **	maplist;

	unsigned int	locked : 1,	/* If set, pages have been locked */
			dovary : 1;	/* If set, do variable size IO */
	
	struct buffer_head ** kio_bh;	/* buffer heads (dynamic; replaces embedded bh[]) */
	unsigned long * kio_blocks;	/* device block numbers (dynamic; replaces blocks[]) */

	/* Dynamic state for IO completion: */
	atomic_t	io_count;	/* IOs still in progress */
	int		errno;		/* Status of completed IO */
	void		(*end_io) (struct kiobuf *); /* Completion callback */
	wait_queue_head_t wait_queue;
};


/* mm/memory.c */

int	map_user_kiobuf(int rw, struct kiobuf *, unsigned long va, size_t len);
void	unmap_kiobuf(struct kiobuf *iobuf);
int	lock_kiovec(int nr, struct kiobuf *iovec[], int wait);
int	unlock_kiovec(int nr, struct kiobuf *iovec[]);
void	mark_dirty_kiobuf(struct kiobuf *iobuf, int bytes);

/* fs/iobuf.c */

void	end_kio_request(struct kiobuf *, int);
void	simple_wakeup_kiobuf(struct kiobuf *);
int	alloc_kiovec(int nr, struct kiobuf **);
void	free_kiovec(int nr, struct kiobuf **);
int	expand_kiobuf(struct kiobuf *, int);
void	kiobuf_wait_for_io(struct kiobuf *);
extern int alloc_kiobuf_bhs(struct kiobuf *);
extern void free_kiobuf_bhs(struct kiobuf *);

/* fs/buffer.c */

int	brw_kiovec(int rw, int nr, struct kiobuf *iovec[], 
		   kdev_t dev, unsigned long b[], int size);

#endif /* __LINUX_IOBUF_H */
--- linux/include/linux/lvm.h.orig	Mon Mar 24 15:07:25 2003
+++ linux/include/linux/lvm.h	Tue Oct 28 12:43:03 2003
@@ -3,13 +3,14 @@
  * kernel/lvm.h
  * tools/lib/lvm.h
  *
- * Copyright (C) 1997 - 2001  Heinz Mauelshagen, Sistina Software
+ * Copyright (C) 1997 - 2002  Heinz Mauelshagen, Sistina Software
  *
  * February-November 1997
  * May-July 1998
  * January-March,July,September,October,Dezember 1999
  * January,February,July,November 2000
  * January-March,June,July 2001
+ * May 2002
  *
  * lvm is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -79,8 +80,8 @@
 #ifndef _LVM_H_INCLUDE
 #define _LVM_H_INCLUDE
 
-#define LVM_RELEASE_NAME "1.0.3"
-#define LVM_RELEASE_DATE "19/02/2002"
+#define LVM_RELEASE_NAME "1.0.7"
+#define LVM_RELEASE_DATE "28/03/2003"
 
 #define	_LVM_KERNEL_H_VERSION	"LVM "LVM_RELEASE_NAME" ("LVM_RELEASE_DATE")"
 
@@ -93,7 +94,7 @@
 #define	LVM_TOTAL_RESET
 
 #ifdef __KERNEL__
-#undef LVM_HD_NAME /* display nice names in /proc/partitions */
+#undef LVM_HD_NAME		/* display nice names in /proc/partitions */
 
 /* lots of debugging output (see driver source)
    #define DEBUG_LVM_GET_INFO
@@ -117,7 +118,7 @@
    causes problems on some platforms. It's not nice but then
    neither is the alternative. */
 struct list_head {
-        struct list_head *next, *prev;
+	struct list_head *next, *prev;
 };
 #define __KERNEL__
 #include <linux/kdev_t.h>
@@ -208,40 +209,35 @@
 /*
  * VGDA: default disk spaces and offsets
  *
- *   There's space after the structures for later extensions.
- *   The physical volume structure holds offset and size definitions
- *   for itself (well, kind of redundant ;-) and all other structure{s| arrays};
- *
- *   In recent versions since LVM 0.9.1 we align to 4k offsets in order to ease
- *   future kernel reads of the metadata.
- *
- *   offset               what                               size
- *   ---------------      --------------------------------   ------------
- *   0                    physical volume structure          pv->pv_on_disk.size
- *                                                           (~500 byte)
- *   pv->vg_on_disk.base  volume group structure             pv->vg_on_disk.size
- *
- *   pv->uuidlist_on_disk.base                               128 byte each
- *                        uuidlist of physical volumes
- *                        holding one uuid per physical volume
+ *   there's space after the structures for later extensions.
  *
- *   pv->lv_on_disk.base  logical volume structures;         pv->lv_on_disk.size
- *                        one structure per logical volume   (~300 byte each)
+ *   offset            what                                size
+ *   ---------------   ----------------------------------  ------------
+ *   0                 physical volume structure           ~500 byte
  *
- *   pv->pe_on_disk.base  physical extent alloc. structs     pv->pe_on_disk.size
- *                        one strcuture per physical extent  (4 byte each)
+ *   1K                volume group structure              ~200 byte
  *
- *   End of disk -        first physical extent              default 4 megabyte
+ *   6K                namelist of physical volumes        128 byte each
+ *
+ *   6k + n * ~300byte n logical volume structures         ~300 byte each
+ *
+ *   + m * 4byte       m physical extent alloc. structs    4 byte each
+ *
+ *   End of disk -     first physical extent               typically 4 megabyte
  *   PE total *
  *   PE size
- *   (rounded to 64k offset today)
  *
- *   pv->pe_on_disk.base + pv->pe_on_disk.size == start of first physical extent
  *
  */
 
 /* DONT TOUCH THESE !!! */
 
+
+
+
+
+
+
 /*
  * LVM_PE_T_MAX corresponds to:
  *
@@ -253,7 +249,7 @@
  *
  * Maximum PE size of 16GB gives a maximum logical volume size of 1024 TB.
  *
- * AFAIK, the actual kernels limit this to 2 TB.
+ * AFAIK, the actual kernels limit this to 1 TB.
  *
  * Should be a sufficient spectrum ;*)
  */
@@ -262,9 +258,9 @@
 #define	LVM_PE_T_MAX		( ( 1 << ( sizeof ( uint16_t) * 8)) - 2)
 
 #define	LVM_LV_SIZE_MAX(a)	( ( long long) LVM_PE_T_MAX * (a)->pe_size > ( long long) 1024*1024/SECTOR_SIZE*1024*1024 ? ( long long) 1024*1024/SECTOR_SIZE*1024*1024 : ( long long) LVM_PE_T_MAX * (a)->pe_size)
-#define	LVM_MIN_PE_SIZE		( 8192L / SECTOR_SIZE) /* 8 KB in sectors */
+#define	LVM_MIN_PE_SIZE		( 8192L / SECTOR_SIZE)	/* 8 KB in sectors */
 #define	LVM_MAX_PE_SIZE		( 16L * 1024L * 1024L / SECTOR_SIZE * 1024)	/* 16GB in sectors */
-#define	LVM_DEFAULT_PE_SIZE	( 4096L * 1024 / SECTOR_SIZE)	/* 4 MB in sectors */
+#define	LVM_DEFAULT_PE_SIZE	( 32768L * 1024 / SECTOR_SIZE)	/* 32 MB in sectors */
 #define	LVM_DEFAULT_STRIPE_SIZE	16L	/* 16 KB  */
 #define	LVM_MIN_STRIPE_SIZE	( PAGE_SIZE/SECTOR_SIZE)	/* PAGESIZE in sectors */
 #define	LVM_MAX_STRIPE_SIZE	( 512L * 1024 / SECTOR_SIZE)	/* 512 KB in sectors */
@@ -273,7 +269,7 @@
 #define	LVM_MAX_MIRRORS    	2	/* future use */
 #define	LVM_MIN_READ_AHEAD	0	/* minimum read ahead sectors */
 #define	LVM_DEFAULT_READ_AHEAD	1024	/* sectors for 512k scsi segments */
-#define	LVM_MAX_READ_AHEAD	10000	/* maximum read ahead sectors */
+#define	LVM_MAX_READ_AHEAD	1024	/* maximum read ahead sectors */
 #define	LVM_MAX_LV_IO_TIMEOUT	60	/* seconds I/O timeout (future use) */
 #define	LVM_PARTITION           0xfe	/* LVM partition id */
 #define	LVM_NEW_PARTITION       0x8e	/* new LVM partition id (10/09/1999) */
@@ -420,9 +416,9 @@
 typedef struct lv_block_exception_v1 {
 	struct list_head hash;
 	uint32_t rsector_org;
-	kdev_t   rdev_org;
+	kdev_t rdev_org;
 	uint32_t rsector_new;
-	kdev_t   rdev_new;
+	kdev_t rdev_new;
 } lv_block_exception_t;
 
 /* disk stored pe information */
@@ -466,7 +462,7 @@
 	uint pe_stale;		/* for future use */
 	pe_disk_t *pe;		/* HM */
 	struct block_device *bd;
-	char pv_uuid[UUID_LEN+1];
+	char pv_uuid[UUID_LEN + 1];
 
 #ifndef __KERNEL__
 	uint32_t pe_start;	/* in sectors */
@@ -477,7 +473,7 @@
 /* disk */
 typedef struct pv_disk_v2 {
 	uint8_t id[2];		/* Identifier */
-	uint16_t version;		/* HM lvm version */
+	uint16_t version;	/* HM lvm version */
 	lvm_disk_data_t pv_on_disk;
 	lvm_disk_data_t vg_on_disk;
 	lvm_disk_data_t pv_uuidlist_on_disk;
@@ -490,14 +486,14 @@
 	uint32_t pv_number;
 	uint32_t pv_status;
 	uint32_t pv_allocatable;
-	uint32_t pv_size;		/* HM */
+	uint32_t pv_size;	/* HM */
 	uint32_t lv_cur;
 	uint32_t pe_size;
 	uint32_t pe_total;
 	uint32_t pe_allocated;
-	
+
 	/* new in struct version 2 */
-	uint32_t pe_start;	        /* in sectors */
+	uint32_t pe_start;	/* in sectors */
 
 } pv_disk_t;
 
@@ -571,8 +567,8 @@
 	uint32_t lv_snapshot_hash_table_size;
 	uint32_t lv_snapshot_hash_mask;
 	wait_queue_head_t lv_snapshot_wait;
-	int	lv_snapshot_use_rate;
-	struct vg_v3	*vg;
+	int lv_snapshot_use_rate;
+	struct vg_v3 *vg;
 
 	uint lv_allocated_snapshot_le;
 #else
@@ -586,14 +582,14 @@
 	uint8_t vg_name[NAME_LEN];
 	uint32_t lv_access;
 	uint32_t lv_status;
-	uint32_t lv_open;		/* HM */
-	uint32_t lv_dev;		/* HM */
+	uint32_t lv_open;	/* HM */
+	uint32_t lv_dev;	/* HM */
 	uint32_t lv_number;	/* HM */
 	uint32_t lv_mirror_copies;	/* for future use */
 	uint32_t lv_recovery;	/*       "        */
 	uint32_t lv_schedule;	/*       "        */
 	uint32_t lv_size;
-	uint32_t lv_snapshot_minor;/* minor number of original */
+	uint32_t lv_snapshot_minor;	/* minor number of original */
 	uint16_t lv_chunk_size;	/* chunk size of snapshot */
 	uint16_t dummy;
 	uint32_t lv_allocated_le;
@@ -630,7 +626,7 @@
 	struct proc_dir_entry *proc;
 	pv_t *pv[ABS_MAX_PV + 1];	/* physical volume struct pointers */
 	lv_t *lv[ABS_MAX_LV + 1];	/* logical  volume struct pointers */
-	char vg_uuid[UUID_LEN+1];	/* volume group UUID */
+	char vg_uuid[UUID_LEN + 1];	/* volume group UUID */
 #ifdef __KERNEL__
 	struct proc_dir_entry *vg_dir_pde;
 	struct proc_dir_entry *lv_subdir_pde;
@@ -644,20 +640,20 @@
 /* disk */
 typedef struct vg_disk_v2 {
 	uint8_t vg_uuid[UUID_LEN];	/* volume group UUID */
-	uint8_t vg_name_dummy[NAME_LEN-UUID_LEN];	/* rest of v1 VG name */
+	uint8_t vg_name_dummy[NAME_LEN - UUID_LEN];	/* rest of v1 VG name */
 	uint32_t vg_number;	/* volume group number */
 	uint32_t vg_access;	/* read/write */
 	uint32_t vg_status;	/* active or not */
-	uint32_t lv_max;		/* maximum logical volumes */
-	uint32_t lv_cur;		/* current logical volumes */
-	uint32_t lv_open;		/* open    logical volumes */
-	uint32_t pv_max;		/* maximum physical volumes */
-	uint32_t pv_cur;		/* current physical volumes FU */
-	uint32_t pv_act;		/* active physical volumes */
+	uint32_t lv_max;	/* maximum logical volumes */
+	uint32_t lv_cur;	/* current logical volumes */
+	uint32_t lv_open;	/* open    logical volumes */
+	uint32_t pv_max;	/* maximum physical volumes */
+	uint32_t pv_cur;	/* current physical volumes FU */
+	uint32_t pv_act;	/* active physical volumes */
 	uint32_t dummy;
 	uint32_t vgda;		/* volume group descriptor arrays FU */
-	uint32_t pe_size;		/* physical extent size in sectors */
-	uint32_t pe_total;		/* total of physical extents */
+	uint32_t pe_size;	/* physical extent size in sectors */
+	uint32_t pe_total;	/* total of physical extents */
 	uint32_t pe_allocated;	/* allocated physical extents */
 	uint32_t pvg_total;	/* physical volume groups FU */
 } vg_disk_t;
@@ -716,40 +712,44 @@
 
 /* Request structure LV_SNAPSHOT_USE_RATE */
 typedef struct {
-	int	block;
-	int	rate;
+	int block;
+	int rate;
 } lv_snapshot_use_rate_req_t;
 
 
 
 /* useful inlines */
-static inline ulong round_up(ulong n, ulong size) {
+static inline ulong round_up(ulong n, ulong size)
+{
 	size--;
 	return (n + size) & ~size;
 }
 
-static inline ulong div_up(ulong n, ulong size) {
+static inline ulong div_up(ulong n, ulong size)
+{
 	return round_up(n, size) / size;
 }
 
 /* FIXME: nasty capital letters */
-static int inline LVM_GET_COW_TABLE_CHUNKS_PER_PE(vg_t *vg, lv_t *lv) {
+static int inline LVM_GET_COW_TABLE_CHUNKS_PER_PE(vg_t * vg, lv_t * lv)
+{
 	return vg->pe_size / lv->lv_chunk_size;
 }
 
-static int inline LVM_GET_COW_TABLE_ENTRIES_PER_PE(vg_t *vg, lv_t *lv) {
+static int inline LVM_GET_COW_TABLE_ENTRIES_PER_PE(vg_t * vg, lv_t * lv)
+{
 	ulong chunks = vg->pe_size / lv->lv_chunk_size;
 	ulong entry_size = sizeof(lv_COW_table_disk_t);
 	ulong chunk_size = lv->lv_chunk_size * SECTOR_SIZE;
 	ulong entries = (vg->pe_size * SECTOR_SIZE) /
-		(entry_size + chunk_size);
+	    (entry_size + chunk_size);
 
-	if(chunks < 2)
+	if (chunks < 2)
 		return 0;
 
-	for(; entries; entries--)
-		if((div_up(entries * entry_size, chunk_size) + entries) <=
-		   chunks)
+	for (; entries; entries--)
+		if ((div_up(entries * entry_size, chunk_size) + entries) <=
+		    chunks)
 			break;
 
 	return entries;
@@ -757,4 +757,3 @@
 
 
 #endif				/* #ifndef _LVM_H_INCLUDE */
-
--- linux/drivers/md/lvm.c.orig	Mon Mar 24 15:07:37 2003
+++ linux/drivers/md/lvm.c	Tue Oct 28 12:43:03 2003
@@ -1,13 +1,15 @@
 /*
  * kernel/lvm.c
  *
- * Copyright (C) 1997 - 2001  Heinz Mauelshagen, Sistina Software
+ * Copyright (C) 1997 - 2002  Heinz Mauelshagen, Sistina Software
  *
  * February-November 1997
  * April-May,July-August,November 1998
  * January-March,May,July,September,October 1999
  * January,February,July,September-November 2000
- * January-April 2001
+ * January-May,June,October 2001
+ * May-August 2002
+ * February 2003
  *
  *
  * LVM driver is free software; you can redistribute it and/or modify
@@ -214,6 +216,17 @@
  *                 in the LV each time.  [AED]
  *    12/10/2001 - Use add/del_gendisk() routines in 2.4.10+
  *    01/11/2001 - Backport read_ahead change from Linus kernel [AED]
+ *    24/05/2002 - fixed locking bug in lvm_do_le_remap() introduced with 1.0.4
+ *    13/06/2002 - use blk_ioctl() to support various standard block ioctls
+ *               - support HDIO_GETGEO_BIG ioctl
+ *    05/07/2002 - fixed OBO error on vg array access [benh@kernel.crashing.org]
+ *    22/07/2002 - streamlined blk_ioctl() call
+ *    14/08/2002 - stored fs handle in lvm_do_lv_rename
+ *                 [kaoru@bsd.tnes.nec.co.jp]
+ *    06/02/2003 - fix persistent snapshot extend/reduce bug in
+ *		   lvm_do_lv_extend_reduce() [dalestephenson@mac.com]
+ *    04/03/2003 - snapshot extend/reduce memory leak
+ *               - VG PE counter wrong [dalestephenson@mac.com]
  *
  */
 
@@ -223,6 +236,9 @@
 #define DEVICE_OFF(device)
 #define LOCAL_END_REQUEST
 
+/* lvm_do_lv_create calls fsync_dev_lockfs()/unlockfs() */
+/* #define	LVM_VFS_ENHANCEMENT */
+
 #include <linux/config.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
@@ -354,14 +370,13 @@
 
 
 /* volume group descriptor area pointers */
-vg_t *vg[ABS_MAX_VG];
+vg_t *vg[ABS_MAX_VG + 1];
 
 /* map from block minor number to VG and LV numbers */
-typedef struct {
+static struct {
 	int vg_number;
 	int lv_number;
-} vg_lv_map_t;
-static vg_lv_map_t vg_lv_map[ABS_MAX_LV];
+} vg_lv_map[ABS_MAX_LV];
 
 
 /* Request structures (lvm_chr_ioctl()) */
@@ -391,6 +406,7 @@
 
 
 struct file_operations lvm_chr_fops = {
+	owner:THIS_MODULE,
 	open:lvm_chr_open,
 	release:lvm_chr_close,
 	ioctl:lvm_chr_ioctl,
@@ -398,10 +414,10 @@
 
 /* block device operations structure needed for 2.3.38? and above */
 struct block_device_operations lvm_blk_dops = {
-	owner:THIS_MODULE,
-	open:lvm_blk_open,
-	release:lvm_blk_close,
-	ioctl:lvm_blk_ioctl,
+	.owner		= THIS_MODULE,
+	.open		= lvm_blk_open,
+	.release	= lvm_blk_close,
+	.ioctl		= lvm_blk_ioctl,
 };
 
 
@@ -412,13 +428,13 @@
 static int lvm_size[MAX_LV];
 
 static struct gendisk lvm_gendisk = {
-	major:MAJOR_NR,
-	major_name:LVM_NAME,
-	minor_shift:0,
-	max_p:1,
-	part:lvm_hd_struct,
-	sizes:lvm_size,
-	nr_real:MAX_LV,
+	.major		= MAJOR_NR,
+	.major_name	= LVM_NAME,
+	.minor_shift	= 0,
+	.max_p		= 1,
+	.part		= lvm_hd_struct,
+	.sizes		= lvm_size,
+	.nr_real	= MAX_LV,
 };
 
 
@@ -531,7 +547,7 @@
 	pe_lock_req.data.pv_offset = 0;
 
 	/* Initialize VG pointers */
-	for (v = 0; v < ABS_MAX_VG; v++)
+	for (v = 0; v < ABS_MAX_VG + 1; v++)
 		vg[v] = NULL;
 
 	/* Initialize LV -> VG association */
@@ -624,7 +640,8 @@
 		/* check lvm version to ensure driver/tools+lib
 		   interoperability */
 		if (copy_to_user(arg, &lvm_iop_version, sizeof(ushort)) !=
-		    0) return -EFAULT;
+		    0)
+			return -EFAULT;
 		return 0;
 
 #ifdef LVM_TOTAL_RESET
@@ -687,7 +704,8 @@
 		if (vg_ptr == NULL)
 			return -ENXIO;
 		if (copy_from_user(&extendable, arg, sizeof(extendable)) !=
-		    0) return -EFAULT;
+		    0)
+			return -EFAULT;
 
 		if (extendable == VG_EXTENDABLE ||
 		    extendable == ~VG_EXTENDABLE) {
@@ -743,7 +761,8 @@
 
 		if (command != LV_REMOVE) {
 			if (copy_from_user(&lv, lv_req.lv, sizeof(lv_t)) !=
-			    0) return -EFAULT;
+			    0)
+				return -EFAULT;
 		}
 		switch (command) {
 		case LV_CREATE:
@@ -799,7 +818,7 @@
 		printk(KERN_WARNING
 		       "%s -- lvm_chr_ioctl: unknown command 0x%x\n",
 		       lvm_name, command);
-		return -EINVAL;
+		return -ENOTTY;
 	}
 
 	return 0;
@@ -880,10 +899,12 @@
 		/* We need to be able to "read" an inactive LV
 		   to re-activate it again */
 		if ((file->f_mode & FMODE_WRITE) &&
-		    (!(lv_ptr->lv_status & LV_ACTIVE))) return -EPERM;
+		    (!(lv_ptr->lv_status & LV_ACTIVE)))
+			return -EPERM;
 
 		if (!(lv_ptr->lv_access & LV_WRITE) &&
-		    (file->f_mode & FMODE_WRITE)) return -EACCES;
+		    (file->f_mode & FMODE_WRITE))
+			return -EACCES;
 
 
 		/* be sure to increment VG counter */
@@ -900,112 +921,118 @@
 	return -ENXIO;
 }				/* lvm_blk_open() */
 
+/* Deliver "hard disk geometry" */
+static int _hdio_getgeo(ulong a, lv_t * lv_ptr, int what)
+{
+	int ret = 0;
+	uchar heads = 128;
+	uchar sectors = 128;
+	ulong start = 0;
+	uint cylinders;
+
+	while (heads * sectors > lv_ptr->lv_size) {
+		heads >>= 1;
+		sectors >>= 1;
+	}
+	cylinders = lv_ptr->lv_size / heads / sectors;
+
+	switch (what) {
+	case 0:
+		{
+			struct hd_geometry *hd = (struct hd_geometry *) a;
+
+			if (put_user(heads, &hd->heads) ||
+			    put_user(sectors, &hd->sectors) ||
+			    put_user((ushort) cylinders, &hd->cylinders) ||
+			    put_user(start, &hd->start))
+				return -EFAULT;
+			break;
+		}
+
+#ifdef HDIO_GETGEO_BIG
+	case 1:
+		{
+			struct hd_big_geometry *hd =
+			    (struct hd_big_geometry *) a;
+
+			if (put_user(heads, &hd->heads) ||
+			    put_user(sectors, &hd->sectors) ||
+			    put_user(cylinders, &hd->cylinders) ||
+			    put_user(start, &hd->start))
+				return -EFAULT;
+			break;
+		}
+#endif
+
+	}
+
+	P_IOCTL("%s -- lvm_blk_ioctl -- cylinders: %d\n",
+		lvm_name, cylinders);
+	return ret;
+}
+
 
 /*
  * block device i/o-control routine
  */
 static int lvm_blk_ioctl(struct inode *inode, struct file *file,
-			 uint command, ulong a)
+			 uint cmd, ulong a)
 {
-	int minor = MINOR(inode->i_rdev);
+	kdev_t dev = inode->i_rdev;
+	int minor = MINOR(dev), ret;
 	vg_t *vg_ptr = vg[VG_BLK(minor)];
 	lv_t *lv_ptr = vg_ptr->lv[LV_BLK(minor)];
 	void *arg = (void *) a;
-	struct hd_geometry *hd = (struct hd_geometry *) a;
 
-	P_IOCTL("blk MINOR: %d  command: 0x%X  arg: %p  VG#: %d  LV#: %d  "
-		"mode: %s%s\n", minor, command, arg, VG_BLK(minor),
+	P_IOCTL("blk MINOR: %d  cmd: 0x%X  arg: %p  VG#: %d  LV#: %d  "
+		"mode: %s%s\n", minor, cmd, arg, VG_BLK(minor),
 		LV_BLK(minor), MODE_TO_STR(file->f_mode));
 
-	switch (command) {
-	case BLKSSZGET:
-		/* get block device sector size as needed e.g. by fdisk */
-		return put_user(lvm_sectsize(inode->i_rdev), (int *) arg);
-
-	case BLKGETSIZE:
-		/* return device size */
-		P_IOCTL("BLKGETSIZE: %u\n", lv_ptr->lv_size);
-		if (put_user(lv_ptr->lv_size, (unsigned long *) arg))
-			return -EFAULT;
-		break;
-
-#ifdef BLKGETSIZE64
-	case BLKGETSIZE64:
-		if (put_user((u64) lv_ptr->lv_size << 9, (u64 *) arg))
-			return -EFAULT;
-		break;
-#endif
-
-	case BLKFLSBUF:
-		/* flush buffer cache */
-		if (!capable(CAP_SYS_ADMIN))
-			return -EACCES;
-
-		P_IOCTL("BLKFLSBUF\n");
-
-		fsync_dev(inode->i_rdev);
-		invalidate_buffers(inode->i_rdev);
-		break;
-
-
+	switch (cmd) {
 	case BLKRASET:
 		/* set read ahead for block device */
-		if (!capable(CAP_SYS_ADMIN))
-			return -EACCES;
-
-		P_IOCTL("BLKRASET: %ld sectors for %s\n",
-			(long) arg, kdevname(inode->i_rdev));
-
-		if ((long) arg < LVM_MIN_READ_AHEAD ||
-		    (long) arg > LVM_MAX_READ_AHEAD) return -EINVAL;
-		lv_ptr->lv_read_ahead = (long) arg;
+		ret = blk_ioctl(dev, cmd, a);
+		if (ret)
+			return ret;
+		lv_ptr->lv_read_ahead = (long) a;
 		LVM_CORRECT_READ_AHEAD(lv_ptr->lv_read_ahead);
-		read_ahead[MAJOR_NR] = lv_ptr->lv_read_ahead;
-		break;
-
-
-	case BLKRAGET:
-		/* get current read ahead setting */
-		P_IOCTL("BLKRAGET %d\n", lv_ptr->lv_read_ahead);
-		if (put_user(lv_ptr->lv_read_ahead, (long *) arg))
-			return -EFAULT;
 		break;
 
-
 	case HDIO_GETGEO:
+#ifdef HDIO_GETGEO_BIG
+	case HDIO_GETGEO_BIG:
+#endif
 		/* get disk geometry */
 		P_IOCTL("%s -- lvm_blk_ioctl -- HDIO_GETGEO\n", lvm_name);
-		if (hd == NULL)
+		if (!a)
 			return -EINVAL;
-		{
-			unsigned char heads = 64;
-			unsigned char sectors = 32;
-			long start = 0;
-			short cylinders =
-			    lv_ptr->lv_size / heads / sectors;
-
-			if (copy_to_user((char *) &hd->heads, &heads,
-					 sizeof(heads)) != 0 ||
-			    copy_to_user((char *) &hd->sectors, &sectors,
-					 sizeof(sectors)) != 0 ||
-			    copy_to_user((short *) &hd->cylinders,
-					 &cylinders,
-					 sizeof(cylinders)) != 0
-			    || copy_to_user((long *) &hd->start, &start,
-					    sizeof(start)) != 0)
-				return -EFAULT;
 
-			P_IOCTL("%s -- lvm_blk_ioctl -- cylinders: %d\n",
-				lvm_name, cylinders);
+		switch (cmd) {
+		case HDIO_GETGEO:
+			return _hdio_getgeo(a, lv_ptr, 0);
+#ifdef HDIO_GETGEO_BIG
+		case HDIO_GETGEO_BIG:
+			return _hdio_getgeo(a, lv_ptr, 1);
+#endif
 		}
-		break;
 
+	case LV_BMAP:
+		/* turn logical block into (dev_t, block). non privileged. */
+		/* don't bmap a snapshot, since the mapping can change */
+		if (lv_ptr->lv_access & LV_SNAPSHOT)
+			return -EPERM;
+
+		return lvm_user_bmap(inode, (struct lv_bmap *) arg);
 
 	case LV_SET_ACCESS:
 		/* set access flags of a logical volume */
 		if (!capable(CAP_SYS_ADMIN))
 			return -EACCES;
+
+		down_write(&lv_ptr->lv_lock);
 		lv_ptr->lv_access = (ulong) arg;
+		up_write(&lv_ptr->lv_lock);
+
 		if (lv_ptr->lv_access & LV_WRITE)
 			set_device_ro(lv_ptr->lv_dev, 0);
 		else
@@ -1013,38 +1040,37 @@
 		break;
 
 
+	case LV_SET_ALLOCATION:
+		/* set allocation flags of a logical volume */
+		if (!capable(CAP_SYS_ADMIN))
+			return -EACCES;
+		down_write(&lv_ptr->lv_lock);
+		lv_ptr->lv_allocation = (ulong) arg;
+		up_write(&lv_ptr->lv_lock);
+		break;
+
 	case LV_SET_STATUS:
 		/* set status flags of a logical volume */
 		if (!capable(CAP_SYS_ADMIN))
 			return -EACCES;
 		if (!((ulong) arg & LV_ACTIVE) && lv_ptr->lv_open > 1)
 			return -EPERM;
+		down_write(&lv_ptr->lv_lock);
 		lv_ptr->lv_status = (ulong) arg;
-		break;
-
-	case LV_BMAP:
-		/* turn logical block into (dev_t, block). non privileged. */
-		/* don't bmap a snapshot, since the mapping can change */
-		if (lv_ptr->lv_access & LV_SNAPSHOT)
-			return -EPERM;
-
-		return lvm_user_bmap(inode, (struct lv_bmap *) arg);
-
-	case LV_SET_ALLOCATION:
-		/* set allocation flags of a logical volume */
-		if (!capable(CAP_SYS_ADMIN))
-			return -EACCES;
-		lv_ptr->lv_allocation = (ulong) arg;
+		up_write(&lv_ptr->lv_lock);
 		break;
 
 	case LV_SNAPSHOT_USE_RATE:
 		return lvm_get_snapshot_use_rate(lv_ptr, arg);
 
 	default:
-		printk(KERN_WARNING
-		       "%s -- lvm_blk_ioctl: unknown command 0x%x\n",
-		       lvm_name, command);
-		return -EINVAL;
+		/* Handle rest here */
+		ret = blk_ioctl(dev, cmd, a);
+		if (ret)
+			printk(KERN_WARNING
+			       "%s -- lvm_blk_ioctl: unknown "
+			       "cmd 0x%x\n", lvm_name, cmd);
+		return ret;
 	}
 
 	return 0;
@@ -1076,8 +1102,12 @@
 {
 	lv_snapshot_use_rate_req_t lv_rate_req;
 
-	if (!(lv->lv_access & LV_SNAPSHOT))
+	down_read(&lv->lv_lock);
+	if (!(lv->lv_access & LV_SNAPSHOT)) {
+		up_read(&lv->lv_lock);
 		return -EPERM;
+	}
+	up_read(&lv->lv_lock);
 
 	if (copy_from_user(&lv_rate_req, arg, sizeof(lv_rate_req)))
 		return -EFAULT;
@@ -1087,10 +1117,17 @@
 
 	switch (lv_rate_req.block) {
 	case 0:
+		down_write(&lv->lv_lock);
 		lv->lv_snapshot_use_rate = lv_rate_req.rate;
+		up_write(&lv->lv_lock);
+		down_read(&lv->lv_lock);
 		if (lv->lv_remap_ptr * 100 / lv->lv_remap_end <
-		    lv->lv_snapshot_use_rate)
-			    interruptible_sleep_on(&lv->lv_snapshot_wait);
+		    lv->lv_snapshot_use_rate) {
+			up_read(&lv->lv_lock);
+			interruptible_sleep_on(&lv->lv_snapshot_wait);
+			down_read(&lv->lv_lock);
+		}
+		up_read(&lv->lv_lock);
 		break;
 
 	case O_NONBLOCK:
@@ -1099,7 +1136,9 @@
 	default:
 		return -EINVAL;
 	}
+	down_read(&lv->lv_lock);
 	lv_rate_req.rate = lv->lv_remap_ptr * 100 / lv->lv_remap_end;
+	up_read(&lv->lv_lock);
 
 	return copy_to_user(arg, &lv_rate_req,
 			    sizeof(lv_rate_req)) ? -EFAULT : 0;
@@ -1144,8 +1183,7 @@
 
 	/* we must redo lvm_snapshot_remap_block in order to avoid a
 	   race condition in the gap where no lock was held */
-	if (lv->lv_block_exception && 
-	    !lvm_snapshot_remap_block(&rdev, &rsector, pe_start, lv) &&
+	if (!lvm_snapshot_remap_block(&rdev, &rsector, pe_start, lv) &&
 	    !lvm_snapshot_COW(rdev, rsector, pe_start, rsector, vg, lv))
 		lvm_write_COW_table_block(vg, lv);
 
@@ -1155,12 +1193,11 @@
 static inline void _remap_snapshot(kdev_t rdev, ulong rsector,
 				   ulong pe_start, lv_t * lv, vg_t * vg)
 {
-	int r = 0;
+	int r;
 
 	/* check to see if this chunk is already in the snapshot */
 	down_read(&lv->lv_lock);
-	if (lv->lv_block_exception)
-		r = lvm_snapshot_remap_block(&rdev, &rsector, pe_start, lv);
+	r = lvm_snapshot_remap_block(&rdev, &rsector, pe_start, lv);
 	up_read(&lv->lv_lock);
 
 	if (!r)
@@ -1248,7 +1285,7 @@
 		    (rsector_org % vg_this->pe_size);
 		rdev_map = lv->lv_current_pe[index].dev;
 
-		P_MAP("lv_current_pe[%lu].pe: %u  rdev: %s  rsector:%lu\n",
+		P_MAP("lv_current_pe[%ld].pe: %d  rdev: %s  rsector:%ld\n",
 		      index, lv->lv_current_pe[index].pe,
 		      kdevname(rdev_map), rsector_map);
 
@@ -1270,8 +1307,8 @@
 		    (lv->lv_stripes - 1) * lv->lv_stripesize;
 		rdev_map = lv->lv_current_pe[index].dev;
 
-		P_MAP("lv_current_pe[%lu].pe: %d  rdev: %s  rsector:%lu\n"
-		      "stripe_length: %lu  stripe_index: %lu\n",
+		P_MAP("lv_current_pe[%ld].pe: %d  rdev: %s  rsector:%ld\n"
+		      "stripe_length: %ld  stripe_index: %ld\n",
 		      index, lv->lv_current_pe[index].pe,
 		      kdevname(rdev_map), rsector_map, stripe_length,
 		      stripe_index);
@@ -1299,9 +1336,10 @@
 		goto out;
 
 	if (lv->lv_access & LV_SNAPSHOT) {	/* remap snapshot */
-		if (lvm_snapshot_remap_block
-		    (&rdev_map, &rsector_map, pe_start, lv) < 0)
-				goto bad;
+		if (lvm_snapshot_remap_block(&rdev_map, &rsector_map,
+					     pe_start, lv) < 0)
+			goto bad;
+
 	} else if (rw == WRITE || rw == WRITEA) {	/* snapshot origin */
 		lv_t *snap;
 
@@ -1327,7 +1365,8 @@
 	return 1;
 
       bad:
-	if (bh->b_end_io) buffer_IO_error(bh);
+	if (bh->b_end_io)
+		buffer_IO_error(bh);
 	up_read(&lv->lv_lock);
 	return -1;
 }				/* lvm_map() */
@@ -1481,12 +1520,17 @@
 	if (vg_ptr == NULL)
 		return -ENXIO;
 	if (copy_from_user(&le_remap_req, arg,
-			   sizeof(le_remap_req_t)) != 0) return -EFAULT;
+			   sizeof(le_remap_req_t)) != 0)
+		return -EFAULT;
 
 	for (l = 0; l < vg_ptr->lv_max; l++) {
 		lv_ptr = vg_ptr->lv[l];
-		if (lv_ptr != NULL &&
-		    strcmp(lv_ptr->lv_name, le_remap_req.lv_name) == 0) {
+
+		if (!lv_ptr)
+			continue;
+
+		if (strcmp(lv_ptr->lv_name, le_remap_req.lv_name) == 0) {
+			down_write(&lv_ptr->lv_lock);
 			for (le = 0; le < lv_ptr->lv_allocated_le; le++) {
 				if (lv_ptr->lv_current_pe[le].dev ==
 				    le_remap_req.old_dev &&
@@ -1496,11 +1540,12 @@
 					    le_remap_req.new_dev;
 					lv_ptr->lv_current_pe[le].pe =
 					    le_remap_req.new_pe;
-
 					__update_hardsectsize(lv_ptr);
+					up_write(&lv_ptr->lv_lock);
 					return 0;
 				}
 			}
+			up_write(&lv_ptr->lv_lock);
 			return -EINVAL;
 		}
 	}
@@ -1538,6 +1583,10 @@
 	if (minor == -1)
 		minor = vg_ptr->vg_number;
 
+	/* check limits */
+	if (minor >= ABS_MAX_VG)
+		return -EFAULT;
+
 	/* Validate it */
 	if (vg[VG_CHR(minor)] != NULL) {
 		P_IOCTL("lvm_do_vg_create ERROR: VG %d in use\n", minor);
@@ -1733,15 +1782,16 @@
 	for (l = 0; l < vg_ptr->lv_max; l++) {
 		if ((lv_ptr = vg_ptr->lv[l]) == NULL)
 			continue;
+		memset(lv_ptr->vg_name, 0, sizeof(*vg_name));
 		strncpy(lv_ptr->vg_name, vg_name, sizeof(vg_name));
 		ptr = strrchr(lv_ptr->lv_name, '/');
-		if (ptr == NULL)
-			ptr = lv_ptr->lv_name;
+		ptr = ptr ? ptr + 1 : lv_ptr->lv_name;
 		strncpy(lv_name, ptr, sizeof(lv_name));
 		len = sizeof(LVM_DIR_PREFIX);
 		strcpy(lv_ptr->lv_name, LVM_DIR_PREFIX);
 		strncat(lv_ptr->lv_name, vg_name, NAME_LEN - len);
-		len += strlen(vg_name);
+		strcat(lv_ptr->lv_name, "/");
+		len += strlen(vg_name) + 1;
 		strncat(lv_ptr->lv_name, lv_name, NAME_LEN - len);
 	}
 	for (p = 0; p < vg_ptr->pv_max; p++) {
@@ -1752,6 +1802,25 @@
 
 	lvm_fs_create_vg(vg_ptr);
 
+	/* Need to add PV entries */
+	for (p = 0; p < vg_ptr->pv_act; p++) {
+		pv_t *pv_ptr = vg_ptr->pv[p];
+
+		if (pv_ptr)
+			lvm_fs_create_pv(vg_ptr, pv_ptr);
+	}
+
+	/* Need to add LV entries */
+	for (l = 0; l < vg_ptr->lv_max; l++) {
+		lv_t *lv_ptr = vg_ptr->lv[l];
+
+		if (!lv_ptr)
+			continue;
+
+		lvm_gendisk.part[MINOR(lv_ptr->lv_dev)].de =
+		    lvm_fs_create_lv(vg_ptr, lv_ptr);
+	}
+
 	return 0;
 }				/* lvm_do_vg_rename */
 
@@ -1828,6 +1897,9 @@
 	pv_t *pv;
 	int err;
 
+	if (!vg_ptr)
+		return -ENXIO;
+
 	pv = kmalloc(sizeof(pv_t), GFP_KERNEL);
 	if (pv == NULL) {
 		printk(KERN_CRIT
@@ -2060,8 +2132,7 @@
 					return -EINVAL;
 				}
 
-				if (
-				    (lv_ptr->lv_block_exception =
+				if ((lv_ptr->lv_block_exception =
 				     vmalloc(size)) == NULL) {
 					printk(KERN_CRIT
 					       "%s -- lvm_do_lv_create: vmalloc error LV_BLOCK_EXCEPTION "
@@ -2120,8 +2191,8 @@
 				vg_ptr->pe_allocated +=
 				    lv_ptr->lv_allocated_snapshot_le;
 
-				if ((ret = lvm_snapshot_alloc(lv_ptr)) !=
-				    0) {
+				if ((ret =
+				     lvm_snapshot_alloc(lv_ptr)) != 0) {
 					vfree(lv_ptr->lv_block_exception);
 					kfree(lv_ptr);
 					vg_ptr->lv[l] = NULL;
@@ -2177,8 +2248,12 @@
 	if (lv_ptr->lv_access & LV_SNAPSHOT) {
 		lv_t *org = lv_ptr->lv_snapshot_org, *last;
 
+		/* sync the original logical volume */
+		fsync_dev(org->lv_dev);
+#ifdef	LVM_VFS_ENHANCEMENT
 		/* VFS function call to sync and lock the filesystem */
 		fsync_dev_lockfs(org->lv_dev);
+#endif
 
 		down_write(&org->lv_lock);
 		org->lv_access |= LV_SNAPSHOT_ORG;
@@ -2204,9 +2279,11 @@
 	else
 		set_device_ro(lv_ptr->lv_dev, 1);
 
+#ifdef	LVM_VFS_ENHANCEMENT
 /* VFS function call to unlock the filesystem */
 	if (lv_ptr->lv_access & LV_SNAPSHOT)
 		unlockfs(lv_ptr->lv_snapshot_org->lv_dev);
+#endif
 
 	lvm_gendisk.part[MINOR(lv_ptr->lv_dev)].de =
 	    lvm_fs_create_lv(vg_ptr, lv_ptr);
@@ -2223,6 +2300,9 @@
 	vg_t *vg_ptr = vg[VG_CHR(minor)];
 	lv_t *lv_ptr;
 
+	if (!vg_ptr)
+		return -ENXIO;
+
 	if (l == -1) {
 		for (l = 0; l < vg_ptr->lv_max; l++) {
 			if (vg_ptr->lv[l] != NULL &&
@@ -2408,7 +2488,7 @@
 		}
 	}
 
-	/* save availiable i/o statistic data */
+	/* save available i/o statistic data */
 	if (old_lv->lv_stripes < 2) {	/* linear logical volume */
 		end = min(old_lv->lv_current_le, new_lv->lv_current_le);
 		for (l = 0; l < end; l++) {
@@ -2454,12 +2534,16 @@
 	lv_t *old_lv;
 	pe_t *pe;
 
+	if (!vg_ptr)
+		return -ENXIO;
+
 	if ((pe = new_lv->lv_current_pe) == NULL)
 		return -EINVAL;
 
 	for (l = 0; l < vg_ptr->lv_max; l++)
 		if (vg_ptr->lv[l]
-		    && !strcmp(vg_ptr->lv[l]->lv_name, lv_name)) break;
+		    && !strcmp(vg_ptr->lv[l]->lv_name, lv_name))
+			break;
 
 	if (l == vg_ptr->lv_max)
 		return -ENXIO;
@@ -2469,8 +2553,7 @@
 	if (old_lv->lv_access & LV_SNAPSHOT) {
 		/* only perform this operation on active snapshots */
 		if (old_lv->lv_status & LV_ACTIVE)
-			r =
-			    __extend_reduce_snapshot(vg_ptr, old_lv,
+			r = __extend_reduce_snapshot(vg_ptr, old_lv,
 						     new_lv);
 		else
 			r = -EPERM;
@@ -2481,7 +2564,7 @@
 	if (r)
 		return r;
 
-	/* copy relevent fields */
+	/* copy relevant fields */
 	down_write(&old_lv->lv_lock);
 
 	if (new_lv->lv_access & LV_SNAPSHOT) {
@@ -2490,6 +2573,8 @@
 		size *= sizeof(lv_block_exception_t);
 		memcpy(new_lv->lv_block_exception,
 		       old_lv->lv_block_exception, size);
+		vfree(old_lv->lv_block_exception);
+		vfree(old_lv->lv_snapshot_hash_table);
 
 		old_lv->lv_remap_end = new_lv->lv_remap_end;
 		old_lv->lv_block_exception = new_lv->lv_block_exception;
@@ -2500,15 +2585,16 @@
 		old_lv->lv_snapshot_hash_mask =
 		    new_lv->lv_snapshot_hash_mask;
 
-		for (e = 0; e < new_lv->lv_remap_ptr; e++)
+		for (e = 0; e < old_lv->lv_remap_ptr; e++)
 			lvm_hash_link(new_lv->lv_block_exception + e,
 				      new_lv->lv_block_exception[e].
 				      rdev_org,
 				      new_lv->lv_block_exception[e].
 				      rsector_org, new_lv);
 
+		vg_ptr->pe_allocated -= old_lv->lv_allocated_le;
+		vg_ptr->pe_allocated += new_lv->lv_allocated_le;
 	} else {
-
 		vfree(old_lv->lv_current_pe);
 		vfree(old_lv->lv_snapshot_hash_table);
 
@@ -2625,9 +2711,6 @@
 
 	if (lv_status_byindex_req.lv == NULL)
 		return -EINVAL;
-	if (lv_status_byindex_req.lv_index < 0 ||
-	    lv_status_byindex_req.lv_index >= MAX_LV)
-		return -EINVAL;
 	if ((lv_ptr = vg_ptr->lv[lv_status_byindex_req.lv_index]) == NULL)
 		return -ENXIO;
 
@@ -2642,12 +2725,14 @@
 		return -EFAULT;
 
 	if (copy_to_user(lv_status_byindex_req.lv, lv_ptr, sizeof(lv_t)) !=
-	    0) return -EFAULT;
+	    0)
+		return -EFAULT;
 	if (saved_ptr1 != NULL) {
 		if (copy_to_user(saved_ptr1,
 				 lv_ptr->lv_current_pe,
 				 lv_ptr->lv_allocated_le *
-				 sizeof(pe_t)) != 0) return -EFAULT;
+				 sizeof(pe_t)) != 0)
+			return -EFAULT;
 	}
 
 	/* Restore usermode pointers */
@@ -2699,12 +2784,14 @@
 		return -EFAULT;
 
 	if (copy_to_user(lv_status_bydev_req.lv, lv_ptr, sizeof(lv_t)) !=
-	    0) return -EFAULT;
+	    0)
+		return -EFAULT;
 	if (saved_ptr1 != NULL) {
 		if (copy_to_user(saved_ptr1,
 				 lv_ptr->lv_current_pe,
 				 lv_ptr->lv_allocated_le *
-				 sizeof(pe_t)) != 0) return -EFAULT;
+				 sizeof(pe_t)) != 0)
+			return -EFAULT;
 	}
 	/* Restore usermode pointers */
 	if (copy_to_user
@@ -2725,6 +2812,9 @@
 	int ret = 0;
 	lv_t *lv_ptr = NULL;
 
+	if (!vg_ptr)
+		return -ENXIO;
+
 	for (l = 0; l < vg_ptr->lv_max; l++) {
 		if ((lv_ptr = vg_ptr->lv[l]) == NULL)
 			continue;
@@ -2732,7 +2822,8 @@
 			lvm_fs_remove_lv(vg_ptr, lv_ptr);
 			strncpy(lv_ptr->lv_name, lv_req->lv_name,
 				NAME_LEN);
-			lvm_fs_create_lv(vg_ptr, lv_ptr);
+			lvm_gendisk.part[MINOR(lv_ptr->lv_dev)].de =
+				lvm_fs_create_lv(vg_ptr, lv_ptr);
 			break;
 		}
 	}
@@ -2755,7 +2846,8 @@
 	if (vg_ptr == NULL)
 		return -ENXIO;
 	if (copy_from_user(&pv_change_req, arg,
-			   sizeof(pv_change_req)) != 0) return -EFAULT;
+			   sizeof(pv_change_req)) != 0)
+		return -EFAULT;
 
 	for (p = 0; p < vg_ptr->pv_max; p++) {
 		pv_ptr = vg_ptr->pv[p];
@@ -2789,7 +2881,8 @@
 	if (vg_ptr == NULL)
 		return -ENXIO;
 	if (copy_from_user(&pv_status_req, arg,
-			   sizeof(pv_status_req)) != 0) return -EFAULT;
+			   sizeof(pv_status_req)) != 0)
+		return -EFAULT;
 
 	for (p = 0; p < vg_ptr->pv_max; p++) {
 		pv_ptr = vg_ptr->pv[p];
--- linux/drivers/md/lvm-internal.h.orig	Mon Mar 24 15:07:22 2003
+++ linux/drivers/md/lvm-internal.h	Tue Oct 28 12:43:03 2003
@@ -49,6 +49,10 @@
 extern vg_t *vg[];
 extern struct file_operations lvm_chr_fops;
 
+#ifndef	uchar
+typedef unsigned char uchar;
+#endif
+
 extern struct block_device_operations lvm_blk_dops;
 
 #define lvm_sectsize(dev) get_hardsect_size(dev)
@@ -85,24 +89,24 @@
 int lvm_get_blksize(kdev_t);
 int lvm_snapshot_alloc(lv_t *);
 int lvm_snapshot_fill_COW_page(vg_t *, lv_t *);
-int lvm_snapshot_COW(kdev_t, ulong, ulong, ulong, vg_t *vg, lv_t *);
+int lvm_snapshot_COW(kdev_t, ulong, ulong, ulong, vg_t * vg, lv_t *);
 int lvm_snapshot_remap_block(kdev_t *, ulong *, ulong, lv_t *);
 void lvm_snapshot_release(lv_t *);
 int lvm_write_COW_table_block(vg_t *, lv_t *);
 void lvm_hash_link(lv_block_exception_t *, kdev_t, ulong, lv_t *);
 int lvm_snapshot_alloc_hash_table(lv_t *);
-void lvm_drop_snapshot(vg_t *vg, lv_t *, const char *);
+void lvm_drop_snapshot(vg_t * vg, lv_t *, const char *);
 
 
 /* lvm_fs.c */
 void lvm_init_fs(void);
 void lvm_fin_fs(void);
 
-void lvm_fs_create_vg(vg_t *vg_ptr);
-void lvm_fs_remove_vg(vg_t *vg_ptr);
-devfs_handle_t lvm_fs_create_lv(vg_t *vg_ptr, lv_t *lv);
-void lvm_fs_remove_lv(vg_t *vg_ptr, lv_t *lv);
-void lvm_fs_create_pv(vg_t *vg_ptr, pv_t *pv);
-void lvm_fs_remove_pv(vg_t *vg_ptr, pv_t *pv);
+void lvm_fs_create_vg(vg_t * vg_ptr);
+void lvm_fs_remove_vg(vg_t * vg_ptr);
+devfs_handle_t lvm_fs_create_lv(vg_t * vg_ptr, lv_t * lv);
+void lvm_fs_remove_lv(vg_t * vg_ptr, lv_t * lv);
+void lvm_fs_create_pv(vg_t * vg_ptr, pv_t * pv);
+void lvm_fs_remove_pv(vg_t * vg_ptr, pv_t * pv);
 
 #endif
--- linux/drivers/md/lvm-snap.c.orig	Mon Mar 24 15:07:32 2003
+++ linux/drivers/md/lvm-snap.c	Tue Oct 28 12:43:03 2003
@@ -2,7 +2,7 @@
  * kernel/lvm-snap.c
  *
  * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
- *               2000 - 2001 Heinz Mauelshagen, Sistina Software
+ *               2000 - 2002 Heinz Mauelshagen, Sistina Software
  *
  * LVM snapshot driver is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -41,6 +41,9 @@
  *                 lvm_snapshot_fill_COW_table has a return value too.
  *    15/10/2001 - fix snapshot alignment problem [CM]
  *               - fix snapshot full oops (always check lv_block_exception) [CM]
+ *    26/06/2002 - support for new list_move macro [patch@luckynet.dynu.com]
+ *    26/07/2002 - removed conditional list_move macro because we will
+ *                 discontinue LVM1 before 2.6 anyway
  *
  */
 
@@ -56,7 +59,8 @@
 
 #include "lvm-internal.h"
 
-static char *lvm_snap_version __attribute__ ((unused)) = "LVM "LVM_RELEASE_NAME" snapshot code ("LVM_RELEASE_DATE")\n";
+static char *lvm_snap_version __attribute__ ((unused)) =
+    "LVM " LVM_RELEASE_NAME " snapshot code (" LVM_RELEASE_DATE ")\n";
 
 
 extern const char *const lvm_name;
@@ -64,19 +68,20 @@
 
 void lvm_snapshot_release(lv_t *);
 
-static int _write_COW_table_block(vg_t *vg, lv_t *lv, int idx,
+static int _write_COW_table_block(vg_t * vg, lv_t * lv, int idx,
 				  const char **reason);
-static void _disable_snapshot(vg_t *vg, lv_t *lv);
+static void _disable_snapshot(vg_t * vg, lv_t * lv);
 
 
 static inline int __brw_kiovec(int rw, int nr, struct kiobuf *iovec[],
 			       kdev_t dev, unsigned long b[], int size,
-			       lv_t *lv) {
+			       lv_t * lv)
+{
 	return brw_kiovec(rw, nr, iovec, dev, b, size);
 }
 
 
-static int _pv_get_number(vg_t * vg, kdev_t rdev, uint *pvn)
+static int _pv_get_number(vg_t * vg, kdev_t rdev, uint * pvn)
 {
 	uint p;
 	for (p = 0; p < vg->pv_max; p++) {
@@ -103,29 +108,29 @@
 #define hashfn(dev,block,mask,chunk_size) \
 	((HASHDEV(dev)^((block)/(chunk_size))) & (mask))
 
-static inline lv_block_exception_t *
-lvm_find_exception_table(kdev_t org_dev, unsigned long org_start, lv_t * lv)
+static inline lv_block_exception_t *lvm_find_exception_table(kdev_t
+							     org_dev,
+							     unsigned long
+							     org_start,
+							     lv_t * lv)
 {
-	struct list_head * hash_table = lv->lv_snapshot_hash_table, * next;
+	struct list_head *hash_table = lv->lv_snapshot_hash_table, *next;
 	unsigned long mask = lv->lv_snapshot_hash_mask;
 	int chunk_size = lv->lv_chunk_size;
-	lv_block_exception_t * ret;
+	lv_block_exception_t *ret;
 	int i = 0;
 
-	if (!hash_table)
-		BUG() ;
-	hash_table = &hash_table[hashfn(org_dev, org_start, mask, chunk_size)];
+	hash_table =
+	    &hash_table[hashfn(org_dev, org_start, mask, chunk_size)];
 	ret = NULL;
-	for (next = hash_table->next; next != hash_table; next = next->next)
-	{
-		lv_block_exception_t * exception;
+	for (next = hash_table->next; next != hash_table;
+	     next = next->next) {
+		lv_block_exception_t *exception;
 
 		exception = list_entry(next, lv_block_exception_t, hash);
 		if (exception->rsector_org == org_start &&
-		    exception->rdev_org == org_dev)
-		{
-			if (i)
-			{
+		    exception->rdev_org == org_dev) {
+			if (i) {
 				/* fun, isn't it? :) */
 				list_del(next);
 				list_add(next, hash_table);
@@ -142,13 +147,14 @@
 			  kdev_t org_dev, unsigned long org_start,
 			  lv_t * lv)
 {
-	struct list_head * hash_table = lv->lv_snapshot_hash_table;
+	struct list_head *hash_table = lv->lv_snapshot_hash_table;
 	unsigned long mask = lv->lv_snapshot_hash_mask;
 	int chunk_size = lv->lv_chunk_size;
 
 	if (!hash_table)
 		BUG();
-	hash_table = &hash_table[hashfn(org_dev, org_start, mask, chunk_size)];
+	hash_table =
+	    &hash_table[hashfn(org_dev, org_start, mask, chunk_size)];
 	list_add(&exception->hash, hash_table);
 }
 
@@ -160,26 +166,25 @@
  *
  * We need to be holding at least a read lock on lv->lv_lock.
  */
-int lvm_snapshot_remap_block(kdev_t * org_dev, unsigned long * org_sector,
+int lvm_snapshot_remap_block(kdev_t * org_dev, unsigned long *org_sector,
 			     unsigned long pe_start, lv_t * lv)
 {
 	int ret;
 	unsigned long pe_off, pe_adjustment, __org_start;
 	kdev_t __org_dev;
 	int chunk_size = lv->lv_chunk_size;
-	lv_block_exception_t * exception;
+	lv_block_exception_t *exception;
 
 	if (!lv->lv_block_exception)
 		return -1;
 
 	pe_off = pe_start % chunk_size;
-	pe_adjustment = (*org_sector-pe_off) % chunk_size;
+	pe_adjustment = (*org_sector - pe_off) % chunk_size;
 	__org_start = *org_sector - pe_adjustment;
 	__org_dev = *org_dev;
 	ret = 0;
 	exception = lvm_find_exception_table(__org_dev, __org_start, lv);
-	if (exception)
-	{
+	if (exception) {
 		*org_dev = exception->rdev_new;
 		*org_sector = exception->rsector_new + pe_adjustment;
 		ret = 1;
@@ -187,7 +192,7 @@
 	return ret;
 }
 
-void lvm_drop_snapshot(vg_t *vg, lv_t *lv_snap, const char *reason)
+void lvm_drop_snapshot(vg_t * vg, lv_t * lv_snap, const char *reason)
 {
 	kdev_t last_dev;
 	int i;
@@ -200,7 +205,7 @@
 	_disable_snapshot(vg, lv_snap);
 
 	for (i = last_dev = 0; i < lv_snap->lv_remap_ptr; i++) {
-		if ( lv_snap->lv_block_exception[i].rdev_new != last_dev) {
+		if (lv_snap->lv_block_exception[i].rdev_new != last_dev) {
 			last_dev = lv_snap->lv_block_exception[i].rdev_new;
 			invalidate_buffers(last_dev);
 		}
@@ -211,14 +216,14 @@
 
 	printk(KERN_INFO
 	       "%s -- giving up to snapshot %s on %s: %s\n",
-	       lvm_name, lv_snap->lv_snapshot_org->lv_name, lv_snap->lv_name,
-	       reason);
+	       lvm_name, lv_snap->lv_snapshot_org->lv_name,
+	       lv_snap->lv_name, reason);
 }
 
 static inline int lvm_snapshot_prepare_blocks(unsigned long *blocks,
-					       unsigned long start,
-					       int nr_sectors,
-					       int blocksize)
+					      unsigned long start,
+					      int nr_sectors,
+					      int blocksize)
 {
 	int i, sectors_per_block, nr_blocks;
 
@@ -241,8 +246,7 @@
 	int correct_size = BLOCK_SIZE, i, major;
 
 	major = MAJOR(dev);
-	if (blksize_size[major])
-	{
+	if (blksize_size[major]) {
 		i = blksize_size[major][MINOR(dev)];
 		if (i)
 			correct_size = i;
@@ -251,10 +255,10 @@
 }
 
 #ifdef DEBUG_SNAPSHOT
-static inline void invalidate_snap_cache(unsigned long start, unsigned long nr,
-					 kdev_t dev)
+static inline void invalidate_snap_cache(unsigned long start,
+					 unsigned long nr, kdev_t dev)
 {
-	struct buffer_head * bh;
+	struct buffer_head *bh;
 	int sectors_per_block, i, blksize, minor;
 
 	minor = MINOR(dev);
@@ -263,8 +267,7 @@
 	nr /= sectors_per_block;
 	start /= sectors_per_block;
 
-	for (i = 0; i < nr; i++)
-	{
+	for (i = 0; i < nr; i++) {
 		bh = get_hash_table(dev, start++, blksize);
 		if (bh)
 			bforget(bh);
@@ -277,40 +280,44 @@
 {
 	int id = 0, is = lv_snap->lv_remap_ptr;
 	ulong blksize_snap;
-	lv_COW_table_disk_t * lv_COW_table = (lv_COW_table_disk_t *)
-		page_address(lv_snap->lv_COW_table_iobuf->maplist[0]);
+	lv_COW_table_disk_t *lv_COW_table = (lv_COW_table_disk_t *)
+	    page_address(lv_snap->lv_COW_table_iobuf->maplist[0]);
 
 	if (is == 0)
 		return 0;
 
 	is--;
 	blksize_snap =
-		lvm_get_blksize(lv_snap->lv_block_exception[is].rdev_new);
+	    lvm_get_blksize(lv_snap->lv_block_exception[is].rdev_new);
 	is -= is % (blksize_snap / sizeof(lv_COW_table_disk_t));
 
 	memset(lv_COW_table, 0, blksize_snap);
-	for ( ; is < lv_snap->lv_remap_ptr; is++, id++) {
+	for (; is < lv_snap->lv_remap_ptr; is++, id++) {
 		/* store new COW_table entry */
-		lv_block_exception_t *be = lv_snap->lv_block_exception + is;
+		lv_block_exception_t *be =
+		    lv_snap->lv_block_exception + is;
 		uint pvn;
 
 		if (_pv_get_number(vg, be->rdev_org, &pvn))
 			goto bad;
 
 		lv_COW_table[id].pv_org_number = cpu_to_le64(pvn);
-		lv_COW_table[id].pv_org_rsector = cpu_to_le64(be->rsector_org);
+		lv_COW_table[id].pv_org_rsector =
+		    cpu_to_le64(be->rsector_org);
 
 		if (_pv_get_number(vg, be->rdev_new, &pvn))
 			goto bad;
 
 		lv_COW_table[id].pv_snap_number = cpu_to_le64(pvn);
-		lv_COW_table[id].pv_snap_rsector = cpu_to_le64(be->rsector_new);
+		lv_COW_table[id].pv_snap_rsector =
+		    cpu_to_le64(be->rsector_new);
 	}
 
 	return 0;
 
- bad:
-	printk(KERN_ERR "%s -- lvm_snapshot_fill_COW_page failed", lvm_name);
+      bad:
+	printk(KERN_ERR "%s -- lvm_snapshot_fill_COW_page failed",
+	       lvm_name);
 	return -1;
 }
 
@@ -320,12 +327,12 @@
  *
  * We need to hold a write lock on lv_snap->lv_lock.
  */
-int lvm_write_COW_table_block(vg_t * vg, lv_t *lv_snap)
+int lvm_write_COW_table_block(vg_t * vg, lv_t * lv_snap)
 {
 	int r;
 	const char *err;
-	if((r = _write_COW_table_block(vg, lv_snap,
-				       lv_snap->lv_remap_ptr - 1, &err)))
+	if ((r = _write_COW_table_block(vg, lv_snap,
+					lv_snap->lv_remap_ptr - 1, &err)))
 		lvm_drop_snapshot(vg, lv_snap, err);
 	return r;
 }
@@ -346,14 +353,16 @@
 		     unsigned long org_phys_sector,
 		     unsigned long org_pe_start,
 		     unsigned long org_virt_sector,
-		     vg_t *vg, lv_t* lv_snap)
+		     vg_t * vg, lv_t * lv_snap)
 {
-	const char * reason;
-	unsigned long org_start, snap_start, snap_phys_dev, virt_start, pe_off;
+	const char *reason;
+	unsigned long org_start, snap_start, snap_phys_dev, virt_start,
+	    pe_off;
 	unsigned long phys_start;
-	int idx = lv_snap->lv_remap_ptr, chunk_size = lv_snap->lv_chunk_size;
-	struct kiobuf * iobuf = lv_snap->lv_iobuf;
-	unsigned long *blocks = iobuf->kio_blocks;
+	int idx = lv_snap->lv_remap_ptr, chunk_size =
+	    lv_snap->lv_chunk_size;
+	struct kiobuf *iobuf = lv_snap->lv_iobuf;
+	unsigned long *blocks = iobuf->blocks;
 	int blksize_snap, blksize_org, min_blksize, max_blksize;
 	int max_sectors, nr_sectors;
 
@@ -363,7 +372,8 @@
 
 	/* calculate physical boundaries of source chunk */
 	pe_off = org_pe_start % chunk_size;
-	org_start = org_phys_sector - ((org_phys_sector-pe_off) % chunk_size);
+	org_start =
+	    org_phys_sector - ((org_phys_sector - pe_off) % chunk_size);
 	virt_start = org_virt_sector - (org_phys_sector - org_start);
 
 	/* calculate physical boundaries of destination chunk */
@@ -378,25 +388,22 @@
 	       lvm_name,
 	       kdevname(org_phys_dev), org_phys_sector, org_start,
 	       kdevname(snap_phys_dev), snap_start,
-	       chunk_size,
-	       org_pe_start, pe_off,
-	       org_virt_sector);
+	       chunk_size, org_pe_start, pe_off, org_virt_sector);
 #endif
 
 	blksize_org = lvm_sectsize(org_phys_dev);
 	blksize_snap = lvm_sectsize(snap_phys_dev);
 	max_blksize = max(blksize_org, blksize_snap);
 	min_blksize = min(blksize_org, blksize_snap);
-	max_sectors = KIO_MAX_SECTORS * (min_blksize>>9);
+	max_sectors = KIO_MAX_SECTORS * (min_blksize >> 9);
 
-	if (chunk_size % (max_blksize>>9))
+	if (chunk_size % (max_blksize >> 9))
 		goto fail_blksize;
 
 	/* Don't change org_start, we need it to fill in the exception table */
 	phys_start = org_start;
 
-	while (chunk_size)
-	{
+	while (chunk_size) {
 		nr_sectors = min(chunk_size, max_sectors);
 		chunk_size -= nr_sectors;
 
@@ -407,7 +414,8 @@
 			goto fail_prepare;
 
 		if (__brw_kiovec(READ, 1, &iobuf, org_phys_dev, blocks,
-				 blksize_org, lv_snap) != (nr_sectors<<9))
+				 blksize_org,
+				 lv_snap) != (nr_sectors << 9))
 			goto fail_raw_read;
 
 		if (!lvm_snapshot_prepare_blocks(blocks, snap_start,
@@ -415,7 +423,8 @@
 			goto fail_prepare;
 
 		if (__brw_kiovec(WRITE, 1, &iobuf, snap_phys_dev, blocks,
-				 blksize_snap, lv_snap) != (nr_sectors<<9))
+				 blksize_snap,
+				 lv_snap) != (nr_sectors << 9))
 			goto fail_raw_write;
 
 		phys_start += nr_sectors;
@@ -437,53 +446,55 @@
 		      org_phys_dev, org_start, lv_snap);
 	lv_snap->lv_remap_ptr = idx + 1;
 	if (lv_snap->lv_snapshot_use_rate > 0) {
-		if (lv_snap->lv_remap_ptr * 100 / lv_snap->lv_remap_end >= lv_snap->lv_snapshot_use_rate)
+		if (lv_snap->lv_remap_ptr * 100 / lv_snap->lv_remap_end >=
+		    lv_snap->lv_snapshot_use_rate)
 			wake_up_interruptible(&lv_snap->lv_snapshot_wait);
 	}
 	return 0;
 
 	/* slow path */
-out:
+      out:
 	lvm_drop_snapshot(vg, lv_snap, reason);
 	return 1;
 
-fail_out_of_space:
+      fail_out_of_space:
 	reason = "out of space";
 	goto out;
-fail_raw_read:
+      fail_raw_read:
 	reason = "read error";
 	goto out;
-fail_raw_write:
+      fail_raw_write:
 	reason = "write error";
 	goto out;
-fail_blksize:
+      fail_blksize:
 	reason = "blocksize error";
 	goto out;
 
-fail_prepare:
+      fail_prepare:
 	reason = "couldn't prepare kiovec blocks "
-		"(start probably isn't block aligned)";
+	    "(start probably isn't block aligned)";
 	goto out;
 }
 
-int lvm_snapshot_alloc_iobuf_pages(struct kiobuf * iobuf, int sectors)
+int lvm_snapshot_alloc_iobuf_pages(struct kiobuf *iobuf, int sectors)
 {
 	int bytes, nr_pages, err, i;
 
 	bytes = sectors * SECTOR_SIZE;
 	nr_pages = (bytes + ~PAGE_MASK) >> PAGE_SHIFT;
 	err = expand_kiobuf(iobuf, nr_pages);
-	if (err) goto out;
+	if (err)
+		goto out;
 
 	err = -ENOMEM;
 	iobuf->locked = 1;
 	iobuf->nr_pages = 0;
-	for (i = 0; i < nr_pages; i++)
-	{
-		struct page * page;
+	for (i = 0; i < nr_pages; i++) {
+		struct page *page;
 
 		page = alloc_page(GFP_KERNEL);
-		if (!page) goto out;
+		if (!page)
+			goto out;
 
 		iobuf->maplist[i] = page;
 		LockPage(page);
@@ -493,7 +504,7 @@
 
 	err = 0;
 
-out:
+      out:
 	return err;
 }
 
@@ -513,13 +524,13 @@
 {
 	int err;
 	unsigned long buckets, max_buckets, size;
-	struct list_head * hash;
+	struct list_head *hash;
 
 	buckets = lv->lv_remap_end;
 	max_buckets = calc_max_buckets();
 	buckets = min(buckets, max_buckets);
-	while (buckets & (buckets-1))
-		buckets &= (buckets-1);
+	while (buckets & (buckets - 1))
+		buckets &= (buckets - 1);
 
 	size = buckets * sizeof(struct list_head);
 
@@ -531,11 +542,11 @@
 		goto out;
 	lv->lv_snapshot_hash_table_size = size;
 
-	lv->lv_snapshot_hash_mask = buckets-1;
+	lv->lv_snapshot_hash_mask = buckets - 1;
 	while (buckets--)
-		INIT_LIST_HEAD(hash+buckets);
+		INIT_LIST_HEAD(hash + buckets);
 	err = 0;
-out:
+      out:
 	return err;
 }
 
@@ -545,33 +556,39 @@
 
 	/* allocate kiovec to do chunk io */
 	ret = alloc_kiovec(1, &lv_snap->lv_iobuf);
-	if (ret) goto out;
+	if (ret)
+		goto out;
 
-	max_sectors = KIO_MAX_SECTORS << (PAGE_SHIFT-9);
+	max_sectors = KIO_MAX_SECTORS << (PAGE_SHIFT - 9);
 
-	ret = lvm_snapshot_alloc_iobuf_pages(lv_snap->lv_iobuf, max_sectors);
-	if (ret) goto out_free_kiovec;
+	ret =
+	    lvm_snapshot_alloc_iobuf_pages(lv_snap->lv_iobuf, max_sectors);
+	if (ret)
+		goto out_free_kiovec;
 
 	/* allocate kiovec to do exception table io */
 	ret = alloc_kiovec(1, &lv_snap->lv_COW_table_iobuf);
-	if (ret) goto out_free_kiovec;
+	if (ret)
+		goto out_free_kiovec;
 
 	ret = lvm_snapshot_alloc_iobuf_pages(lv_snap->lv_COW_table_iobuf,
-					     PAGE_SIZE/SECTOR_SIZE);
-	if (ret) goto out_free_both_kiovecs;
+					     PAGE_SIZE / SECTOR_SIZE);
+	if (ret)
+		goto out_free_both_kiovecs;
 
 	ret = lvm_snapshot_alloc_hash_table(lv_snap);
-	if (ret) goto out_free_both_kiovecs;
+	if (ret)
+		goto out_free_both_kiovecs;
 
-out:
+      out:
 	return ret;
 
-out_free_both_kiovecs:
+      out_free_both_kiovecs:
 	unmap_kiobuf(lv_snap->lv_COW_table_iobuf);
 	free_kiovec(1, &lv_snap->lv_COW_table_iobuf);
 	lv_snap->lv_COW_table_iobuf = NULL;
 
-out_free_kiovec:
+      out_free_kiovec:
 	unmap_kiobuf(lv_snap->lv_iobuf);
 	free_kiovec(1, &lv_snap->lv_iobuf);
 	lv_snap->lv_iobuf = NULL;
@@ -582,27 +599,23 @@
 
 void lvm_snapshot_release(lv_t * lv)
 {
-	if (lv->lv_block_exception)
-	{
+	if (lv->lv_block_exception) {
 		vfree(lv->lv_block_exception);
 		lv->lv_block_exception = NULL;
 	}
-	if (lv->lv_snapshot_hash_table)
-	{
+	if (lv->lv_snapshot_hash_table) {
 		vfree(lv->lv_snapshot_hash_table);
 		lv->lv_snapshot_hash_table = NULL;
 		lv->lv_snapshot_hash_table_size = 0;
 	}
-	if (lv->lv_iobuf)
-	{
-	        kiobuf_wait_for_io(lv->lv_iobuf);
+	if (lv->lv_iobuf) {
+		kiobuf_wait_for_io(lv->lv_iobuf);
 		unmap_kiobuf(lv->lv_iobuf);
 		free_kiovec(1, &lv->lv_iobuf);
 		lv->lv_iobuf = NULL;
 	}
-	if (lv->lv_COW_table_iobuf)
-	{
-	        kiobuf_wait_for_io(lv->lv_COW_table_iobuf);
+	if (lv->lv_COW_table_iobuf) {
+		kiobuf_wait_for_io(lv->lv_COW_table_iobuf);
 		unmap_kiobuf(lv->lv_COW_table_iobuf);
 		free_kiovec(1, &lv->lv_COW_table_iobuf);
 		lv->lv_COW_table_iobuf = NULL;
@@ -610,55 +623,67 @@
 }
 
 
-static int _write_COW_table_block(vg_t *vg, lv_t *lv_snap,
-				  int idx, const char **reason) {
+static int _write_COW_table_block(vg_t * vg, lv_t * lv_snap,
+				  int idx, const char **reason)
+{
 	int blksize_snap;
 	int end_of_table;
 	int idx_COW_table;
 	uint pvn;
 	ulong snap_pe_start, COW_table_sector_offset,
-	      COW_entries_per_pe, COW_chunks_per_pe, COW_entries_per_block;
+	    COW_entries_per_pe, COW_chunks_per_pe, COW_entries_per_block;
 	ulong blocks[1];
 	kdev_t snap_phys_dev;
 	lv_block_exception_t *be;
 	struct kiobuf *COW_table_iobuf = lv_snap->lv_COW_table_iobuf;
-	lv_COW_table_disk_t * lv_COW_table =
-	   ( lv_COW_table_disk_t *) page_address(lv_snap->lv_COW_table_iobuf->maplist[0]);
+	lv_COW_table_disk_t *lv_COW_table =
+	    (lv_COW_table_disk_t *) page_address(lv_snap->
+						 lv_COW_table_iobuf->
+						 maplist[0]);
 
 	COW_chunks_per_pe = LVM_GET_COW_TABLE_CHUNKS_PER_PE(vg, lv_snap);
 	COW_entries_per_pe = LVM_GET_COW_TABLE_ENTRIES_PER_PE(vg, lv_snap);
 
 	/* get physical addresse of destination chunk */
 	snap_phys_dev = lv_snap->lv_block_exception[idx].rdev_new;
-	snap_pe_start = lv_snap->lv_block_exception[idx - (idx % COW_entries_per_pe)].rsector_new - lv_snap->lv_chunk_size;
+	snap_pe_start =
+	    lv_snap->lv_block_exception[idx -
+					(idx %
+					 COW_entries_per_pe)].rsector_new -
+	    lv_snap->lv_chunk_size;
 
 	blksize_snap = lvm_sectsize(snap_phys_dev);
 
-        COW_entries_per_block = blksize_snap / sizeof(lv_COW_table_disk_t);
-        idx_COW_table = idx % COW_entries_per_pe % COW_entries_per_block;
+	COW_entries_per_block = blksize_snap / sizeof(lv_COW_table_disk_t);
+	idx_COW_table = idx % COW_entries_per_pe % COW_entries_per_block;
 
-	if ( idx_COW_table == 0) memset(lv_COW_table, 0, blksize_snap);
+	if (idx_COW_table == 0)
+		memset(lv_COW_table, 0, blksize_snap);
 
 	/* sector offset into the on disk COW table */
-	COW_table_sector_offset = (idx % COW_entries_per_pe) / (SECTOR_SIZE / sizeof(lv_COW_table_disk_t));
-
-        /* COW table block to write next */
-	blocks[0] = (snap_pe_start + COW_table_sector_offset) >> (blksize_snap >> 10);
+	COW_table_sector_offset =
+	    (idx % COW_entries_per_pe) / (SECTOR_SIZE /
+					  sizeof(lv_COW_table_disk_t));
+
+	/* COW table block to write next */
+	blocks[0] =
+	    (snap_pe_start +
+	     COW_table_sector_offset) >> (blksize_snap >> 10);
 
 	/* store new COW_table entry */
 	be = lv_snap->lv_block_exception + idx;
-	if(_pv_get_number(vg, be->rdev_org, &pvn))
+	if (_pv_get_number(vg, be->rdev_org, &pvn))
 		goto fail_pv_get_number;
 
 	lv_COW_table[idx_COW_table].pv_org_number = cpu_to_le64(pvn);
 	lv_COW_table[idx_COW_table].pv_org_rsector =
-		cpu_to_le64(be->rsector_org);
-	if(_pv_get_number(vg, snap_phys_dev, &pvn))
+	    cpu_to_le64(be->rsector_org);
+	if (_pv_get_number(vg, snap_phys_dev, &pvn))
 		goto fail_pv_get_number;
 
 	lv_COW_table[idx_COW_table].pv_snap_number = cpu_to_le64(pvn);
 	lv_COW_table[idx_COW_table].pv_snap_rsector =
-		cpu_to_le64(be->rsector_new);
+	    cpu_to_le64(be->rsector_new);
 
 	COW_table_iobuf->length = blksize_snap;
 	/* COW_table_iobuf->nr_pages = 1; */
@@ -669,36 +694,42 @@
 
 	/* initialization of next COW exception table block with zeroes */
 	end_of_table = idx % COW_entries_per_pe == COW_entries_per_pe - 1;
-	if (idx_COW_table % COW_entries_per_block == COW_entries_per_block - 1 || end_of_table)
-	{
+	if (idx_COW_table % COW_entries_per_block ==
+	    COW_entries_per_block - 1 || end_of_table) {
 		/* don't go beyond the end */
-		if (idx + 1 >= lv_snap->lv_remap_end) goto out;
+		if (idx + 1 >= lv_snap->lv_remap_end)
+			goto out;
 
 		memset(lv_COW_table, 0, blksize_snap);
 
-		if (end_of_table)
-		{
+		if (end_of_table) {
 			idx++;
-			snap_phys_dev = lv_snap->lv_block_exception[idx].rdev_new;
-			snap_pe_start = lv_snap->lv_block_exception[idx - (idx % COW_entries_per_pe)].rsector_new - lv_snap->lv_chunk_size;
+			snap_phys_dev =
+			    lv_snap->lv_block_exception[idx].rdev_new;
+			snap_pe_start =
+			    lv_snap->lv_block_exception[idx -
+							(idx %
+							 COW_entries_per_pe)].
+			    rsector_new - lv_snap->lv_chunk_size;
 			blksize_snap = lvm_sectsize(snap_phys_dev);
 			blocks[0] = snap_pe_start >> (blksize_snap >> 10);
-		} else blocks[0]++;
+		} else
+			blocks[0]++;
 
 		if (__brw_kiovec(WRITE, 1, &COW_table_iobuf, snap_phys_dev,
-                                 blocks, blksize_snap, lv_snap) !=
-                    blksize_snap)
+				 blocks, blksize_snap, lv_snap) !=
+		    blksize_snap)
 			goto fail_raw_write;
 	}
 
-out:
+      out:
 	return 0;
 
-fail_raw_write:
+      fail_raw_write:
 	*reason = "write error";
 	return 1;
 
-fail_pv_get_number:
+      fail_pv_get_number:
 	*reason = "_pv_get_number failed";
 	return 1;
 }
@@ -714,10 +745,12 @@
  * to activate the snapshot and prevent this from happening.
  */
 
-static void _disable_snapshot(vg_t *vg, lv_t *lv) {
+static void _disable_snapshot(vg_t * vg, lv_t * lv)
+{
 	const char *err;
-	lv->lv_block_exception[0].rsector_org = LVM_SNAPSHOT_DROPPED_SECTOR;
-	if(_write_COW_table_block(vg, lv, 0, &err) < 0) {
+	lv->lv_block_exception[0].rsector_org =
+	    LVM_SNAPSHOT_DROPPED_SECTOR;
+	if (_write_COW_table_block(vg, lv, 0, &err) < 0) {
 		printk(KERN_ERR "%s -- couldn't disable snapshot: %s\n",
 		       lvm_name, err);
 	}
--- linux/drivers/md/lvm-fs.c.orig	Mon Mar 24 15:07:22 2003
+++ linux/drivers/md/lvm-fs.c	Tue Oct 28 12:43:03 2003
@@ -1,9 +1,10 @@
 /*
  * kernel/lvm-fs.c
  *
- * Copyright (C) 2001 Sistina Software
+ * Copyright (C) 2001-2002 Sistina Software
  *
- * January-April 2001
+ * January-May,December 2001
+ * May 2002
  *
  * LVM driver is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -58,9 +59,9 @@
 static int _proc_read_global(char *page, char **start, off_t off,
 			     int count, int *eof, void *data);
 
-static int _vg_info(vg_t *vg_ptr, char *buf);
-static int _lv_info(vg_t *vg_ptr, lv_t *lv_ptr, char *buf);
-static int _pv_info(pv_t *pv_ptr, char *buf);
+static int _vg_info(vg_t * vg_ptr, char *buf);
+static int _lv_info(vg_t * vg_ptr, lv_t * lv_ptr, char *buf);
+static int _pv_info(pv_t * pv_ptr, char *buf);
 
 static void _show_uuid(const char *src, char *b, char *e);
 
@@ -77,81 +78,95 @@
 /* inline functions */
 
 /* public interface */
-void __init lvm_init_fs() {
+void __init lvm_init_fs()
+{
 	struct proc_dir_entry *pde;
 
 /* User-space has already registered this */
 #if 0
-	lvm_devfs_handle = devfs_register(
-		0 , "lvm", 0, LVM_CHAR_MAJOR, 0,
-		S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP,
-		&lvm_chr_fops, NULL);
+	lvm_devfs_handle = devfs_register(0, "lvm", 0, LVM_CHAR_MAJOR, 0,
+					  S_IFCHR | S_IRUSR | S_IWUSR |
+					  S_IRGRP, &lvm_chr_fops, NULL);
 #endif
 	lvm_proc_dir = create_proc_entry(LVM_DIR, S_IFDIR, &proc_root);
 	if (lvm_proc_dir) {
-		lvm_proc_vg_subdir = create_proc_entry(LVM_VG_SUBDIR, S_IFDIR,
-						       lvm_proc_dir);
+		lvm_proc_vg_subdir =
+		    create_proc_entry(LVM_VG_SUBDIR, S_IFDIR,
+				      lvm_proc_dir);
 		pde = create_proc_entry(LVM_GLOBAL, S_IFREG, lvm_proc_dir);
-		if ( pde != NULL) pde->read_proc = _proc_read_global;
+		if (pde != NULL)
+			pde->read_proc = _proc_read_global;
 	}
 }
 
-void lvm_fin_fs() {
+void lvm_fin_fs()
+{
 #if 0
-	devfs_unregister (lvm_devfs_handle);
+	devfs_unregister(lvm_devfs_handle);
 #endif
 	remove_proc_entry(LVM_GLOBAL, lvm_proc_dir);
 	remove_proc_entry(LVM_VG_SUBDIR, lvm_proc_dir);
 	remove_proc_entry(LVM_DIR, &proc_root);
 }
 
-void lvm_fs_create_vg(vg_t *vg_ptr) {
+void lvm_fs_create_vg(vg_t * vg_ptr)
+{
 	struct proc_dir_entry *pde;
 
+	if (!vg_ptr)
+		return;
+
 	vg_devfs_handle[vg_ptr->vg_number] =
-		devfs_mk_dir(0, vg_ptr->vg_name, NULL);
+	    devfs_mk_dir(0, vg_ptr->vg_name, NULL);
 
-	ch_devfs_handle[vg_ptr->vg_number] = devfs_register(
-		vg_devfs_handle[vg_ptr->vg_number] , "group",
-		DEVFS_FL_DEFAULT, LVM_CHAR_MAJOR, vg_ptr->vg_number,
-		S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP,
-		&lvm_chr_fops, NULL);
+	ch_devfs_handle[vg_ptr->vg_number] =
+	    devfs_register(vg_devfs_handle[vg_ptr->vg_number], "group",
+			   DEVFS_FL_DEFAULT, LVM_CHAR_MAJOR,
+			   vg_ptr->vg_number,
+			   S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP,
+			   &lvm_chr_fops, NULL);
 
 	vg_ptr->vg_dir_pde = create_proc_entry(vg_ptr->vg_name, S_IFDIR,
 					       lvm_proc_vg_subdir);
 
-	if((pde = create_proc_entry("group", S_IFREG, vg_ptr->vg_dir_pde))) {
+	if ((pde =
+	     create_proc_entry("group", S_IFREG, vg_ptr->vg_dir_pde))) {
 		pde->read_proc = _proc_read_vg;
 		pde->data = vg_ptr;
 	}
 
 	vg_ptr->lv_subdir_pde =
-		create_proc_entry(LVM_LV_SUBDIR, S_IFDIR, vg_ptr->vg_dir_pde);
+	    create_proc_entry(LVM_LV_SUBDIR, S_IFDIR, vg_ptr->vg_dir_pde);
 
 	vg_ptr->pv_subdir_pde =
-		create_proc_entry(LVM_PV_SUBDIR, S_IFDIR, vg_ptr->vg_dir_pde);
+	    create_proc_entry(LVM_PV_SUBDIR, S_IFDIR, vg_ptr->vg_dir_pde);
 }
 
-void lvm_fs_remove_vg(vg_t *vg_ptr) {
+void lvm_fs_remove_vg(vg_t * vg_ptr)
+{
 	int i;
 
+	if (!vg_ptr)
+		return;
+
 	devfs_unregister(ch_devfs_handle[vg_ptr->vg_number]);
 	ch_devfs_handle[vg_ptr->vg_number] = NULL;
 
-	/* remove pv's */
-	for(i = 0; i < vg_ptr->pv_max; i++)
-		if(vg_ptr->pv[i]) lvm_fs_remove_pv(vg_ptr, vg_ptr->pv[i]);
-
 	/* remove lv's */
-	for(i = 0; i < vg_ptr->lv_max; i++)
-		if(vg_ptr->lv[i]) lvm_fs_remove_lv(vg_ptr, vg_ptr->lv[i]);
-
+	for (i = 0; i < vg_ptr->lv_max; i++)
+		if (vg_ptr->lv[i])
+			lvm_fs_remove_lv(vg_ptr, vg_ptr->lv[i]);
 
 	/* must not remove directory before leaf nodes */
 	devfs_unregister(vg_devfs_handle[vg_ptr->vg_number]);
 	vg_devfs_handle[vg_ptr->vg_number] = NULL;
 
-	if(vg_ptr->vg_dir_pde) {
+	/* remove pv's */
+	for (i = 0; i < vg_ptr->pv_max; i++)
+		if (vg_ptr->pv[i])
+			lvm_fs_remove_pv(vg_ptr, vg_ptr->pv[i]);
+
+	if (vg_ptr->vg_dir_pde) {
 		remove_proc_entry(LVM_LV_SUBDIR, vg_ptr->vg_dir_pde);
 		vg_ptr->lv_subdir_pde = NULL;
 
@@ -166,73 +181,97 @@
 }
 
 
-static inline const char *_basename(const char *str) {
+static inline const char *_basename(const char *str)
+{
 	const char *name = strrchr(str, '/');
 	name = name ? name + 1 : str;
 	return name;
 }
 
-devfs_handle_t lvm_fs_create_lv(vg_t *vg_ptr, lv_t *lv) {
+devfs_handle_t lvm_fs_create_lv(vg_t * vg_ptr, lv_t * lv)
+{
 	struct proc_dir_entry *pde;
-	const char *name = _basename(lv->lv_name);
+	const char *name;
 
-	lv_devfs_handle[MINOR(lv->lv_dev)] = devfs_register(
-		vg_devfs_handle[vg_ptr->vg_number], name,
-		DEVFS_FL_DEFAULT, LVM_BLK_MAJOR, MINOR(lv->lv_dev),
-		S_IFBLK | S_IRUSR | S_IWUSR | S_IRGRP,
-		&lvm_blk_dops, NULL);
+	if (!vg_ptr || !lv)
+		return NULL;
 
-	if(vg_ptr->lv_subdir_pde &&
-	   (pde = create_proc_entry(name, S_IFREG, vg_ptr->lv_subdir_pde))) {
+	name = _basename(lv->lv_name);
+
+	lv_devfs_handle[MINOR(lv->lv_dev)] =
+	    devfs_register(vg_devfs_handle[vg_ptr->vg_number], name,
+			   DEVFS_FL_DEFAULT, LVM_BLK_MAJOR,
+			   MINOR(lv->lv_dev),
+			   S_IFBLK | S_IRUSR | S_IWUSR | S_IRGRP,
+			   &lvm_blk_dops, NULL);
+
+	if (vg_ptr->lv_subdir_pde &&
+	    (pde =
+	     create_proc_entry(name, S_IFREG, vg_ptr->lv_subdir_pde))) {
 		pde->read_proc = _proc_read_lv;
 		pde->data = lv;
 	}
 	return lv_devfs_handle[MINOR(lv->lv_dev)];
 }
 
-void lvm_fs_remove_lv(vg_t *vg_ptr, lv_t *lv) {
+void lvm_fs_remove_lv(vg_t * vg_ptr, lv_t * lv)
+{
+
+	if (!vg_ptr || !lv)
+		return;
+
 	devfs_unregister(lv_devfs_handle[MINOR(lv->lv_dev)]);
 	lv_devfs_handle[MINOR(lv->lv_dev)] = NULL;
 
-	if(vg_ptr->lv_subdir_pde) {
+	if (vg_ptr->lv_subdir_pde) {
 		const char *name = _basename(lv->lv_name);
 		remove_proc_entry(name, vg_ptr->lv_subdir_pde);
 	}
 }
 
 
-static inline void _make_pv_name(const char *src, char *b, char *e) {
+static inline void _make_pv_name(const char *src, char *b, char *e)
+{
 	int offset = strlen(LVM_DIR_PREFIX);
-	if(strncmp(src, LVM_DIR_PREFIX, offset))
+	if (strncmp(src, LVM_DIR_PREFIX, offset))
 		offset = 0;
 
 	e--;
 	src += offset;
-	while(*src && (b != e)) {
+	while (*src && (b != e)) {
 		*b++ = (*src == '/') ? '_' : *src;
 		src++;
 	}
 	*b = '\0';
 }
 
-void lvm_fs_create_pv(vg_t *vg_ptr, pv_t *pv) {
+void lvm_fs_create_pv(vg_t * vg_ptr, pv_t * pv)
+{
 	struct proc_dir_entry *pde;
 	char name[NAME_LEN];
 
-	if(!vg_ptr->pv_subdir_pde)
+	if (!vg_ptr || !pv)
+		return;
+
+	if (!vg_ptr->pv_subdir_pde)
 		return;
 
 	_make_pv_name(pv->pv_name, name, name + sizeof(name));
-	if((pde = create_proc_entry(name, S_IFREG, vg_ptr->pv_subdir_pde))) {
+	if ((pde =
+	     create_proc_entry(name, S_IFREG, vg_ptr->pv_subdir_pde))) {
 		pde->read_proc = _proc_read_pv;
 		pde->data = pv;
 	}
 }
 
-void lvm_fs_remove_pv(vg_t *vg_ptr, pv_t *pv) {
+void lvm_fs_remove_pv(vg_t * vg_ptr, pv_t * pv)
+{
 	char name[NAME_LEN];
 
-	if(!vg_ptr->pv_subdir_pde)
+	if (!vg_ptr || !pv)
+		return;
+
+	if (!vg_ptr->pv_subdir_pde)
 		return;
 
 	_make_pv_name(pv->pv_name, name, name + sizeof(name));
@@ -241,7 +280,8 @@
 
 
 static int _proc_read_vg(char *page, char **start, off_t off,
-			  int count, int *eof, void *data) {
+			 int count, int *eof, void *data)
+{
 	int sz = 0;
 	vg_t *vg_ptr = data;
 	char uuid[NAME_LEN];
@@ -258,9 +298,11 @@
 	sz += sprintf(page + sz, "PV max:       %u\n", vg_ptr->pv_max);
 	sz += sprintf(page + sz, "PV current:   %u\n", vg_ptr->pv_cur);
 	sz += sprintf(page + sz, "PV active:    %u\n", vg_ptr->pv_act);
-	sz += sprintf(page + sz, "PE size:      %u\n", vg_ptr->pe_size / 2);
+	sz +=
+	    sprintf(page + sz, "PE size:      %u\n", vg_ptr->pe_size / 2);
 	sz += sprintf(page + sz, "PE total:     %u\n", vg_ptr->pe_total);
-	sz += sprintf(page + sz, "PE allocated: %u\n", vg_ptr->pe_allocated);
+	sz +=
+	    sprintf(page + sz, "PE allocated: %u\n", vg_ptr->pe_allocated);
 
 	_show_uuid(vg_ptr->vg_uuid, uuid, uuid + sizeof(uuid));
 	sz += sprintf(page + sz, "uuid:         %s\n", uuid);
@@ -269,7 +311,8 @@
 }
 
 static int _proc_read_lv(char *page, char **start, off_t off,
-			  int count, int *eof, void *data) {
+			 int count, int *eof, void *data)
+{
 	int sz = 0;
 	lv_t *lv = data;
 
@@ -280,7 +323,7 @@
 	sz += sprintf(page + sz, "number:       %u\n", lv->lv_number);
 	sz += sprintf(page + sz, "open:         %u\n", lv->lv_open);
 	sz += sprintf(page + sz, "allocation:   %u\n", lv->lv_allocation);
-	if(lv->lv_stripes > 1) {
+	if (lv->lv_stripes > 1) {
 		sz += sprintf(page + sz, "stripes:      %u\n",
 			      lv->lv_stripes);
 		sz += sprintf(page + sz, "stripesize:   %u\n",
@@ -293,7 +336,8 @@
 }
 
 static int _proc_read_pv(char *page, char **start, off_t off,
-			 int count, int *eof, void *data) {
+			 int count, int *eof, void *data)
+{
 	int sz = 0;
 	pv_t *pv = data;
 	char uuid[NAME_LEN];
@@ -308,7 +352,7 @@
 	sz += sprintf(page + sz, "PE total:     %u\n", pv->pe_total);
 	sz += sprintf(page + sz, "PE allocated: %u\n", pv->pe_allocated);
 	sz += sprintf(page + sz, "device:       %02u:%02u\n",
-                      MAJOR(pv->pv_dev), MINOR(pv->pv_dev));
+		      MAJOR(pv->pv_dev), MINOR(pv->pv_dev));
 
 	_show_uuid(pv->pv_uuid, uuid, uuid + sizeof(uuid));
 	sz += sprintf(page + sz, "uuid:         %s\n", uuid);
@@ -316,13 +360,15 @@
 	return sz;
 }
 
-static int _proc_read_global(char *page, char **start, off_t pos, int count,
-			     int *eof, void *data) {
+static int _proc_read_global(char *page, char **start, off_t pos,
+			     int count, int *eof, void *data)
+{
 
 #define  LVM_PROC_BUF   ( i == 0 ? dummy_buf : &buf[sz])
 
-	int c, i, l, p, v, vg_counter, pv_counter, lv_counter, lv_open_counter,
-		lv_open_total, pe_t_bytes, hash_table_bytes, lv_block_exception_t_bytes, seconds;
+	int c, i, l, p, v, vg_counter, pv_counter, lv_counter,
+	    lv_open_counter, lv_open_total, pe_t_bytes, hash_table_bytes,
+	    lv_block_exception_t_bytes, seconds;
 	static off_t sz;
 	off_t sz_last;
 	static char *buf = NULL;
@@ -338,12 +384,12 @@
 	       lvm_name, pos, count);
 #endif
 
-	if(pos != 0 && buf != NULL)
+	if (pos != 0 && buf != NULL)
 		goto out;
 
-	sz_last = vg_counter = pv_counter = lv_counter = lv_open_counter = \
-		lv_open_total = pe_t_bytes = hash_table_bytes = \
-		lv_block_exception_t_bytes = 0;
+	sz_last = vg_counter = pv_counter = lv_counter = lv_open_counter =
+	    lv_open_total = pe_t_bytes = hash_table_bytes =
+	    lv_block_exception_t_bytes = 0;
 
 	/* get some statistics */
 	for (v = 0; v < ABS_MAX_VG; v++) {
@@ -353,14 +399,26 @@
 			lv_counter += vg_ptr->lv_cur;
 			if (vg_ptr->lv_cur > 0) {
 				for (l = 0; l < vg[v]->lv_max; l++) {
-					if ((lv_ptr = vg_ptr->lv[l]) != NULL) {
-						pe_t_bytes += lv_ptr->lv_allocated_le;
-						hash_table_bytes += lv_ptr->lv_snapshot_hash_table_size;
-						if (lv_ptr->lv_block_exception != NULL)
-							lv_block_exception_t_bytes += lv_ptr->lv_remap_end;
+					if ((lv_ptr =
+					     vg_ptr->lv[l]) != NULL) {
+						pe_t_bytes +=
+						    lv_ptr->
+						    lv_allocated_le;
+						hash_table_bytes +=
+						    lv_ptr->
+						    lv_snapshot_hash_table_size;
+						if (lv_ptr->
+						    lv_block_exception !=
+						    NULL)
+							lv_block_exception_t_bytes
+							    +=
+							    lv_ptr->
+							    lv_remap_end;
 						if (lv_ptr->lv_open > 0) {
 							lv_open_counter++;
-							lv_open_total += lv_ptr->lv_open;
+							lv_open_total +=
+							    lv_ptr->
+							    lv_open;
 						}
 					}
 				}
@@ -382,8 +440,7 @@
 	   2nd to fill the malloced buffer */
 	for (i = 0; i < 2; i++) {
 		sz = 0;
-		sz += sprintf(LVM_PROC_BUF,
-			      "LVM "
+		sz += sprintf(LVM_PROC_BUF, "LVM "
 #ifdef MODULE
 			      "module"
 #else
@@ -401,8 +458,7 @@
 			      lv_open_counter == 1 ? "" : "s");
 		if (lv_open_total > 0)
 			sz += sprintf(LVM_PROC_BUF,
-				      " %d times)\n",
-				      lv_open_total);
+				      " %d times)\n", lv_open_total);
 		else
 			sz += sprintf(LVM_PROC_BUF, ")");
 		sz += sprintf(LVM_PROC_BUF,
@@ -410,7 +466,8 @@
 			      vg_counter * sizeof(vg_t) +
 			      pv_counter * sizeof(pv_t) +
 			      lv_counter * sizeof(lv_t) +
-			      pe_t_bytes + hash_table_bytes + lv_block_exception_t_bytes + sz_last,
+			      pe_t_bytes + hash_table_bytes +
+			      lv_block_exception_t_bytes + sz_last,
 			      lvm_iop_version);
 
 		seconds = CURRENT_TIME - loadtime;
@@ -424,46 +481,70 @@
 		}
 		sz += sprintf(LVM_PROC_BUF, "%d:%02d:%02d active\n",
 			      (seconds % 86400) / 3600,
-			      (seconds % 3600) / 60,
-			      seconds % 60);
+			      (seconds % 3600) / 60, seconds % 60);
 
 		if (vg_counter > 0) {
 			for (v = 0; v < ABS_MAX_VG; v++) {
 				/* volume group */
 				if ((vg_ptr = vg[v]) != NULL) {
-					sz += _vg_info(vg_ptr, LVM_PROC_BUF);
+					sz +=
+					    _vg_info(vg_ptr, LVM_PROC_BUF);
 
 					/* physical volumes */
 					sz += sprintf(LVM_PROC_BUF,
 						      "\n  PV%s ",
-						      vg_ptr->pv_cur == 1 ? ": " : "s:");
+						      vg_ptr->pv_cur ==
+						      1 ? ": " : "s:");
 					c = 0;
-					for (p = 0; p < vg_ptr->pv_max; p++) {
-						if ((pv_ptr = vg_ptr->pv[p]) != NULL) {
-							sz += _pv_info(pv_ptr, LVM_PROC_BUF);
+					for (p = 0; p < vg_ptr->pv_max;
+					     p++) {
+						if ((pv_ptr =
+						     vg_ptr->pv[p]) !=
+						    NULL) {
+							sz +=
+							    _pv_info
+							    (pv_ptr,
+							     LVM_PROC_BUF);
 
 							c++;
-							if (c < vg_ptr->pv_cur)
-								sz += sprintf(LVM_PROC_BUF,
-									      "\n       ");
+							if (c <
+							    vg_ptr->pv_cur)
+								sz +=
+								    sprintf
+								    (LVM_PROC_BUF,
+								     "\n       ");
 						}
 					}
 
 					/* logical volumes */
 					sz += sprintf(LVM_PROC_BUF,
 						      "\n    LV%s ",
-						      vg_ptr->lv_cur == 1 ? ": " : "s:");
+						      vg_ptr->lv_cur ==
+						      1 ? ": " : "s:");
 					c = 0;
-					for (l = 0; l < vg_ptr->lv_max; l++) {
-						if ((lv_ptr = vg_ptr->lv[l]) != NULL) {
-							sz += _lv_info(vg_ptr, lv_ptr, LVM_PROC_BUF);
+					for (l = 0; l < vg_ptr->lv_max;
+					     l++) {
+						if ((lv_ptr =
+						     vg_ptr->lv[l]) !=
+						    NULL) {
+							sz +=
+							    _lv_info
+							    (vg_ptr,
+							     lv_ptr,
+							     LVM_PROC_BUF);
 							c++;
-							if (c < vg_ptr->lv_cur)
-								sz += sprintf(LVM_PROC_BUF,
-									      "\n         ");
+							if (c <
+							    vg_ptr->lv_cur)
+								sz +=
+								    sprintf
+								    (LVM_PROC_BUF,
+								     "\n         ");
 						}
 					}
-					if (vg_ptr->lv_cur == 0) sz += sprintf(LVM_PROC_BUF, "none");
+					if (vg_ptr->lv_cur == 0)
+						sz +=
+						    sprintf(LVM_PROC_BUF,
+							    "none");
 					sz += sprintf(LVM_PROC_BUF, "\n");
 				}
 			}
@@ -474,14 +555,15 @@
 			unlock_kernel();
 			if (buf == NULL) {
 				sz = 0;
-				return sprintf(page, "%s - vmalloc error at line %d\n",
+				return sprintf(page,
+					       "%s - vmalloc error at line %d\n",
 					       lvm_name, __LINE__);
 			}
 		}
 		sz_last = sz;
 	}
 
- out:
+      out:
 	if (pos > sz - 1) {
 		lock_kernel();
 		vfree(buf);
@@ -501,11 +583,13 @@
 /*
  * provide VG info for proc filesystem use (global)
  */
-static int _vg_info(vg_t *vg_ptr, char *buf) {
+static int _vg_info(vg_t * vg_ptr, char *buf)
+{
 	int sz = 0;
 	char inactive_flag = ' ';
 
-	if (!(vg_ptr->vg_status & VG_ACTIVE)) inactive_flag = 'I';
+	if (!(vg_ptr->vg_status & VG_ACTIVE))
+		inactive_flag = 'I';
 	sz = sprintf(buf,
 		     "\nVG: %c%s  [%d PV, %d LV/%d open] "
 		     " PE Size: %d KB\n"
@@ -516,13 +600,13 @@
 		     vg_ptr->pv_cur,
 		     vg_ptr->lv_cur,
 		     vg_ptr->lv_open,
-	     	     vg_ptr->pe_size >> 1,
+		     vg_ptr->pe_size >> 1,
 		     vg_ptr->pe_size * vg_ptr->pe_total >> 1,
 		     vg_ptr->pe_total,
 		     vg_ptr->pe_allocated * vg_ptr->pe_size >> 1,
-	     	     vg_ptr->pe_allocated,
+		     vg_ptr->pe_allocated,
 		     (vg_ptr->pe_total - vg_ptr->pe_allocated) *
-	     	     vg_ptr->pe_size >> 1,
+		     vg_ptr->pe_size >> 1,
 		     vg_ptr->pe_total - vg_ptr->pe_allocated);
 	return sz;
 }
@@ -531,10 +615,11 @@
 /*
  * provide LV info for proc filesystem use (global)
  */
-static int _lv_info(vg_t *vg_ptr, lv_t *lv_ptr, char *buf) {
+static int _lv_info(vg_t * vg_ptr, lv_t * lv_ptr, char *buf)
+{
 	int sz = 0;
 	char inactive_flag = 'A', allocation_flag = ' ',
-		stripes_flag = ' ', rw_flag = ' ', *basename;
+	    stripes_flag = ' ', rw_flag = ' ', *basename;
 
 	if (!(lv_ptr->lv_status & LV_ACTIVE))
 		inactive_flag = 'I';
@@ -547,35 +632,33 @@
 	stripes_flag = 'L';
 	if (lv_ptr->lv_stripes > 1)
 		stripes_flag = 'S';
-	sz += sprintf(buf+sz,
+	sz += sprintf(buf + sz,
 		      "[%c%c%c%c",
 		      inactive_flag,
-	 rw_flag,
-		      allocation_flag,
-		      stripes_flag);
+		      rw_flag, allocation_flag, stripes_flag);
 	if (lv_ptr->lv_stripes > 1)
-		sz += sprintf(buf+sz, "%-2d",
-			      lv_ptr->lv_stripes);
+		sz += sprintf(buf + sz, "%-2d", lv_ptr->lv_stripes);
 	else
-		sz += sprintf(buf+sz, "  ");
+		sz += sprintf(buf + sz, "  ");
 
 	/* FIXME: use _basename */
 	basename = strrchr(lv_ptr->lv_name, '/');
-	if ( basename == 0) basename = lv_ptr->lv_name;
-	else                basename++;
-	sz += sprintf(buf+sz, "] %-25s", basename);
+	if (basename == 0)
+		basename = lv_ptr->lv_name;
+	else
+		basename++;
+	sz += sprintf(buf + sz, "] %-25s", basename);
 	if (strlen(basename) > 25)
-		sz += sprintf(buf+sz,
+		sz += sprintf(buf + sz,
 			      "\n                              ");
-	sz += sprintf(buf+sz, "%9d /%-6d   ",
+	sz += sprintf(buf + sz, "%9d /%-6d   ",
 		      lv_ptr->lv_size >> 1,
 		      lv_ptr->lv_size / vg_ptr->pe_size);
 
 	if (lv_ptr->lv_open == 0)
-		sz += sprintf(buf+sz, "close");
+		sz += sprintf(buf + sz, "close");
 	else
-		sz += sprintf(buf+sz, "%dx open",
-			      lv_ptr->lv_open);
+		sz += sprintf(buf + sz, "%dx open", lv_ptr->lv_open);
 
 	return sz;
 }
@@ -584,7 +667,8 @@
 /*
  * provide PV info for proc filesystem use (global)
  */
-static int _pv_info(pv_t *pv, char *buf) {
+static int _pv_info(pv_t * pv, char *buf)
+{
 	int sz = 0;
 	char inactive_flag = 'A', allocation_flag = ' ';
 	char *pv_name = NULL;
@@ -594,9 +678,11 @@
 	allocation_flag = 'A';
 	if (!(pv->pv_allocatable & PV_ALLOCATABLE))
 		allocation_flag = 'N';
-	pv_name = strchr(pv->pv_name+1,'/');
-	if ( pv_name == 0) pv_name = pv->pv_name;
-	else               pv_name++;
+	pv_name = strchr(pv->pv_name + 1, '/');
+	if (pv_name == 0)
+		pv_name = pv->pv_name;
+	else
+		pv_name++;
 	sz = sprintf(buf,
 		     "[%c%c] %-21s %8d /%-6d  "
 		     "%8d /%-6d  %8d /%-6d",
@@ -608,17 +694,17 @@
 		     pv->pe_allocated * pv->pe_size >> 1,
 		     pv->pe_allocated,
 		     (pv->pe_total - pv->pe_allocated) *
-		     pv->pe_size >> 1,
-		     pv->pe_total - pv->pe_allocated);
+		     pv->pe_size >> 1, pv->pe_total - pv->pe_allocated);
 	return sz;
 }
 
-static void _show_uuid(const char *src, char *b, char *e) {
+static void _show_uuid(const char *src, char *b, char *e)
+{
 	int i;
 
 	e--;
-	for(i = 0; *src && (b != e); i++) {
-		if(i && !(i & 0x3))
+	for (i = 0; *src && (b != e); i++) {
+		if (i && !(i & 0x3))
 			*b++ = '-';
 		*b++ = *src++;
 	}

--- End Message ---

[Index of Archives]     [Gluster Users]     [Kernel Development]     [Linux Clusters]     [Device Mapper]     [Security]     [Bugtraq]     [MIPS Linux]     [ARM Linux]     [Linux Security]     [Linux RAID]

  Powered by Linux