Index: linux-work/arch/powerpc/Kconfig
===================================================================
--- linux-work.orig/arch/powerpc/Kconfig	2006-01-10 16:22:13.000000000 +1100
+++ linux-work/arch/powerpc/Kconfig	2006-01-10 16:29:05.000000000 +1100
@@ -167,6 +167,13 @@ config PHYS_64BIT
 
 	  If in doubt, say N here.
 
+config PPC_DYNAMIC_VSID
+	bool "Dynamic VSIDs"
+	depends on PPC64
+	default n
+	---help---
+	  Derive user segment VSIDs from the page table location rather than
+	  from a per-mm context ID, as needed for shared page tables.  If unsure, say N.
 config ALTIVEC
 	bool "AltiVec Support"
 	depends on 6xx || POWER4
Index: linux-work/arch/powerpc/kernel/asm-offsets.c
===================================================================
--- linux-work.orig/arch/powerpc/kernel/asm-offsets.c	2006-01-10 16:22:13.000000000 +1100
+++ linux-work/arch/powerpc/kernel/asm-offsets.c	2006-01-10 16:29:05.000000000 +1100
@@ -123,9 +123,10 @@ int main(void)
 	DEFINE(PACAPROCENABLED, offsetof(struct paca_struct, proc_enabled));
 	DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache));
 	DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr));
-	DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
-#ifdef CONFIG_PPC_64K_PAGES
+#ifdef CONFIG_PPC_DYNAMIC_VSID
 	DEFINE(PACAPGDIR, offsetof(struct paca_struct, pgdir));
+#else
+	DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
 #endif
 #ifdef CONFIG_HUGETLB_PAGE
 	DEFINE(PACALOWHTLBAREAS, offsetof(struct paca_struct, context.low_htlb_areas));
Index: linux-work/arch/powerpc/kernel/head_64.S
===================================================================
--- linux-work.orig/arch/powerpc/kernel/head_64.S	2006-01-10 16:22:13.000000000 +1100
+++ linux-work/arch/powerpc/kernel/head_64.S	2006-01-10 16:29:05.000000000 +1100
@@ -422,11 +422,11 @@ data_access_slb_pSeries:
 	mfspr	r3,SPRN_DAR
 	std	r9,PACA_EXSLB+EX_R9(r13)	/* save r9 - r12 */
 	mfcr	r9
-#ifdef __DISABLED__
-	/* Keep that around for when we re-implement dynamic VSIDs */
+#ifdef CONFIG_PPC_DYNAMIC_VSID
+	/* Go virtual for handling user addresses */
 	cmpdi	r3,0
 	bge	slb_miss_user_pseries
-#endif /* __DISABLED__ */
+#endif
 	std	r10,PACA_EXSLB+EX_R10(r13)
 	std	r11,PACA_EXSLB+EX_R11(r13)
 	std	r12,PACA_EXSLB+EX_R12(r13)
@@ -448,11 +448,11 @@ instruction_access_slb_pSeries:
 	mfspr	r3,SPRN_SRR0		/* SRR0 is faulting address */
 	std	r9,PACA_EXSLB+EX_R9(r13)	/* save r9 - r12 */
 	mfcr	r9
-#ifdef __DISABLED__
-	/* Keep that around for when we re-implement dynamic VSIDs */
+#ifdef CONFIG_PPC_DYNAMIC_VSID
+	/* Go virtual for handling user addresses */
 	cmpdi	r3,0
 	bge	slb_miss_user_pseries
-#endif /* __DISABLED__ */
+#endif
 	std	r10,PACA_EXSLB+EX_R10(r13)
 	std	r11,PACA_EXSLB+EX_R11(r13)
 	std	r12,PACA_EXSLB+EX_R12(r13)
@@ -523,15 +523,14 @@ _GLOBAL(do_stab_bolted_pSeries)
  * away from slb_miss_user_common to avoid problems with rfid
  *
  * This is used for when the SLB miss handler has to go virtual,
- * which doesn't happen for now anymore but will once we re-implement
- * dynamic VSIDs for shared page tables
+ * which is the case when dynamic VSIDs for shared page tables are enabled
  */
-#ifdef __DISABLED__
+#ifdef CONFIG_PPC_DYNAMIC_VSID
 slb_miss_user_pseries:
 	std	r10,PACA_EXGEN+EX_R10(r13)
 	std	r11,PACA_EXGEN+EX_R11(r13)
 	std	r12,PACA_EXGEN+EX_R12(r13)
-	mfspr	r10,SPRG1
+	mfspr	r10,SPRN_SPRG1
 	ld	r11,PACA_EXSLB+EX_R9(r13)
 	ld	r12,PACA_EXSLB+EX_R3(r13)
 	std	r10,PACA_EXGEN+EX_R13(r13)
@@ -539,15 +538,15 @@ slb_miss_user_pseries:
 	std	r12,PACA_EXGEN+EX_R3(r13)
 	clrrdi	r12,r13,32
 	mfmsr	r10
-	mfspr	r11,SRR0			/* save SRR0 */
+	mfspr	r11,SPRN_SRR0			/* save SRR0 */
 	ori	r12,r12,slb_miss_user_common@l	/* virt addr of handler */
 	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI
-	mtspr	SRR0,r12
-	mfspr	r12,SRR1			/* and SRR1 */
-	mtspr	SRR1,r10
+	mtspr	SPRN_SRR0,r12
+	mfspr	r12,SPRN_SRR1			/* and SRR1 */
+	mtspr	SPRN_SRR1,r10
 	rfid
 	b	.				/* prevent spec. execution */
-#endif /* __DISABLED__ */
+#endif /* CONFIG_PPC_DYNAMIC_VSID */
 
 /*
  * Vectors for the FWNMI option.  Share common code.
@@ -605,7 +604,8 @@ data_access_slb_iSeries:
 	mfspr	r3,SPRN_DAR
 	std	r9,PACA_EXSLB+EX_R9(r13)
 	mfcr	r9
-#ifdef __DISABLED__
+#ifdef CONFIG_PPC_DYNAMIC_VSID
+	/* Go virtual for handling user addresses */
 	cmpdi	r3,0
 	bge	slb_miss_user_iseries
 #endif
@@ -627,7 +627,8 @@ instruction_access_slb_iSeries:
 	ld	r3,PACALPPACA+LPPACASRR0(r13)	/* get SRR0 value */
 	std	r9,PACA_EXSLB+EX_R9(r13)
 	mfcr	r9
-#ifdef __DISABLED__
+#ifdef CONFIG_PPC_DYNAMIC_VSID
+	/* Go virtual for handling user addresses */
 	cmpdi	r3,0
 	bge	.slb_miss_user_iseries
 #endif
@@ -639,7 +640,7 @@ instruction_access_slb_iSeries:
 	ld	r12,PACALPPACA+LPPACASRR1(r13);
 	b	.slb_miss_realmode
 
-#ifdef __DISABLED__
+#ifdef CONFIG_PPC_DYNAMIC_VSID
 slb_miss_user_iseries:
 	std	r10,PACA_EXGEN+EX_R10(r13)
 	std	r11,PACA_EXGEN+EX_R11(r13)
@@ -652,7 +653,7 @@ slb_miss_user_iseries:
 	std	r12,PACA_EXGEN+EX_R3(r13)
 	EXCEPTION_PROLOG_ISERIES_2
 	b	slb_miss_user_common
-#endif
+#endif /* CONFIG_PPC_DYNAMIC_VSID */
 
 	MASKABLE_EXCEPTION_ISERIES(0x500, hardware_interrupt)
 	STD_EXCEPTION_ISERIES(0x600, alignment, PACA_EXGEN)
@@ -889,9 +890,9 @@ instruction_access_common:
 
 /*
  * Here is the common SLB miss user that is used when going to virtual
- * mode for SLB misses, that is currently not used
+ * mode for SLB misses.
  */
-#ifdef __DISABLED__
+#ifdef CONFIG_PPC_DYNAMIC_VSID
 	.align	7
 	.globl	slb_miss_user_common
 slb_miss_user_common:
@@ -921,8 +922,8 @@ slb_miss_user_common:
 	clrrdi	r10,r10,2		/* clear RI before setting SRR0/1 */
 	mtmsrd	r10,1
 
-	mtspr	SRR0,r11
-	mtspr	SRR1,r12
+	mtspr	SPRN_SRR0,r11
+	mtspr	SPRN_SRR1,r12
 
 	ld	r9,PACA_EXGEN+EX_R9(r13)
 	ld	r10,PACA_EXGEN+EX_R10(r13)
@@ -948,7 +949,7 @@ unrecov_user_slb:
 	bl	.unrecoverable_exception
 	b	1b
 
-#endif /* __DISABLED__ */
+#endif /* CONFIG_PPC_DYNAMIC_VSID */
 
 
 /*
Index: linux-work/arch/powerpc/mm/fault.c
===================================================================
--- linux-work.orig/arch/powerpc/mm/fault.c	2006-01-10 16:22:13.000000000 +1100
+++ linux-work/arch/powerpc/mm/fault.c	2006-01-10 16:29:05.000000000 +1100
@@ -147,7 +147,7 @@ int __kprobes do_page_fault(struct pt_re
 				11, SIGSEGV) == NOTIFY_STOP)
 		return 0;
 
-	if (trap == 0x300) {
+	if (trap == 0x300 || trap == 0x380) {
 		if (debugger_fault_handler(regs))
 			return 0;
 	}
Index: linux-work/arch/powerpc/mm/hash_utils_64.c
===================================================================
--- linux-work.orig/arch/powerpc/mm/hash_utils_64.c	2006-01-10 16:22:13.000000000 +1100
+++ linux-work/arch/powerpc/mm/hash_utils_64.c	2006-01-10 16:29:05.000000000 +1100
@@ -574,7 +574,7 @@ int hash_page(unsigned long ea, unsigned
 			DBG_LOW(" user region with no mm !\n");
 			return 1;
 		}
-		vsid = get_vsid(mm->context.id, ea);
+		vsid = get_vsid(mm, ea);
 		break;
 	case VMALLOC_REGION_ID:
 		mm = &init_mm;
@@ -670,7 +670,7 @@ void hash_preload(struct mm_struct *mm, 
 	ptep = find_linux_pte(pgdir, ea);
 	if (!ptep)
 		return;
-	vsid = get_vsid(mm->context.id, ea);
+	vsid = get_vsid(mm, ea);
 
 	/* Hash it in */
 	local_irq_save(flags);
Index: linux-work/arch/powerpc/mm/init_64.c
===================================================================
--- linux-work.orig/arch/powerpc/mm/init_64.c	2005-11-21 11:53:14.000000000 +1100
+++ linux-work/arch/powerpc/mm/init_64.c	2006-01-10 16:29:05.000000000 +1100
@@ -73,6 +73,7 @@
 #define DBG(fmt...)
 #endif
 
+#if 0 /* CHECK WHETHER THESE ARE STILL WORTH PORTING */
 #if PGTABLE_RANGE > USER_VSID_RANGE
 #warning Limited user VSID range means pagetable space is wasted
 #endif
@@ -80,6 +81,7 @@
 #if (TASK_SIZE_USER64 < PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE)
 #warning TASK_SIZE is smaller than it needs to be.
 #endif
+#endif
 
 /* max amount of RAM to use */
 unsigned long __max_memory;
Index: linux-work/arch/powerpc/mm/mmu_context_64.c
===================================================================
--- linux-work.orig/arch/powerpc/mm/mmu_context_64.c	2005-11-01 14:13:52.000000000 +1100
+++ linux-work/arch/powerpc/mm/mmu_context_64.c	2006-01-10 16:29:05.000000000 +1100
@@ -22,6 +22,7 @@
 
 #include <asm/mmu_context.h>
 
+#ifndef CONFIG_PPC_DYNAMIC_VSID
 static DEFINE_SPINLOCK(mmu_context_lock);
 static DEFINE_IDR(mmu_context_idr);
 
@@ -61,3 +62,5 @@ void destroy_context(struct mm_struct *m
 
 	mm->context.id = NO_CONTEXT;
 }
+
+#endif /* CONFIG_PPC_DYNAMIC_VSID */
Index: linux-work/arch/powerpc/mm/slb.c
===================================================================
--- linux-work.orig/arch/powerpc/mm/slb.c	2006-01-10 16:22:13.000000000 +1100
+++ linux-work/arch/powerpc/mm/slb.c	2006-01-10 16:30:03.000000000 +1100
@@ -31,15 +31,12 @@
 #endif
 
 extern void slb_allocate_realmode(unsigned long ea);
-extern void slb_allocate_user(unsigned long ea);
 
-static void slb_allocate(unsigned long ea)
-{
-	/* Currently, we do real mode for all SLBs including user, but
-	 * that will change if we bring back dynamic VSIDs
-	 */
-	slb_allocate_realmode(ea);
-}
+#ifdef CONFIG_PPC_DYNAMIC_VSID
+extern void slb_allocate_user(unsigned long ea);
+#else
+#define slb_allocate_user slb_allocate_realmode
+#endif
 
 static inline unsigned long mk_esid_data(unsigned long ea, unsigned long slot)
 {
@@ -122,9 +119,9 @@ void switch_slb(struct task_struct *tsk,
 
 	get_paca()->slb_cache_ptr = 0;
 	get_paca()->context = mm->context;
-#ifdef CONFIG_PPC_64K_PAGES
+#ifdef CONFIG_PPC_DYNAMIC_VSID
 	get_paca()->pgdir = mm->pgd;
-#endif /* CONFIG_PPC_64K_PAGES */
+#endif /* CONFIG_PPC_DYNAMIC_VSID */
 
 	/*
 	 * preload some userspace segments into the SLB.
Index: linux-work/arch/powerpc/mm/slb_low.S
===================================================================
--- linux-work.orig/arch/powerpc/mm/slb_low.S	2006-01-10 16:22:13.000000000 +1100
+++ linux-work/arch/powerpc/mm/slb_low.S	2006-01-10 16:29:05.000000000 +1100
@@ -24,6 +24,7 @@
 #include <asm/page.h>
 #include <asm/mmu.h>
 #include <asm/pgtable.h>
+#include <asm/bug.h>
 
 /* void slb_allocate_realmode(unsigned long ea);
  *
@@ -69,7 +70,18 @@ _GLOBAL(slb_miss_kernel_load_virtual)
 	b	slb_finish_load
 
 
-0:	/* user address: proto-VSID = context << 15 | ESID. First check
+0:
+#ifdef CONFIG_PPC_DYNAMIC_VSID
+
+	/* We should never have called this with a user address when
+	 * dynamic VSIDs are enabled
+	 */
+	BUG_OPCODE
+	blr
+
+#else /* CONFIG_PPC_DYNAMIC_VSID */
+
+	/* user address: proto-VSID = context << 15 | ESID. First check
 	 * if the address is within the boundaries of the user region
 	 */
 	srdi.	r9,r10,USER_ESID_BITS
@@ -111,7 +123,15 @@ _GLOBAL(slb_miss_user_load_normal)
 	li	r11,SLB_VSID_USER	/* flags don't much matter */
 	b	slb_finish_load
 
-#ifdef __DISABLED__
+#endif /* CONFIG_PPC_DYNAMIC_VSID */
+
+
+/*
+ * Now the virtual mode implementation used when dynamic VSIDs are
+ * enabled
+ */
+
+#ifdef CONFIG_PPC_DYNAMIC_VSID
 
 /* void slb_allocate_user(unsigned long ea);
  *
@@ -134,33 +154,71 @@ _GLOBAL(slb_allocate_user)
 	crnot	4*cr0+eq,4*cr0+eq
 	beqlr
 
-	/* now we need to get to the page tables in order to get the page
-	 * size encoding from the PMD. In the future, we'll be able to deal
-	 * with 1T segments too by getting the encoding from the PGD instead
-	 */
-	ld	r9,PACAPGDIR(r13)
-	cmpldi	cr0,r9,0
-	beqlr
-	rlwinm	r11,r10,8,25,28
-	ldx	r9,r9,r11		/* get pgd_t */
-	cmpldi	cr0,r9,0
-	beqlr
-	rlwinm	r11,r10,3,17,28
-	ldx	r9,r9,r11		/* get pmd_t */
+	/* Figure out if the segment contains huge pages */
+#ifdef CONFIG_HUGETLB_PAGE
+BEGIN_FTR_SECTION
+	b	1f
+END_FTR_SECTION_IFCLR(CPU_FTR_16M_PAGE)
+	cmpldi	r10,16
+
+	lhz	r9,PACALOWHTLBAREAS(r13)
+	mr	r11,r10
+	blt	5f
+
+	lhz	r9,PACAHIGHHTLBAREAS(r13)
+	srdi	r11,r10,(HTLB_AREA_SHIFT-SID_SHIFT)
+
+5:	srd	r9,r9,r11
+	andi.	r9,r9,1
+	beq	1f
+_GLOBAL(slb_miss_user_load_huge)
+	li	r11,0
+	b	2f
+1:
+#endif /* CONFIG_HUGETLB_PAGE */
+
+_GLOBAL(slb_miss_user_load_normal)
+	li	r11,0
+2:
+	/* 64k user address: proto-VSID = pte page address / PTE_TABLE_SIZE
+	 *  4k user address: proto-VSID = pmd page address / PMD_TABLE_SIZE
+	 *
+	 * 1) we get to pgd: (addr >> PGDIR_SHIFT) * sizeof(pgd_t)
+	 * that is (addr >> 35) << 3, or (addr >> 32) & ~7. r10 contains
+	 * addr >> 28, so we need to do (r10 >> 4) & ~7, which fits in a
+	 * 32-bit rlwinm
+	 * 2) we get back to the pud (pmd with 64k pages) via r3, as r10 was clobbered
+	 * 3) we convert to a proto-VSID by shifting right 10 bits, that
+	 *    is 7 bits + sizeof(pmd_t) to strip the 0 bits caused by the
+	 *    natural alignment of the pmd page
+ 	 */
+ 	ld	r9,PACAPGDIR(r13)
+ 	cmpldi	cr0,r9,0
+ 	beqlr
+
+	rlwinm	r10,r10,32-(PGDIR_SHIFT-28-3),(PGDIR_SHIFT-28-3),31-3
+	ldx	r9,r9,r10		/* get pgd_t */
 	cmpldi	cr0,r9,0
 	beqlr
 
-	/* build vsid flags */
-	andi.	r11,r9,SLB_VSID_LLP
-	ori	r11,r11,SLB_VSID_USER
-
-	/* get context to calculate proto-VSID */
-	ld	r9,PACACONTEXTID(r13)
-	rldimi	r10,r9,USER_ESID_BITS,0
+#ifdef CONFIG_PPC_64K_PAGES
+	srdi	r10,r3,28		/* re-get esid */
+	rlwinm	r10,r10,3,17,31-3
+	ldx	r9,r9,r10		/* get pmd_t */
+	srdi.	r10,r9,16		/* strip low '0' bits */
+#else
+	srdi	r10,r3,28		/* re-get esid */
+	rlwinm	r10,r10,3,22,28
+	ldx	r9,r9,r10		/* get pud_t */
+	srdi.	r10,r9,12		/* HACK strip low '0' bits */
 
-	/* fall through slb_finish_load */
+	/* WARNING! We assume full pages are used at that level; make
+	 * sure the allocation side uses the matching hack
+	 */
+#endif
+	beqlr
 
-#endif /* __DISABLED__ */
+#endif /* CONFIG_PPC_DYNAMIC_VSID */
 
 
 /*
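
A minimal C sketch (not part of the patch) of the pgd offset arithmetic described
in the comment inside slb_allocate_user above, assuming the 4k page layout where
PGDIR_SHIFT is 35 and a pgd entry is 8 bytes; the helper name is made up for
illustration:

/* Illustration of "(addr >> PGDIR_SHIFT) * sizeof(pgd_t)" and why a
 * single rlwinm on r10 (= addr >> 28) is enough to compute it.
 */
static unsigned long pgd_byte_offset(unsigned long ea)
{
	unsigned long esid = ea >> 28;		/* what r10 holds on entry */

	/* (ea >> 35) << 3  ==  (ea >> 32) & ~7ul  ==  (esid >> 4) & ~7ul */
	return (esid >> 4) & ~7ul;
}
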
Index: linux-work/arch/powerpc/mm/stab.c
===================================================================
--- linux-work.orig/arch/powerpc/mm/stab.c	2006-01-10 16:22:13.000000000 +1100
+++ linux-work/arch/powerpc/mm/stab.c	2006-01-10 16:29:05.000000000 +1100
@@ -128,7 +128,7 @@ static int __ste_allocate(unsigned long 
 		if ((ea >= TASK_SIZE_USER64) || (! mm))
 			return 1;
 
-		vsid = get_vsid(mm->context.id, ea);
+		vsid = get_vsid(mm, ea);
 	}
 
 	stab_entry = make_ste(get_paca()->stab_addr, GET_ESID(ea), vsid);
@@ -200,9 +200,10 @@ void switch_stab(struct task_struct *tsk
 
 	__get_cpu_var(stab_cache_ptr) = 0;
 
-#ifdef CONFIG_PPC_64K_PAGES
+#ifdef CONFIG_PPC_DYNAMIC_VSID
 	get_paca()->pgdir = mm->pgd;
-#endif /* CONFIG_PPC_64K_PAGES */
+	get_paca()->context = mm->context;
+#endif
 
 	/* Now preload some entries for the new task */
 	if (test_tsk_thread_flag(tsk, TIF_32BIT))
Index: linux-work/arch/powerpc/mm/tlb_64.c
===================================================================
--- linux-work.orig/arch/powerpc/mm/tlb_64.c	2006-01-10 16:22:13.000000000 +1100
+++ linux-work/arch/powerpc/mm/tlb_64.c	2006-01-10 16:30:20.000000000 +1100
@@ -46,6 +46,10 @@ struct pte_freelist_batch
 {
 	struct rcu_head	rcu;
 	unsigned int	index;
+#ifdef CONFIG_PPC_DYNAMIC_VSID
+	unsigned int	need_flush;
+	cpumask_t	cpus;
+#endif
 	pgtable_free_t	tables[0];
 };
 
@@ -56,12 +60,42 @@ unsigned long pte_freelist_forced_free;
 	((PAGE_SIZE - sizeof(struct pte_freelist_batch)) \
 	  / sizeof(pgtable_free_t))
 
+#ifdef CONFIG_PPC_DYNAMIC_VSID
+static inline void pgtable_flush(pgtable_free_t pgf)
+{
+	unsigned long addr = pgf.val & PGF_ADDRESS_MASK;
+
+	if (pgf.val & PGF_NEED_FLUSH)
+		asm volatile("sync; slbie %0; isync" : : "r"
+			     ((addr & ~0xffffffful) | SLBIE_C));
+}
+#ifdef CONFIG_SMP
+static void pte_flush_smp_sync(void *arg)
+{
+	struct pte_freelist_batch *batch = arg;
+	unsigned int i;
+
+	for (i = 0; i < batch->index; i++)
+		pgtable_flush(batch->tables[i]);
+}
+#endif /* CONFIG_SMP */
+#else /* CONFIG_PPC_DYNAMIC_VSID */
+#define pgtable_flush(pgf) do { } while(0)
+#endif /* CONFIG_PPC_DYNAMIC_VSID */
+
+
 #ifdef CONFIG_SMP
 static void pte_free_smp_sync(void *arg)
 {
-	/* Do nothing, just ensure we sync with all CPUs */
+	pgtable_free_t *pgf = arg;
+
+	pgtable_flush(*pgf);
+
+	/* Without dynamic VSIDs this does nothing; either way the
+	 * cross-CPU call ensures we sync with all CPUs
+	 */
 }
-#endif
+#endif /* CONFIG_SMP */
 
 /* This is only called when we are critically out of memory
  * (and fail to get a page in pte_free_tlb).
@@ -70,7 +104,7 @@ static void pgtable_free_now(pgtable_fre
 {
 	pte_freelist_forced_free++;
 
-	smp_call_function(pte_free_smp_sync, NULL, 0, 1);
+	smp_call_function(pte_free_smp_sync, &pgf, 0, 1);
 
 	pgtable_free(pgf);
 }
@@ -81,6 +115,11 @@ static void pte_free_rcu_callback(struct
 		container_of(head, struct pte_freelist_batch, rcu);
 	unsigned int i;
 
+#ifdef CONFIG_PPC_DYNAMIC_VSID
+	if (batch->need_flush)
+		smp_call_function(pte_flush_smp_sync, batch, 0, 1);
+#endif /* CONFIG_PPC_DYNAMIC_VSID */
+
 	for (i = 0; i < batch->index; i++)
 		pgtable_free(batch->tables[i]);
 
@@ -98,24 +137,38 @@ void pgtable_free_tlb(struct mmu_gather 
 	/* This is safe since tlb_gather_mmu has disabled preemption */
         cpumask_t local_cpumask = cpumask_of_cpu(smp_processor_id());
 	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
+	struct pte_freelist_batch *batch;
 
 	if (atomic_read(&tlb->mm->mm_users) < 2 ||
 	    cpus_equal(tlb->mm->cpu_vm_mask, local_cpumask)) {
 		pgtable_free(pgf);
+		pgtable_flush(pgf);
 		return;
 	}
 
-	if (*batchp == NULL) {
-		*batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC);
-		if (*batchp == NULL) {
+	batch = *batchp;
+	if (batch == NULL) {
+		batch = (struct pte_freelist_batch *)
+			__get_free_page(GFP_ATOMIC);
+		if (batch == NULL) {
 			pgtable_free_now(pgf);
 			return;
 		}
-		(*batchp)->index = 0;
-	}
-	(*batchp)->tables[(*batchp)->index++] = pgf;
-	if ((*batchp)->index == PTE_FREELIST_SIZE) {
-		pte_free_submit(*batchp);
+		batch->index = 0;
+#ifdef CONFIG_PPC_DYNAMIC_VSID
+		batch->need_flush = 0;
+		cpus_clear(batch->cpus);
+#endif /* CONFIG_PPC_DYNAMIC_VSID */
+		*batchp = batch;
+	}
+#ifdef CONFIG_PPC_DYNAMIC_VSID
+	if (pgf.val & PGF_NEED_FLUSH)
+		batch->need_flush = 1;
+	cpus_or(batch->cpus, batch->cpus, tlb->mm->cpu_vm_mask);
+#endif /* CONFIG_PPC_DYNAMIC_VSID */
+	batch->tables[batch->index++] = pgf;
+	if (batch->index == PTE_FREELIST_SIZE) {
+		pte_free_submit(batch);
 		*batchp = NULL;
 	}
 }
@@ -169,7 +222,7 @@ void hpte_update(struct mm_struct *mm, u
 		batch->psize = psize;
 	}
 	if (addr < KERNELBASE) {
-		vsid = get_vsid(mm->context.id, addr);
+		vsid = get_vsid(mm, addr);
 		WARN_ON(vsid == 0);
 	} else
 		vsid = get_kernel_vsid(addr);
Index: linux-work/include/asm-powerpc/mmu.h
===================================================================
--- linux-work.orig/include/asm-powerpc/mmu.h	2006-01-10 16:22:27.000000000 +1100
+++ linux-work/include/asm-powerpc/mmu.h	2006-01-10 16:29:05.000000000 +1100
@@ -351,10 +351,13 @@ extern void stab_initialize(unsigned lon
 
 #ifndef __ASSEMBLY__
 
+
 typedef unsigned long mm_context_id_t;
 
 typedef struct {
+#ifndef CONFIG_PPC_DYNAMIC_VSID
 	mm_context_id_t id;
+#endif
 #ifdef CONFIG_HUGETLB_PAGE
 	u16 low_htlb_areas, high_htlb_areas;
 #endif
@@ -378,6 +381,8 @@ static inline unsigned long vsid_scrambl
 #endif /* 1 */
 }
 
+#ifndef CONFIG_PPC_DYNAMIC_VSID
+
 /* This is only valid for addresses >= KERNELBASE */
 static inline unsigned long get_kernel_vsid(unsigned long ea)
 {
@@ -385,11 +390,11 @@ static inline unsigned long get_kernel_v
 }
 
 /* This is only valid for user addresses (which are below 2^41) */
-static inline unsigned long get_vsid(unsigned long context, unsigned long ea)
-{
-	return vsid_scramble((context << USER_ESID_BITS)
-			     | (ea >> SID_SHIFT));
-}
+#define get_vsid(mm, ea) \
+	vsid_scramble((((mm)->context.id) << USER_ESID_BITS) | \
+		      ((ea) >> SID_SHIFT))
+
+#endif /* CONFIG_PPC_DYNAMIC_VSID */
 
 #define VSID_SCRAMBLE(pvsid)	(((pvsid) * VSID_MULTIPLIER) % VSID_MODULUS)
 #define KERNEL_VSID(ea)		VSID_SCRAMBLE(GET_ESID(ea))
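
Both configurations now expose the same get_vsid(mm, ea) form, which is why the
callers in hash_utils_64.c, stab.c and tlb_64.c above could be converted without
adding any #ifdef. For reference, a standalone sketch of the classic computation
the macro performs (the function is not in the patch; SID_SHIFT and
USER_ESID_BITS are the existing kernel constants):

/* Sketch of the !CONFIG_PPC_DYNAMIC_VSID proto-VSID construction */
static inline unsigned long classic_proto_vsid(unsigned long context_id,
					       unsigned long ea)
{
	unsigned long esid = ea >> SID_SHIFT;	/* 256MB segment number */

	/* the caller then feeds this through vsid_scramble() */
	return (context_id << USER_ESID_BITS) | esid;
}
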
Index: linux-work/include/asm-powerpc/mmu_context.h
===================================================================
--- linux-work.orig/include/asm-powerpc/mmu_context.h	2006-01-10 16:22:27.000000000 +1100
+++ linux-work/include/asm-powerpc/mmu_context.h	2006-01-10 16:29:05.000000000 +1100
@@ -26,16 +26,25 @@
 static inline void enter_lazy_tlb(struct mm_struct *mm,
 				  struct task_struct *tsk)
 {
-#ifdef CONFIG_PPC_64K_PAGES
+#ifdef CONFIG_PPC_DYNAMIC_VSID
 	get_paca()->pgdir = NULL;
-#endif /* CONFIG_PPC_64K_PAGES */
+#endif
 }
 
 #define NO_CONTEXT	0
 #define MAX_CONTEXT	(0x100000-1)
 
+#ifdef CONFIG_PPC_DYNAMIC_VSID
+static inline int init_new_context(struct task_struct *tsk,
+				   struct mm_struct *mm)
+{
+	return 0;
+}
+static inline void destroy_context(struct mm_struct *mm) {}
+#else /* CONFIG_PPC_DYNAMIC_VSID */
 extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
 extern void destroy_context(struct mm_struct *mm);
+#endif /* CONFIG_PPC_DYNAMIC_VSID */
 
 extern void switch_stab(struct task_struct *tsk, struct mm_struct *mm);
 extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
@@ -51,13 +60,13 @@ static inline void switch_mm(struct mm_s
 		cpu_set(smp_processor_id(), next->cpu_vm_mask);
 
 	/* No need to flush userspace segments if the mm doesnt change */
-#ifdef CONFIG_PPC_64K_PAGES
+#ifdef CONFIG_PPC_DYNAMIC_VSID
 	if (prev == next && get_paca()->pgdir == next->pgd)
 		return;
-#else
+#else /* CONFIG_PPC_DYNAMIC_VSID */
 	if (prev == next)
 		return;
-#endif /* CONFIG_PPC_64K_PAGES */
+#endif /* CONFIG_PPC_DYNAMIC_VSID */
 
 #ifdef CONFIG_ALTIVEC
 	if (cpu_has_feature(CPU_FTR_ALTIVEC))
Index: linux-work/include/asm-powerpc/paca.h
===================================================================
--- linux-work.orig/include/asm-powerpc/paca.h	2006-01-10 16:22:27.000000000 +1100
+++ linux-work/include/asm-powerpc/paca.h	2006-01-10 16:29:05.000000000 +1100
@@ -76,9 +76,9 @@ struct paca_struct {
 	u64 exmc[10];		/* used for machine checks */
 	u64 exslb[10];		/* used for SLB/segment table misses
  				 * on the linear mapping */
-#ifdef CONFIG_PPC_64K_PAGES
+#ifdef CONFIG_PPC_DYNAMIC_VSID
 	pgd_t *pgdir;
-#endif /* CONFIG_PPC_64K_PAGES */
+#endif /* CONFIG_PPC_DYNAMIC_VSID */
 
 	mm_context_t context;
 	u16 slb_cache[SLB_CACHE_ENTRIES];
Index: linux-work/include/asm-powerpc/pgalloc.h
===================================================================
--- linux-work.orig/include/asm-powerpc/pgalloc.h	2006-01-10 16:22:27.000000000 +1100
+++ linux-work/include/asm-powerpc/pgalloc.h	2006-01-10 16:29:05.000000000 +1100
@@ -12,16 +12,33 @@
 
 extern kmem_cache_t *pgtable_cache[];
 
+#define PGF_CACHENUM_MASK	0x3ul
+#define PGF_FLAGS_MASK		0x4ul
+#define PGF_ADDRESS_MASK	(~(PGF_CACHENUM_MASK | PGF_FLAGS_MASK))
+#define PGF_NEED_FLUSH		0x4ul
+
 #ifdef CONFIG_PPC_64K_PAGES
+
 #define PTE_CACHE_NUM	0
 #define PMD_CACHE_NUM	1
 #define PGD_CACHE_NUM	2
-#else
+
+#define PTE_FREE_FLAGS	PGF_NEED_FLUSH
+#define PMD_FREE_FLAGS	0
+#define PUD_FREE_FLAGS	0
+
+#else /* CONFIG_PPC_64K_PAGES */
+
 #define PTE_CACHE_NUM	0
 #define PMD_CACHE_NUM	1
 #define PUD_CACHE_NUM	1
 #define PGD_CACHE_NUM	0
-#endif
+
+#define PTE_FREE_FLAGS	0
+#define PMD_FREE_FLAGS	PGF_NEED_FLUSH
+#define PUD_FREE_FLAGS	0
+
+#endif /* CONFIG_PPC_64K_PAGES */
 
 /*
  * This program is free software; you can redistribute it and/or
@@ -114,23 +131,19 @@ static inline void pte_free(struct page 
 	pte_free_kernel(page_address(ptepage));
 }
 
-#define PGF_CACHENUM_MASK	0xf
-
 typedef struct pgtable_free {
 	unsigned long val;
 } pgtable_free_t;
 
-static inline pgtable_free_t pgtable_free_cache(void *p, int cachenum,
+static inline pgtable_free_t pgtable_free_cache(void *p, int flags,
 						unsigned long mask)
 {
-	BUG_ON(cachenum > PGF_CACHENUM_MASK);
-
-	return (pgtable_free_t){.val = ((unsigned long) p & ~mask) | cachenum};
+	return (pgtable_free_t){.val = ((unsigned long) p & ~mask) | flags};
 }
 
 static inline void pgtable_free(pgtable_free_t pgf)
 {
-	void *p = (void *)(pgf.val & ~PGF_CACHENUM_MASK);
+	void *p = (void *)(pgf.val & PGF_ADDRESS_MASK);
 	int cachenum = pgf.val & PGF_CACHENUM_MASK;
 
 	kmem_cache_free(pgtable_cache[cachenum], p);
@@ -140,14 +153,14 @@ extern void pgtable_free_tlb(struct mmu_
 
 #define __pte_free_tlb(tlb, ptepage)	\
 	pgtable_free_tlb(tlb, pgtable_free_cache(page_address(ptepage), \
-		PTE_CACHE_NUM, PTE_TABLE_SIZE-1))
+		PTE_CACHE_NUM | PTE_FREE_FLAGS, PTE_TABLE_SIZE-1))
 #define __pmd_free_tlb(tlb, pmd) 	\
 	pgtable_free_tlb(tlb, pgtable_free_cache(pmd, \
-		PMD_CACHE_NUM, PMD_TABLE_SIZE-1))
+		PMD_CACHE_NUM | PMD_FREE_FLAGS, PMD_TABLE_SIZE-1))
 #ifndef CONFIG_PPC_64K_PAGES
 #define __pud_free_tlb(tlb, pmd)	\
 	pgtable_free_tlb(tlb, pgtable_free_cache(pud, \
-		PUD_CACHE_NUM, PUD_TABLE_SIZE-1))
+		PUD_CACHE_NUM | PUD_FREE_FLAGS, PUD_TABLE_SIZE-1))
 #endif /* CONFIG_PPC_64K_PAGES */
 
 #define check_pgt_cache()	do { } while (0)
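
A sketch of how the PGF_* masks introduced above carve up pgtable_free_t.val may
help when reading pgtable_free_tlb() and pgtable_flush() in tlb_64.c; only the
mask values come from the patch, the helpers are hypothetical:

/* pgf.val layout: table address (low 3 bits free thanks to table
 * alignment), PGF_NEED_FLUSH in bit 2, cache number in bits 1..0.
 */
static inline unsigned long pgf_pack(void *table, int cachenum, int flags)
{
	return ((unsigned long)table & PGF_ADDRESS_MASK)
		| (cachenum & PGF_CACHENUM_MASK)
		| (flags & PGF_FLAGS_MASK);
}

static inline int pgf_need_flush(pgtable_free_t pgf)
{
	return (pgf.val & PGF_NEED_FLUSH) != 0;
}
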
Index: linux-work/include/asm-powerpc/pgtable-4k.h
===================================================================
--- linux-work.orig/include/asm-powerpc/pgtable-4k.h	2005-11-21 11:53:15.000000000 +1100
+++ linux-work/include/asm-powerpc/pgtable-4k.h	2006-01-10 16:29:05.000000000 +1100
@@ -9,7 +9,15 @@
 #define PGD_INDEX_SIZE  9
 
 #define PTE_TABLE_SIZE	(sizeof(pte_t) << PTE_INDEX_SIZE)
+#ifdef CONFIG_PPC_DYNAMIC_VSID
+/* Hack for shared page tables: the sharing point must occupy a full
+ * page so that it has a struct page of its own
+ */
+#define PMD_TABLE_SIZE	(PAGE_SIZE)
+#else /* CONFIG_PPC_DYNAMIC_VSID */
 #define PMD_TABLE_SIZE	(sizeof(pmd_t) << PMD_INDEX_SIZE)
+#endif
+
 #define PUD_TABLE_SIZE	(sizeof(pud_t) << PUD_INDEX_SIZE)
 #define PGD_TABLE_SIZE	(sizeof(pgd_t) << PGD_INDEX_SIZE)
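
The reason the pmd level is made page sized is that the sharing point then
corresponds to a struct page of its own, as the comment above says. A
hypothetical helper (not in the patch) showing the kind of per-table handle this
enables:

/* Hypothetical: valid as a per-table handle only because
 * PMD_TABLE_SIZE == PAGE_SIZE under CONFIG_PPC_DYNAMIC_VSID.
 */
static inline struct page *pmd_table_page(pmd_t *pmd_table)
{
	return virt_to_page(pmd_table);
}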
 
Index: linux-work/include/asm-powerpc/pgtable.h
===================================================================
--- linux-work.orig/include/asm-powerpc/pgtable.h	2006-01-10 16:22:27.000000000 +1100
+++ linux-work/include/asm-powerpc/pgtable.h	2006-01-10 16:29:05.000000000 +1100
@@ -516,6 +516,41 @@ void pgtable_cache_init(void);
 	return pt;
 }
 
+#ifdef CONFIG_PPC_DYNAMIC_VSID
+
+/* This is only valid for addresses >= KERNELBASE */
+static inline unsigned long get_kernel_vsid(unsigned long ea)
+{
+	return vsid_scramble(ea >> SID_SHIFT);
+}
+
+struct mm_struct;
+
+/* This is only valid for user addresses (which are below 2^41) */
+static inline unsigned long __get_vsid(pgd_t *pgd, unsigned long ea)
+{
+	pud_t *pud;
+#ifdef CONFIG_PPC_64K_PAGES
+	pmd_t *pmd;
+#endif
+	if (pgd_none(*pgd))
+		return 0;
+	pud = pud_offset(pgd, ea);
+	if (pud_none(*pud))
+		return 0;
+#ifdef CONFIG_PPC_64K_PAGES
+	pmd = pmd_offset(pud, ea);
+	if (pmd_none(*pmd))
+		return 0;
+	return vsid_scramble(pmd_val(*pmd) / PTE_TABLE_SIZE);
+#else
+	return vsid_scramble(pud_val(*pud) / PMD_TABLE_SIZE);
+#endif
+}
+#define get_vsid(mm, ea)	__get_vsid(pgd_offset((mm), (ea)),(ea))
+
+#endif /* CONFIG_PPC_DYNAMIC_VSID */
+
 #include <asm-generic/pgtable.h>
 
 #endif /* __ASSEMBLY__ */
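
To make the effect of __get_vsid() concrete: two mms that point at the same pmd
page (4k pages) or pte page (64k pages) for a given segment now compute the same
VSID for that segment, which is what lets page tables be shared. A hypothetical
check, not part of the patch:

/* With dynamic VSIDs, sharing the page-table page for a segment
 * implies sharing the segment's VSID (0 means no table is mapped).
 */
static inline int segments_share_vsid(struct mm_struct *a,
				      struct mm_struct *b, unsigned long ea)
{
	unsigned long va = get_vsid(a, ea);
	unsigned long vb = get_vsid(b, ea);

	return va != 0 && va == vb;
}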
