diff -urN linux/arch/ppc/kernel/entry.S bk/arch/ppc/kernel/entry.S
--- linux/arch/ppc/kernel/entry.S	Thu Aug 10 14:06:27 2000
+++ bk/arch/ppc/kernel/entry.S	Fri Aug 25 11:44:36 2000
@@ -85,7 +85,7 @@
 	beq-	10f
 	cmpi	0,r0,0x6666	/* Special case for 'sys_rt_sigreturn' */
 	beq-	16f
-	lwz	r10,TASK_FLAGS(r2)
+	lwz	r10,TASK_PTRACE(r2)
 	andi.	r10,r10,PT_TRACESYS
 	bne-	50f
 	cmpli	0,r0,NR_syscalls
diff -urN linux/arch/ppc/kernel/irq.c bk/arch/ppc/kernel/irq.c
--- linux/arch/ppc/kernel/irq.c	Thu Aug 10 14:06:27 2000
+++ bk/arch/ppc/kernel/irq.c	Tue Aug 29 09:48:33 2000
@@ -143,6 +143,8 @@
 	save_flags(flags);
 	cli();
 	*p = action->next;
+	if (*p == NULL)
+		disable_irq(irq);
 	restore_flags(flags);
 	irq_kfree(action);
 	return 0;
@@ -300,7 +302,7 @@
 	}
 }
 
-asmlinkage int do_IRQ(struct pt_regs *regs, int isfake)
+int do_IRQ(struct pt_regs *regs, int isfake)
 {
 	int cpu = smp_processor_id();
 	int irq;
diff -urN linux/arch/ppc/kernel/misc.S bk/arch/ppc/kernel/misc.S
--- linux/arch/ppc/kernel/misc.S	Thu Aug 24 17:51:59 2000
+++ bk/arch/ppc/kernel/misc.S	Tue Aug 29 09:48:33 2000
@@ -484,7 +484,7 @@
 	stwcx.	r5,0,r3		/* Update with new value */
 	bne-	10b		/* Retry if "reservation" (i.e. lock) lost */
 	cntlzw	r3,r5
-	srwi	r3,r3,5
+	srwi	r3,r3,5
 	blr
 #endif /* 0 */
 _GLOBAL(atomic_clear_mask)
@@ -629,48 +629,59 @@
 	blr
 
 /*
- * Extended precision shifts
+ * Extended precision shifts.
+ *
+ * Updated to be valid for shift counts from 0 to 63 inclusive.
+ * -- Gabriel
  *
  * R3/R4 has 64 bit value
  * R5 has shift count
  * result in R3/R4
  *
- * ashrdi3: XXXYYY/ZZZAAA -> SSSXXX/YYYZZZ
- * ashldi3: XXXYYY/ZZZAAA -> YYYZZZ/AAA000
- * lshrdi3: XXXYYY/ZZZAAA -> 000XXX/YYYZZZ
+ * ashrdi3: arithmetic right shift (sign propagation)
+ * lshrdi3: logical right shift
+ * ashldi3: left shift
  */
 _GLOBAL(__ashrdi3)
-	li	r6,32
-	sub	r6,r6,r5
-	slw	r7,r3,r6	/* isolate YYY */
-	srw	r4,r4,r5	/* isolate ZZZ */
-	or	r4,r4,r7	/* YYYZZZ */
-	sraw	r3,r3,r5	/* SSSXXX */
+	subfic	r6,r5,32
+	srw	r4,r4,r5	# LSW = count > 31 ? 0 : LSW >> count
+	addi	r7,r5,32	# could be xori, or addi with -32
+	slw	r6,r3,r6	# t1 = count > 31 ? 0 : MSW << (32-count)
+	rlwinm	r8,r7,0,32	# t3 = (count < 32) ? 32 : 0
+	sraw	r7,r3,r7	# t2 = MSW >> (count-32)
+	or	r4,r4,r6	# LSW |= t1
+	slw	r7,r7,r8	# t2 = (count < 32) ? 0 : t2
+	sraw	r3,r3,r5	# MSW = MSW >> count
+	or	r4,r4,r7	# LSW |= t2
 	blr
 
 _GLOBAL(__ashldi3)
-	li	r6,32
-	sub	r6,r6,r5
-	srw	r7,r4,r6	/* isolate ZZZ */
-	slw	r4,r4,r5	/* AAA000 */
-	slw	r3,r3,r5	/* YYY--- */
-	or	r3,r3,r7	/* YYYZZZ */
+	subfic	r6,r5,32
+	slw	r3,r3,r5	# MSW = count > 31 ? 0 : MSW << count
+	addi	r7,r5,32	# could be xori, or addi with -32
+	srw	r6,r4,r6	# t1 = count > 31 ? 0 : LSW >> (32-count)
+	slw	r7,r4,r7	# t2 = count < 32 ? 0 : LSW << (count-32)
+	or	r3,r3,r6	# MSW |= t1
+	slw	r4,r4,r5	# LSW = LSW << count
+	or	r3,r3,r7	# MSW |= t2
 	blr
 
 _GLOBAL(__lshrdi3)
-	li	r6,32
-	sub	r6,r6,r5
-	slw	r7,r3,r6	/* isolate YYY */
-	srw	r4,r4,r5	/* isolate ZZZ */
-	or	r4,r4,r7	/* YYYZZZ */
-	srw	r3,r3,r5	/* 000XXX */
+	subfic	r6,r5,32
+	srw	r4,r4,r5	# LSW = count > 31 ? 0 : LSW >> count
+	addi	r7,r5,32	# could be xori, or addi with -32
+	slw	r6,r3,r6	# t1 = count > 31 ? 0 : MSW << (32-count)
+	srw	r7,r3,r7	# t2 = count < 32 ? 0 : MSW >> (count-32)
+	or	r4,r4,r6	# LSW |= t1
+	srw	r3,r3,r5	# MSW = MSW >> count
+	or	r4,r4,r7	# LSW |= t2
 	blr
 
 _GLOBAL(abs)
-	cmpi	0,r3,0
-	bge	10f
-	neg	r3,r3
-10:	blr
+	srawi	r4,r3,31
+	xor	r3,r3,r4
+	sub	r3,r3,r4
+	blr
 
 _GLOBAL(_get_SP)
 	mr	r3,r1		/* Close enough */
@@ -1217,6 +1228,6 @@
 	.long sys_pciconfig_iobase	/* 200 */
 	.long sys_ni_syscall		/* 201 - reserved - MacOnLinux - new */
 	.long sys_getdents64		/* 202 */
-	.rept NR_syscalls-201
+	.rept NR_syscalls-(.-sys_call_table)/4
 	.long sys_ni_syscall
 	.endr
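A note on the reworked shift routines above: they lean on the PowerPC srw/slw
semantics of producing 0 whenever the 6-bit shift amount lands in 32..63. A
rough C model of the logical-right-shift case, with those cases spelled out
explicitly (illustrative only, not part of the patch; the struct and function
names here are made up):

	/* Model of __lshrdi3 for counts 0..63; the 64-bit value is
	 * held as two 32-bit halves {msw, lsw}, as in R3/R4. */
	struct dw { unsigned int msw, lsw; };

	static struct dw lshrdi3_model(struct dw x, unsigned int count)
	{
		struct dw r;
		unsigned int t1, t2;

		/* srw: result is 0 when count > 31 */
		r.lsw = (count > 31) ? 0 : (x.lsw >> count);
		/* t1: bits shifted out of the MSW into the LSW;
		 * slw by (32 - count) is 0 for count == 0 and count > 31 */
		t1 = (count == 0 || count > 31) ? 0 : (x.msw << (32 - count));
		/* t2: the MSW's contribution once count >= 32 */
		t2 = (count < 32) ? 0 : (x.msw >> (count - 32));
		r.lsw |= t1 | t2;
		r.msw = (count > 31) ? 0 : (x.msw >> count);
		return r;
	}

__ashrdi3 has the same structure, except the MSW shift is arithmetic and the
t2 term sign-extends; since sraw does not zero out for counts in 32..63, the
extra rlwinm/slw pair is what suppresses t2 when the count is below 32.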
diff -urN linux/arch/ppc/kernel/process.c bk/arch/ppc/kernel/process.c
--- linux/arch/ppc/kernel/process.c	Tue Jun 27 14:52:23 2000
+++ bk/arch/ppc/kernel/process.c	Tue Aug 15 16:16:13 2000
@@ -234,7 +234,6 @@
 		prev->thread.vrsave )
 		giveup_altivec(prev);
 #endif /* CONFIG_ALTIVEC */
-	prev->last_processor = prev->processor;
 	current_set[smp_processor_id()] = new;
 #endif /* CONFIG_SMP */
 	/* Avoid the trap.  On smp this this never happens since
@@ -266,7 +265,7 @@
 			last_task_used_altivec);
 
 #ifdef CONFIG_SMP
-	printk(" CPU: %d last CPU: %d", current->processor,current->last_processor);
+	printk(" CPU: %d", current->processor);
 #endif /* CONFIG_SMP */
 
 	printk("\n");
@@ -378,9 +377,6 @@
 	childregs->msr &= ~MSR_VEC;
 #endif /* CONFIG_ALTIVEC */
 
-#ifdef CONFIG_SMP
-	p->last_processor = NO_PROC_ID;
-#endif /* CONFIG_SMP */
 	return 0;
 }
 
@@ -440,8 +436,8 @@
 	current->thread.fpscr = 0;
 }
 
-asmlinkage int sys_clone(int p1, int p2, int p3, int p4, int p5, int p6,
-			 struct pt_regs *regs)
+int sys_clone(int p1, int p2, int p3, int p4, int p5, int p6,
+	      struct pt_regs *regs)
 {
 	unsigned long clone_flags = p1;
 	int res;
@@ -459,8 +455,8 @@
 	return res;
 }
 
-asmlinkage int sys_fork(int p1, int p2, int p3, int p4, int p5, int p6,
-			struct pt_regs *regs)
+int sys_fork(int p1, int p2, int p3, int p4, int p5, int p6,
+	     struct pt_regs *regs)
 {
 	int res;
 
@@ -477,15 +473,15 @@
 	return res;
 }
 
-asmlinkage int sys_vfork(int p1, int p2, int p3, int p4, int p5, int p6,
-			 struct pt_regs *regs)
+int sys_vfork(int p1, int p2, int p3, int p4, int p5, int p6,
+	      struct pt_regs *regs)
 {
 	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->gpr[1], regs);
 }
 
-asmlinkage int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2,
-			  unsigned long a3, unsigned long a4, unsigned long a5,
-			  struct pt_regs *regs)
+int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2,
+	       unsigned long a3, unsigned long a4, unsigned long a5,
+	       struct pt_regs *regs)
 {
 	int error;
 	char * filename;
diff -urN linux/arch/ppc/kernel/signal.c bk/arch/ppc/kernel/signal.c
--- linux/arch/ppc/kernel/signal.c	Fri Jul 14 14:41:36 2000
+++ bk/arch/ppc/kernel/signal.c	Tue Aug 15 16:16:13 2000
@@ -154,7 +154,7 @@
 }
 
-asmlinkage int
+int
 sys_sigaltstack(const stack_t *uss, stack_t *uoss)
 {
 	struct pt_regs *regs = (struct pt_regs *) &uss;
@@ -232,7 +232,7 @@
  * Each of these things must be a multiple of 16 bytes in size.
  *
  */
-asmlinkage int sys_rt_sigreturn(struct pt_regs *regs)
+int sys_rt_sigreturn(struct pt_regs *regs)
 {
 	struct rt_sigframe *rt_sf;
 	struct sigcontext_struct sigctx;
@@ -301,7 +301,6 @@
 	return ret;
 
 badframe:
-	lock_kernel();
 	do_exit(SIGSEGV);
 }
 
@@ -351,7 +350,6 @@
 	printk("badframe in setup_rt_frame, regs=%p frame=%p newsp=%lx\n",
 	       regs, frame, newsp);
 #endif
-	lock_kernel();
 	do_exit(SIGSEGV);
 }
 
@@ -418,7 +416,6 @@
 	return ret;
 
 badframe:
-	lock_kernel();
 	do_exit(SIGSEGV);
 }
 
@@ -460,7 +457,6 @@
 	printk("badframe in setup_frame, regs=%p frame=%p newsp=%lx\n",
 	       regs, frame, newsp);
 #endif
-	lock_kernel();
 	do_exit(SIGSEGV);
 }
 
@@ -541,7 +537,6 @@
 	       regs, frame, *newspp);
 	printk("sc=%p sig=%d ka=%p info=%p oldset=%p\n", sc, sig, ka, info, oldset);
 #endif
-	lock_kernel();
 	do_exit(SIGSEGV);
 }
 
@@ -645,7 +640,6 @@
 			/* FALLTHRU */
 
 		default:
-			lock_kernel();
 			sigaddset(&current->signal, signr);
 			recalc_sigpending(current);
 			current->flags |= PF_SIGNALED;
@@ -663,6 +657,7 @@
 
 		/* Whee!  Actually deliver the signal.  */
 		handle_signal(signr, ka, &info, oldset, regs, &newsp, frame);
+		break;
 	}
 
 	if (regs->trap == 0x0C00 /* System Call! */ &&
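The one-line "+ break;" is the functional part of the signal.c hunks: without
it, control falls out of the switch and the enclosing loop goes on to dequeue
the next pending signal even though one has already been set up for delivery.
Schematically, the shape of the 2.4-era delivery loop is something like this
(a simplified sketch, not the actual kernel code; next_pending_signal() is a
stand-in for the inline dequeue logic):

	for (;;) {
		unsigned long signr = next_pending_signal();	/* sketch */
		if (!signr)
			break;		/* nothing left to deliver */
		/* ... ignore/stop/default handling, switch with FALLTHRU ... */

		/* Whee!  Actually deliver the signal.  */
		handle_signal(signr, ka, &info, oldset, regs, &newsp, frame);
		break;			/* deliver at most one signal here */
	}

The dropped lock_kernel() calls before do_exit(SIGSEGV) are presumably safe
because do_exit() in this tree no longer requires the caller to hold the big
kernel lock.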
diff -urN linux/arch/ppc/lib/string.S bk/arch/ppc/lib/string.S
--- linux/arch/ppc/lib/string.S	Mon Feb 14 05:47:01 2000
+++ bk/arch/ppc/lib/string.S	Sat Jul 29 17:59:38 2000
@@ -12,10 +12,67 @@
 #include
 #include
 
-CACHELINE_BYTES = 32
-LG_CACHELINE_BYTES = 5
-CACHELINE_MASK = 0x1f
-CACHELINE_WORDS = 8
+#if defined(CONFIG_4xx) || defined(CONFIG_8xx)
+#define CACHE_LINE_SIZE		16
+#define LG_CACHE_LINE_SIZE	4
+#elif !defined(CONFIG_PPC64BRIDGE)
+#define CACHE_LINE_SIZE		32
+#define LG_CACHE_LINE_SIZE	5
+#else
+#define CACHE_LINE_SIZE		128
+#define LG_CACHE_LINE_SIZE	7
+#endif /* CONFIG_4xx || CONFIG_8xx */
+
+#define COPY_16_BYTES		\
+	lwz	r7,4(r4);	\
+	lwz	r8,8(r4);	\
+	lwz	r9,12(r4);	\
+	lwzu	r10,16(r4);	\
+	stw	r7,4(r6);	\
+	stw	r8,8(r6);	\
+	stw	r9,12(r6);	\
+	stwu	r10,16(r6)
+
+#define COPY_16_BYTES_WITHEX(n)	\
+8 ## n ## 0:			\
+	lwz	r7,4(r4);	\
+8 ## n ## 1:			\
+	lwz	r8,8(r4);	\
+8 ## n ## 2:			\
+	lwz	r9,12(r4);	\
+8 ## n ## 3:			\
+	lwzu	r10,16(r4);	\
+8 ## n ## 4:			\
+	stw	r7,4(r6);	\
+8 ## n ## 5:			\
+	stw	r8,8(r6);	\
+8 ## n ## 6:			\
+	stw	r9,12(r6);	\
+8 ## n ## 7:			\
+	stwu	r10,16(r6)
+
+#define COPY_16_BYTES_EXCODE(n)			\
+9 ## n ## 0:					\
+	addi	r5,r5,-(16 * n);		\
+	b	104f;				\
+9 ## n ## 1:					\
+	addi	r5,r5,-(16 * n);		\
+	b	105f;				\
+.section __ex_table,"a";			\
+	.align	2;				\
+	.long	8 ## n ## 0b,9 ## n ## 0b;	\
+	.long	8 ## n ## 1b,9 ## n ## 0b;	\
+	.long	8 ## n ## 2b,9 ## n ## 0b;	\
+	.long	8 ## n ## 3b,9 ## n ## 0b;	\
+	.long	8 ## n ## 4b,9 ## n ## 1b;	\
+	.long	8 ## n ## 5b,9 ## n ## 1b;	\
+	.long	8 ## n ## 6b,9 ## n ## 1b;	\
+	.long	8 ## n ## 7b,9 ## n ## 1b;	\
+.text
+
+CACHELINE_BYTES = CACHE_LINE_SIZE
+LG_CACHELINE_BYTES = LG_CACHE_LINE_SIZE
+CACHELINE_MASK = (CACHE_LINE_SIZE-1)
 
 	.globl	strcpy
 strcpy:
@@ -105,7 +162,14 @@
 	bdnz	4b
 3:	mtctr	r9
 	li	r7,4
+#if !defined(CONFIG_8xx)
 10:	dcbz	r7,r6
+#else
+10:	stw	r4, 4(r6)
+	stw	r4, 8(r6)
+	stw	r4, 12(r6)
+	stw	r4, 16(r6)
+#endif
 	addi	r6,r6,CACHELINE_BYTES
 	bdnz	10b
 	clrlwi	r5,r8,32-LG_CACHELINE_BYTES
@@ -202,23 +266,24 @@
 	li	r11,4
 	mtctr	r0
 	beq	63f
-53:	dcbz	r11,r6
-	lwz	r7,4(r4)
-	lwz	r8,8(r4)
-	lwz	r9,12(r4)
-	lwzu	r10,16(r4)
-	stw	r7,4(r6)
-	stw	r8,8(r6)
-	stw	r9,12(r6)
-	stwu	r10,16(r6)
-	lwz	r7,4(r4)
-	lwz	r8,8(r4)
-	lwz	r9,12(r4)
-	lwzu	r10,16(r4)
-	stw	r7,4(r6)
-	stw	r8,8(r6)
-	stw	r9,12(r6)
-	stwu	r10,16(r6)
+53:
+#if !defined(CONFIG_8xx)
+	dcbz	r11,r6
+#endif
+	COPY_16_BYTES
+#if CACHE_LINE_SIZE >= 32
+	COPY_16_BYTES
+#if CACHE_LINE_SIZE >= 64
+	COPY_16_BYTES
+	COPY_16_BYTES
+#if CACHE_LINE_SIZE >= 128
+	COPY_16_BYTES
+	COPY_16_BYTES
+	COPY_16_BYTES
+	COPY_16_BYTES
+#endif
+#endif
+#endif
 	bdnz	53b
 
63:	srwi.	r0,r5,2
@@ -382,23 +447,34 @@
 	li	r11,4
 	mtctr	r0
 	beq	63f
-53:	dcbz	r11,r6
-10:	lwz	r7,4(r4)
-11:	lwz	r8,8(r4)
-12:	lwz	r9,12(r4)
-13:	lwzu	r10,16(r4)
-14:	stw	r7,4(r6)
-15:	stw	r8,8(r6)
-16:	stw	r9,12(r6)
-17:	stwu	r10,16(r6)
-20:	lwz	r7,4(r4)
-21:	lwz	r8,8(r4)
-22:	lwz	r9,12(r4)
-23:	lwzu	r10,16(r4)
-24:	stw	r7,4(r6)
-25:	stw	r8,8(r6)
-26:	stw	r9,12(r6)
-27:	stwu	r10,16(r6)
+53:
+#if !defined(CONFIG_8xx)
+	dcbz	r11,r6
+#endif
+/* had to move these to keep extable in order */
+	.section __ex_table,"a"
+	.align	2
+	.long	70b,100f
+	.long	71b,101f
+	.long	72b,102f
+	.long	73b,103f
+	.long	53b,105f
+	.text
+/* the main body of the cacheline loop */
+	COPY_16_BYTES_WITHEX(0)
+#if CACHE_LINE_SIZE >= 32
+	COPY_16_BYTES_WITHEX(1)
+#if CACHE_LINE_SIZE >= 64
+	COPY_16_BYTES_WITHEX(2)
+	COPY_16_BYTES_WITHEX(3)
+#if CACHE_LINE_SIZE >= 128
+	COPY_16_BYTES_WITHEX(4)
+	COPY_16_BYTES_WITHEX(5)
+	COPY_16_BYTES_WITHEX(6)
+	COPY_16_BYTES_WITHEX(7)
+#endif
+#endif
+#endif
 	bdnz	53b
 
63:	srwi.	r0,r5,2
@@ -434,15 +510,31 @@
103:	li	r4,1
91:	li	r3,2
	b	99f
-/* read fault in 2nd half of cacheline loop */
-106:	addi	r5,r5,-16
-/* read fault in 1st half of cacheline loop */
+
+/*
+ * this stuff handles faults in the cacheline loop and branches to either
+ * 104f (if in read part) or 105f (if in write part), after updating r5
+ */
+	COPY_16_BYTES_EXCODE(0)
+#if CACHE_LINE_SIZE >= 32
+	COPY_16_BYTES_EXCODE(1)
+#if CACHE_LINE_SIZE >= 64
+	COPY_16_BYTES_EXCODE(2)
+	COPY_16_BYTES_EXCODE(3)
+#if CACHE_LINE_SIZE >= 128
+	COPY_16_BYTES_EXCODE(4)
+	COPY_16_BYTES_EXCODE(5)
+	COPY_16_BYTES_EXCODE(6)
+	COPY_16_BYTES_EXCODE(7)
+#endif
+#endif
+#endif
+
+/* read fault in cacheline loop */
104:	li	r4,0
	b	92f
-/* write fault in 2nd half of cacheline loop */
-107:	addi	r5,r5,-16
 /* fault on dcbz (effectively a write fault) */
-/* or write fault in 1st half of cacheline loop */
+/* or write fault in cacheline loop */
105:	li	r4,1
92:	li	r3,LG_CACHELINE_BYTES
	b	99f
@@ -485,36 +577,15 @@
	bdnz	114b
120:	blr
 
-.section __ex_table,"a"
+	.section __ex_table,"a"
	.align	2
-	.long	70b,100b
-	.long	71b,101b
-	.long	72b,102b
-	.long	73b,103b
-	.long	53b,105b
-	.long	10b,104b
-	.long	11b,104b
-	.long	12b,104b
-	.long	13b,104b
-	.long	14b,105b
-	.long	15b,105b
-	.long	16b,105b
-	.long	17b,105b
-	.long	20b,106b
-	.long	21b,106b
-	.long	22b,106b
-	.long	23b,106b
-	.long	24b,107b
-	.long	25b,107b
-	.long	26b,107b
-	.long	27b,107b
	.long	30b,108b
	.long	31b,109b
	.long	40b,110b
	.long	41b,111b
	.long	112b,120b
	.long	114b,120b
-.text
+	.text
 
	.globl	__clear_user
__clear_user:
@@ -546,12 +617,13 @@
	blr
99:	li	r3,-EFAULT
	blr
-.section __ex_table,"a"
+
+	.section __ex_table,"a"
	.align	2
	.long	11b,99b
	.long	1b,99b
	.long	8b,99b
-.text
+	.text
 
	.globl	__strncpy_from_user
__strncpy_from_user:
@@ -570,10 +642,11 @@
	blr
99:	li	r3,-EFAULT
	blr
-.section __ex_table,"a"
+
+	.section __ex_table,"a"
	.align	2
	.long	1b,99b
-.text
+	.text
 
/* r3 = str, r4 = len (> 0), r5 = top (highest addr) */
	.globl	__strnlen_user
__strnlen_user:
@@ -596,6 +669,7 @@
	blr
99:	li	r3,0		/* bad address, return 0 */
	blr
-.section __ex_table,"a"
+
+	.section __ex_table,"a"
	.align	2
	.long	1b,99b
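The string.S rework parameterizes the copy loops by cache line size instead of
hard-coding 32 bytes: each pass over the cacheline loop optionally dcbz's the
destination line (skipped on 8xx builds) and then expands CACHE_LINE_SIZE/16
copies of the 16-byte move. In C, the loop structure is roughly as follows
(purely illustrative; copy_lines() is an invented name and dcbz is reduced to
a comment):

	#define CACHE_LINE_SIZE 32	/* 16 on 4xx/8xx, 128 on PPC64BRIDGE */

	/* one iteration copies exactly one destination cache line */
	static void copy_lines(unsigned int *dst, const unsigned int *src,
			       unsigned long nlines)
	{
		while (nlines--) {
			int i;
			/* the asm does "dcbz r11,r6" here (not on 8xx):
			 * zero-establish the destination line so the stores
			 * below need not fetch it from memory first */
			for (i = 0; i < CACHE_LINE_SIZE / 16; i++) {
				dst[0] = src[0];
				dst[1] = src[1];
				dst[2] = src[2];
				dst[3] = src[3];
				dst += 4;
				src += 4;
			}
		}
	}

COPY_16_BYTES_WITHEX/_EXCODE generate that same unrolled body with numbered
labels plus __ex_table fixups, so a user-space fault at any of the eight loads
or stores first subtracts the bytes already handled from the remaining count
in r5 and then branches to the common read (104) or write (105) fault exit.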
diff -urN linux/arch/ppc/mm/init.c bk/arch/ppc/mm/init.c
--- linux/arch/ppc/mm/init.c	Thu Aug 10 14:06:27 2000
+++ bk/arch/ppc/mm/init.c	Thu Aug 31 20:15:23 2000
@@ -516,6 +516,7 @@
 	flush_hash_segments(0xd, 0xffffff);
 #else
 	__clear_user(Hash, Hash_size);
+	_tlbia();
 #ifdef CONFIG_SMP
 	smp_send_tlb_invalidate(0);
 #endif /* CONFIG_SMP */
diff -urN linux/include/asm-ppc/atomic.h bk/include/asm-ppc/atomic.h
--- linux/include/asm-ppc/atomic.h	Wed May 3 06:05:40 2000
+++ bk/include/asm-ppc/atomic.h	Tue Aug 15 16:16:13 2000
@@ -21,7 +21,7 @@
 extern void atomic_clear_mask(unsigned long mask, unsigned long *addr);
 extern void atomic_set_mask(unsigned long mask, unsigned long *addr);
 
-extern __inline__ int atomic_add_return(int a, atomic_t *v)
+extern __inline__ int atomic_add_return(int a, volatile atomic_t *v)
 {
 	int t;
 
@@ -37,7 +37,7 @@
 	return t;
 }
 
-extern __inline__ int atomic_sub_return(int a, atomic_t *v)
+extern __inline__ int atomic_sub_return(int a, volatile atomic_t *v)
 {
 	int t;
 
@@ -53,7 +53,7 @@
 	return t;
 }
 
-extern __inline__ int atomic_inc_return(atomic_t *v)
+extern __inline__ int atomic_inc_return(volatile atomic_t *v)
 {
 	int t;
 
@@ -69,7 +69,7 @@
 	return t;
 }
 
-extern __inline__ int atomic_dec_return(atomic_t *v)
+extern __inline__ int atomic_dec_return(volatile atomic_t *v)
 {
 	int t;
diff -urN linux/include/asm-ppc/bitops.h bk/include/asm-ppc/bitops.h
--- linux/include/asm-ppc/bitops.h	Tue Jul 11 12:08:12 2000
+++ bk/include/asm-ppc/bitops.h	Fri Aug 18 19:14:12 2000
@@ -230,6 +230,8 @@
 	tmp = *p;
 found_first:
 	tmp |= ~0UL << size;
+	if (tmp == ~0UL)	/* Are any bits zero? */
+		return result + size;	/* Nope. */
 found_middle:
 	return result + ffz(tmp);
 }
@@ -320,6 +322,8 @@
 	tmp = cpu_to_le32p(p);
 found_first:
 	tmp |= ~0U << size;
+	if (tmp == ~0UL)	/* Are any bits zero? */
+		return result + size;	/* Nope. */
 found_middle:
 	return result + ffz(tmp);
 }
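The bitops.h change matters because ffz() has no defined result when every bit
is set, and the old code could hand it exactly that word whenever the final
partial word contains no zero bit. The added check turns that case into the
conventional "not found" return at the end of the searched range. A user-space
model of the fixed tail handling (illustrative only; ffz32() is a stand-in for
the kernel's ffz(), and the code assumes 0 < size < bits-per-long):

	#include <stdio.h>

	/* first zero bit; caller guarantees x != ~0UL */
	static unsigned long ffz32(unsigned long x)
	{
		unsigned long i = 0;
		while (x & 1) {
			x >>= 1;
			i++;
		}
		return i;
	}

	static unsigned long find_zero_in_tail(unsigned long tmp,
					       unsigned long result,
					       unsigned long size)
	{
		tmp |= ~0UL << size;	/* mask off bits beyond 'size' */
		if (tmp == ~0UL)	/* Are any bits zero? */
			return result + size;	/* Nope - "not found". */
		return result + ffz32(tmp);
	}

	int main(void)
	{
		/* all ones in the 3 valid bits: must report 64+3, not a
		 * bogus bit index from ffz() on an all-ones word */
		printf("%lu\n", find_zero_in_tail(0x7, 64, 3));
		return 0;
	}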
diff -urN linux/include/asm-ppc/fcntl.h bk/include/asm-ppc/fcntl.h
--- linux/include/asm-ppc/fcntl.h	Tue Jul 11 12:08:12 2000
+++ bk/include/asm-ppc/fcntl.h	Tue Aug 15 16:16:13 2000
@@ -35,6 +35,10 @@
 #define F_SETSIG	10	/*  for sockets. */
 #define F_GETSIG	11	/*  for sockets. */
 
+#define F_GETLK64	12	/*  using 'struct flock64' */
+#define F_SETLK64	13
+#define F_SETLKW64	14
+
 /* for F_[GET|SET]FL */
 #define FD_CLOEXEC	1	/* actually anything with low bit set goes */
 
@@ -66,6 +70,14 @@
 	off_t l_start;
 	off_t l_len;
 	pid_t l_pid;
+};
+
+struct flock64 {
+	short  l_type;
+	short  l_whence;
+	loff_t l_start;
+	loff_t l_len;
+	pid_t  l_pid;
 };
 
 #endif
diff -urN linux/include/asm-ppc/mman.h bk/include/asm-ppc/mman.h
--- linux/include/asm-ppc/mman.h	Wed Mar 22 05:49:32 2000
+++ bk/include/asm-ppc/mman.h	Tue Aug 29 09:41:08 2000
@@ -22,8 +22,8 @@
 #define MS_INVALIDATE	2	/* invalidate the caches */
 #define MS_SYNC		4	/* synchronous memory sync */
 
-#define MCL_CURRENT	1	/* lock all current mappings */
-#define MCL_FUTURE	2	/* lock all future mappings */
+#define MCL_CURRENT	0x2000	/* lock all currently mapped pages */
+#define MCL_FUTURE	0x4000	/* lock all additions to address space */
 
 #define MADV_NORMAL	0x0	/* default page-in behavior */
 #define MADV_RANDOM	0x1	/* page-in minimum required */
diff -urN linux/include/asm-ppc/spinlock.h bk/include/asm-ppc/spinlock.h
--- linux/include/asm-ppc/spinlock.h	Mon Feb 14 05:47:01 2000
+++ bk/include/asm-ppc/spinlock.h	Tue Aug 22 09:45:16 2000
@@ -41,6 +41,7 @@
 } rwlock_t;
 
 #define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0 }
+#define rwlock_init(lp)	do { *(lp) = RW_LOCK_UNLOCKED; } while(0)
 
 extern void _read_lock(rwlock_t *rw);
 extern void _read_unlock(rwlock_t *rw);
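One caveat on the mman.h hunk: renumbering MCL_CURRENT/MCL_FUTURE is a
user-visible ABI change, since these values are passed straight through to
sys_mlockall() - presumably done here to match the values user space on
PowerPC already expects. Source-level users are unaffected once rebuilt
against the new header, e.g.:

	#include <sys/mman.h>
	#include <stdio.h>

	int main(void)
	{
		/* lock everything mapped now and everything mapped later */
		if (mlockall(MCL_CURRENT | MCL_FUTURE) != 0) {
			perror("mlockall");
			return 1;
		}
		munlockall();
		return 0;
	}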