From 97195d6b411fec8e33aa55b6a7c3dde7984d65ca Mon Sep 17 00:00:00 2001
From: Hans-Joachim Picht
Date: Fri, 30 May 2008 10:03:24 +0200
Subject: [S390] fix sparsemem related compile error with allnoconfig on s390

On s390, make allnoconfig fails with the following build error:

  arch/s390/mm/init.c: In function 'show_mem':
  arch/s390/mm/init.c:55: error: implicit declaration of function 'pfn_valid'
  make[1]: *** [arch/s390/mm/init.o] Error 1
  make: *** [arch/s390/mm] Error 2

This problem can be fixed by ensuring that ARCH_SELECT_MEMORY_MODEL is
always turned on.

Signed-off-by: Hans-Joachim Picht
Signed-off-by: Martin Schwidefsky
---
 arch/s390/Kconfig | 3 +++
 1 file changed, 3 insertions(+)
(limited to 'arch/s390')

diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 1d035082e78e..93acb3c1859d 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -308,6 +308,9 @@ config ARCH_SPARSEMEM_ENABLE
 config ARCH_SPARSEMEM_DEFAULT
 	def_bool y
 
+config ARCH_SELECT_MEMORY_MODEL
+	def_bool y
+
 source "mm/Kconfig"
 
 comment "I/O subsystem configuration"
-- cgit v1.2.3
From 67060d9c1f5d91c917cc51bed464cb5638eaddbc Mon Sep 17 00:00:00 2001
From: Heiko Carstens
Date: Fri, 30 May 2008 10:03:27 +0200
Subject: [S390] Fix section mismatch warnings.

This fixes the last remaining section mismatch warnings in s390
architecture code. It also reveals a real bug introduced by... me
with git commit 2069e978d5a6e7b45d58027e3de7f879b8c5e488
("[S390] sparsemem vmemmap: initialize memmap.")

Calling the generic vmemmap_alloc_block() function to get initialized
memory is a nice idea, however that function is __meminit annotated
and therefore might be gone if we try to call it later. This can
happen if a DCSS segment gets added.

So basically revert the patch and clear the memmap explicitly to fix
the original bug.

Signed-off-by: Heiko Carstens
Signed-off-by: Martin Schwidefsky
---
 arch/s390/kernel/smp.c |  2 +-
 arch/s390/mm/vmem.c    | 18 +++++++++++++-----
 2 files changed, 14 insertions(+), 6 deletions(-)
(limited to 'arch/s390')

diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 1f4228948dc4..42b1d12ebb10 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -1089,7 +1089,7 @@ out:
 
 #ifdef CONFIG_HOTPLUG_CPU
 
-int smp_rescan_cpus(void)
+int __ref smp_rescan_cpus(void)
 {
 	cpumask_t newcpus;
 	int cpu;
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index ea2804808f39..f591188fa2c0 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -27,12 +27,19 @@ struct memory_segment {
 
 static LIST_HEAD(mem_segs);
 
-static pud_t *vmem_pud_alloc(void)
+static void __ref *vmem_alloc_pages(unsigned int order)
+{
+	if (slab_is_available())
+		return (void *)__get_free_pages(GFP_KERNEL, order);
+	return alloc_bootmem_pages((1 << order) * PAGE_SIZE);
+}
+
+static inline pud_t *vmem_pud_alloc(void)
 {
 	pud_t *pud = NULL;
 
 #ifdef CONFIG_64BIT
-	pud = vmemmap_alloc_block(PAGE_SIZE * 4, 0);
+	pud = vmem_alloc_pages(2);
 	if (!pud)
 		return NULL;
 	clear_table((unsigned long *) pud, _REGION3_ENTRY_EMPTY, PAGE_SIZE * 4);
@@ -40,12 +47,12 @@ static pud_t *vmem_pud_alloc(void)
 	return pud;
 }
 
-static pmd_t *vmem_pmd_alloc(void)
+static inline pmd_t *vmem_pmd_alloc(void)
 {
 	pmd_t *pmd = NULL;
 
 #ifdef CONFIG_64BIT
-	pmd = vmemmap_alloc_block(PAGE_SIZE * 4, 0);
+	pmd = vmem_alloc_pages(2);
 	if (!pmd)
 		return NULL;
 	clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4);
@@ -207,13 +214,14 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
 		if (pte_none(*pt_dir)) {
 			unsigned long new_page;
 
-			new_page =__pa(vmemmap_alloc_block(PAGE_SIZE, 0));
+			new_page =__pa(vmem_alloc_pages(0));
 			if (!new_page)
 				goto out;
 			pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL);
 			*pt_dir = pte;
 		}
 	}
+	memset(start, 0, nr * sizeof(struct page));
 	ret = 0;
 out:
 	flush_tlb_kernel_range(start_addr, end_addr);
-- cgit v1.2.3
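What makes the new helper safe where vmemmap_alloc_block() was not is worth
spelling out: it is a single allocator that works both at early boot (bootmem
only) and at runtime (slab available), and it is annotated __ref instead of
__meminit, so it is still in memory when a DCSS segment is added long after
init sections have been freed. A minimal sketch of that pattern, with an
illustrative name rather than the kernel's:

	/* Sketch only: allocate pages before or after the slab allocator is up.
	 * __ref documents that the bootmem branch may reference init-time code;
	 * unlike a __meminit function, this helper is not discarded after boot,
	 * so late callers (e.g. a DCSS segment add) can still reach it.
	 */
	static void __ref *early_or_late_alloc_pages(unsigned int order)
	{
		if (slab_is_available())
			return (void *)__get_free_pages(GFP_KERNEL, order);
		return alloc_bootmem_pages((1 << order) * PAGE_SIZE);
	}

Unlike vmemmap_alloc_block(), this gives no guarantee of initialized memory,
which is why the patch also clears the memmap explicitly with memset().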
From 1760537b69123905bf4f4b56f5746ae4547e9694 Mon Sep 17 00:00:00 2001
From: Gerald Schaefer
Date: Fri, 30 May 2008 10:03:28 +0200
Subject: [S390] appldata: prevent cpu hotplug when walking cpu_online_map.

Use get_online_cpus() to prevent cpu hotplug in situations where
for_each_online_cpu() is called.

Signed-off-by: Gerald Schaefer
Signed-off-by: Martin Schwidefsky
---
 arch/s390/appldata/appldata_base.c | 8 ++++++++
 1 file changed, 8 insertions(+)
(limited to 'arch/s390')

diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index 655d52543e2d..ad40729bec3d 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -130,6 +130,7 @@ static void appldata_work_fn(struct work_struct *work)
 
 	P_DEBUG("  -= Work Queue =-\n");
 	i = 0;
+	get_online_cpus();
 	spin_lock(&appldata_ops_lock);
 	list_for_each(lh, &appldata_ops_list) {
 		ops = list_entry(lh, struct appldata_ops, list);
@@ -140,6 +141,7 @@ static void appldata_work_fn(struct work_struct *work)
 		}
 	}
 	spin_unlock(&appldata_ops_lock);
+	put_online_cpus();
 }
 
 /*
@@ -266,12 +268,14 @@ appldata_timer_handler(ctl_table *ctl, int write, struct file *filp,
 	len = *lenp;
 	if (copy_from_user(buf, buffer, len > sizeof(buf) ? sizeof(buf) : len))
 		return -EFAULT;
+	get_online_cpus();
 	spin_lock(&appldata_timer_lock);
 	if (buf[0] == '1')
 		__appldata_vtimer_setup(APPLDATA_ADD_TIMER);
 	else if (buf[0] == '0')
 		__appldata_vtimer_setup(APPLDATA_DEL_TIMER);
 	spin_unlock(&appldata_timer_lock);
+	put_online_cpus();
 out:
 	*lenp = len;
 	*ppos += len;
@@ -314,10 +318,12 @@ appldata_interval_handler(ctl_table *ctl, int write, struct file *filp,
 		return -EINVAL;
 	}
 
+	get_online_cpus();
 	spin_lock(&appldata_timer_lock);
 	appldata_interval = interval;
 	__appldata_vtimer_setup(APPLDATA_MOD_TIMER);
 	spin_unlock(&appldata_timer_lock);
+	put_online_cpus();
 
 	P_INFO("Monitoring CPU interval set to %u milliseconds.\n",
 		 interval);
@@ -556,8 +562,10 @@ static int __init appldata_init(void)
 		return -ENOMEM;
 	}
 
+	get_online_cpus();
 	for_each_online_cpu(i)
 		appldata_online_cpu(i);
+	put_online_cpus();
 
 	/* Register cpu hotplug notifier */
 	register_hotcpu_notifier(&appldata_nb);
-- cgit v1.2.3
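The rule the patch applies is a general one: any walk of cpu_online_map, such
as for_each_online_cpu(), must be bracketed by the CPU-hotplug read lock or a
CPU may be added or removed mid-walk. A minimal usage sketch (the per-cpu
callback is hypothetical, not appldata code):

	int cpu;

	get_online_cpus();			/* take the hotplug read lock */
	for_each_online_cpu(cpu)
		init_my_percpu_state(cpu);	/* hypothetical per-cpu setup */
	put_online_cpus();			/* allow hotplug again */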
From c1bb7f31eaef6ed6b9f895b99d9ea12e6b853606 Mon Sep 17 00:00:00 2001
From: Heiko Carstens
Date: Fri, 30 May 2008 10:03:29 +0200
Subject: [S390] showmem: Only walk spanned pages.

Convert show_mem() so it's nearly the same as on x86/powerpc. This
gives us proper locking and also gets rid of the only use of
max_mapnr. In addition, the number of pages was contained in an int,
which might not be sufficient in the not too distant future.

Cc: Johannes Weiner
Signed-off-by: Heiko Carstens
Signed-off-by: Martin Schwidefsky
---
 arch/s390/mm/init.c | 49 +++++++++++++++++++++++--------------------------
 1 file changed, 23 insertions(+), 26 deletions(-)
(limited to 'arch/s390')

diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 29f3a63806b9..05598649b326 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -44,37 +44,34 @@ char empty_zero_page[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
 
 void show_mem(void)
 {
-	int i, total = 0, reserved = 0;
-	int shared = 0, cached = 0;
+	unsigned long i, total = 0, reserved = 0;
+	unsigned long shared = 0, cached = 0;
+	unsigned long flags;
 	struct page *page;
+	pg_data_t *pgdat;
 
 	printk("Mem-info:\n");
 	show_free_areas();
-	i = max_mapnr;
-	while (i-- > 0) {
-		if (!pfn_valid(i))
-			continue;
-		page = pfn_to_page(i);
-		total++;
-		if (PageReserved(page))
-			reserved++;
-		else if (PageSwapCache(page))
-			cached++;
-		else if (page_count(page))
-			shared += page_count(page) - 1;
+	for_each_online_pgdat(pgdat) {
+		pgdat_resize_lock(pgdat, &flags);
+		for (i = 0; i < pgdat->node_spanned_pages; i++) {
+			if (!pfn_valid(pgdat->node_start_pfn + i))
+				continue;
+			page = pfn_to_page(pgdat->node_start_pfn + i);
+			total++;
+			if (PageReserved(page))
+				reserved++;
+			else if (PageSwapCache(page))
+				cached++;
+			else if (page_count(page))
+				shared += page_count(page) - 1;
+		}
+		pgdat_resize_unlock(pgdat, &flags);
 	}
-	printk("%d pages of RAM\n", total);
-	printk("%d reserved pages\n", reserved);
-	printk("%d pages shared\n", shared);
-	printk("%d pages swap cached\n", cached);
-
-	printk("%lu pages dirty\n", global_page_state(NR_FILE_DIRTY));
-	printk("%lu pages writeback\n", global_page_state(NR_WRITEBACK));
-	printk("%lu pages mapped\n", global_page_state(NR_FILE_MAPPED));
-	printk("%lu pages slab\n",
-	       global_page_state(NR_SLAB_RECLAIMABLE) +
-	       global_page_state(NR_SLAB_UNRECLAIMABLE));
-	printk("%lu pages pagetables\n", global_page_state(NR_PAGETABLE));
+	printk("%ld pages of RAM\n", total);
+	printk("%ld reserved pages\n", reserved);
+	printk("%ld pages shared\n", shared);
+	printk("%ld pages swap cached\n", cached);
 }
 
 /*
-- cgit v1.2.3

From 209fb9090f4ca5874289c9ca82991393f85c1eff Mon Sep 17 00:00:00 2001
From: Martin Schwidefsky
Date: Fri, 30 May 2008 10:03:34 +0200
Subject: [S390] disassembler: fix idte instruction format.

The correct instruction format of idte is "idte r1,r3,r2" with r1 at
bit 24, r3 at bit 16 and r2 at bit 28.

Signed-off-by: Martin Schwidefsky
---
 arch/s390/kernel/dis.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
(limited to 'arch/s390')

diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index c14a336f6300..d2f270c995d9 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -208,7 +208,7 @@ static const unsigned char formats[][7] = {
 	[INSTR_RRF_F0FF]  = { 0xff, F_16,F_24,F_28,0,0,0 },    /* e.g. madbr */
 	[INSTR_RRF_FUFF]  = { 0xff, F_24,F_16,F_28,U4_20,0,0 },/* e.g. didbr */
 	[INSTR_RRF_RURR]  = { 0xff, R_24,R_28,R_16,U4_20,0,0 },/* e.g. .insn */
-	[INSTR_RRF_R0RR]  = { 0xff, R_24,R_28,R_16,0,0,0 },    /* e.g. idte */
+	[INSTR_RRF_R0RR]  = { 0xff, R_24,R_16,R_28,0,0,0 },    /* e.g. idte */
 	[INSTR_RRF_U0FF]  = { 0xff, F_24,U4_16,F_28,0,0,0 },   /* e.g. fixr */
 	[INSTR_RRF_U0RF]  = { 0xff, R_24,U4_16,F_28,0,0,0 },   /* e.g. cfebr */
 	[INSTR_RRF_M0RR]  = { 0xff, R_24,R_28,M_16,0,0,0 },    /* e.g. sske */
-- cgit v1.2.3
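For readers unfamiliar with the dis.c tables: each entry lists the operands in
the order they are printed, and R_nn stands for a 4-bit register field that
begins at bit nn of the instruction, bit 0 being the leftmost bit. Swapping
R_16 and R_28 therefore only changes which nibble is printed as the second and
which as the third operand. Roughly, and purely as an illustration of the bit
positions named in the commit message:

	/* Illustrative helper (not the kernel's decoder): extract the 4-bit
	 * field that starts at bit `pos` of a 32-bit instruction word, where
	 * bit 0 is the most significant bit. */
	static unsigned int nibble_at(unsigned int insn, unsigned int pos)
	{
		return (insn >> (28 - pos)) & 0xf;
	}

	/* idte r1,r3,r2:
	 *   r1 = nibble_at(insn, 24)
	 *   r3 = nibble_at(insn, 16)
	 *   r2 = nibble_at(insn, 28)
	 * which is exactly the R_24,R_16,R_28 order of the corrected entry. */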
From 0066ed55a9a061ed64bbc09c16f45daf0b976ac5 Mon Sep 17 00:00:00 2001
From: Martin Schwidefsky
Date: Fri, 30 May 2008 10:03:35 +0200
Subject: [S390] Update default configuration.

Signed-off-by: Martin Schwidefsky
---
 arch/s390/defconfig | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)
(limited to 'arch/s390')

diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index aa341d0ea1e6..c5cdb975d590 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -1,7 +1,7 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.25
-# Wed Apr 30 11:07:45 2008
+# Linux kernel version: 2.6.26-rc4
+# Fri May 30 09:49:33 2008
 #
 CONFIG_SCHED_MC=y
 CONFIG_MMU=y
@@ -103,6 +103,7 @@ CONFIG_RT_MUTEXES=y
 # CONFIG_TINY_SHMEM is not set
 CONFIG_BASE_SMALL=0
 CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_MODULE_FORCE_UNLOAD is not set
 CONFIG_MODVERSIONS=y
@@ -173,6 +174,7 @@ CONFIG_PREEMPT=y
 # CONFIG_PREEMPT_RCU is not set
 CONFIG_ARCH_SPARSEMEM_ENABLE=y
 CONFIG_ARCH_SPARSEMEM_DEFAULT=y
+CONFIG_ARCH_SELECT_MEMORY_MODEL=y
 CONFIG_SELECT_MEMORY_MODEL=y
 # CONFIG_FLATMEM_MANUAL is not set
 # CONFIG_DISCONTIGMEM_MANUAL is not set
@@ -210,6 +212,7 @@ CONFIG_FORCE_MAX_ZONEORDER=9
 CONFIG_PFAULT=y
 # CONFIG_SHARED_KERNEL is not set
 # CONFIG_CMM is not set
+# CONFIG_PAGE_STATES is not set
 CONFIG_VIRT_TIMER=y
 CONFIG_VIRT_CPU_ACCOUNTING=y
 # CONFIG_APPLDATA_BASE is not set
@@ -620,6 +623,7 @@ CONFIG_S390_VMUR=m
 #
 # CONFIG_MEMSTICK is not set
 # CONFIG_NEW_LEDS is not set
+CONFIG_ACCESSIBILITY=y
 
 #
 # File systems
@@ -754,11 +758,12 @@ CONFIG_FRAME_WARN=2048
 CONFIG_MAGIC_SYSRQ=y
 # CONFIG_UNUSED_SYMBOLS is not set
 CONFIG_DEBUG_FS=y
-CONFIG_HEADERS_CHECK=y
+# CONFIG_HEADERS_CHECK is not set
 CONFIG_DEBUG_KERNEL=y
 # CONFIG_SCHED_DEBUG is not set
 # CONFIG_SCHEDSTATS is not set
 # CONFIG_TIMER_STATS is not set
+# CONFIG_DEBUG_OBJECTS is not set
 # CONFIG_DEBUG_SLAB is not set
 CONFIG_DEBUG_PREEMPT=y
 # CONFIG_DEBUG_RT_MUTEXES is not set
-- cgit v1.2.3

From b8cee18cc75d7b9dbe6c6526dfae9ab49e84fa95 Mon Sep 17 00:00:00 2001
From: Christian Borntraeger
Date: Wed, 21 May 2008 13:37:16 +0200
Subject: KVM: s390: use yield instead of schedule to implement diag 0x44

diag 0x44 is the common way on s390 to yield the cpu to the
hypervisor. It is called by the guest in cpu_relax and in the spinlock
code to yield to other guest cpus. This semantic is similar to yield.
Let's replace the call to schedule with yield to make sure that
current is really yielding.

Signed-off-by: Christian Borntraeger
Signed-off-by: Carsten Otte
Signed-off-by: Avi Kivity
---
 arch/s390/kvm/diag.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
(limited to 'arch/s390')

diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
index f639a152869f..a0775e1f08df 100644
--- a/arch/s390/kvm/diag.c
+++ b/arch/s390/kvm/diag.c
@@ -20,7 +20,7 @@ static int __diag_time_slice_end(struct kvm_vcpu *vcpu)
 	VCPU_EVENT(vcpu, 5, "%s", "diag time slice end");
 	vcpu->stat.diagnose_44++;
 	vcpu_put(vcpu);
-	schedule();
+	yield();
 	vcpu_load(vcpu);
 	return 0;
 }
-- cgit v1.2.3

From 74b6b522ec83f9c44fc7743f2adcb24664aa8f45 Mon Sep 17 00:00:00 2001
From: Christian Borntraeger
Date: Wed, 21 May 2008 13:37:29 +0200
Subject: KVM: s390: fix locking order problem in enable_sie

There are potential locking problems in enable_sie. We take the
task_lock and the mmap_sem. As exit_mm uses the same locks vice versa,
this triggers a lockdep warning. The second problem is that dup_mm and
mmput might sleep, so we must not hold the task_lock at that moment.

The solution is to dup the mm unconditionally and use the task_lock
before and afterwards to check if we can use the new mm. dup_mm and
mmput are called outside the task_lock, but we run update_mm while
holding the task_lock, protecting us against ptrace.

Signed-off-by: Christian Borntraeger
Signed-off-by: Carsten Otte
Acked-by: Martin Schwidefsky
Signed-off-by: Avi Kivity
---
 arch/s390/mm/pgtable.c | 44 +++++++++++++++++++++++++++-----------------
 1 file changed, 27 insertions(+), 17 deletions(-)
(limited to 'arch/s390')

diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 5c1aea97cd12..3d98ba82ea67 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -254,36 +254,46 @@ void disable_noexec(struct mm_struct *mm, struct task_struct *tsk)
 int s390_enable_sie(void)
 {
 	struct task_struct *tsk = current;
-	struct mm_struct *mm;
-	int rc;
+	struct mm_struct *mm, *old_mm;
 
-	task_lock(tsk);
-
-	rc = 0;
+	/* Do we have pgstes? if yes, we are done */
 	if (tsk->mm->context.pgstes)
-		goto unlock;
+		return 0;
 
-	rc = -EINVAL;
+	/* lets check if we are allowed to replace the mm */
+	task_lock(tsk);
 	if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
-	    tsk->mm != tsk->active_mm || tsk->mm->ioctx_list)
-		goto unlock;
+	    tsk->mm != tsk->active_mm || tsk->mm->ioctx_list) {
+		task_unlock(tsk);
+		return -EINVAL;
+	}
+	task_unlock(tsk);
 
-	tsk->mm->context.pgstes = 1;	/* dirty little tricks .. */
+	/* we copy the mm with pgstes enabled */
+	tsk->mm->context.pgstes = 1;
 	mm = dup_mm(tsk);
 	tsk->mm->context.pgstes = 0;
-
-	rc = -ENOMEM;
 	if (!mm)
-		goto unlock;
-	mmput(tsk->mm);
+		return -ENOMEM;
+
+	/* Now lets check again if somebody attached ptrace etc */
+	task_lock(tsk);
+	if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
+	    tsk->mm != tsk->active_mm || tsk->mm->ioctx_list) {
+		mmput(mm);
+		task_unlock(tsk);
+		return -EINVAL;
+	}
+
+	/* ok, we are alone. No ptrace, no threads, etc. */
+	old_mm = tsk->mm;
 	tsk->mm = tsk->active_mm = mm;
 	preempt_disable();
 	update_mm(mm, tsk);
 	cpu_set(smp_processor_id(), mm->cpu_vm_mask);
 	preempt_enable();
-	rc = 0;
-unlock:
 	task_unlock(tsk);
-	return rc;
+	mmput(old_mm);
+	return 0;
 }
 EXPORT_SYMBOL_GPL(s390_enable_sie);
-- cgit v1.2.3
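Stripped of the s390 specifics, the ordering s390_enable_sie() now follows is
a common pattern: cheap checks under task_lock, the sleeping work (dup_mm,
mmput) with the lock dropped, and a re-check under the lock before committing,
since the world may have changed in between. As a condensed sketch, where
may_replace_mm() stands in for the mm_users/active_mm/ioctx checks and is not
a real function (tsk, mm and old_mm as in s390_enable_sie()):

	task_lock(tsk);				/* 1. cheap checks only */
	if (!may_replace_mm(tsk)) {
		task_unlock(tsk);
		return -EINVAL;
	}
	task_unlock(tsk);

	mm = dup_mm(tsk);			/* 2. may sleep: no task_lock held */
	if (!mm)
		return -ENOMEM;

	task_lock(tsk);				/* 3. revalidate, then commit */
	if (!may_replace_mm(tsk)) {
		mmput(mm);
		task_unlock(tsk);
		return -EINVAL;
	}
	old_mm = tsk->mm;
	tsk->mm = tsk->active_mm = mm;		/* update_mm() happens here too */
	task_unlock(tsk);

	mmput(old_mm);				/* 4. may sleep: lock already dropped */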
From 71cde5879f10b639506bc0b9f29a89f58b42a17e Mon Sep 17 00:00:00 2001
From: Christian Borntraeger
Date: Wed, 21 May 2008 13:37:34 +0200
Subject: KVM: s390: handle machine checks when guest is running

The low-level interrupt handler on s390 checks for _TIF_WORK_INT and
exits the guest context, if work is pending. TIF_WORK_INT is defined
as _TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_MCCK_PENDING.

Currently the sie loop checks for signals and reschedule, but it does
not check for machine checks. That means that we exit the guest
context if a machine check is pending, but we do not handle the
machine check.

Signed-off-by: Christian Borntraeger
CC: Heiko Carstens
Signed-off-by: Carsten Otte
Signed-off-by: Avi Kivity
---
 arch/s390/kvm/kvm-s390.c | 5 +++++
 1 file changed, 5 insertions(+)
(limited to 'arch/s390')

diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 0ac36a649eba..40e4f2de7320 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -423,6 +423,8 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
 	return -EINVAL; /* not implemented yet */
 }
 
+extern void s390_handle_mcck(void);
+
 static void __vcpu_run(struct kvm_vcpu *vcpu)
 {
 	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);
@@ -430,6 +432,9 @@ static void __vcpu_run(struct kvm_vcpu *vcpu)
 	if (need_resched())
 		schedule();
 
+	if (test_thread_flag(TIF_MCCK_PENDING))
+		s390_handle_mcck();
+
 	vcpu->arch.sie_block->icptcode = 0;
 	local_irq_disable();
 	kvm_guest_enter();
-- cgit v1.2.3

From 0ff318674503ce3787ef62d84f4d948db204b268 Mon Sep 17 00:00:00 2001
From: Carsten Otte
Date: Wed, 21 May 2008 13:37:37 +0200
Subject: KVM: s390: fix interrupt delivery

The current code delivers pending interrupts before it checks for
need_resched. On a busy host, this can lead to a longer interrupt
latency if the interrupt is injected while the process is scheduled
away. This patch moves delivering the interrupt _after_ schedule(),
which makes more sense.

Signed-off-by: Carsten Otte
Signed-off-by: Avi Kivity
---
 arch/s390/kvm/kvm-s390.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
(limited to 'arch/s390')

diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 40e4f2de7320..ded27c7777cc 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -435,6 +435,8 @@ static void __vcpu_run(struct kvm_vcpu *vcpu)
 	if (test_thread_flag(TIF_MCCK_PENDING))
 		s390_handle_mcck();
 
+	kvm_s390_deliver_pending_interrupts(vcpu);
+
 	vcpu->arch.sie_block->icptcode = 0;
 	local_irq_disable();
 	kvm_guest_enter();
@@ -480,7 +482,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	might_sleep();
 
 	do {
-		kvm_s390_deliver_pending_interrupts(vcpu);
 		__vcpu_run(vcpu);
 		rc = kvm_handle_sie_intercept(vcpu);
 	} while (!signal_pending(current) && !rc);
-- cgit v1.2.3

From 1f0d0f094df9a570dfc26d5eb825986b7e165e1d Mon Sep 17 00:00:00 2001
From: Carsten Otte
Date: Wed, 21 May 2008 13:37:40 +0200
Subject: KVM: s390: Send program check on access error

If the guest accesses non-existing memory, the sie64a function returns
-EFAULT. We must check the return value and send a program check to
the guest if the sie instruction faulted, otherwise the guest will
loop at the faulting code.

Signed-off-by: Christian Borntraeger
Signed-off-by: Carsten Otte
Signed-off-by: Avi Kivity
---
 arch/s390/kvm/kvm-s390.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)
(limited to 'arch/s390')

diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index ded27c7777cc..6558b09ff579 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -443,7 +443,10 @@ static void __vcpu_run(struct kvm_vcpu *vcpu)
 	local_irq_enable();
 	VCPU_EVENT(vcpu, 6, "entering sie flags %x",
 		   atomic_read(&vcpu->arch.sie_block->cpuflags));
-	sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs);
+	if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
+		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
+		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+	}
 	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
 		   vcpu->arch.sie_block->icptcode);
 	local_irq_disable();
-- cgit v1.2.3
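Taken together, the three KVM fixes above all land in __vcpu_run(). Condensed
from the hunks shown (this is a reading aid assembled from the patched code,
not a standalone excerpt), the entry path of each run-loop iteration now is:

	if (need_resched())
		schedule();

	if (test_thread_flag(TIF_MCCK_PENDING))
		s390_handle_mcck();	/* handle, not just exit on, machine checks */

	kvm_s390_deliver_pending_interrupts(vcpu);	/* now after schedule() */

	vcpu->arch.sie_block->icptcode = 0;
	local_irq_disable();
	kvm_guest_enter();
	local_irq_enable();
	VCPU_EVENT(vcpu, 6, "entering sie flags %x",
		   atomic_read(&vcpu->arch.sie_block->cpuflags));
	if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
		/* sie faulted: reflect it to the guest instead of looping */
		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	}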
From e52b2af541bcb299212a63cfa3e3231618a415be Mon Sep 17 00:00:00 2001
From: Carsten Otte
Date: Wed, 21 May 2008 13:37:44 +0200
Subject: KVM: s390: Fix race condition in kvm_s390_handle_wait

The call to add_timer was issued before local_int.lock was taken and
before timer_due was set to 0. If the timer expires before the lock is
being taken, the timer function will set timer_due to 1 and exit
before the vcpu falls asleep. Depending on other external events, the
vcpu might sleep forever. This fix pulls setting timer_due to the
beginning of the function before add_timer, which ensures correct
behavior.

Signed-off-by: Carsten Otte
Signed-off-by: Avi Kivity
---
 arch/s390/kvm/interrupt.c | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)
(limited to 'arch/s390')

diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index fcd1ed8015c1..84a7fed4cd4e 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -339,6 +339,11 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
 	if (kvm_cpu_has_interrupt(vcpu))
 		return 0;
 
+	__set_cpu_idle(vcpu);
+	spin_lock_bh(&vcpu->arch.local_int.lock);
+	vcpu->arch.local_int.timer_due = 0;
+	spin_unlock_bh(&vcpu->arch.local_int.lock);
+
 	if (psw_interrupts_disabled(vcpu)) {
 		VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
 		__unset_cpu_idle(vcpu);
@@ -366,8 +371,6 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
 no_timer:
 	spin_lock_bh(&vcpu->arch.local_int.float_int->lock);
 	spin_lock_bh(&vcpu->arch.local_int.lock);
-	__set_cpu_idle(vcpu);
-	vcpu->arch.local_int.timer_due = 0;
 	add_wait_queue(&vcpu->arch.local_int.wq, &wait);
 	while (list_empty(&vcpu->arch.local_int.list) &&
 		list_empty(&vcpu->arch.local_int.float_int->list) &&
-- cgit v1.2.3

From 148f1678f0ba7a5e79e44ff23064d4326fa145a4 Mon Sep 17 00:00:00 2001
From: Heiko Carstens
Date: Tue, 10 Jun 2008 10:03:18 +0200
Subject: [S390] sparsemem: use SPARSEMEM_STATIC if !64BIT.

In case of a !64BIT kernel we end up with a zero sized mem_section
array. This happens because NR_MEM_SECTIONS is smaller than
SECTIONS_PER_ROOT, but we have:

#define NR_SECTION_ROOTS	(NR_MEM_SECTIONS / SECTIONS_PER_ROOT)

and

struct mem_section *mem_section[NR_SECTION_ROOTS];

So fix this by selecting SPARSEMEM_STATIC which makes sure that
SECTIONS_PER_ROOT is 1.

Cc: Gerald Schaefer
Signed-off-by: Heiko Carstens
Signed-off-by: Martin Schwidefsky
---
 arch/s390/Kconfig | 1 +
 1 file changed, 1 insertion(+)
(limited to 'arch/s390')

diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 93acb3c1859d..107e492cb47e 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -304,6 +304,7 @@ config ARCH_SPARSEMEM_ENABLE
 	def_bool y
 	select SPARSEMEM_VMEMMAP_ENABLE
 	select SPARSEMEM_VMEMMAP
+	select SPARSEMEM_STATIC if !64BIT
 
 config ARCH_SPARSEMEM_DEFAULT
 	def_bool y
-- cgit v1.2.3
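Writing out the sparsemem arithmetic makes the zero-sized array obvious. Under
SPARSEMEM_EXTREME, SECTIONS_PER_ROOT is roughly how many mem_section entries
fit into one page; the concrete numbers below are only illustrative for a
small !64BIT configuration, not values quoted from the patch:

	/* Illustrative values only */
	#define SECTIONS_PER_ROOT	(PAGE_SIZE / sizeof(struct mem_section))
					/* e.g. 4096 / 16 = 256 */
	#define NR_SECTION_ROOTS	(NR_MEM_SECTIONS / SECTIONS_PER_ROOT)
					/* e.g. 64 / 256 = 0 (integer division) */

	struct mem_section *mem_section[NR_SECTION_ROOTS];	/* zero-sized array */

With SPARSEMEM_STATIC selected instead, SECTIONS_PER_ROOT is 1, so
NR_SECTION_ROOTS equals NR_MEM_SECTIONS and the array can never collapse to
zero entries.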
From ee0ddadd086e25503f81be551c43f66472300acd Mon Sep 17 00:00:00 2001
From: Heiko Carstens
Date: Tue, 10 Jun 2008 10:03:20 +0200
Subject: [S390] vmemmap: fix off-by-one bug.

If a memory range is supposed to be added to the 1:1 mapping and it
ends just below the maximum supported physical address, it won't
succeed. This is because a test doesn't consider that the end address
is 1 smaller than start + size. Fix the comparison.

Signed-off-by: Heiko Carstens
Signed-off-by: Martin Schwidefsky
---
 arch/s390/mm/vmem.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
(limited to 'arch/s390')

diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index f591188fa2c0..e4868bfc672f 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -236,7 +236,7 @@ static int insert_memory_segment(struct memory_segment *seg)
 {
 	struct memory_segment *tmp;
 
-	if (seg->start + seg->size >= VMEM_MAX_PHYS ||
+	if (seg->start + seg->size > VMEM_MAX_PHYS ||
 	    seg->start + seg->size < seg->start)
 		return -ERANGE;
 
-- cgit v1.2.3

From 24d3e210c18bfedafe986ec489575cf91ac39d22 Mon Sep 17 00:00:00 2001
From: Segher Boessenkool
Date: Tue, 10 Jun 2008 10:03:23 +0200
Subject: [S390] Fix build failure in __cpu_up()

The first argument to __ctl_store() should be the array to store stuff
in, not just the first element of that array. With the current code in
__cpu_up(), mainline GCC dies with an internal compiler error. I
didn't diagnose that further, but just fixed the kernel bug.

Signed-off-by: Segher Boessenkool
Signed-off-by: Martin Schwidefsky
Cc: Heiko Carstens
---
 arch/s390/kernel/smp.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
(limited to 'arch/s390')

diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 42b1d12ebb10..5d4fa4b1c74c 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -711,7 +711,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
 	memset(sf, 0, sizeof(struct stack_frame));
 	sf->gprs[9] = (unsigned long) sf;
 	cpu_lowcore->save_area[15] = (unsigned long) sf;
-	__ctl_store(cpu_lowcore->cregs_save_area[0], 0, 15);
+	__ctl_store(cpu_lowcore->cregs_save_area, 0, 15);
 	asm volatile(
 		"	stam	0,15,0(%0)"
 		: : "a" (&cpu_lowcore->access_regs_save_area) : "memory");
-- cgit v1.2.3
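The __cpu_up() fix is easy to misread: per the commit message, __ctl_store()
expects the save area itself as its first argument, the array and not just its
first element, so the two calls sketched below are not interchangeable. The
local variable is only a stand-in for cpu_lowcore->cregs_save_area:

	unsigned long cregs[16];	/* stand-in for lowcore->cregs_save_area */

	__ctl_store(cregs, 0, 15);	/* correct: names the whole 16-entry array */
	__ctl_store(cregs[0], 0, 15);	/* wrong: names a single unsigned long;
					   this is the form that broke the build */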