author     Len Brown <len.brown@intel.com>    2011-03-18 18:06:08 -0400
committer  Len Brown <len.brown@intel.com>    2011-03-18 18:06:08 -0400
commit     05534c9ffc9d5d950b14de8ba49a7609dc59b0b8 (patch)
tree       65a01a1e0bc0e28c64fb5105cc763949f5412b4b
parent     dd87cc53c42f3260b7eb7f60822de0fa9e58af59 (diff)
parent     589c7a39ae2f2b74fd13ae344ca1dcca61da6bca (diff)
Merge branch 'acpica' into release
-rw-r--r-- MAINTAINERS | 11
-rw-r--r-- Makefile | 2
-rw-r--r-- arch/arm/mach-omap2/clkt_dpll.c | 2
-rw-r--r-- arch/arm/mach-omap2/mailbox.c | 2
-rw-r--r-- arch/arm/mach-omap2/mux.c | 2
-rw-r--r-- arch/arm/mach-omap2/pm-debug.c | 8
-rw-r--r-- arch/arm/mach-omap2/prcm_mpu44xx.h | 4
-rw-r--r-- arch/arm/mach-omap2/smartreflex.c | 4
-rw-r--r-- arch/arm/mach-omap2/timer-gp.c | 13
-rw-r--r-- arch/arm/plat-omap/mailbox.c | 11
-rw-r--r-- arch/x86/include/asm/acpi.h | 1
-rw-r--r-- arch/x86/include/asm/smpboot_hooks.h | 2
-rw-r--r-- arch/x86/kernel/acpi/boot.c | 14
-rw-r--r-- arch/x86/kernel/apb_timer.c | 2
-rw-r--r-- arch/x86/kernel/early-quirks.c | 16
-rw-r--r-- arch/x86/kernel/reboot.c | 8
-rw-r--r-- arch/x86/kvm/svm.c | 2
-rw-r--r-- block/genhd.c | 2
-rw-r--r-- block/ioctl.c | 8
-rw-r--r-- drivers/acpi/acpica/Makefile | 4
-rw-r--r-- drivers/acpi/acpica/acdispat.h | 38
-rw-r--r-- drivers/acpi/acpica/acglobal.h | 4
-rw-r--r-- drivers/acpi/acpica/aclocal.h | 19
-rw-r--r-- drivers/acpi/acpica/dsargs.c | 391
-rw-r--r-- drivers/acpi/acpica/dscontrol.c | 410
-rw-r--r-- drivers/acpi/acpica/dsopcode.c | 725
-rw-r--r-- drivers/acpi/acpica/dswload.c | 670
-rw-r--r-- drivers/acpi/acpica/dswload2.c | 720
-rw-r--r-- drivers/acpi/acpica/evgpe.c | 9
-rw-r--r-- drivers/acpi/acpica/evregion.c | 2
-rw-r--r-- drivers/acpi/acpica/evxfregn.c | 34
-rw-r--r-- drivers/acpi/acpica/exfldio.c | 4
-rw-r--r-- drivers/acpi/acpica/tbfadt.c | 5
-rw-r--r-- drivers/acpi/acpica/utdecode.c | 548
-rw-r--r-- drivers/acpi/acpica/utglobal.c | 484
-rw-r--r-- drivers/block/floppy.c | 2
-rw-r--r-- drivers/char/agp/intel-agp.h | 1
-rw-r--r-- drivers/char/agp/intel-gtt.c | 56
-rw-r--r-- drivers/char/tpm/tpm.c | 10
-rw-r--r-- drivers/gpu/drm/drm_irq.c | 3
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_tiling.c | 16
-rw-r--r-- drivers/gpu/drm/i915/i915_irq.c | 6
-rw-r--r-- drivers/gpu/drm/i915/intel_display.c | 95
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_bo.c | 5
-rw-r--r-- drivers/hwmon/ad7414.c | 1
-rw-r--r-- drivers/hwmon/adt7411.c | 1
-rw-r--r-- drivers/md/linear.c | 1
-rw-r--r-- drivers/md/md.c | 31
-rw-r--r-- drivers/md/md.h | 2
-rw-r--r-- drivers/md/multipath.c | 1
-rw-r--r-- drivers/md/raid0.c | 2
-rw-r--r-- drivers/md/raid1.c | 6
-rw-r--r-- drivers/md/raid10.c | 7
-rw-r--r-- drivers/md/raid5.c | 1
-rw-r--r-- drivers/nfc/Kconfig | 2
-rw-r--r-- drivers/nfc/pn544.c | 4
-rw-r--r-- drivers/pps/kapi.c | 2
-rw-r--r-- drivers/rapidio/rio-sysfs.c | 12
-rw-r--r-- drivers/regulator/mc13xxx-regulator-core.c | 2
-rw-r--r-- drivers/regulator/wm831x-dcdc.c | 1
-rw-r--r-- drivers/rtc/rtc-at91sam9.c | 2
-rw-r--r-- drivers/rtc/rtc-ds3232.c | 14
-rw-r--r-- drivers/thermal/Kconfig | 1
-rw-r--r-- drivers/thermal/thermal_sys.c | 40
-rw-r--r-- drivers/usb/core/hub.c | 18
-rw-r--r-- drivers/usb/host/xhci-dbg.c | 9
-rw-r--r-- drivers/usb/host/xhci-mem.c | 10
-rw-r--r-- drivers/usb/host/xhci-ring.c | 40
-rw-r--r-- drivers/usb/host/xhci.c | 14
-rw-r--r-- drivers/usb/host/xhci.h | 2
-rw-r--r-- drivers/usb/musb/musb_core.c | 1
-rw-r--r-- fs/afs/write.c | 1
-rw-r--r-- fs/aio.c | 52
-rw-r--r-- fs/block_dev.c | 19
-rw-r--r-- fs/btrfs/ctree.h | 3
-rw-r--r-- fs/btrfs/extent-tree.c | 9
-rw-r--r-- fs/btrfs/extent_io.c | 138
-rw-r--r-- fs/btrfs/extent_io.h | 2
-rw-r--r-- fs/btrfs/inode.c | 126
-rw-r--r-- fs/btrfs/ioctl.c | 7
-rw-r--r-- fs/btrfs/lzo.c | 21
-rw-r--r-- fs/btrfs/relocation.c | 13
-rw-r--r-- fs/btrfs/super.c | 7
-rw-r--r-- fs/btrfs/volumes.c | 13
-rw-r--r-- fs/eventpoll.c | 95
-rw-r--r-- fs/fuse/dir.c | 7
-rw-r--r-- fs/fuse/file.c | 52
-rw-r--r-- fs/fuse/fuse_i.h | 6
-rw-r--r-- fs/inode.c | 9
-rw-r--r-- fs/internal.h | 2
-rw-r--r-- fs/namespace.c | 2
-rw-r--r-- fs/ocfs2/journal.h | 6
-rw-r--r-- fs/ocfs2/refcounttree.c | 7
-rw-r--r-- fs/ocfs2/super.c | 28
-rw-r--r-- fs/partitions/ldm.c | 5
-rw-r--r-- include/acpi/acoutput.h | 12
-rw-r--r-- include/acpi/acpixf.h | 2
-rw-r--r-- include/acpi/actbl.h | 16
-rw-r--r-- include/asm-generic/pgtable.h | 2
-rw-r--r-- include/drm/drmP.h | 2
-rw-r--r-- include/linux/fs.h | 2
-rw-r--r-- include/linux/pm.h | 2
-rw-r--r-- include/linux/pm_wakeup.h | 25
-rw-r--r-- include/linux/rio_regs.h | 4
-rw-r--r-- include/linux/thermal.h | 8
-rw-r--r-- kernel/time/tick-broadcast.c | 10
-rw-r--r-- kernel/time/tick-common.c | 6
-rw-r--r-- kernel/time/tick-internal.h | 3
-rw-r--r-- lib/swiotlb.c | 6
-rw-r--r-- mm/mempolicy.c | 2
-rw-r--r-- mm/migrate.c | 6
-rw-r--r-- mm/mremap.c | 4
-rw-r--r-- mm/page_alloc.c | 5
-rw-r--r-- mm/swapfile.c | 2
-rw-r--r-- mm/truncate.c | 2
-rw-r--r-- mm/vmscan.c | 32
-rw-r--r-- net/rxrpc/ar-key.c | 8
-rw-r--r-- sound/core/jack.c | 1
-rw-r--r-- sound/pci/hda/patch_conexant.c | 5
-rw-r--r-- sound/pci/hda/patch_sigmatel.c | 15
-rw-r--r-- sound/pci/hda/patch_via.c | 2
-rw-r--r-- sound/soc/codecs/wm8903.h | 2
-rw-r--r-- sound/soc/codecs/wm8994.c | 5
-rw-r--r-- sound/soc/codecs/wm_hubs.c | 3
-rw-r--r-- sound/soc/imx/eukrea-tlv320.c | 2
-rw-r--r-- sound/soc/pxa/e740_wm9705.c | 4
-rw-r--r-- sound/soc/pxa/e750_wm9705.c | 4
-rw-r--r-- sound/soc/pxa/e800_wm9712.c | 4
-rw-r--r-- sound/soc/pxa/em-x270.c | 4
-rw-r--r-- sound/soc/pxa/mioa701_wm9713.c | 4
-rw-r--r-- sound/soc/pxa/palm27x.c | 4
-rw-r--r-- sound/soc/pxa/tosa.c | 4
-rw-r--r-- sound/soc/pxa/zylonite.c | 4
-rw-r--r-- sound/usb/card.c | 4
-rw-r--r-- sound/usb/pcm.c | 7
-rw-r--r-- sound/usb/usbaudio.h | 1
-rw-r--r-- tools/perf/builtin-timechart.c | 6
-rw-r--r-- tools/perf/util/hist.c | 7
-rw-r--r-- tools/perf/util/svghelper.c | 6
139 files changed, 3208 insertions, 2283 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index 6f99e1260db8..8afba6321e24 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1692,6 +1692,13 @@ M: Andy Whitcroft <apw@canonical.com>
S: Supported
F: scripts/checkpatch.pl
+CHINESE DOCUMENTATION
+M: Harry Wei <harryxiyou@gmail.com>
+L: xiyoulinuxkernelgroup@googlegroups.com
+L: linux-kernel@zh-kernel.org (moderated for non-subscribers)
+S: Maintained
+F: Documentation/zh_CN/
+
CISCO VIC ETHERNET NIC DRIVER
M: Vasanthy Kolluri <vkolluri@cisco.com>
M: Roopa Prabhu <roprabhu@cisco.com>
@@ -5266,7 +5273,7 @@ S: Maintained
F: drivers/net/wireless/rtl818x/rtl8180/
RTL8187 WIRELESS DRIVER
-M: Herton Ronaldo Krzesinski <herton@mandriva.com.br>
+M: Herton Ronaldo Krzesinski <herton@canonical.com>
M: Hin-Tak Leung <htl10@users.sourceforge.net>
M: Larry Finger <Larry.Finger@lwfinger.net>
L: linux-wireless@vger.kernel.org
@@ -6104,7 +6111,7 @@ S: Maintained
F: security/tomoyo/
TOPSTAR LAPTOP EXTRAS DRIVER
-M: Herton Ronaldo Krzesinski <herton@mandriva.com.br>
+M: Herton Ronaldo Krzesinski <herton@canonical.com>
L: platform-driver-x86@vger.kernel.org
S: Maintained
F: drivers/platform/x86/topstar-laptop.c
diff --git a/Makefile b/Makefile
index 26d7d824db51..2f7d92255b57 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 38
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
NAME = Flesh-Eating Bats with Fangs
# *DOCUMENTATION*
diff --git a/arch/arm/mach-omap2/clkt_dpll.c b/arch/arm/mach-omap2/clkt_dpll.c
index 337392c3f549..acb7ae5b0a25 100644
--- a/arch/arm/mach-omap2/clkt_dpll.c
+++ b/arch/arm/mach-omap2/clkt_dpll.c
@@ -77,7 +77,7 @@ static int _dpll_test_fint(struct clk *clk, u8 n)
dd = clk->dpll_data;
/* DPLL divider must result in a valid jitter correction val */
- fint = clk->parent->rate / (n + 1);
+ fint = clk->parent->rate / n;
if (fint < DPLL_FINT_BAND1_MIN) {
pr_debug("rejecting n=%d due to Fint failure, "
diff --git a/arch/arm/mach-omap2/mailbox.c b/arch/arm/mach-omap2/mailbox.c
index 394413dc7deb..0a585dfa9874 100644
--- a/arch/arm/mach-omap2/mailbox.c
+++ b/arch/arm/mach-omap2/mailbox.c
@@ -334,7 +334,7 @@ static struct omap_mbox mbox_iva_info = {
.priv = &omap2_mbox_iva_priv,
};
-struct omap_mbox *omap2_mboxes[] = { &mbox_iva_info, &mbox_dsp_info, NULL };
+struct omap_mbox *omap2_mboxes[] = { &mbox_dsp_info, &mbox_iva_info, NULL };
#endif
#if defined(CONFIG_ARCH_OMAP4)
diff --git a/arch/arm/mach-omap2/mux.c b/arch/arm/mach-omap2/mux.c
index 98148b6c36e9..6c84659cf846 100644
--- a/arch/arm/mach-omap2/mux.c
+++ b/arch/arm/mach-omap2/mux.c
@@ -605,7 +605,7 @@ static void __init omap_mux_dbg_create_entry(
list_for_each_entry(e, &partition->muxmodes, node) {
struct omap_mux *m = &e->mux;
- (void)debugfs_create_file(m->muxnames[0], S_IWUGO, mux_dbg_dir,
+ (void)debugfs_create_file(m->muxnames[0], S_IWUSR, mux_dbg_dir,
m, &omap_mux_dbg_signal_fops);
}
}
diff --git a/arch/arm/mach-omap2/pm-debug.c b/arch/arm/mach-omap2/pm-debug.c
index 125f56591fb5..a5a83b358ddd 100644
--- a/arch/arm/mach-omap2/pm-debug.c
+++ b/arch/arm/mach-omap2/pm-debug.c
@@ -637,14 +637,14 @@ static int __init pm_dbg_init(void)
}
- (void) debugfs_create_file("enable_off_mode", S_IRUGO | S_IWUGO, d,
+ (void) debugfs_create_file("enable_off_mode", S_IRUGO | S_IWUSR, d,
&enable_off_mode, &pm_dbg_option_fops);
- (void) debugfs_create_file("sleep_while_idle", S_IRUGO | S_IWUGO, d,
+ (void) debugfs_create_file("sleep_while_idle", S_IRUGO | S_IWUSR, d,
&sleep_while_idle, &pm_dbg_option_fops);
- (void) debugfs_create_file("wakeup_timer_seconds", S_IRUGO | S_IWUGO, d,
+ (void) debugfs_create_file("wakeup_timer_seconds", S_IRUGO | S_IWUSR, d,
&wakeup_timer_seconds, &pm_dbg_option_fops);
(void) debugfs_create_file("wakeup_timer_milliseconds",
- S_IRUGO | S_IWUGO, d, &wakeup_timer_milliseconds,
+ S_IRUGO | S_IWUSR, d, &wakeup_timer_milliseconds,
&pm_dbg_option_fops);
pm_dbg_init_done = 1;
diff --git a/arch/arm/mach-omap2/prcm_mpu44xx.h b/arch/arm/mach-omap2/prcm_mpu44xx.h
index 729a644ce852..3300ff6e3cfe 100644
--- a/arch/arm/mach-omap2/prcm_mpu44xx.h
+++ b/arch/arm/mach-omap2/prcm_mpu44xx.h
@@ -38,8 +38,8 @@
#define OMAP4430_PRCM_MPU_CPU1_INST 0x0800
/* PRCM_MPU clockdomain register offsets (from instance start) */
-#define OMAP4430_PRCM_MPU_CPU0_MPU_CDOFFS 0x0000
-#define OMAP4430_PRCM_MPU_CPU1_MPU_CDOFFS 0x0000
+#define OMAP4430_PRCM_MPU_CPU0_MPU_CDOFFS 0x0018
+#define OMAP4430_PRCM_MPU_CPU1_MPU_CDOFFS 0x0018
/*
diff --git a/arch/arm/mach-omap2/smartreflex.c b/arch/arm/mach-omap2/smartreflex.c
index c37e823266d3..95ac336fe3f7 100644
--- a/arch/arm/mach-omap2/smartreflex.c
+++ b/arch/arm/mach-omap2/smartreflex.c
@@ -900,7 +900,7 @@ static int __init omap_sr_probe(struct platform_device *pdev)
return PTR_ERR(dbg_dir);
}
- (void) debugfs_create_file("autocomp", S_IRUGO | S_IWUGO, dbg_dir,
+ (void) debugfs_create_file("autocomp", S_IRUGO | S_IWUSR, dbg_dir,
(void *)sr_info, &pm_sr_fops);
(void) debugfs_create_x32("errweight", S_IRUGO, dbg_dir,
&sr_info->err_weight);
@@ -939,7 +939,7 @@ static int __init omap_sr_probe(struct platform_device *pdev)
strcpy(name, "volt_");
sprintf(volt_name, "%d", volt_data[i].volt_nominal);
strcat(name, volt_name);
- (void) debugfs_create_x32(name, S_IRUGO | S_IWUGO, nvalue_dir,
+ (void) debugfs_create_x32(name, S_IRUGO | S_IWUSR, nvalue_dir,
&(sr_info->nvalue_table[i].nvalue));
}
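
Illustrative aside, not part of the patch: every hunk above replaces S_IWUGO with S_IWUSR, stripping group/other write permission from debugfs files. A minimal userspace sketch of what these mode macros evaluate to; S_IRUGO and S_IWUGO are kernel-only helpers, so they are re-defined below with their kernel definitions so the snippet builds on its own.

    #include <stdio.h>
    #include <sys/stat.h>

    /* Kernel convenience macros (from the kernel's <linux/stat.h>). */
    #define S_IRUGO (S_IRUSR | S_IRGRP | S_IROTH)   /* read for user/group/other  */
    #define S_IWUGO (S_IWUSR | S_IWGRP | S_IWOTH)   /* write for user/group/other */

    int main(void)
    {
            /* Old mode: world-writable debugfs entry. */
            printf("S_IRUGO | S_IWUGO = %04o\n", S_IRUGO | S_IWUGO); /* 0666 */
            /* New mode: only the owner (root) may write. */
            printf("S_IRUGO | S_IWUSR = %04o\n", S_IRUGO | S_IWUSR); /* 0644 */
            /* mux.c case: write-only, owner only. */
            printf("S_IWUSR           = %04o\n", S_IWUSR);           /* 0200 */
            return 0;
    }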
diff --git a/arch/arm/mach-omap2/timer-gp.c b/arch/arm/mach-omap2/timer-gp.c
index 7b7c2683ae7b..0fc550e7e482 100644
--- a/arch/arm/mach-omap2/timer-gp.c
+++ b/arch/arm/mach-omap2/timer-gp.c
@@ -39,6 +39,7 @@
#include <asm/mach/time.h>
#include <plat/dmtimer.h>
#include <asm/localtimer.h>
+#include <asm/sched_clock.h>
#include "timer-gp.h"
@@ -190,6 +191,7 @@ static void __init omap2_gp_clocksource_init(void)
/*
* clocksource
*/
+static DEFINE_CLOCK_DATA(cd);
static struct omap_dm_timer *gpt_clocksource;
static cycle_t clocksource_read_cycles(struct clocksource *cs)
{
@@ -204,6 +206,15 @@ static struct clocksource clocksource_gpt = {
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
+static void notrace dmtimer_update_sched_clock(void)
+{
+ u32 cyc;
+
+ cyc = omap_dm_timer_read_counter(gpt_clocksource);
+
+ update_sched_clock(&cd, cyc, (u32)~0);
+}
+
/* Setup free-running counter for clocksource */
static void __init omap2_gp_clocksource_init(void)
{
@@ -224,6 +235,8 @@ static void __init omap2_gp_clocksource_init(void)
omap_dm_timer_set_load_start(gpt, 1, 0);
+ init_sched_clock(&cd, dmtimer_update_sched_clock, 32, tick_rate);
+
if (clocksource_register_hz(&clocksource_gpt, tick_rate))
printk(err2, clocksource_gpt.name);
}
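
Aside, not part of the patch: the new sched_clock hook feeds a free-running 32-bit timer into the kernel's wrapping clock code, which is why the (u32)~0 mask is passed. The underlying property is that unsigned 32-bit deltas stay correct across a single counter wrap; a tiny standalone sketch:

    #include <stdio.h>
    #include <stdint.h>

    /*
     * Delta between two readings of a free-running 32-bit counter.
     * Unsigned arithmetic keeps this correct even when 'now' has wrapped
     * past zero, as long as less than one full period has elapsed.
     */
    static uint32_t counter_delta(uint32_t prev, uint32_t now)
    {
            return now - prev;
    }

    int main(void)
    {
            /* Reading taken just before and just after the wrap: delta is 32. */
            printf("%u\n", counter_delta(0xfffffff0u, 0x00000010u));
            return 0;
    }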
diff --git a/arch/arm/plat-omap/mailbox.c b/arch/arm/plat-omap/mailbox.c
index 459b319a9fad..49d3208793e5 100644
--- a/arch/arm/plat-omap/mailbox.c
+++ b/arch/arm/plat-omap/mailbox.c
@@ -322,15 +322,18 @@ static void omap_mbox_fini(struct omap_mbox *mbox)
struct omap_mbox *omap_mbox_get(const char *name, struct notifier_block *nb)
{
- struct omap_mbox *mbox;
- int ret;
+ struct omap_mbox *_mbox, *mbox = NULL;
+ int i, ret;
if (!mboxes)
return ERR_PTR(-EINVAL);
- for (mbox = *mboxes; mbox; mbox++)
- if (!strcmp(mbox->name, name))
+ for (i = 0; (_mbox = mboxes[i]); i++) {
+ if (!strcmp(_mbox->name, name)) {
+ mbox = _mbox;
break;
+ }
+ }
if (!mbox)
return ERR_PTR(-ENOENT);
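
Aside, not part of the patch: the omap_mbox_get() fix walks omap2_mboxes[] as what it really is, a NULL-terminated array of pointers, instead of doing pointer arithmetic over the pointed-to structs. A standalone sketch of that lookup pattern with invented names (box, boxes[], box_get):

    #include <stdio.h>
    #include <string.h>

    struct box { const char *name; };

    static struct box dsp = { "dsp" }, iva = { "iva" };

    /* NULL-terminated array of pointers, like omap2_mboxes[]. */
    static struct box *boxes[] = { &dsp, &iva, NULL };

    static struct box *box_get(const char *name)
    {
            struct box *b;
            int i;

            /* Walk the pointer table, not the objects it points at. */
            for (i = 0; (b = boxes[i]); i++)
                    if (!strcmp(b->name, name))
                            return b;
            return NULL;
    }

    int main(void)
    {
            printf("%s\n", box_get("iva") ? "found" : "not found");
            return 0;
    }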
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index aa92684aa674..ef14da1f4ec5 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -88,6 +88,7 @@ extern int acpi_disabled;
extern int acpi_pci_disabled;
extern int acpi_skip_timer_override;
extern int acpi_use_timer_override;
+extern int acpi_fix_pin2_polarity;
extern u8 acpi_sci_flags;
extern int acpi_sci_override_gsi;
diff --git a/arch/x86/include/asm/smpboot_hooks.h b/arch/x86/include/asm/smpboot_hooks.h
index 6c22bf353f26..725b77831993 100644
--- a/arch/x86/include/asm/smpboot_hooks.h
+++ b/arch/x86/include/asm/smpboot_hooks.h
@@ -34,7 +34,7 @@ static inline void smpboot_restore_warm_reset_vector(void)
*/
CMOS_WRITE(0, 0xf);
- *((volatile long *)phys_to_virt(apic->trampoline_phys_low)) = 0;
+ *((volatile u32 *)phys_to_virt(apic->trampoline_phys_low)) = 0;
}
static inline void __init smpboot_setup_io_apic(void)
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index b3a71137983a..3e6e2d68f761 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -72,6 +72,7 @@ u8 acpi_sci_flags __initdata;
int acpi_sci_override_gsi __initdata;
int acpi_skip_timer_override __initdata;
int acpi_use_timer_override __initdata;
+int acpi_fix_pin2_polarity __initdata;
#ifdef CONFIG_X86_LOCAL_APIC
static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
@@ -415,10 +416,15 @@ acpi_parse_int_src_ovr(struct acpi_subtable_header * header,
return 0;
}
- if (acpi_skip_timer_override &&
- intsrc->source_irq == 0 && intsrc->global_irq == 2) {
- printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n");
- return 0;
+ if (intsrc->source_irq == 0 && intsrc->global_irq == 2) {
+ if (acpi_skip_timer_override) {
+ printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n");
+ return 0;
+ }
+ if (acpi_fix_pin2_polarity && (intsrc->inti_flags & ACPI_MADT_POLARITY_MASK)) {
+ intsrc->inti_flags &= ~ACPI_MADT_POLARITY_MASK;
+ printk(PREFIX "BIOS IRQ0 pin2 override: forcing polarity to high active.\n");
+ }
}
mp_override_legacy_irq(intsrc->source_irq,
diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c
index 51ef31a89be9..51d4e1663066 100644
--- a/arch/x86/kernel/apb_timer.c
+++ b/arch/x86/kernel/apb_timer.c
@@ -284,7 +284,7 @@ static int __init apbt_clockevent_register(void)
memcpy(&adev->evt, &apbt_clockevent, sizeof(struct clock_event_device));
if (mrst_timer_options == MRST_TIMER_LAPIC_APBT) {
- apbt_clockevent.rating = APBT_CLOCKEVENT_RATING - 100;
+ adev->evt.rating = APBT_CLOCKEVENT_RATING - 100;
global_clock_event = &adev->evt;
printk(KERN_DEBUG "%s clockevent registered as global\n",
global_clock_event->name);
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index 76b8cd953dee..9efbdcc56425 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -143,15 +143,10 @@ static void __init ati_bugs(int num, int slot, int func)
static u32 __init ati_sbx00_rev(int num, int slot, int func)
{
- u32 old, d;
+ u32 d;
- d = read_pci_config(num, slot, func, 0x70);
- old = d;
- d &= ~(1<<8);
- write_pci_config(num, slot, func, 0x70, d);
d = read_pci_config(num, slot, func, 0x8);
d &= 0xff;
- write_pci_config(num, slot, func, 0x70, old);
return d;
}
@@ -160,13 +155,16 @@ static void __init ati_bugs_contd(int num, int slot, int func)
{
u32 d, rev;
- if (acpi_use_timer_override)
- return;
-
rev = ati_sbx00_rev(num, slot, func);
+ if (rev >= 0x40)
+ acpi_fix_pin2_polarity = 1;
+
if (rev > 0x13)
return;
+ if (acpi_use_timer_override)
+ return;
+
/* check for IRQ0 interrupt swap */
d = read_pci_config(num, slot, func, 0x64);
if (!(d & (1<<14)))
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index fc7aae1e2bc7..715037caeb43 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -285,6 +285,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
DMI_MATCH(DMI_BOARD_NAME, "P4S800"),
},
},
+ { /* Handle problems with rebooting on VersaLogic Menlow boards */
+ .callback = set_bios_reboot,
+ .ident = "VersaLogic Menlow based board",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "VersaLogic Corporation"),
+ DMI_MATCH(DMI_BOARD_NAME, "VersaLogic Menlow board"),
+ },
+ },
{ }
};
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 54ce246a383e..63fec1531e89 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -2777,6 +2777,8 @@ static int dr_interception(struct vcpu_svm *svm)
kvm_register_write(&svm->vcpu, reg, val);
}
+ skip_emulated_instruction(&svm->vcpu);
+
return 1;
}
diff --git a/block/genhd.c b/block/genhd.c
index 6a5b772aa201..cbf1112a885c 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -1355,7 +1355,7 @@ int invalidate_partition(struct gendisk *disk, int partno)
struct block_device *bdev = bdget_disk(disk, partno);
if (bdev) {
fsync_bdev(bdev);
- res = __invalidate_device(bdev);
+ res = __invalidate_device(bdev, true);
bdput(bdev);
}
return res;
diff --git a/block/ioctl.c b/block/ioctl.c
index 9049d460fa89..1124cd297263 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -294,9 +294,11 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
return -EINVAL;
if (get_user(n, (int __user *) arg))
return -EFAULT;
- if (!(mode & FMODE_EXCL) &&
- blkdev_get(bdev, mode | FMODE_EXCL, &bdev) < 0)
- return -EBUSY;
+ if (!(mode & FMODE_EXCL)) {
+ bdgrab(bdev);
+ if (blkdev_get(bdev, mode | FMODE_EXCL, &bdev) < 0)
+ return -EBUSY;
+ }
ret = set_blocksize(bdev, n);
if (!(mode & FMODE_EXCL))
blkdev_put(bdev, mode | FMODE_EXCL);
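
Aside, not part of the patch: the bdgrab() is needed because blkdev_get() consumes a reference on the block device even when it fails, so a caller that still needs the object afterwards must take an extra reference first. A toy refcount sketch of that general pattern; struct obj, get(), put() and claim_exclusive() are invented, not block-layer API.

    #include <stdio.h>

    struct obj { int refs; };

    static void get(struct obj *o) { o->refs++; }
    static void put(struct obj *o) { o->refs--; }

    /* Consumes one reference on 'o' whether it succeeds or fails. */
    static int claim_exclusive(struct obj *o, int ok)
    {
            if (!ok) {
                    put(o);         /* error path still drops the caller's ref */
                    return -1;
            }
            return 0;
    }

    int main(void)
    {
            struct obj o = { .refs = 1 };

            get(&o);                /* like bdgrab(): extra ref before the consuming call */
            if (claim_exclusive(&o, 0) < 0)
                    printf("failed, refs = %d\n", o.refs);  /* still 1, object stays usable */
            return 0;
    }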
diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile
index eec2eadd2431..a1224712fd0c 100644
--- a/drivers/acpi/acpica/Makefile
+++ b/drivers/acpi/acpica/Makefile
@@ -10,7 +10,7 @@ obj-y += acpi.o
acpi-y := dsfield.o dsmthdat.o dsopcode.o dswexec.o dswscope.o \
dsmethod.o dsobject.o dsutils.o dswload.o dswstate.o \
- dsinit.o
+ dsinit.o dsargs.o dscontrol.o dswload2.o
acpi-y += evevent.o evregion.o evsci.o evxfevnt.o \
evmisc.o evrgnini.o evxface.o evxfregn.o \
@@ -45,4 +45,4 @@ acpi-y += tbxface.o tbinstal.o tbutils.o tbfind.o tbfadt.o tbxfroot.o
acpi-y += utalloc.o utdebug.o uteval.o utinit.o utmisc.o utxface.o \
utcopy.o utdelete.o utglobal.o utmath.o utobject.o \
utstate.o utmutex.o utobject.o utresrc.o utlock.o utids.o \
- utosi.o utxferror.o
+ utosi.o utxferror.o utdecode.o
diff --git a/drivers/acpi/acpica/acdispat.h b/drivers/acpi/acpica/acdispat.h
index 666271b65418..2d1b7ffa377a 100644
--- a/drivers/acpi/acpica/acdispat.h
+++ b/drivers/acpi/acpica/acdispat.h
@@ -48,7 +48,7 @@
#define NAMEOF_ARG_NTE "__A0"
/*
- * dsopcode - support for late evaluation
+ * dsargs - execution of dynamic arguments for static objects
*/
acpi_status
acpi_ds_get_buffer_field_arguments(union acpi_operand_object *obj_desc);
@@ -62,6 +62,20 @@ acpi_status acpi_ds_get_buffer_arguments(union acpi_operand_object *obj_desc);
acpi_status acpi_ds_get_package_arguments(union acpi_operand_object *obj_desc);
+/*
+ * dscontrol - support for execution control opcodes
+ */
+acpi_status
+acpi_ds_exec_begin_control_op(struct acpi_walk_state *walk_state,
+ union acpi_parse_object *op);
+
+acpi_status
+acpi_ds_exec_end_control_op(struct acpi_walk_state *walk_state,
+ union acpi_parse_object *op);
+
+/*
+ * dsopcode - support for late operand evaluation
+ */
acpi_status
acpi_ds_eval_buffer_field_operands(struct acpi_walk_state *walk_state,
union acpi_parse_object *op);
@@ -86,17 +100,6 @@ acpi_ds_eval_bank_field_operands(struct acpi_walk_state *walk_state,
acpi_status acpi_ds_initialize_region(acpi_handle obj_handle);
/*
- * dsctrl - Parser/Interpreter interface, control stack routines
- */
-acpi_status
-acpi_ds_exec_begin_control_op(struct acpi_walk_state *walk_state,
- union acpi_parse_object *op);
-
-acpi_status
-acpi_ds_exec_end_control_op(struct acpi_walk_state *walk_state,
- union acpi_parse_object *op);
-
-/*
* dsexec - Parser/Interpreter interface, method execution callbacks
*/
acpi_status
@@ -136,23 +139,26 @@ acpi_ds_init_field_objects(union acpi_parse_object *op,
struct acpi_walk_state *walk_state);
/*
- * dsload - Parser/Interpreter interface, namespace load callbacks
+ * dsload - Parser/Interpreter interface, pass 1 namespace load callbacks
*/
acpi_status
+acpi_ds_init_callbacks(struct acpi_walk_state *walk_state, u32 pass_number);
+
+acpi_status
acpi_ds_load1_begin_op(struct acpi_walk_state *walk_state,
union acpi_parse_object **out_op);
acpi_status acpi_ds_load1_end_op(struct acpi_walk_state *walk_state);
+/*
+ * dsload - Parser/Interpreter interface, pass 2 namespace load callbacks
+ */
acpi_status
acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
union acpi_parse_object **out_op);
acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state);
-acpi_status
-acpi_ds_init_callbacks(struct acpi_walk_state *walk_state, u32 pass_number);
-
/*
* dsmthdat - method data (locals/args)
*/
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index 82a1bd283db8..d69750b83b36 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -273,6 +273,10 @@ ACPI_EXTERN u32 acpi_gbl_owner_id_mask[ACPI_NUM_OWNERID_MASKS];
ACPI_EXTERN u8 acpi_gbl_last_owner_id_index;
ACPI_EXTERN u8 acpi_gbl_next_owner_id_offset;
+/* Initialization sequencing */
+
+ACPI_EXTERN u8 acpi_gbl_reg_methods_executed;
+
/* Misc */
ACPI_EXTERN u32 acpi_gbl_original_mode;
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index edc25867ad9d..c7f743ca395b 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -89,25 +89,6 @@ union acpi_parse_object;
#define ACPI_MAX_MUTEX 7
#define ACPI_NUM_MUTEX ACPI_MAX_MUTEX+1
-#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
-#ifdef DEFINE_ACPI_GLOBALS
-
-/* Debug names for the mutexes above */
-
-static char *acpi_gbl_mutex_names[ACPI_NUM_MUTEX] = {
- "ACPI_MTX_Interpreter",
- "ACPI_MTX_Namespace",
- "ACPI_MTX_Tables",
- "ACPI_MTX_Events",
- "ACPI_MTX_Caches",
- "ACPI_MTX_Memory",
- "ACPI_MTX_CommandComplete",
- "ACPI_MTX_CommandReady"
-};
-
-#endif
-#endif
-
/* Lock structure for reader/writer interfaces */
struct acpi_rw_lock {
diff --git a/drivers/acpi/acpica/dsargs.c b/drivers/acpi/acpica/dsargs.c
new file mode 100644
index 000000000000..8c7b99728aa2
--- /dev/null
+++ b/drivers/acpi/acpica/dsargs.c
@@ -0,0 +1,391 @@
+/******************************************************************************
+ *
+ * Module Name: dsargs - Support for execution of dynamic arguments for static
+ * objects (regions, fields, buffer fields, etc.)
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2011, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acparser.h"
+#include "amlcode.h"
+#include "acdispat.h"
+#include "acnamesp.h"
+
+#define _COMPONENT ACPI_DISPATCHER
+ACPI_MODULE_NAME("dsargs")
+
+/* Local prototypes */
+static acpi_status
+acpi_ds_execute_arguments(struct acpi_namespace_node *node,
+ struct acpi_namespace_node *scope_node,
+ u32 aml_length, u8 *aml_start);
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_execute_arguments
+ *
+ * PARAMETERS: Node - Object NS node
+ * scope_node - Parent NS node
+ * aml_length - Length of executable AML
+ * aml_start - Pointer to the AML
+ *
+ * RETURN: Status.
+ *
+ * DESCRIPTION: Late (deferred) execution of region or field arguments
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ds_execute_arguments(struct acpi_namespace_node *node,
+ struct acpi_namespace_node *scope_node,
+ u32 aml_length, u8 *aml_start)
+{
+ acpi_status status;
+ union acpi_parse_object *op;
+ struct acpi_walk_state *walk_state;
+
+ ACPI_FUNCTION_TRACE(ds_execute_arguments);
+
+ /* Allocate a new parser op to be the root of the parsed tree */
+
+ op = acpi_ps_alloc_op(AML_INT_EVAL_SUBTREE_OP);
+ if (!op) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ /* Save the Node for use in acpi_ps_parse_aml */
+
+ op->common.node = scope_node;
+
+ /* Create and initialize a new parser state */
+
+ walk_state = acpi_ds_create_walk_state(0, NULL, NULL, NULL);
+ if (!walk_state) {
+ status = AE_NO_MEMORY;
+ goto cleanup;
+ }
+
+ status = acpi_ds_init_aml_walk(walk_state, op, NULL, aml_start,
+ aml_length, NULL, ACPI_IMODE_LOAD_PASS1);
+ if (ACPI_FAILURE(status)) {
+ acpi_ds_delete_walk_state(walk_state);
+ goto cleanup;
+ }
+
+ /* Mark this parse as a deferred opcode */
+
+ walk_state->parse_flags = ACPI_PARSE_DEFERRED_OP;
+ walk_state->deferred_node = node;
+
+ /* Pass1: Parse the entire declaration */
+
+ status = acpi_ps_parse_aml(walk_state);
+ if (ACPI_FAILURE(status)) {
+ goto cleanup;
+ }
+
+ /* Get and init the Op created above */
+
+ op->common.node = node;
+ acpi_ps_delete_parse_tree(op);
+
+ /* Evaluate the deferred arguments */
+
+ op = acpi_ps_alloc_op(AML_INT_EVAL_SUBTREE_OP);
+ if (!op) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ op->common.node = scope_node;
+
+ /* Create and initialize a new parser state */
+
+ walk_state = acpi_ds_create_walk_state(0, NULL, NULL, NULL);
+ if (!walk_state) {
+ status = AE_NO_MEMORY;
+ goto cleanup;
+ }
+
+ /* Execute the opcode and arguments */
+
+ status = acpi_ds_init_aml_walk(walk_state, op, NULL, aml_start,
+ aml_length, NULL, ACPI_IMODE_EXECUTE);
+ if (ACPI_FAILURE(status)) {
+ acpi_ds_delete_walk_state(walk_state);
+ goto cleanup;
+ }
+
+ /* Mark this execution as a deferred opcode */
+
+ walk_state->deferred_node = node;
+ status = acpi_ps_parse_aml(walk_state);
+
+ cleanup:
+ acpi_ps_delete_parse_tree(op);
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_get_buffer_field_arguments
+ *
+ * PARAMETERS: obj_desc - A valid buffer_field object
+ *
+ * RETURN: Status.
+ *
+ * DESCRIPTION: Get buffer_field Buffer and Index. This implements the late
+ * evaluation of these field attributes.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_get_buffer_field_arguments(union acpi_operand_object *obj_desc)
+{
+ union acpi_operand_object *extra_desc;
+ struct acpi_namespace_node *node;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE_PTR(ds_get_buffer_field_arguments, obj_desc);
+
+ if (obj_desc->common.flags & AOPOBJ_DATA_VALID) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /* Get the AML pointer (method object) and buffer_field node */
+
+ extra_desc = acpi_ns_get_secondary_object(obj_desc);
+ node = obj_desc->buffer_field.node;
+
+ ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname(ACPI_TYPE_BUFFER_FIELD,
+ node, NULL));
+
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "[%4.4s] BufferField Arg Init\n",
+ acpi_ut_get_node_name(node)));
+
+ /* Execute the AML code for the term_arg arguments */
+
+ status = acpi_ds_execute_arguments(node, node->parent,
+ extra_desc->extra.aml_length,
+ extra_desc->extra.aml_start);
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_get_bank_field_arguments
+ *
+ * PARAMETERS: obj_desc - A valid bank_field object
+ *
+ * RETURN: Status.
+ *
+ * DESCRIPTION: Get bank_field bank_value. This implements the late
+ * evaluation of these field attributes.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_get_bank_field_arguments(union acpi_operand_object *obj_desc)
+{
+ union acpi_operand_object *extra_desc;
+ struct acpi_namespace_node *node;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE_PTR(ds_get_bank_field_arguments, obj_desc);
+
+ if (obj_desc->common.flags & AOPOBJ_DATA_VALID) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /* Get the AML pointer (method object) and bank_field node */
+
+ extra_desc = acpi_ns_get_secondary_object(obj_desc);
+ node = obj_desc->bank_field.node;
+
+ ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname
+ (ACPI_TYPE_LOCAL_BANK_FIELD, node, NULL));
+
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "[%4.4s] BankField Arg Init\n",
+ acpi_ut_get_node_name(node)));
+
+ /* Execute the AML code for the term_arg arguments */
+
+ status = acpi_ds_execute_arguments(node, node->parent,
+ extra_desc->extra.aml_length,
+ extra_desc->extra.aml_start);
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_get_buffer_arguments
+ *
+ * PARAMETERS: obj_desc - A valid Buffer object
+ *
+ * RETURN: Status.
+ *
+ * DESCRIPTION: Get Buffer length and initializer byte list. This implements
+ * the late evaluation of these attributes.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ds_get_buffer_arguments(union acpi_operand_object *obj_desc)
+{
+ struct acpi_namespace_node *node;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE_PTR(ds_get_buffer_arguments, obj_desc);
+
+ if (obj_desc->common.flags & AOPOBJ_DATA_VALID) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /* Get the Buffer node */
+
+ node = obj_desc->buffer.node;
+ if (!node) {
+ ACPI_ERROR((AE_INFO,
+ "No pointer back to namespace node in buffer object %p",
+ obj_desc));
+ return_ACPI_STATUS(AE_AML_INTERNAL);
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Buffer Arg Init\n"));
+
+ /* Execute the AML code for the term_arg arguments */
+
+ status = acpi_ds_execute_arguments(node, node,
+ obj_desc->buffer.aml_length,
+ obj_desc->buffer.aml_start);
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_get_package_arguments
+ *
+ * PARAMETERS: obj_desc - A valid Package object
+ *
+ * RETURN: Status.
+ *
+ * DESCRIPTION: Get Package length and initializer byte list. This implements
+ * the late evaluation of these attributes.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ds_get_package_arguments(union acpi_operand_object *obj_desc)
+{
+ struct acpi_namespace_node *node;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE_PTR(ds_get_package_arguments, obj_desc);
+
+ if (obj_desc->common.flags & AOPOBJ_DATA_VALID) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /* Get the Package node */
+
+ node = obj_desc->package.node;
+ if (!node) {
+ ACPI_ERROR((AE_INFO,
+ "No pointer back to namespace node in package %p",
+ obj_desc));
+ return_ACPI_STATUS(AE_AML_INTERNAL);
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Package Arg Init\n"));
+
+ /* Execute the AML code for the term_arg arguments */
+
+ status = acpi_ds_execute_arguments(node, node,
+ obj_desc->package.aml_length,
+ obj_desc->package.aml_start);
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_get_region_arguments
+ *
+ * PARAMETERS: obj_desc - A valid region object
+ *
+ * RETURN: Status.
+ *
+ * DESCRIPTION: Get region address and length. This implements the late
+ * evaluation of these region attributes.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ds_get_region_arguments(union acpi_operand_object *obj_desc)
+{
+ struct acpi_namespace_node *node;
+ acpi_status status;
+ union acpi_operand_object *extra_desc;
+
+ ACPI_FUNCTION_TRACE_PTR(ds_get_region_arguments, obj_desc);
+
+ if (obj_desc->region.flags & AOPOBJ_DATA_VALID) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ extra_desc = acpi_ns_get_secondary_object(obj_desc);
+ if (!extra_desc) {
+ return_ACPI_STATUS(AE_NOT_EXIST);
+ }
+
+ /* Get the Region node */
+
+ node = obj_desc->region.node;
+
+ ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname
+ (ACPI_TYPE_REGION, node, NULL));
+
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "[%4.4s] OpRegion Arg Init at AML %p\n",
+ acpi_ut_get_node_name(node),
+ extra_desc->extra.aml_start));
+
+ /* Execute the argument AML */
+
+ status = acpi_ds_execute_arguments(node, node->parent,
+ extra_desc->extra.aml_length,
+ extra_desc->extra.aml_start);
+ return_ACPI_STATUS(status);
+}
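
Aside, not part of the patch: dsargs.c carries ACPICA's "late evaluation" scheme, where an object's AML arguments are only executed the first time the object is used and the AOPOBJ_DATA_VALID flag prevents re-evaluation. A stripped-down sketch of that flag-guarded lazy-initialization shape, with invented names:

    #include <stdio.h>

    #define DATA_VALID 0x01

    struct region {
            unsigned flags;
            unsigned long address;   /* filled in lazily */
    };

    /* Stand-in for running the deferred AML that computes the operands. */
    static void evaluate_args(struct region *r)
    {
            r->address = 0xfed40000;
            printf("arguments evaluated\n");
    }

    static void region_get_args(struct region *r)
    {
            if (r->flags & DATA_VALID)      /* already evaluated, nothing to do */
                    return;
            evaluate_args(r);
            r->flags |= DATA_VALID;
    }

    int main(void)
    {
            struct region r = { 0 };

            region_get_args(&r);    /* evaluates once */
            region_get_args(&r);    /* no-op */
            return 0;
    }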
diff --git a/drivers/acpi/acpica/dscontrol.c b/drivers/acpi/acpica/dscontrol.c
new file mode 100644
index 000000000000..26c49fff58da
--- /dev/null
+++ b/drivers/acpi/acpica/dscontrol.c
@@ -0,0 +1,410 @@
+/******************************************************************************
+ *
+ * Module Name: dscontrol - Support for execution control opcodes -
+ * if/else/while/return
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2011, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "amlcode.h"
+#include "acdispat.h"
+#include "acinterp.h"
+
+#define _COMPONENT ACPI_DISPATCHER
+ACPI_MODULE_NAME("dscontrol")
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_exec_begin_control_op
+ *
+ * PARAMETERS: walk_list - The list that owns the walk stack
+ * Op - The control Op
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Handles all control ops encountered during control method
+ * execution.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_ds_exec_begin_control_op(struct acpi_walk_state *walk_state,
+ union acpi_parse_object *op)
+{
+ acpi_status status = AE_OK;
+ union acpi_generic_state *control_state;
+
+ ACPI_FUNCTION_NAME(ds_exec_begin_control_op);
+
+ ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Op=%p Opcode=%2.2X State=%p\n",
+ op, op->common.aml_opcode, walk_state));
+
+ switch (op->common.aml_opcode) {
+ case AML_WHILE_OP:
+
+ /*
+ * If this is an additional iteration of a while loop, continue.
+ * There is no need to allocate a new control state.
+ */
+ if (walk_state->control_state) {
+ if (walk_state->control_state->control.
+ aml_predicate_start ==
+ (walk_state->parser_state.aml - 1)) {
+
+ /* Reset the state to start-of-loop */
+
+ walk_state->control_state->common.state =
+ ACPI_CONTROL_CONDITIONAL_EXECUTING;
+ break;
+ }
+ }
+
+ /*lint -fallthrough */
+
+ case AML_IF_OP:
+
+ /*
+ * IF/WHILE: Create a new control state to manage these
+ * constructs. We need to manage these as a stack, in order
+ * to handle nesting.
+ */
+ control_state = acpi_ut_create_control_state();
+ if (!control_state) {
+ status = AE_NO_MEMORY;
+ break;
+ }
+ /*
+ * Save a pointer to the predicate for multiple executions
+ * of a loop
+ */
+ control_state->control.aml_predicate_start =
+ walk_state->parser_state.aml - 1;
+ control_state->control.package_end =
+ walk_state->parser_state.pkg_end;
+ control_state->control.opcode = op->common.aml_opcode;
+
+ /* Push the control state on this walk's control stack */
+
+ acpi_ut_push_generic_state(&walk_state->control_state,
+ control_state);
+ break;
+
+ case AML_ELSE_OP:
+
+ /* Predicate is in the state object */
+ /* If predicate is true, the IF was executed, ignore ELSE part */
+
+ if (walk_state->last_predicate) {
+ status = AE_CTRL_TRUE;
+ }
+
+ break;
+
+ case AML_RETURN_OP:
+
+ break;
+
+ default:
+ break;
+ }
+
+ return (status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_exec_end_control_op
+ *
+ * PARAMETERS: walk_list - The list that owns the walk stack
+ * Op - The control Op
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Handles all control ops encountered during control method
+ * execution.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_exec_end_control_op(struct acpi_walk_state * walk_state,
+ union acpi_parse_object * op)
+{
+ acpi_status status = AE_OK;
+ union acpi_generic_state *control_state;
+
+ ACPI_FUNCTION_NAME(ds_exec_end_control_op);
+
+ switch (op->common.aml_opcode) {
+ case AML_IF_OP:
+
+ ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "[IF_OP] Op=%p\n", op));
+
+ /*
+ * Save the result of the predicate in case there is an
+ * ELSE to come
+ */
+ walk_state->last_predicate =
+ (u8)walk_state->control_state->common.value;
+
+ /*
+ * Pop the control state that was created at the start
+ * of the IF and free it
+ */
+ control_state =
+ acpi_ut_pop_generic_state(&walk_state->control_state);
+ acpi_ut_delete_generic_state(control_state);
+ break;
+
+ case AML_ELSE_OP:
+
+ break;
+
+ case AML_WHILE_OP:
+
+ ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "[WHILE_OP] Op=%p\n", op));
+
+ control_state = walk_state->control_state;
+ if (control_state->common.value) {
+
+ /* Predicate was true, the body of the loop was just executed */
+
+ /*
+ * This loop counter mechanism allows the interpreter to escape
+ * possibly infinite loops. This can occur in poorly written AML
+ * when the hardware does not respond within a while loop and the
+ * loop does not implement a timeout.
+ */
+ control_state->control.loop_count++;
+ if (control_state->control.loop_count >
+ ACPI_MAX_LOOP_ITERATIONS) {
+ status = AE_AML_INFINITE_LOOP;
+ break;
+ }
+
+ /*
+ * Go back and evaluate the predicate and maybe execute the loop
+ * another time
+ */
+ status = AE_CTRL_PENDING;
+ walk_state->aml_last_while =
+ control_state->control.aml_predicate_start;
+ break;
+ }
+
+ /* Predicate was false, terminate this while loop */
+
+ ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+ "[WHILE_OP] termination! Op=%p\n", op));
+
+ /* Pop this control state and free it */
+
+ control_state =
+ acpi_ut_pop_generic_state(&walk_state->control_state);
+ acpi_ut_delete_generic_state(control_state);
+ break;
+
+ case AML_RETURN_OP:
+
+ ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+ "[RETURN_OP] Op=%p Arg=%p\n", op,
+ op->common.value.arg));
+
+ /*
+ * One optional operand -- the return value
+ * It can be either an immediate operand or a result that
+ * has been bubbled up the tree
+ */
+ if (op->common.value.arg) {
+
+ /* Since we have a real Return(), delete any implicit return */
+
+ acpi_ds_clear_implicit_return(walk_state);
+
+ /* Return statement has an immediate operand */
+
+ status =
+ acpi_ds_create_operands(walk_state,
+ op->common.value.arg);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ /*
+ * If value being returned is a Reference (such as
+ * an arg or local), resolve it now because it may
+ * cease to exist at the end of the method.
+ */
+ status =
+ acpi_ex_resolve_to_value(&walk_state->operands[0],
+ walk_state);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ /*
+ * Get the return value and save as the last result
+ * value. This is the only place where walk_state->return_desc
+ * is set to anything other than zero!
+ */
+ walk_state->return_desc = walk_state->operands[0];
+ } else if (walk_state->result_count) {
+
+ /* Since we have a real Return(), delete any implicit return */
+
+ acpi_ds_clear_implicit_return(walk_state);
+
+ /*
+ * The return value has come from a previous calculation.
+ *
+ * If value being returned is a Reference (such as
+ * an arg or local), resolve it now because it may
+ * cease to exist at the end of the method.
+ *
+ * Allow references created by the Index operator to return
+ * unchanged.
+ */
+ if ((ACPI_GET_DESCRIPTOR_TYPE
+ (walk_state->results->results.obj_desc[0]) ==
+ ACPI_DESC_TYPE_OPERAND)
+ && ((walk_state->results->results.obj_desc[0])->
+ common.type == ACPI_TYPE_LOCAL_REFERENCE)
+ && ((walk_state->results->results.obj_desc[0])->
+ reference.class != ACPI_REFCLASS_INDEX)) {
+ status =
+ acpi_ex_resolve_to_value(&walk_state->
+ results->results.
+ obj_desc[0],
+ walk_state);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+ }
+
+ walk_state->return_desc =
+ walk_state->results->results.obj_desc[0];
+ } else {
+ /* No return operand */
+
+ if (walk_state->num_operands) {
+ acpi_ut_remove_reference(walk_state->
+ operands[0]);
+ }
+
+ walk_state->operands[0] = NULL;
+ walk_state->num_operands = 0;
+ walk_state->return_desc = NULL;
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+ "Completed RETURN_OP State=%p, RetVal=%p\n",
+ walk_state, walk_state->return_desc));
+
+ /* End the control method execution right now */
+
+ status = AE_CTRL_TERMINATE;
+ break;
+
+ case AML_NOOP_OP:
+
+ /* Just do nothing! */
+ break;
+
+ case AML_BREAK_POINT_OP:
+
+ /*
+ * Set the single-step flag. This will cause the debugger (if present)
+ * to break to the console within the AML debugger at the start of the
+ * next AML instruction.
+ */
+ ACPI_DEBUGGER_EXEC(acpi_gbl_cm_single_step = TRUE);
+ ACPI_DEBUGGER_EXEC(acpi_os_printf
+ ("**break** Executed AML BreakPoint opcode\n"));
+
+ /* Call to the OSL in case OS wants a piece of the action */
+
+ status = acpi_os_signal(ACPI_SIGNAL_BREAKPOINT,
+ "Executed AML Breakpoint opcode");
+ break;
+
+ case AML_BREAK_OP:
+ case AML_CONTINUE_OP: /* ACPI 2.0 */
+
+ /* Pop and delete control states until we find a while */
+
+ while (walk_state->control_state &&
+ (walk_state->control_state->control.opcode !=
+ AML_WHILE_OP)) {
+ control_state =
+ acpi_ut_pop_generic_state(&walk_state->
+ control_state);
+ acpi_ut_delete_generic_state(control_state);
+ }
+
+ /* No while found? */
+
+ if (!walk_state->control_state) {
+ return (AE_AML_NO_WHILE);
+ }
+
+ /* Was: walk_state->aml_last_while = walk_state->control_state->Control.aml_predicate_start; */
+
+ walk_state->aml_last_while =
+ walk_state->control_state->control.package_end;
+
+ /* Return status depending on opcode */
+
+ if (op->common.aml_opcode == AML_BREAK_OP) {
+ status = AE_CTRL_BREAK;
+ } else {
+ status = AE_CTRL_CONTINUE;
+ }
+ break;
+
+ default:
+
+ ACPI_ERROR((AE_INFO, "Unknown control opcode=0x%X Op=%p",
+ op->common.aml_opcode, op));
+
+ status = AE_AML_BAD_OPCODE;
+ break;
+ }
+
+ return (status);
+}
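
Aside, not part of the patch: the AML_WHILE_OP handler above bounds loop execution with a per-loop iteration counter so that badly written AML cannot spin forever. A minimal sketch of that guard in isolation; the limit constant below is illustrative, not the ACPICA value.

    #include <stdio.h>

    #define MAX_LOOP_ITERATIONS 0xffff   /* illustrative cap */

    static int run_while(int (*predicate)(void))
    {
            unsigned loop_count = 0;

            while (predicate()) {
                    if (++loop_count > MAX_LOOP_ITERATIONS)
                            return -1;   /* analogous to AE_AML_INFINITE_LOOP */
                    /* ... execute the loop body ... */
            }
            return 0;
    }

    static int always_true(void) { return 1; }

    int main(void)
    {
            if (run_while(always_true) < 0)
                    printf("loop terminated: iteration limit hit\n");
            return 0;
    }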
diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
index bbecf293aeeb..c627a288e027 100644
--- a/drivers/acpi/acpica/dsopcode.c
+++ b/drivers/acpi/acpica/dsopcode.c
@@ -1,7 +1,6 @@
/******************************************************************************
*
- * Module Name: dsopcode - Dispatcher Op Region support and handling of
- * "control" opcodes
+ * Module Name: dsopcode - Dispatcher support for regions and fields
*
*****************************************************************************/
@@ -57,11 +56,6 @@ ACPI_MODULE_NAME("dsopcode")
/* Local prototypes */
static acpi_status
-acpi_ds_execute_arguments(struct acpi_namespace_node *node,
- struct acpi_namespace_node *scope_node,
- u32 aml_length, u8 * aml_start);
-
-static acpi_status
acpi_ds_init_buffer_field(u16 aml_opcode,
union acpi_operand_object *obj_desc,
union acpi_operand_object *buffer_desc,
@@ -71,361 +65,6 @@ acpi_ds_init_buffer_field(u16 aml_opcode,
/*******************************************************************************
*
- * FUNCTION: acpi_ds_execute_arguments
- *
- * PARAMETERS: Node - Object NS node
- * scope_node - Parent NS node
- * aml_length - Length of executable AML
- * aml_start - Pointer to the AML
- *
- * RETURN: Status.
- *
- * DESCRIPTION: Late (deferred) execution of region or field arguments
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ds_execute_arguments(struct acpi_namespace_node *node,
- struct acpi_namespace_node *scope_node,
- u32 aml_length, u8 * aml_start)
-{
- acpi_status status;
- union acpi_parse_object *op;
- struct acpi_walk_state *walk_state;
-
- ACPI_FUNCTION_TRACE(ds_execute_arguments);
-
- /*
- * Allocate a new parser op to be the root of the parsed tree
- */
- op = acpi_ps_alloc_op(AML_INT_EVAL_SUBTREE_OP);
- if (!op) {
- return_ACPI_STATUS(AE_NO_MEMORY);
- }
-
- /* Save the Node for use in acpi_ps_parse_aml */
-
- op->common.node = scope_node;
-
- /* Create and initialize a new parser state */
-
- walk_state = acpi_ds_create_walk_state(0, NULL, NULL, NULL);
- if (!walk_state) {
- status = AE_NO_MEMORY;
- goto cleanup;
- }
-
- status = acpi_ds_init_aml_walk(walk_state, op, NULL, aml_start,
- aml_length, NULL, ACPI_IMODE_LOAD_PASS1);
- if (ACPI_FAILURE(status)) {
- acpi_ds_delete_walk_state(walk_state);
- goto cleanup;
- }
-
- /* Mark this parse as a deferred opcode */
-
- walk_state->parse_flags = ACPI_PARSE_DEFERRED_OP;
- walk_state->deferred_node = node;
-
- /* Pass1: Parse the entire declaration */
-
- status = acpi_ps_parse_aml(walk_state);
- if (ACPI_FAILURE(status)) {
- goto cleanup;
- }
-
- /* Get and init the Op created above */
-
- op->common.node = node;
- acpi_ps_delete_parse_tree(op);
-
- /* Evaluate the deferred arguments */
-
- op = acpi_ps_alloc_op(AML_INT_EVAL_SUBTREE_OP);
- if (!op) {
- return_ACPI_STATUS(AE_NO_MEMORY);
- }
-
- op->common.node = scope_node;
-
- /* Create and initialize a new parser state */
-
- walk_state = acpi_ds_create_walk_state(0, NULL, NULL, NULL);
- if (!walk_state) {
- status = AE_NO_MEMORY;
- goto cleanup;
- }
-
- /* Execute the opcode and arguments */
-
- status = acpi_ds_init_aml_walk(walk_state, op, NULL, aml_start,
- aml_length, NULL, ACPI_IMODE_EXECUTE);
- if (ACPI_FAILURE(status)) {
- acpi_ds_delete_walk_state(walk_state);
- goto cleanup;
- }
-
- /* Mark this execution as a deferred opcode */
-
- walk_state->deferred_node = node;
- status = acpi_ps_parse_aml(walk_state);
-
- cleanup:
- acpi_ps_delete_parse_tree(op);
- return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ds_get_buffer_field_arguments
- *
- * PARAMETERS: obj_desc - A valid buffer_field object
- *
- * RETURN: Status.
- *
- * DESCRIPTION: Get buffer_field Buffer and Index. This implements the late
- * evaluation of these field attributes.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ds_get_buffer_field_arguments(union acpi_operand_object *obj_desc)
-{
- union acpi_operand_object *extra_desc;
- struct acpi_namespace_node *node;
- acpi_status status;
-
- ACPI_FUNCTION_TRACE_PTR(ds_get_buffer_field_arguments, obj_desc);
-
- if (obj_desc->common.flags & AOPOBJ_DATA_VALID) {
- return_ACPI_STATUS(AE_OK);
- }
-
- /* Get the AML pointer (method object) and buffer_field node */
-
- extra_desc = acpi_ns_get_secondary_object(obj_desc);
- node = obj_desc->buffer_field.node;
-
- ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname
- (ACPI_TYPE_BUFFER_FIELD, node, NULL));
- ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "[%4.4s] BufferField Arg Init\n",
- acpi_ut_get_node_name(node)));
-
- /* Execute the AML code for the term_arg arguments */
-
- status = acpi_ds_execute_arguments(node, node->parent,
- extra_desc->extra.aml_length,
- extra_desc->extra.aml_start);
- return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ds_get_bank_field_arguments
- *
- * PARAMETERS: obj_desc - A valid bank_field object
- *
- * RETURN: Status.
- *
- * DESCRIPTION: Get bank_field bank_value. This implements the late
- * evaluation of these field attributes.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ds_get_bank_field_arguments(union acpi_operand_object *obj_desc)
-{
- union acpi_operand_object *extra_desc;
- struct acpi_namespace_node *node;
- acpi_status status;
-
- ACPI_FUNCTION_TRACE_PTR(ds_get_bank_field_arguments, obj_desc);
-
- if (obj_desc->common.flags & AOPOBJ_DATA_VALID) {
- return_ACPI_STATUS(AE_OK);
- }
-
- /* Get the AML pointer (method object) and bank_field node */
-
- extra_desc = acpi_ns_get_secondary_object(obj_desc);
- node = obj_desc->bank_field.node;
-
- ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname
- (ACPI_TYPE_LOCAL_BANK_FIELD, node, NULL));
- ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "[%4.4s] BankField Arg Init\n",
- acpi_ut_get_node_name(node)));
-
- /* Execute the AML code for the term_arg arguments */
-
- status = acpi_ds_execute_arguments(node, node->parent,
- extra_desc->extra.aml_length,
- extra_desc->extra.aml_start);
- return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ds_get_buffer_arguments
- *
- * PARAMETERS: obj_desc - A valid Buffer object
- *
- * RETURN: Status.
- *
- * DESCRIPTION: Get Buffer length and initializer byte list. This implements
- * the late evaluation of these attributes.
- *
- ******************************************************************************/
-
-acpi_status acpi_ds_get_buffer_arguments(union acpi_operand_object *obj_desc)
-{
- struct acpi_namespace_node *node;
- acpi_status status;
-
- ACPI_FUNCTION_TRACE_PTR(ds_get_buffer_arguments, obj_desc);
-
- if (obj_desc->common.flags & AOPOBJ_DATA_VALID) {
- return_ACPI_STATUS(AE_OK);
- }
-
- /* Get the Buffer node */
-
- node = obj_desc->buffer.node;
- if (!node) {
- ACPI_ERROR((AE_INFO,
- "No pointer back to namespace node in buffer object %p",
- obj_desc));
- return_ACPI_STATUS(AE_AML_INTERNAL);
- }
-
- ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Buffer Arg Init\n"));
-
- /* Execute the AML code for the term_arg arguments */
-
- status = acpi_ds_execute_arguments(node, node,
- obj_desc->buffer.aml_length,
- obj_desc->buffer.aml_start);
- return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ds_get_package_arguments
- *
- * PARAMETERS: obj_desc - A valid Package object
- *
- * RETURN: Status.
- *
- * DESCRIPTION: Get Package length and initializer byte list. This implements
- * the late evaluation of these attributes.
- *
- ******************************************************************************/
-
-acpi_status acpi_ds_get_package_arguments(union acpi_operand_object *obj_desc)
-{
- struct acpi_namespace_node *node;
- acpi_status status;
-
- ACPI_FUNCTION_TRACE_PTR(ds_get_package_arguments, obj_desc);
-
- if (obj_desc->common.flags & AOPOBJ_DATA_VALID) {
- return_ACPI_STATUS(AE_OK);
- }
-
- /* Get the Package node */
-
- node = obj_desc->package.node;
- if (!node) {
- ACPI_ERROR((AE_INFO,
- "No pointer back to namespace node in package %p",
- obj_desc));
- return_ACPI_STATUS(AE_AML_INTERNAL);
- }
-
- ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Package Arg Init\n"));
-
- /* Execute the AML code for the term_arg arguments */
-
- status = acpi_ds_execute_arguments(node, node,
- obj_desc->package.aml_length,
- obj_desc->package.aml_start);
- return_ACPI_STATUS(status);
-}
-
-/*****************************************************************************
- *
- * FUNCTION: acpi_ds_get_region_arguments
- *
- * PARAMETERS: obj_desc - A valid region object
- *
- * RETURN: Status.
- *
- * DESCRIPTION: Get region address and length. This implements the late
- * evaluation of these region attributes.
- *
- ****************************************************************************/
-
-acpi_status acpi_ds_get_region_arguments(union acpi_operand_object *obj_desc)
-{
- struct acpi_namespace_node *node;
- acpi_status status;
- union acpi_operand_object *extra_desc;
-
- ACPI_FUNCTION_TRACE_PTR(ds_get_region_arguments, obj_desc);
-
- if (obj_desc->region.flags & AOPOBJ_DATA_VALID) {
- return_ACPI_STATUS(AE_OK);
- }
-
- extra_desc = acpi_ns_get_secondary_object(obj_desc);
- if (!extra_desc) {
- return_ACPI_STATUS(AE_NOT_EXIST);
- }
-
- /* Get the Region node */
-
- node = obj_desc->region.node;
-
- ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname
- (ACPI_TYPE_REGION, node, NULL));
-
- ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "[%4.4s] OpRegion Arg Init at AML %p\n",
- acpi_ut_get_node_name(node),
- extra_desc->extra.aml_start));
-
- /* Execute the argument AML */
-
- status = acpi_ds_execute_arguments(node, node->parent,
- extra_desc->extra.aml_length,
- extra_desc->extra.aml_start);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
-
- /* Validate the region address/length via the host OS */
-
- status = acpi_os_validate_address(obj_desc->region.space_id,
- obj_desc->region.address,
- (acpi_size) obj_desc->region.length,
- acpi_ut_get_node_name(node));
-
- if (ACPI_FAILURE(status)) {
- /*
- * Invalid address/length. We will emit an error message and mark
- * the region as invalid, so that it will cause an additional error if
- * it is ever used. Then return AE_OK.
- */
- ACPI_EXCEPTION((AE_INFO, status,
- "During address validation of OpRegion [%4.4s]",
- node->name.ascii));
- obj_desc->common.flags |= AOPOBJ_INVALID;
- status = AE_OK;
- }
-
- return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
* FUNCTION: acpi_ds_initialize_region
*
* PARAMETERS: obj_handle - Region namespace node
@@ -826,8 +465,9 @@ acpi_ds_eval_region_operands(struct acpi_walk_state *walk_state,
*
* RETURN: Status
*
- * DESCRIPTION: Get region address and length
- * Called from acpi_ds_exec_end_op during data_table_region parse tree walk
+ * DESCRIPTION: Get region address and length.
+ * Called from acpi_ds_exec_end_op during data_table_region parse
+ * tree walk.
*
******************************************************************************/
@@ -1114,360 +754,3 @@ acpi_ds_eval_bank_field_operands(struct acpi_walk_state *walk_state,
acpi_ut_remove_reference(operand_desc);
return_ACPI_STATUS(status);
}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ds_exec_begin_control_op
- *
- * PARAMETERS: walk_list - The list that owns the walk stack
- * Op - The control Op
- *
- * RETURN: Status
- *
- * DESCRIPTION: Handles all control ops encountered during control method
- * execution.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ds_exec_begin_control_op(struct acpi_walk_state *walk_state,
- union acpi_parse_object *op)
-{
- acpi_status status = AE_OK;
- union acpi_generic_state *control_state;
-
- ACPI_FUNCTION_NAME(ds_exec_begin_control_op);
-
- ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Op=%p Opcode=%2.2X State=%p\n", op,
- op->common.aml_opcode, walk_state));
-
- switch (op->common.aml_opcode) {
- case AML_WHILE_OP:
-
- /*
- * If this is an additional iteration of a while loop, continue.
- * There is no need to allocate a new control state.
- */
- if (walk_state->control_state) {
- if (walk_state->control_state->control.aml_predicate_start
- == (walk_state->parser_state.aml - 1)) {
-
- /* Reset the state to start-of-loop */
-
- walk_state->control_state->common.state =
- ACPI_CONTROL_CONDITIONAL_EXECUTING;
- break;
- }
- }
-
- /*lint -fallthrough */
-
- case AML_IF_OP:
-
- /*
- * IF/WHILE: Create a new control state to manage these
- * constructs. We need to manage these as a stack, in order
- * to handle nesting.
- */
- control_state = acpi_ut_create_control_state();
- if (!control_state) {
- status = AE_NO_MEMORY;
- break;
- }
- /*
- * Save a pointer to the predicate for multiple executions
- * of a loop
- */
- control_state->control.aml_predicate_start =
- walk_state->parser_state.aml - 1;
- control_state->control.package_end =
- walk_state->parser_state.pkg_end;
- control_state->control.opcode = op->common.aml_opcode;
-
- /* Push the control state on this walk's control stack */
-
- acpi_ut_push_generic_state(&walk_state->control_state,
- control_state);
- break;
-
- case AML_ELSE_OP:
-
- /* Predicate is in the state object */
- /* If predicate is true, the IF was executed, ignore ELSE part */
-
- if (walk_state->last_predicate) {
- status = AE_CTRL_TRUE;
- }
-
- break;
-
- case AML_RETURN_OP:
-
- break;
-
- default:
- break;
- }
-
- return (status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ds_exec_end_control_op
- *
- * PARAMETERS: walk_list - The list that owns the walk stack
- * Op - The control Op
- *
- * RETURN: Status
- *
- * DESCRIPTION: Handles all control ops encountered during control method
- * execution.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ds_exec_end_control_op(struct acpi_walk_state * walk_state,
- union acpi_parse_object * op)
-{
- acpi_status status = AE_OK;
- union acpi_generic_state *control_state;
-
- ACPI_FUNCTION_NAME(ds_exec_end_control_op);
-
- switch (op->common.aml_opcode) {
- case AML_IF_OP:
-
- ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "[IF_OP] Op=%p\n", op));
-
- /*
- * Save the result of the predicate in case there is an
- * ELSE to come
- */
- walk_state->last_predicate =
- (u8) walk_state->control_state->common.value;
-
- /*
- * Pop the control state that was created at the start
- * of the IF and free it
- */
- control_state =
- acpi_ut_pop_generic_state(&walk_state->control_state);
- acpi_ut_delete_generic_state(control_state);
- break;
-
- case AML_ELSE_OP:
-
- break;
-
- case AML_WHILE_OP:
-
- ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "[WHILE_OP] Op=%p\n", op));
-
- control_state = walk_state->control_state;
- if (control_state->common.value) {
-
- /* Predicate was true, the body of the loop was just executed */
-
- /*
- * This loop counter mechanism allows the interpreter to escape
- * possibly infinite loops. This can occur in poorly written AML
- * when the hardware does not respond within a while loop and the
- * loop does not implement a timeout.
- */
- control_state->control.loop_count++;
- if (control_state->control.loop_count >
- ACPI_MAX_LOOP_ITERATIONS) {
- status = AE_AML_INFINITE_LOOP;
- break;
- }
-
- /*
- * Go back and evaluate the predicate and maybe execute the loop
- * another time
- */
- status = AE_CTRL_PENDING;
- walk_state->aml_last_while =
- control_state->control.aml_predicate_start;
- break;
- }
-
- /* Predicate was false, terminate this while loop */
-
- ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
- "[WHILE_OP] termination! Op=%p\n", op));
-
- /* Pop this control state and free it */
-
- control_state =
- acpi_ut_pop_generic_state(&walk_state->control_state);
- acpi_ut_delete_generic_state(control_state);
- break;
-
- case AML_RETURN_OP:
-
- ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
- "[RETURN_OP] Op=%p Arg=%p\n", op,
- op->common.value.arg));
-
- /*
- * One optional operand -- the return value
- * It can be either an immediate operand or a result that
- * has been bubbled up the tree
- */
- if (op->common.value.arg) {
-
- /* Since we have a real Return(), delete any implicit return */
-
- acpi_ds_clear_implicit_return(walk_state);
-
- /* Return statement has an immediate operand */
-
- status =
- acpi_ds_create_operands(walk_state,
- op->common.value.arg);
- if (ACPI_FAILURE(status)) {
- return (status);
- }
-
- /*
- * If value being returned is a Reference (such as
- * an arg or local), resolve it now because it may
- * cease to exist at the end of the method.
- */
- status =
- acpi_ex_resolve_to_value(&walk_state->operands[0],
- walk_state);
- if (ACPI_FAILURE(status)) {
- return (status);
- }
-
- /*
- * Get the return value and save as the last result
- * value. This is the only place where walk_state->return_desc
- * is set to anything other than zero!
- */
- walk_state->return_desc = walk_state->operands[0];
- } else if (walk_state->result_count) {
-
- /* Since we have a real Return(), delete any implicit return */
-
- acpi_ds_clear_implicit_return(walk_state);
-
- /*
- * The return value has come from a previous calculation.
- *
- * If value being returned is a Reference (such as
- * an arg or local), resolve it now because it may
- * cease to exist at the end of the method.
- *
- * Allow references created by the Index operator to return unchanged.
- */
- if ((ACPI_GET_DESCRIPTOR_TYPE
- (walk_state->results->results.obj_desc[0]) ==
- ACPI_DESC_TYPE_OPERAND)
- && ((walk_state->results->results.obj_desc[0])->
- common.type == ACPI_TYPE_LOCAL_REFERENCE)
- && ((walk_state->results->results.obj_desc[0])->
- reference.class != ACPI_REFCLASS_INDEX)) {
- status =
- acpi_ex_resolve_to_value(&walk_state->
- results->results.
- obj_desc[0],
- walk_state);
- if (ACPI_FAILURE(status)) {
- return (status);
- }
- }
-
- walk_state->return_desc =
- walk_state->results->results.obj_desc[0];
- } else {
- /* No return operand */
-
- if (walk_state->num_operands) {
- acpi_ut_remove_reference(walk_state->
- operands[0]);
- }
-
- walk_state->operands[0] = NULL;
- walk_state->num_operands = 0;
- walk_state->return_desc = NULL;
- }
-
- ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
- "Completed RETURN_OP State=%p, RetVal=%p\n",
- walk_state, walk_state->return_desc));
-
- /* End the control method execution right now */
-
- status = AE_CTRL_TERMINATE;
- break;
-
- case AML_NOOP_OP:
-
- /* Just do nothing! */
- break;
-
- case AML_BREAK_POINT_OP:
-
- /*
- * Set the single-step flag. This will cause the debugger (if present)
- * to break to the console within the AML debugger at the start of the
- * next AML instruction.
- */
- ACPI_DEBUGGER_EXEC(acpi_gbl_cm_single_step = TRUE);
- ACPI_DEBUGGER_EXEC(acpi_os_printf
- ("**break** Executed AML BreakPoint opcode\n"));
-
- /* Call to the OSL in case OS wants a piece of the action */
-
- status = acpi_os_signal(ACPI_SIGNAL_BREAKPOINT,
- "Executed AML Breakpoint opcode");
- break;
-
- case AML_BREAK_OP:
- case AML_CONTINUE_OP: /* ACPI 2.0 */
-
- /* Pop and delete control states until we find a while */
-
- while (walk_state->control_state &&
- (walk_state->control_state->control.opcode !=
- AML_WHILE_OP)) {
- control_state =
- acpi_ut_pop_generic_state(&walk_state->
- control_state);
- acpi_ut_delete_generic_state(control_state);
- }
-
- /* No while found? */
-
- if (!walk_state->control_state) {
- return (AE_AML_NO_WHILE);
- }
-
- /* Was: walk_state->aml_last_while = walk_state->control_state->Control.aml_predicate_start; */
-
- walk_state->aml_last_while =
- walk_state->control_state->control.package_end;
-
- /* Return status depending on opcode */
-
- if (op->common.aml_opcode == AML_BREAK_OP) {
- status = AE_CTRL_BREAK;
- } else {
- status = AE_CTRL_CONTINUE;
- }
- break;
-
- default:
-
- ACPI_ERROR((AE_INFO, "Unknown control opcode=0x%X Op=%p",
- op->common.aml_opcode, op));
-
- status = AE_AML_BAD_OPCODE;
- break;
- }
-
- return (status);
-}
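
A note on the WHILE handling above: it escapes runaway loops by counting
iterations and aborting with AE_AML_INFINITE_LOOP once a fixed ceiling is
crossed. Below is a minimal sketch of the same guard in plain C; it is not
ACPICA code, and every name in it (MAX_LOOP_ITERATIONS, run_bounded_while,
the predicate/body callbacks) is hypothetical.

#define MAX_LOOP_ITERATIONS     0xFFFF  /* hypothetical ceiling */

enum loop_status { LOOP_DONE, LOOP_RUNAWAY };

static enum loop_status run_bounded_while(int (*predicate)(void *ctx),
                                          void (*body)(void *ctx), void *ctx)
{
        unsigned int loop_count = 0;

        while (predicate(ctx)) {
                body(ctx);

                /* Escape hatch for AML-style loops that never terminate */

                if (++loop_count > MAX_LOOP_ITERATIONS) {
                        return (LOOP_RUNAWAY); /* maps to AE_AML_INFINITE_LOOP */
                }
        }

        return (LOOP_DONE);
}
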
diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c
index 52566ff5e903..23a3b1ab20c1 100644
--- a/drivers/acpi/acpica/dswload.c
+++ b/drivers/acpi/acpica/dswload.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Module Name: dswload - Dispatcher namespace load callbacks
+ * Module Name: dswload - Dispatcher first pass namespace load callbacks
*
*****************************************************************************/
@@ -48,7 +48,6 @@
#include "acdispat.h"
#include "acinterp.h"
#include "acnamesp.h"
-#include "acevents.h"
#ifdef ACPI_ASL_COMPILER
#include <acpi/acdisasm.h>
@@ -537,670 +536,3 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state *walk_state)
return_ACPI_STATUS(status);
}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ds_load2_begin_op
- *
- * PARAMETERS: walk_state - Current state of the parse tree walk
- * out_op - Where to return op if a new one is created
- *
- * RETURN: Status
- *
- * DESCRIPTION: Descending callback used during the loading of ACPI tables.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
- union acpi_parse_object **out_op)
-{
- union acpi_parse_object *op;
- struct acpi_namespace_node *node;
- acpi_status status;
- acpi_object_type object_type;
- char *buffer_ptr;
- u32 flags;
-
- ACPI_FUNCTION_TRACE(ds_load2_begin_op);
-
- op = walk_state->op;
- ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Op=%p State=%p\n", op,
- walk_state));
-
- if (op) {
- if ((walk_state->control_state) &&
- (walk_state->control_state->common.state ==
- ACPI_CONTROL_CONDITIONAL_EXECUTING)) {
-
- /* We are executing a while loop outside of a method */
-
- status = acpi_ds_exec_begin_op(walk_state, out_op);
- return_ACPI_STATUS(status);
- }
-
- /* We only care about Namespace opcodes here */
-
- if ((!(walk_state->op_info->flags & AML_NSOPCODE) &&
- (walk_state->opcode != AML_INT_NAMEPATH_OP)) ||
- (!(walk_state->op_info->flags & AML_NAMED))) {
- return_ACPI_STATUS(AE_OK);
- }
-
- /* Get the name we are going to enter or lookup in the namespace */
-
- if (walk_state->opcode == AML_INT_NAMEPATH_OP) {
-
- /* For Namepath op, get the path string */
-
- buffer_ptr = op->common.value.string;
- if (!buffer_ptr) {
-
- /* No name, just exit */
-
- return_ACPI_STATUS(AE_OK);
- }
- } else {
- /* Get name from the op */
-
- buffer_ptr = ACPI_CAST_PTR(char, &op->named.name);
- }
- } else {
- /* Get the namestring from the raw AML */
-
- buffer_ptr =
- acpi_ps_get_next_namestring(&walk_state->parser_state);
- }
-
- /* Map the opcode into an internal object type */
-
- object_type = walk_state->op_info->object_type;
-
- ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
- "State=%p Op=%p Type=%X\n", walk_state, op,
- object_type));
-
- switch (walk_state->opcode) {
- case AML_FIELD_OP:
- case AML_BANK_FIELD_OP:
- case AML_INDEX_FIELD_OP:
-
- node = NULL;
- status = AE_OK;
- break;
-
- case AML_INT_NAMEPATH_OP:
- /*
- * The name_path is an object reference to an existing object.
- * Don't enter the name into the namespace, but look it up
- * for use later.
- */
- status =
- acpi_ns_lookup(walk_state->scope_info, buffer_ptr,
- object_type, ACPI_IMODE_EXECUTE,
- ACPI_NS_SEARCH_PARENT, walk_state, &(node));
- break;
-
- case AML_SCOPE_OP:
-
- /* Special case for Scope(\) -> refers to the Root node */
-
- if (op && (op->named.node == acpi_gbl_root_node)) {
- node = op->named.node;
-
- status =
- acpi_ds_scope_stack_push(node, object_type,
- walk_state);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
- } else {
- /*
- * The Path is an object reference to an existing object.
- * Don't enter the name into the namespace, but look it up
- * for use later.
- */
- status =
- acpi_ns_lookup(walk_state->scope_info, buffer_ptr,
- object_type, ACPI_IMODE_EXECUTE,
- ACPI_NS_SEARCH_PARENT, walk_state,
- &(node));
- if (ACPI_FAILURE(status)) {
-#ifdef ACPI_ASL_COMPILER
- if (status == AE_NOT_FOUND) {
- status = AE_OK;
- } else {
- ACPI_ERROR_NAMESPACE(buffer_ptr,
- status);
- }
-#else
- ACPI_ERROR_NAMESPACE(buffer_ptr, status);
-#endif
- return_ACPI_STATUS(status);
- }
- }
-
- /*
- * We must check to make sure that the target is
- * one of the opcodes that actually opens a scope
- */
- switch (node->type) {
- case ACPI_TYPE_ANY:
- case ACPI_TYPE_LOCAL_SCOPE: /* Scope */
- case ACPI_TYPE_DEVICE:
- case ACPI_TYPE_POWER:
- case ACPI_TYPE_PROCESSOR:
- case ACPI_TYPE_THERMAL:
-
- /* These are acceptable types */
- break;
-
- case ACPI_TYPE_INTEGER:
- case ACPI_TYPE_STRING:
- case ACPI_TYPE_BUFFER:
-
- /*
- * These types we will allow, but we will change the type.
- * This enables some existing code of the form:
- *
- * Name (DEB, 0)
- * Scope (DEB) { ... }
- */
- ACPI_WARNING((AE_INFO,
- "Type override - [%4.4s] had invalid type (%s) "
- "for Scope operator, changed to type ANY\n",
- acpi_ut_get_node_name(node),
- acpi_ut_get_type_name(node->type)));
-
- node->type = ACPI_TYPE_ANY;
- walk_state->scope_info->common.value = ACPI_TYPE_ANY;
- break;
-
- default:
-
- /* All other types are an error */
-
- ACPI_ERROR((AE_INFO,
- "Invalid type (%s) for target of "
- "Scope operator [%4.4s] (Cannot override)",
- acpi_ut_get_type_name(node->type),
- acpi_ut_get_node_name(node)));
-
- return (AE_AML_OPERAND_TYPE);
- }
- break;
-
- default:
-
- /* All other opcodes */
-
- if (op && op->common.node) {
-
- /* This op/node was previously entered into the namespace */
-
- node = op->common.node;
-
- if (acpi_ns_opens_scope(object_type)) {
- status =
- acpi_ds_scope_stack_push(node, object_type,
- walk_state);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
- }
-
- return_ACPI_STATUS(AE_OK);
- }
-
- /*
- * Enter the named type into the internal namespace. We enter the name
- * as we go downward in the parse tree. Any necessary subobjects that
- * involve arguments to the opcode must be created as we go back up the
- * parse tree later.
- *
- * Note: Name may already exist if we are executing a deferred opcode.
- */
- if (walk_state->deferred_node) {
-
- /* This name is already in the namespace, get the node */
-
- node = walk_state->deferred_node;
- status = AE_OK;
- break;
- }
-
- flags = ACPI_NS_NO_UPSEARCH;
- if (walk_state->pass_number == ACPI_IMODE_EXECUTE) {
-
- /* Execution mode, node cannot already exist, node is temporary */
-
- flags |= ACPI_NS_ERROR_IF_FOUND;
-
- if (!
- (walk_state->
- parse_flags & ACPI_PARSE_MODULE_LEVEL)) {
- flags |= ACPI_NS_TEMPORARY;
- }
- }
-
- /* Add new entry or lookup existing entry */
-
- status =
- acpi_ns_lookup(walk_state->scope_info, buffer_ptr,
- object_type, ACPI_IMODE_LOAD_PASS2, flags,
- walk_state, &node);
-
- if (ACPI_SUCCESS(status) && (flags & ACPI_NS_TEMPORARY)) {
- ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
- "***New Node [%4.4s] %p is temporary\n",
- acpi_ut_get_node_name(node), node));
- }
- break;
- }
-
- if (ACPI_FAILURE(status)) {
- ACPI_ERROR_NAMESPACE(buffer_ptr, status);
- return_ACPI_STATUS(status);
- }
-
- if (!op) {
-
- /* Create a new op */
-
- op = acpi_ps_alloc_op(walk_state->opcode);
- if (!op) {
- return_ACPI_STATUS(AE_NO_MEMORY);
- }
-
- /* Initialize the new op */
-
- if (node) {
- op->named.name = node->name.integer;
- }
- *out_op = op;
- }
-
- /*
- * Put the Node in the "op" object that the parser uses, so we
- * can get it again quickly when this scope is closed
- */
- op->common.node = node;
- return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ds_load2_end_op
- *
- * PARAMETERS: walk_state - Current state of the parse tree walk
- *
- * RETURN: Status
- *
- * DESCRIPTION: Ascending callback used during the loading of the namespace,
- * both control methods and everything else.
- *
- ******************************************************************************/
-
-acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
-{
- union acpi_parse_object *op;
- acpi_status status = AE_OK;
- acpi_object_type object_type;
- struct acpi_namespace_node *node;
- union acpi_parse_object *arg;
- struct acpi_namespace_node *new_node;
-#ifndef ACPI_NO_METHOD_EXECUTION
- u32 i;
- u8 region_space;
-#endif
-
- ACPI_FUNCTION_TRACE(ds_load2_end_op);
-
- op = walk_state->op;
- ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Opcode [%s] Op %p State %p\n",
- walk_state->op_info->name, op, walk_state));
-
- /* Check if opcode had an associated namespace object */
-
- if (!(walk_state->op_info->flags & AML_NSOBJECT)) {
- return_ACPI_STATUS(AE_OK);
- }
-
- if (op->common.aml_opcode == AML_SCOPE_OP) {
- ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
- "Ending scope Op=%p State=%p\n", op,
- walk_state));
- }
-
- object_type = walk_state->op_info->object_type;
-
- /*
- * Get the Node/name from the earlier lookup
- * (It was saved in the *op structure)
- */
- node = op->common.node;
-
- /*
- * Put the Node on the object stack (Contains the ACPI Name of
- * this object)
- */
- walk_state->operands[0] = (void *)node;
- walk_state->num_operands = 1;
-
- /* Pop the scope stack */
-
- if (acpi_ns_opens_scope(object_type) &&
- (op->common.aml_opcode != AML_INT_METHODCALL_OP)) {
- ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
- "(%s) Popping scope for Op %p\n",
- acpi_ut_get_type_name(object_type), op));
-
- status = acpi_ds_scope_stack_pop(walk_state);
- if (ACPI_FAILURE(status)) {
- goto cleanup;
- }
- }
-
- /*
- * Named operations are as follows:
- *
- * AML_ALIAS
- * AML_BANKFIELD
- * AML_CREATEBITFIELD
- * AML_CREATEBYTEFIELD
- * AML_CREATEDWORDFIELD
- * AML_CREATEFIELD
- * AML_CREATEQWORDFIELD
- * AML_CREATEWORDFIELD
- * AML_DATA_REGION
- * AML_DEVICE
- * AML_EVENT
- * AML_FIELD
- * AML_INDEXFIELD
- * AML_METHOD
- * AML_METHODCALL
- * AML_MUTEX
- * AML_NAME
- * AML_NAMEDFIELD
- * AML_OPREGION
- * AML_POWERRES
- * AML_PROCESSOR
- * AML_SCOPE
- * AML_THERMALZONE
- */
-
- ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
- "Create-Load [%s] State=%p Op=%p NamedObj=%p\n",
- acpi_ps_get_opcode_name(op->common.aml_opcode),
- walk_state, op, node));
-
- /* Decode the opcode */
-
- arg = op->common.value.arg;
-
- switch (walk_state->op_info->type) {
-#ifndef ACPI_NO_METHOD_EXECUTION
-
- case AML_TYPE_CREATE_FIELD:
- /*
- * Create the field object, but the field buffer and index must
- * be evaluated later during the execution phase
- */
- status = acpi_ds_create_buffer_field(op, walk_state);
- break;
-
- case AML_TYPE_NAMED_FIELD:
- /*
- * If we are executing a method, initialize the field
- */
- if (walk_state->method_node) {
- status = acpi_ds_init_field_objects(op, walk_state);
- }
-
- switch (op->common.aml_opcode) {
- case AML_INDEX_FIELD_OP:
-
- status =
- acpi_ds_create_index_field(op,
- (acpi_handle) arg->
- common.node, walk_state);
- break;
-
- case AML_BANK_FIELD_OP:
-
- status =
- acpi_ds_create_bank_field(op, arg->common.node,
- walk_state);
- break;
-
- case AML_FIELD_OP:
-
- status =
- acpi_ds_create_field(op, arg->common.node,
- walk_state);
- break;
-
- default:
- /* All NAMED_FIELD opcodes must be handled above */
- break;
- }
- break;
-
- case AML_TYPE_NAMED_SIMPLE:
-
- status = acpi_ds_create_operands(walk_state, arg);
- if (ACPI_FAILURE(status)) {
- goto cleanup;
- }
-
- switch (op->common.aml_opcode) {
- case AML_PROCESSOR_OP:
-
- status = acpi_ex_create_processor(walk_state);
- break;
-
- case AML_POWER_RES_OP:
-
- status = acpi_ex_create_power_resource(walk_state);
- break;
-
- case AML_MUTEX_OP:
-
- status = acpi_ex_create_mutex(walk_state);
- break;
-
- case AML_EVENT_OP:
-
- status = acpi_ex_create_event(walk_state);
- break;
-
- case AML_ALIAS_OP:
-
- status = acpi_ex_create_alias(walk_state);
- break;
-
- default:
- /* Unknown opcode */
-
- status = AE_OK;
- goto cleanup;
- }
-
- /* Delete operands */
-
- for (i = 1; i < walk_state->num_operands; i++) {
- acpi_ut_remove_reference(walk_state->operands[i]);
- walk_state->operands[i] = NULL;
- }
-
- break;
-#endif /* ACPI_NO_METHOD_EXECUTION */
-
- case AML_TYPE_NAMED_COMPLEX:
-
- switch (op->common.aml_opcode) {
-#ifndef ACPI_NO_METHOD_EXECUTION
- case AML_REGION_OP:
- case AML_DATA_REGION_OP:
-
- if (op->common.aml_opcode == AML_REGION_OP) {
- region_space = (acpi_adr_space_type)
- ((op->common.value.arg)->common.value.
- integer);
- } else {
- region_space = REGION_DATA_TABLE;
- }
-
- /*
- * The op_region is not fully parsed at this time. The only valid
- * argument is the space_id. (We must save the address of the
- * AML of the address and length operands)
- *
- * If we have a valid region, initialize it. The namespace is
- * unlocked at this point.
- *
- * Need to unlock interpreter if it is locked (if we are running
- * a control method), in order to allow _REG methods to be run
- * during acpi_ev_initialize_region.
- */
- if (walk_state->method_node) {
- /*
- * Executing a method: initialize the region and unlock
- * the interpreter
- */
- status =
- acpi_ex_create_region(op->named.data,
- op->named.length,
- region_space,
- walk_state);
- if (ACPI_FAILURE(status)) {
- return (status);
- }
-
- acpi_ex_exit_interpreter();
- }
-
- status =
- acpi_ev_initialize_region
- (acpi_ns_get_attached_object(node), FALSE);
- if (walk_state->method_node) {
- acpi_ex_enter_interpreter();
- }
-
- if (ACPI_FAILURE(status)) {
- /*
- * If AE_NOT_EXIST is returned, it is not fatal
- * because many regions get created before a handler
- * is installed for said region.
- */
- if (AE_NOT_EXIST == status) {
- status = AE_OK;
- }
- }
- break;
-
- case AML_NAME_OP:
-
- status = acpi_ds_create_node(walk_state, node, op);
- break;
-
- case AML_METHOD_OP:
- /*
- * method_op pkg_length name_string method_flags term_list
- *
- * Note: We must create the method node/object pair as soon as we
- * see the method declaration. This allows later pass1 parsing
- * of invocations of the method (need to know the number of
- * arguments.)
- */
- ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
- "LOADING-Method: State=%p Op=%p NamedObj=%p\n",
- walk_state, op, op->named.node));
-
- if (!acpi_ns_get_attached_object(op->named.node)) {
- walk_state->operands[0] =
- ACPI_CAST_PTR(void, op->named.node);
- walk_state->num_operands = 1;
-
- status =
- acpi_ds_create_operands(walk_state,
- op->common.value.
- arg);
- if (ACPI_SUCCESS(status)) {
- status =
- acpi_ex_create_method(op->named.
- data,
- op->named.
- length,
- walk_state);
- }
- walk_state->operands[0] = NULL;
- walk_state->num_operands = 0;
-
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
- }
- break;
-
-#endif /* ACPI_NO_METHOD_EXECUTION */
-
- default:
- /* All NAMED_COMPLEX opcodes must be handled above */
- break;
- }
- break;
-
- case AML_CLASS_INTERNAL:
-
- /* case AML_INT_NAMEPATH_OP: */
- break;
-
- case AML_CLASS_METHOD_CALL:
-
- ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
- "RESOLVING-MethodCall: State=%p Op=%p NamedObj=%p\n",
- walk_state, op, node));
-
- /*
- * Lookup the method name and save the Node
- */
- status =
- acpi_ns_lookup(walk_state->scope_info,
- arg->common.value.string, ACPI_TYPE_ANY,
- ACPI_IMODE_LOAD_PASS2,
- ACPI_NS_SEARCH_PARENT |
- ACPI_NS_DONT_OPEN_SCOPE, walk_state,
- &(new_node));
- if (ACPI_SUCCESS(status)) {
- /*
- * Make sure that what we found is indeed a method
- * We didn't search for a method on purpose, to see if the name
- * would resolve
- */
- if (new_node->type != ACPI_TYPE_METHOD) {
- status = AE_AML_OPERAND_TYPE;
- }
-
- /* We could put the returned object (Node) on the object stack for
- * later, but for now, we will put it in the "op" object that the
- * parser uses, so we can get it again at the end of this scope
- */
- op->common.node = new_node;
- } else {
- ACPI_ERROR_NAMESPACE(arg->common.value.string, status);
- }
- break;
-
- default:
- break;
- }
-
- cleanup:
-
- /* Remove the Node pushed at the very beginning */
-
- walk_state->operands[0] = NULL;
- walk_state->num_operands = 0;
- return_ACPI_STATUS(status);
-}
diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c
new file mode 100644
index 000000000000..4be4e921dfe1
--- /dev/null
+++ b/drivers/acpi/acpica/dswload2.c
@@ -0,0 +1,720 @@
+/******************************************************************************
+ *
+ * Module Name: dswload2 - Dispatcher second pass namespace load callbacks
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2011, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acparser.h"
+#include "amlcode.h"
+#include "acdispat.h"
+#include "acinterp.h"
+#include "acnamesp.h"
+#include "acevents.h"
+
+#define _COMPONENT ACPI_DISPATCHER
+ACPI_MODULE_NAME("dswload2")
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_load2_begin_op
+ *
+ * PARAMETERS: walk_state - Current state of the parse tree walk
+ * out_op - Where to return op if a new one is created
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Descending callback used during the loading of ACPI tables.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
+ union acpi_parse_object **out_op)
+{
+ union acpi_parse_object *op;
+ struct acpi_namespace_node *node;
+ acpi_status status;
+ acpi_object_type object_type;
+ char *buffer_ptr;
+ u32 flags;
+
+ ACPI_FUNCTION_TRACE(ds_load2_begin_op);
+
+ op = walk_state->op;
+ ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Op=%p State=%p\n", op,
+ walk_state));
+
+ if (op) {
+ if ((walk_state->control_state) &&
+ (walk_state->control_state->common.state ==
+ ACPI_CONTROL_CONDITIONAL_EXECUTING)) {
+
+ /* We are executing a while loop outside of a method */
+
+ status = acpi_ds_exec_begin_op(walk_state, out_op);
+ return_ACPI_STATUS(status);
+ }
+
+ /* We only care about Namespace opcodes here */
+
+ if ((!(walk_state->op_info->flags & AML_NSOPCODE) &&
+ (walk_state->opcode != AML_INT_NAMEPATH_OP)) ||
+ (!(walk_state->op_info->flags & AML_NAMED))) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /* Get the name we are going to enter or lookup in the namespace */
+
+ if (walk_state->opcode == AML_INT_NAMEPATH_OP) {
+
+ /* For Namepath op, get the path string */
+
+ buffer_ptr = op->common.value.string;
+ if (!buffer_ptr) {
+
+ /* No name, just exit */
+
+ return_ACPI_STATUS(AE_OK);
+ }
+ } else {
+ /* Get name from the op */
+
+ buffer_ptr = ACPI_CAST_PTR(char, &op->named.name);
+ }
+ } else {
+ /* Get the namestring from the raw AML */
+
+ buffer_ptr =
+ acpi_ps_get_next_namestring(&walk_state->parser_state);
+ }
+
+ /* Map the opcode into an internal object type */
+
+ object_type = walk_state->op_info->object_type;
+
+ ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+ "State=%p Op=%p Type=%X\n", walk_state, op,
+ object_type));
+
+ switch (walk_state->opcode) {
+ case AML_FIELD_OP:
+ case AML_BANK_FIELD_OP:
+ case AML_INDEX_FIELD_OP:
+
+ node = NULL;
+ status = AE_OK;
+ break;
+
+ case AML_INT_NAMEPATH_OP:
+ /*
+ * The name_path is an object reference to an existing object.
+ * Don't enter the name into the namespace, but look it up
+ * for use later.
+ */
+ status =
+ acpi_ns_lookup(walk_state->scope_info, buffer_ptr,
+ object_type, ACPI_IMODE_EXECUTE,
+ ACPI_NS_SEARCH_PARENT, walk_state, &(node));
+ break;
+
+ case AML_SCOPE_OP:
+
+ /* Special case for Scope(\) -> refers to the Root node */
+
+ if (op && (op->named.node == acpi_gbl_root_node)) {
+ node = op->named.node;
+
+ status =
+ acpi_ds_scope_stack_push(node, object_type,
+ walk_state);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+ } else {
+ /*
+ * The Path is an object reference to an existing object.
+ * Don't enter the name into the namespace, but look it up
+ * for use later.
+ */
+ status =
+ acpi_ns_lookup(walk_state->scope_info, buffer_ptr,
+ object_type, ACPI_IMODE_EXECUTE,
+ ACPI_NS_SEARCH_PARENT, walk_state,
+ &(node));
+ if (ACPI_FAILURE(status)) {
+#ifdef ACPI_ASL_COMPILER
+ if (status == AE_NOT_FOUND) {
+ status = AE_OK;
+ } else {
+ ACPI_ERROR_NAMESPACE(buffer_ptr,
+ status);
+ }
+#else
+ ACPI_ERROR_NAMESPACE(buffer_ptr, status);
+#endif
+ return_ACPI_STATUS(status);
+ }
+ }
+
+ /*
+ * We must check to make sure that the target is
+ * one of the opcodes that actually opens a scope
+ */
+ switch (node->type) {
+ case ACPI_TYPE_ANY:
+ case ACPI_TYPE_LOCAL_SCOPE: /* Scope */
+ case ACPI_TYPE_DEVICE:
+ case ACPI_TYPE_POWER:
+ case ACPI_TYPE_PROCESSOR:
+ case ACPI_TYPE_THERMAL:
+
+ /* These are acceptable types */
+ break;
+
+ case ACPI_TYPE_INTEGER:
+ case ACPI_TYPE_STRING:
+ case ACPI_TYPE_BUFFER:
+
+ /*
+ * These types we will allow, but we will change the type.
+ * This enables some existing code of the form:
+ *
+ * Name (DEB, 0)
+ * Scope (DEB) { ... }
+ */
+ ACPI_WARNING((AE_INFO,
+ "Type override - [%4.4s] had invalid type (%s) "
+ "for Scope operator, changed to type ANY\n",
+ acpi_ut_get_node_name(node),
+ acpi_ut_get_type_name(node->type)));
+
+ node->type = ACPI_TYPE_ANY;
+ walk_state->scope_info->common.value = ACPI_TYPE_ANY;
+ break;
+
+ default:
+
+ /* All other types are an error */
+
+ ACPI_ERROR((AE_INFO,
+ "Invalid type (%s) for target of "
+ "Scope operator [%4.4s] (Cannot override)",
+ acpi_ut_get_type_name(node->type),
+ acpi_ut_get_node_name(node)));
+
+ return (AE_AML_OPERAND_TYPE);
+ }
+ break;
+
+ default:
+
+ /* All other opcodes */
+
+ if (op && op->common.node) {
+
+ /* This op/node was previously entered into the namespace */
+
+ node = op->common.node;
+
+ if (acpi_ns_opens_scope(object_type)) {
+ status =
+ acpi_ds_scope_stack_push(node, object_type,
+ walk_state);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+ }
+
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /*
+ * Enter the named type into the internal namespace. We enter the name
+ * as we go downward in the parse tree. Any necessary subobjects that
+ * involve arguments to the opcode must be created as we go back up the
+ * parse tree later.
+ *
+ * Note: Name may already exist if we are executing a deferred opcode.
+ */
+ if (walk_state->deferred_node) {
+
+ /* This name is already in the namespace, get the node */
+
+ node = walk_state->deferred_node;
+ status = AE_OK;
+ break;
+ }
+
+ flags = ACPI_NS_NO_UPSEARCH;
+ if (walk_state->pass_number == ACPI_IMODE_EXECUTE) {
+
+ /* Execution mode, node cannot already exist, node is temporary */
+
+ flags |= ACPI_NS_ERROR_IF_FOUND;
+
+ if (!
+ (walk_state->
+ parse_flags & ACPI_PARSE_MODULE_LEVEL)) {
+ flags |= ACPI_NS_TEMPORARY;
+ }
+ }
+
+ /* Add new entry or lookup existing entry */
+
+ status =
+ acpi_ns_lookup(walk_state->scope_info, buffer_ptr,
+ object_type, ACPI_IMODE_LOAD_PASS2, flags,
+ walk_state, &node);
+
+ if (ACPI_SUCCESS(status) && (flags & ACPI_NS_TEMPORARY)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+ "***New Node [%4.4s] %p is temporary\n",
+ acpi_ut_get_node_name(node), node));
+ }
+ break;
+ }
+
+ if (ACPI_FAILURE(status)) {
+ ACPI_ERROR_NAMESPACE(buffer_ptr, status);
+ return_ACPI_STATUS(status);
+ }
+
+ if (!op) {
+
+ /* Create a new op */
+
+ op = acpi_ps_alloc_op(walk_state->opcode);
+ if (!op) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ /* Initialize the new op */
+
+ if (node) {
+ op->named.name = node->name.integer;
+ }
+ *out_op = op;
+ }
+
+ /*
+ * Put the Node in the "op" object that the parser uses, so we
+ * can get it again quickly when this scope is closed
+ */
+ op->common.node = node;
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_load2_end_op
+ *
+ * PARAMETERS: walk_state - Current state of the parse tree walk
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Ascending callback used during the loading of the namespace,
+ * both control methods and everything else.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
+{
+ union acpi_parse_object *op;
+ acpi_status status = AE_OK;
+ acpi_object_type object_type;
+ struct acpi_namespace_node *node;
+ union acpi_parse_object *arg;
+ struct acpi_namespace_node *new_node;
+#ifndef ACPI_NO_METHOD_EXECUTION
+ u32 i;
+ u8 region_space;
+#endif
+
+ ACPI_FUNCTION_TRACE(ds_load2_end_op);
+
+ op = walk_state->op;
+ ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Opcode [%s] Op %p State %p\n",
+ walk_state->op_info->name, op, walk_state));
+
+ /* Check if opcode had an associated namespace object */
+
+ if (!(walk_state->op_info->flags & AML_NSOBJECT)) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ if (op->common.aml_opcode == AML_SCOPE_OP) {
+ ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+ "Ending scope Op=%p State=%p\n", op,
+ walk_state));
+ }
+
+ object_type = walk_state->op_info->object_type;
+
+ /*
+ * Get the Node/name from the earlier lookup
+ * (It was saved in the *op structure)
+ */
+ node = op->common.node;
+
+ /*
+ * Put the Node on the object stack (Contains the ACPI Name of
+ * this object)
+ */
+ walk_state->operands[0] = (void *)node;
+ walk_state->num_operands = 1;
+
+ /* Pop the scope stack */
+
+ if (acpi_ns_opens_scope(object_type) &&
+ (op->common.aml_opcode != AML_INT_METHODCALL_OP)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+ "(%s) Popping scope for Op %p\n",
+ acpi_ut_get_type_name(object_type), op));
+
+ status = acpi_ds_scope_stack_pop(walk_state);
+ if (ACPI_FAILURE(status)) {
+ goto cleanup;
+ }
+ }
+
+ /*
+ * Named operations are as follows:
+ *
+ * AML_ALIAS
+ * AML_BANKFIELD
+ * AML_CREATEBITFIELD
+ * AML_CREATEBYTEFIELD
+ * AML_CREATEDWORDFIELD
+ * AML_CREATEFIELD
+ * AML_CREATEQWORDFIELD
+ * AML_CREATEWORDFIELD
+ * AML_DATA_REGION
+ * AML_DEVICE
+ * AML_EVENT
+ * AML_FIELD
+ * AML_INDEXFIELD
+ * AML_METHOD
+ * AML_METHODCALL
+ * AML_MUTEX
+ * AML_NAME
+ * AML_NAMEDFIELD
+ * AML_OPREGION
+ * AML_POWERRES
+ * AML_PROCESSOR
+ * AML_SCOPE
+ * AML_THERMALZONE
+ */
+
+ ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+ "Create-Load [%s] State=%p Op=%p NamedObj=%p\n",
+ acpi_ps_get_opcode_name(op->common.aml_opcode),
+ walk_state, op, node));
+
+ /* Decode the opcode */
+
+ arg = op->common.value.arg;
+
+ switch (walk_state->op_info->type) {
+#ifndef ACPI_NO_METHOD_EXECUTION
+
+ case AML_TYPE_CREATE_FIELD:
+ /*
+ * Create the field object, but the field buffer and index must
+ * be evaluated later during the execution phase
+ */
+ status = acpi_ds_create_buffer_field(op, walk_state);
+ break;
+
+ case AML_TYPE_NAMED_FIELD:
+ /*
+ * If we are executing a method, initialize the field
+ */
+ if (walk_state->method_node) {
+ status = acpi_ds_init_field_objects(op, walk_state);
+ }
+
+ switch (op->common.aml_opcode) {
+ case AML_INDEX_FIELD_OP:
+
+ status =
+ acpi_ds_create_index_field(op,
+ (acpi_handle) arg->
+ common.node, walk_state);
+ break;
+
+ case AML_BANK_FIELD_OP:
+
+ status =
+ acpi_ds_create_bank_field(op, arg->common.node,
+ walk_state);
+ break;
+
+ case AML_FIELD_OP:
+
+ status =
+ acpi_ds_create_field(op, arg->common.node,
+ walk_state);
+ break;
+
+ default:
+ /* All NAMED_FIELD opcodes must be handled above */
+ break;
+ }
+ break;
+
+ case AML_TYPE_NAMED_SIMPLE:
+
+ status = acpi_ds_create_operands(walk_state, arg);
+ if (ACPI_FAILURE(status)) {
+ goto cleanup;
+ }
+
+ switch (op->common.aml_opcode) {
+ case AML_PROCESSOR_OP:
+
+ status = acpi_ex_create_processor(walk_state);
+ break;
+
+ case AML_POWER_RES_OP:
+
+ status = acpi_ex_create_power_resource(walk_state);
+ break;
+
+ case AML_MUTEX_OP:
+
+ status = acpi_ex_create_mutex(walk_state);
+ break;
+
+ case AML_EVENT_OP:
+
+ status = acpi_ex_create_event(walk_state);
+ break;
+
+ case AML_ALIAS_OP:
+
+ status = acpi_ex_create_alias(walk_state);
+ break;
+
+ default:
+ /* Unknown opcode */
+
+ status = AE_OK;
+ goto cleanup;
+ }
+
+ /* Delete operands */
+
+ for (i = 1; i < walk_state->num_operands; i++) {
+ acpi_ut_remove_reference(walk_state->operands[i]);
+ walk_state->operands[i] = NULL;
+ }
+
+ break;
+#endif /* ACPI_NO_METHOD_EXECUTION */
+
+ case AML_TYPE_NAMED_COMPLEX:
+
+ switch (op->common.aml_opcode) {
+#ifndef ACPI_NO_METHOD_EXECUTION
+ case AML_REGION_OP:
+ case AML_DATA_REGION_OP:
+
+ if (op->common.aml_opcode == AML_REGION_OP) {
+ region_space = (acpi_adr_space_type)
+ ((op->common.value.arg)->common.value.
+ integer);
+ } else {
+ region_space = REGION_DATA_TABLE;
+ }
+
+ /*
+ * The op_region is not fully parsed at this time. The only valid
+ * argument is the space_id. (We must save the address of the
+ * AML of the address and length operands)
+ *
+ * If we have a valid region, initialize it. The namespace is
+ * unlocked at this point.
+ *
+ * Need to unlock interpreter if it is locked (if we are running
+ * a control method), in order to allow _REG methods to be run
+ * during acpi_ev_initialize_region.
+ */
+ if (walk_state->method_node) {
+ /*
+ * Executing a method: initialize the region and unlock
+ * the interpreter
+ */
+ status =
+ acpi_ex_create_region(op->named.data,
+ op->named.length,
+ region_space,
+ walk_state);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ acpi_ex_exit_interpreter();
+ }
+
+ status =
+ acpi_ev_initialize_region
+ (acpi_ns_get_attached_object(node), FALSE);
+ if (walk_state->method_node) {
+ acpi_ex_enter_interpreter();
+ }
+
+ if (ACPI_FAILURE(status)) {
+ /*
+ * If AE_NOT_EXIST is returned, it is not fatal
+ * because many regions get created before a handler
+ * is installed for said region.
+ */
+ if (AE_NOT_EXIST == status) {
+ status = AE_OK;
+ }
+ }
+ break;
+
+ case AML_NAME_OP:
+
+ status = acpi_ds_create_node(walk_state, node, op);
+ break;
+
+ case AML_METHOD_OP:
+ /*
+ * method_op pkg_length name_string method_flags term_list
+ *
+ * Note: We must create the method node/object pair as soon as we
+ * see the method declaration. This allows later pass1 parsing
+ * of invocations of the method (need to know the number of
+ * arguments.)
+ */
+ ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+ "LOADING-Method: State=%p Op=%p NamedObj=%p\n",
+ walk_state, op, op->named.node));
+
+ if (!acpi_ns_get_attached_object(op->named.node)) {
+ walk_state->operands[0] =
+ ACPI_CAST_PTR(void, op->named.node);
+ walk_state->num_operands = 1;
+
+ status =
+ acpi_ds_create_operands(walk_state,
+ op->common.value.
+ arg);
+ if (ACPI_SUCCESS(status)) {
+ status =
+ acpi_ex_create_method(op->named.
+ data,
+ op->named.
+ length,
+ walk_state);
+ }
+ walk_state->operands[0] = NULL;
+ walk_state->num_operands = 0;
+
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+ }
+ break;
+
+#endif /* ACPI_NO_METHOD_EXECUTION */
+
+ default:
+ /* All NAMED_COMPLEX opcodes must be handled above */
+ break;
+ }
+ break;
+
+ case AML_CLASS_INTERNAL:
+
+ /* case AML_INT_NAMEPATH_OP: */
+ break;
+
+ case AML_CLASS_METHOD_CALL:
+
+ ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+ "RESOLVING-MethodCall: State=%p Op=%p NamedObj=%p\n",
+ walk_state, op, node));
+
+ /*
+ * Lookup the method name and save the Node
+ */
+ status =
+ acpi_ns_lookup(walk_state->scope_info,
+ arg->common.value.string, ACPI_TYPE_ANY,
+ ACPI_IMODE_LOAD_PASS2,
+ ACPI_NS_SEARCH_PARENT |
+ ACPI_NS_DONT_OPEN_SCOPE, walk_state,
+ &(new_node));
+ if (ACPI_SUCCESS(status)) {
+ /*
+ * Make sure that what we found is indeed a method.
+ * The lookup was deliberately not restricted to methods,
+ * so that we can see whether the name resolves at all.
+ */
+ if (new_node->type != ACPI_TYPE_METHOD) {
+ status = AE_AML_OPERAND_TYPE;
+ }
+
+ /* We could put the returned object (Node) on the object stack for
+ * later, but for now, we will put it in the "op" object that the
+ * parser uses, so we can get it again at the end of this scope
+ */
+ op->common.node = new_node;
+ } else {
+ ACPI_ERROR_NAMESPACE(arg->common.value.string, status);
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ cleanup:
+
+ /* Remove the Node pushed at the very beginning */
+
+ walk_state->operands[0] = NULL;
+ walk_state->num_operands = 0;
+ return_ACPI_STATUS(status);
+}
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index f4725212eb48..65c79add3b19 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -373,6 +373,15 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
gpe_register_info = &gpe_block->register_info[i];
+ /*
+ * Optimization: If there are no GPEs enabled within this
+ * register, we can safely ignore the entire register.
+ */
+ if (!(gpe_register_info->enable_for_run |
+ gpe_register_info->enable_for_wake)) {
+ continue;
+ }
+
/* Read the Status Register */
status =
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
index 785a5ee64585..bea7223d7a71 100644
--- a/drivers/acpi/acpica/evregion.c
+++ b/drivers/acpi/acpica/evregion.c
@@ -231,6 +231,8 @@ acpi_status acpi_ev_initialize_op_regions(void)
}
}
+ acpi_gbl_reg_methods_executed = TRUE;
+
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c
index eb7386763712..c85c8c45599d 100644
--- a/drivers/acpi/acpica/evxfregn.c
+++ b/drivers/acpi/acpica/evxfregn.c
@@ -110,9 +110,39 @@ acpi_install_address_space_handler(acpi_handle device,
goto unlock_and_exit;
}
- /* Run all _REG methods for this address space */
+ /*
+ * For the default space IDs (the IDs for which default region handlers are
+ * installed), only execute the _REG methods if the global initialization
+ * _REG methods have already been run (via acpi_initialize_objects). In other
+ * words, we defer execution of the _REG methods for these space IDs until
+ * acpi_initialize_objects runs. This is done because the handlers for the
+ * default spaces (mem/io/pci/table) must be installed before we can run any
+ * control methods (or _REG methods). There is known BIOS code that depends
+ * on this.
+ *
+ * For all other space IDs, we can safely execute the _REG methods
+ * immediately. This means that for IDs such as embedded_controller, this
+ * function should be called only after acpi_enable_subsystem has been called.
+ */
+ switch (space_id) {
+ case ACPI_ADR_SPACE_SYSTEM_MEMORY:
+ case ACPI_ADR_SPACE_SYSTEM_IO:
+ case ACPI_ADR_SPACE_PCI_CONFIG:
+ case ACPI_ADR_SPACE_DATA_TABLE:
+
+ if (acpi_gbl_reg_methods_executed) {
+
+ /* Run all _REG methods for this address space */
+
+ status = acpi_ev_execute_reg_methods(node, space_id);
+ }
+ break;
+
+ default:
- status = acpi_ev_execute_reg_methods(node, space_id);
+ status = acpi_ev_execute_reg_methods(node, space_id);
+ break;
+ }
unlock_and_exit:
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
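
The comment added above implies a specific caller-side ordering: handlers for
the default spaces may be installed early (their _REG methods are deferred),
while handlers for other spaces such as the embedded controller should be
installed only after acpi_enable_subsystem/acpi_initialize_objects, at which
point their _REG methods run immediately. The sketch below illustrates that
ordering using the public ACPICA entry points; the ec_device handle and the
stub callbacks are placeholders, not real driver code.

#include <acpi/acpi.h>

static acpi_status
ec_space_handler(u32 function, acpi_physical_address address, u32 bit_width,
                 u64 *value, void *handler_context, void *region_context)
{
        if (function == ACPI_READ) {
                *value = 0;     /* stub: every read returns zero */
        }

        return (AE_OK);
}

static acpi_status
ec_space_setup(acpi_handle region_handle, u32 function,
               void *handler_context, void **region_context)
{
        return (AE_OK);
}

static acpi_status bring_up_and_install_ec(acpi_handle ec_device)
{
        acpi_status status;

        /* Default-space _REG methods are deferred until this point */

        status = acpi_enable_subsystem(ACPI_FULL_INITIALIZATION);
        if (ACPI_FAILURE(status)) {
                return (status);
        }

        status = acpi_initialize_objects(ACPI_FULL_INITIALIZATION);
        if (ACPI_FAILURE(status)) {
                return (status);
        }

        /* Installing the EC handler now runs the EC _REG methods at once */

        return (acpi_install_address_space_handler(ec_device,
                                                   ACPI_ADR_SPACE_EC,
                                                   ec_space_handler,
                                                   ec_space_setup, NULL));
}
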
diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c
index 6c79c29f082d..f915a7f3f921 100644
--- a/drivers/acpi/acpica/exfldio.c
+++ b/drivers/acpi/acpica/exfldio.c
@@ -280,13 +280,13 @@ acpi_ex_access_region(union acpi_operand_object *obj_desc,
if (ACPI_FAILURE(status)) {
if (status == AE_NOT_IMPLEMENTED) {
ACPI_ERROR((AE_INFO,
- "Region %s(0x%X) not implemented",
+ "Region %s (ID=%u) not implemented",
acpi_ut_get_region_name(rgn_desc->region.
space_id),
rgn_desc->region.space_id));
} else if (status == AE_NOT_EXIST) {
ACPI_ERROR((AE_INFO,
- "Region %s(0x%X) has no handler",
+ "Region %s (ID=%u) has no handler",
acpi_ut_get_region_name(rgn_desc->region.
space_id),
rgn_desc->region.space_id));
diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c
index 428d44e2d162..6f5588e62c0a 100644
--- a/drivers/acpi/acpica/tbfadt.c
+++ b/drivers/acpi/acpica/tbfadt.c
@@ -384,8 +384,11 @@ static void acpi_tb_convert_fadt(void)
*
* The ACPI 1.0 reserved fields that will be zeroed are the bytes located at
* offset 45, 55, 95, and the word located at offset 109, 110.
+ *
+ * Note: The FADT revision value is unreliable. Only the length can be
+ * trusted.
*/
- if (acpi_gbl_FADT.header.revision < FADT2_REVISION_ID) {
+ if (acpi_gbl_FADT.header.length <= ACPI_FADT_V2_SIZE) {
acpi_gbl_FADT.preferred_profile = 0;
acpi_gbl_FADT.pstate_control = 0;
acpi_gbl_FADT.cst_control = 0;
diff --git a/drivers/acpi/acpica/utdecode.c b/drivers/acpi/acpica/utdecode.c
new file mode 100644
index 000000000000..136a814cec69
--- /dev/null
+++ b/drivers/acpi/acpica/utdecode.c
@@ -0,0 +1,548 @@
+/******************************************************************************
+ *
+ * Module Name: utdecode - Utility decoding routines (value-to-string)
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2011, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acnamesp.h"
+
+#define _COMPONENT ACPI_UTILITIES
+ACPI_MODULE_NAME("utdecode")
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_format_exception
+ *
+ * PARAMETERS: Status - The acpi_status code to be formatted
+ *
+ * RETURN: A string containing the exception text. A valid pointer is
+ * always returned.
+ *
+ * DESCRIPTION: This function translates an ACPI exception into an ASCII string.
+ * It is here instead of utxface.c so it is always present.
+ *
+ ******************************************************************************/
+const char *acpi_format_exception(acpi_status status)
+{
+ const char *exception = NULL;
+
+ ACPI_FUNCTION_ENTRY();
+
+ exception = acpi_ut_validate_exception(status);
+ if (!exception) {
+
+ /* Exception code was not recognized */
+
+ ACPI_ERROR((AE_INFO,
+ "Unknown exception code: 0x%8.8X", status));
+
+ exception = "UNKNOWN_STATUS_CODE";
+ }
+
+ return (ACPI_CAST_PTR(const char, exception));
+}
+
+ACPI_EXPORT_SYMBOL(acpi_format_exception)
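
A short usage sketch for acpi_format_exception (assuming the standard ACPICA
public headers; the FADT lookup and the function name report_fadt_lookup are
only examples):

#include <acpi/acpi.h>

static void report_fadt_lookup(void)
{
        struct acpi_table_header *table;
        acpi_status status;

        status = acpi_get_table(ACPI_SIG_FADT, 1, &table);
        if (ACPI_FAILURE(status)) {

                /* Decode the status code into readable text */

                acpi_os_printf("Could not get FADT: %s\n",
                               acpi_format_exception(status));
        }
}
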
+
+/*
+ * Properties of the ACPI Object Types, both internal and external.
+ * The table is indexed by values of acpi_object_type
+ */
+const u8 acpi_gbl_ns_properties[ACPI_NUM_NS_TYPES] = {
+ ACPI_NS_NORMAL, /* 00 Any */
+ ACPI_NS_NORMAL, /* 01 Number */
+ ACPI_NS_NORMAL, /* 02 String */
+ ACPI_NS_NORMAL, /* 03 Buffer */
+ ACPI_NS_NORMAL, /* 04 Package */
+ ACPI_NS_NORMAL, /* 05 field_unit */
+ ACPI_NS_NEWSCOPE, /* 06 Device */
+ ACPI_NS_NORMAL, /* 07 Event */
+ ACPI_NS_NEWSCOPE, /* 08 Method */
+ ACPI_NS_NORMAL, /* 09 Mutex */
+ ACPI_NS_NORMAL, /* 10 Region */
+ ACPI_NS_NEWSCOPE, /* 11 Power */
+ ACPI_NS_NEWSCOPE, /* 12 Processor */
+ ACPI_NS_NEWSCOPE, /* 13 Thermal */
+ ACPI_NS_NORMAL, /* 14 buffer_field */
+ ACPI_NS_NORMAL, /* 15 ddb_handle */
+ ACPI_NS_NORMAL, /* 16 Debug Object */
+ ACPI_NS_NORMAL, /* 17 def_field */
+ ACPI_NS_NORMAL, /* 18 bank_field */
+ ACPI_NS_NORMAL, /* 19 index_field */
+ ACPI_NS_NORMAL, /* 20 Reference */
+ ACPI_NS_NORMAL, /* 21 Alias */
+ ACPI_NS_NORMAL, /* 22 method_alias */
+ ACPI_NS_NORMAL, /* 23 Notify */
+ ACPI_NS_NORMAL, /* 24 Address Handler */
+ ACPI_NS_NEWSCOPE | ACPI_NS_LOCAL, /* 25 Resource Desc */
+ ACPI_NS_NEWSCOPE | ACPI_NS_LOCAL, /* 26 Resource Field */
+ ACPI_NS_NEWSCOPE, /* 27 Scope */
+ ACPI_NS_NORMAL, /* 28 Extra */
+ ACPI_NS_NORMAL, /* 29 Data */
+ ACPI_NS_NORMAL /* 30 Invalid */
+};
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_hex_to_ascii_char
+ *
+ * PARAMETERS: Integer - Contains the hex digit
+ * Position - bit position of the digit within the
+ * integer (multiple of 4)
+ *
+ * RETURN: The converted Ascii character
+ *
+ * DESCRIPTION: Convert a hex digit to an Ascii character
+ *
+ ******************************************************************************/
+
+/* Hex to ASCII conversion table */
+
+static const char acpi_gbl_hex_to_ascii[] = {
+ '0', '1', '2', '3', '4', '5', '6', '7',
+ '8', '9', 'A', 'B', 'C', 'D', 'E', 'F'
+};
+
+char acpi_ut_hex_to_ascii_char(u64 integer, u32 position)
+{
+
+ return (acpi_gbl_hex_to_ascii[(integer >> position) & 0xF]);
+}
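
A short usage sketch for the helper above (assuming the ACPICA integer types;
the wrapper name u32_to_hex_string is hypothetical): render a 32-bit value as
eight uppercase hex characters, most significant nibble first.

static void u32_to_hex_string(u32 value, char buffer[9])
{
        u32 i;

        /* Bit positions 28, 24, ..., 4, 0 select successive nibbles */

        for (i = 0; i < 8; i++) {
                buffer[i] = acpi_ut_hex_to_ascii_char((u64)value, 28 - (i * 4));
        }

        buffer[8] = 0;          /* NUL terminator */
}
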
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_get_region_name
+ *
+ * PARAMETERS: Space ID - ID for the region
+ *
+ * RETURN: Decoded region space_id name
+ *
+ * DESCRIPTION: Translate a Space ID into a name string (Debug only)
+ *
+ ******************************************************************************/
+
+/* Region type decoding */
+
+const char *acpi_gbl_region_types[ACPI_NUM_PREDEFINED_REGIONS] = {
+ "SystemMemory",
+ "SystemIO",
+ "PCI_Config",
+ "EmbeddedControl",
+ "SMBus",
+ "SystemCMOS",
+ "PCIBARTarget",
+ "IPMI",
+ "DataTable"
+};
+
+char *acpi_ut_get_region_name(u8 space_id)
+{
+
+ if (space_id >= ACPI_USER_REGION_BEGIN) {
+ return ("UserDefinedRegion");
+ } else if (space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
+ return ("FunctionalFixedHW");
+ } else if (space_id >= ACPI_NUM_PREDEFINED_REGIONS) {
+ return ("InvalidSpaceId");
+ }
+
+ return (ACPI_CAST_PTR(char, acpi_gbl_region_types[space_id]));
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_get_event_name
+ *
+ * PARAMETERS: event_id - Fixed event ID
+ *
+ * RETURN: Decoded event ID name
+ *
+ * DESCRIPTION: Translate an Event ID into a name string (Debug only)
+ *
+ ******************************************************************************/
+
+/* Event type decoding */
+
+static const char *acpi_gbl_event_types[ACPI_NUM_FIXED_EVENTS] = {
+ "PM_Timer",
+ "GlobalLock",
+ "PowerButton",
+ "SleepButton",
+ "RealTimeClock",
+};
+
+char *acpi_ut_get_event_name(u32 event_id)
+{
+
+ if (event_id > ACPI_EVENT_MAX) {
+ return ("InvalidEventID");
+ }
+
+ return (ACPI_CAST_PTR(char, acpi_gbl_event_types[event_id]));
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_get_type_name
+ *
+ * PARAMETERS: Type - An ACPI object type
+ *
+ * RETURN: Decoded ACPI object type name
+ *
+ * DESCRIPTION: Translate a Type ID into a name string (Debug only)
+ *
+ ******************************************************************************/
+
+/*
+ * Elements of acpi_gbl_ns_type_names below must match
+ * one-to-one with values of acpi_object_type
+ *
+ * The type ACPI_TYPE_ANY (Untyped) is used as a "don't care" when searching;
+ * when stored in a table it really means that we have thus far seen no
+ * evidence to indicate what type is actually going to be stored for this entry.
+ */
+static const char acpi_gbl_bad_type[] = "UNDEFINED";
+
+/* Printable names of the ACPI object types */
+
+static const char *acpi_gbl_ns_type_names[] = {
+ /* 00 */ "Untyped",
+ /* 01 */ "Integer",
+ /* 02 */ "String",
+ /* 03 */ "Buffer",
+ /* 04 */ "Package",
+ /* 05 */ "FieldUnit",
+ /* 06 */ "Device",
+ /* 07 */ "Event",
+ /* 08 */ "Method",
+ /* 09 */ "Mutex",
+ /* 10 */ "Region",
+ /* 11 */ "Power",
+ /* 12 */ "Processor",
+ /* 13 */ "Thermal",
+ /* 14 */ "BufferField",
+ /* 15 */ "DdbHandle",
+ /* 16 */ "DebugObject",
+ /* 17 */ "RegionField",
+ /* 18 */ "BankField",
+ /* 19 */ "IndexField",
+ /* 20 */ "Reference",
+ /* 21 */ "Alias",
+ /* 22 */ "MethodAlias",
+ /* 23 */ "Notify",
+ /* 24 */ "AddrHandler",
+ /* 25 */ "ResourceDesc",
+ /* 26 */ "ResourceFld",
+ /* 27 */ "Scope",
+ /* 28 */ "Extra",
+ /* 29 */ "Data",
+ /* 30 */ "Invalid"
+};
+
+char *acpi_ut_get_type_name(acpi_object_type type)
+{
+
+ if (type > ACPI_TYPE_INVALID) {
+ return (ACPI_CAST_PTR(char, acpi_gbl_bad_type));
+ }
+
+ return (ACPI_CAST_PTR(char, acpi_gbl_ns_type_names[type]));
+}
+
+char *acpi_ut_get_object_type_name(union acpi_operand_object *obj_desc)
+{
+
+ if (!obj_desc) {
+ return ("[NULL Object Descriptor]");
+ }
+
+ return (acpi_ut_get_type_name(obj_desc->common.type));
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_get_node_name
+ *
+ * PARAMETERS: Object - A namespace node
+ *
+ * RETURN: ASCII name of the node
+ *
+ * DESCRIPTION: Validate the node and return the node's ACPI name.
+ *
+ ******************************************************************************/
+
+char *acpi_ut_get_node_name(void *object)
+{
+ struct acpi_namespace_node *node = (struct acpi_namespace_node *)object;
+
+ /* Must return a string of exactly 4 characters == ACPI_NAME_SIZE */
+
+ if (!object) {
+ return ("NULL");
+ }
+
+ /* Check for Root node */
+
+ if ((object == ACPI_ROOT_OBJECT) || (object == acpi_gbl_root_node)) {
+ return ("\"\\\" ");
+ }
+
+ /* Descriptor must be a namespace node */
+
+ if (ACPI_GET_DESCRIPTOR_TYPE(node) != ACPI_DESC_TYPE_NAMED) {
+ return ("####");
+ }
+
+ /*
+ * Ensure name is valid. The name was validated/repaired when the node
+ * was created, but make sure it has not been corrupted.
+ */
+ acpi_ut_repair_name(node->name.ascii);
+
+ /* Return the name */
+
+ return (node->name.ascii);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_get_descriptor_name
+ *
+ * PARAMETERS: Object - An ACPI object
+ *
+ * RETURN: Decoded name of the descriptor type
+ *
+ * DESCRIPTION: Validate object and return the descriptor type
+ *
+ ******************************************************************************/
+
+/* Printable names of object descriptor types */
+
+static const char *acpi_gbl_desc_type_names[] = {
+ /* 00 */ "Not a Descriptor",
+ /* 01 */ "Cached",
+ /* 02 */ "State-Generic",
+ /* 03 */ "State-Update",
+ /* 04 */ "State-Package",
+ /* 05 */ "State-Control",
+ /* 06 */ "State-RootParseScope",
+ /* 07 */ "State-ParseScope",
+ /* 08 */ "State-WalkScope",
+ /* 09 */ "State-Result",
+ /* 10 */ "State-Notify",
+ /* 11 */ "State-Thread",
+ /* 12 */ "Walk",
+ /* 13 */ "Parser",
+ /* 14 */ "Operand",
+ /* 15 */ "Node"
+};
+
+char *acpi_ut_get_descriptor_name(void *object)
+{
+
+ if (!object) {
+ return ("NULL OBJECT");
+ }
+
+ if (ACPI_GET_DESCRIPTOR_TYPE(object) > ACPI_DESC_TYPE_MAX) {
+ return ("Not a Descriptor");
+ }
+
+ return (ACPI_CAST_PTR(char,
+ acpi_gbl_desc_type_names[ACPI_GET_DESCRIPTOR_TYPE
+ (object)]));
+
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_get_reference_name
+ *
+ * PARAMETERS: Object - An ACPI reference object
+ *
+ * RETURN: Decoded name of the type of reference
+ *
+ * DESCRIPTION: Decode a reference object sub-type to a string.
+ *
+ ******************************************************************************/
+
+/* Printable names of reference object sub-types */
+
+static const char *acpi_gbl_ref_class_names[] = {
+ /* 00 */ "Local",
+ /* 01 */ "Argument",
+ /* 02 */ "RefOf",
+ /* 03 */ "Index",
+ /* 04 */ "DdbHandle",
+ /* 05 */ "Named Object",
+ /* 06 */ "Debug"
+};
+
+const char *acpi_ut_get_reference_name(union acpi_operand_object *object)
+{
+
+ if (!object) {
+ return ("NULL Object");
+ }
+
+ if (ACPI_GET_DESCRIPTOR_TYPE(object) != ACPI_DESC_TYPE_OPERAND) {
+ return ("Not an Operand object");
+ }
+
+ if (object->common.type != ACPI_TYPE_LOCAL_REFERENCE) {
+ return ("Not a Reference object");
+ }
+
+ if (object->reference.class > ACPI_REFCLASS_MAX) {
+ return ("Unknown Reference class");
+ }
+
+ return (acpi_gbl_ref_class_names[object->reference.class]);
+}
+
+#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
+/*
+ * Strings and procedures used for debug only
+ */
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_get_mutex_name
+ *
+ * PARAMETERS: mutex_id - The predefined ID for this mutex.
+ *
+ * RETURN: Decoded name of the internal mutex
+ *
+ * DESCRIPTION: Translate a mutex ID into a name string (Debug only)
+ *
+ ******************************************************************************/
+
+/* Names for internal mutex objects, used for debug output */
+
+static char *acpi_gbl_mutex_names[ACPI_NUM_MUTEX] = {
+ "ACPI_MTX_Interpreter",
+ "ACPI_MTX_Namespace",
+ "ACPI_MTX_Tables",
+ "ACPI_MTX_Events",
+ "ACPI_MTX_Caches",
+ "ACPI_MTX_Memory",
+ "ACPI_MTX_CommandComplete",
+ "ACPI_MTX_CommandReady"
+};
+
+char *acpi_ut_get_mutex_name(u32 mutex_id)
+{
+
+ if (mutex_id > ACPI_MAX_MUTEX) {
+ return ("Invalid Mutex ID");
+ }
+
+ return (acpi_gbl_mutex_names[mutex_id]);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_get_notify_name
+ *
+ * PARAMETERS: notify_value - Value from the Notify() request
+ *
+ * RETURN: Decoded name for the notify value
+ *
+ * DESCRIPTION: Translate a Notify Value to a notify namestring.
+ *
+ ******************************************************************************/
+
+/* Names for Notify() values, used for debug output */
+
+static const char *acpi_gbl_notify_value_names[] = {
+ "Bus Check",
+ "Device Check",
+ "Device Wake",
+ "Eject Request",
+ "Device Check Light",
+ "Frequency Mismatch",
+ "Bus Mode Mismatch",
+ "Power Fault",
+ "Capabilities Check",
+ "Device PLD Check",
+ "Reserved",
+ "System Locality Update"
+};
+
+const char *acpi_ut_get_notify_name(u32 notify_value)
+{
+
+ if (notify_value <= ACPI_NOTIFY_MAX) {
+ return (acpi_gbl_notify_value_names[notify_value]);
+ } else if (notify_value <= ACPI_MAX_SYS_NOTIFY) {
+ return ("Reserved");
+ } else { /* Greater or equal to 0x80 */
+
+ return ("**Device Specific**");
+ }
+}
+#endif
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_valid_object_type
+ *
+ * PARAMETERS: Type - Object type to be validated
+ *
+ * RETURN: TRUE if valid object type, FALSE otherwise
+ *
+ * DESCRIPTION: Validate an object type
+ *
+ ******************************************************************************/
+
+u8 acpi_ut_valid_object_type(acpi_object_type type)
+{
+
+ if (type > ACPI_TYPE_LOCAL_MAX) {
+
+ /* Note: Assumes all TYPEs are contiguous (external/local) */
+
+ return (FALSE);
+ }
+
+ return (TRUE);
+}
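
The decode helpers added above all share one shape: a static table of printable names indexed by an ID, guarded by a range check that falls back to a sentinel string so the caller always gets a valid pointer to print. A minimal userspace sketch of that pattern follows; the table contents echo the region names shown in this patch, but the function and macro names here are invented for illustration.

#include <stdio.h>

#define NUM_REGION_NAMES 3

static const char *region_names[NUM_REGION_NAMES] = {
	"SystemMemory", "SystemIO", "PCI_Config"
};

/* Always returns a valid, printable string. */
static const char *get_region_name(unsigned int id)
{
	if (id >= NUM_REGION_NAMES)
		return "InvalidSpaceId";

	return region_names[id];
}

int main(void)
{
	printf("%s\n", get_region_name(1));	/* SystemIO */
	printf("%s\n", get_region_name(99));	/* InvalidSpaceId */
	return 0;
}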
diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c
index 97dd9bbf055a..833a38a9c905 100644
--- a/drivers/acpi/acpica/utglobal.c
+++ b/drivers/acpi/acpica/utglobal.c
@@ -45,7 +45,6 @@
#include <acpi/acpi.h>
#include "accommon.h"
-#include "acnamesp.h"
#define _COMPONENT ACPI_UTILITIES
ACPI_MODULE_NAME("utglobal")
@@ -107,43 +106,6 @@ const char *acpi_gbl_highest_dstate_names[ACPI_NUM_sx_d_METHODS] = {
/*******************************************************************************
*
- * FUNCTION: acpi_format_exception
- *
- * PARAMETERS: Status - The acpi_status code to be formatted
- *
- * RETURN: A string containing the exception text. A valid pointer is
- * always returned.
- *
- * DESCRIPTION: This function translates an ACPI exception into an ASCII string
- * It is here instead of utxface.c so it is always present.
- *
- ******************************************************************************/
-
-const char *acpi_format_exception(acpi_status status)
-{
- const char *exception = NULL;
-
- ACPI_FUNCTION_ENTRY();
-
- exception = acpi_ut_validate_exception(status);
- if (!exception) {
-
- /* Exception code was not recognized */
-
- ACPI_ERROR((AE_INFO,
- "Unknown exception code: 0x%8.8X", status));
-
- exception = "UNKNOWN_STATUS_CODE";
- dump_stack();
- }
-
- return (ACPI_CAST_PTR(const char, exception));
-}
-
-ACPI_EXPORT_SYMBOL(acpi_format_exception)
-
-/*******************************************************************************
- *
* Namespace globals
*
******************************************************************************/
@@ -177,71 +139,6 @@ const struct acpi_predefined_names acpi_gbl_pre_defined_names[] = {
{NULL, ACPI_TYPE_ANY, NULL}
};
-/*
- * Properties of the ACPI Object Types, both internal and external.
- * The table is indexed by values of acpi_object_type
- */
-const u8 acpi_gbl_ns_properties[] = {
- ACPI_NS_NORMAL, /* 00 Any */
- ACPI_NS_NORMAL, /* 01 Number */
- ACPI_NS_NORMAL, /* 02 String */
- ACPI_NS_NORMAL, /* 03 Buffer */
- ACPI_NS_NORMAL, /* 04 Package */
- ACPI_NS_NORMAL, /* 05 field_unit */
- ACPI_NS_NEWSCOPE, /* 06 Device */
- ACPI_NS_NORMAL, /* 07 Event */
- ACPI_NS_NEWSCOPE, /* 08 Method */
- ACPI_NS_NORMAL, /* 09 Mutex */
- ACPI_NS_NORMAL, /* 10 Region */
- ACPI_NS_NEWSCOPE, /* 11 Power */
- ACPI_NS_NEWSCOPE, /* 12 Processor */
- ACPI_NS_NEWSCOPE, /* 13 Thermal */
- ACPI_NS_NORMAL, /* 14 buffer_field */
- ACPI_NS_NORMAL, /* 15 ddb_handle */
- ACPI_NS_NORMAL, /* 16 Debug Object */
- ACPI_NS_NORMAL, /* 17 def_field */
- ACPI_NS_NORMAL, /* 18 bank_field */
- ACPI_NS_NORMAL, /* 19 index_field */
- ACPI_NS_NORMAL, /* 20 Reference */
- ACPI_NS_NORMAL, /* 21 Alias */
- ACPI_NS_NORMAL, /* 22 method_alias */
- ACPI_NS_NORMAL, /* 23 Notify */
- ACPI_NS_NORMAL, /* 24 Address Handler */
- ACPI_NS_NEWSCOPE | ACPI_NS_LOCAL, /* 25 Resource Desc */
- ACPI_NS_NEWSCOPE | ACPI_NS_LOCAL, /* 26 Resource Field */
- ACPI_NS_NEWSCOPE, /* 27 Scope */
- ACPI_NS_NORMAL, /* 28 Extra */
- ACPI_NS_NORMAL, /* 29 Data */
- ACPI_NS_NORMAL /* 30 Invalid */
-};
-
-/* Hex to ASCII conversion table */
-
-static const char acpi_gbl_hex_to_ascii[] = {
- '0', '1', '2', '3', '4', '5', '6', '7',
- '8', '9', 'A', 'B', 'C', 'D', 'E', 'F'
-};
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_hex_to_ascii_char
- *
- * PARAMETERS: Integer - Contains the hex digit
- * Position - bit position of the digit within the
- * integer (multiple of 4)
- *
- * RETURN: The converted Ascii character
- *
- * DESCRIPTION: Convert a hex digit to an Ascii character
- *
- ******************************************************************************/
-
-char acpi_ut_hex_to_ascii_char(u64 integer, u32 position)
-{
-
- return (acpi_gbl_hex_to_ascii[(integer >> position) & 0xF]);
-}
-
/******************************************************************************
*
* Event and Hardware globals
@@ -341,386 +238,6 @@ struct acpi_fixed_event_info acpi_gbl_fixed_event_info[ACPI_NUM_FIXED_EVENTS] =
/*******************************************************************************
*
- * FUNCTION: acpi_ut_get_region_name
- *
- * PARAMETERS: None.
- *
- * RETURN: Status
- *
- * DESCRIPTION: Translate a Space ID into a name string (Debug only)
- *
- ******************************************************************************/
-
-/* Region type decoding */
-
-const char *acpi_gbl_region_types[ACPI_NUM_PREDEFINED_REGIONS] = {
- "SystemMemory",
- "SystemIO",
- "PCI_Config",
- "EmbeddedControl",
- "SMBus",
- "SystemCMOS",
- "PCIBARTarget",
- "IPMI",
- "DataTable"
-};
-
-char *acpi_ut_get_region_name(u8 space_id)
-{
-
- if (space_id >= ACPI_USER_REGION_BEGIN) {
- return ("UserDefinedRegion");
- } else if (space_id >= ACPI_NUM_PREDEFINED_REGIONS) {
- return ("InvalidSpaceId");
- }
-
- return (ACPI_CAST_PTR(char, acpi_gbl_region_types[space_id]));
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_get_event_name
- *
- * PARAMETERS: None.
- *
- * RETURN: Status
- *
- * DESCRIPTION: Translate a Event ID into a name string (Debug only)
- *
- ******************************************************************************/
-
-/* Event type decoding */
-
-static const char *acpi_gbl_event_types[ACPI_NUM_FIXED_EVENTS] = {
- "PM_Timer",
- "GlobalLock",
- "PowerButton",
- "SleepButton",
- "RealTimeClock",
-};
-
-char *acpi_ut_get_event_name(u32 event_id)
-{
-
- if (event_id > ACPI_EVENT_MAX) {
- return ("InvalidEventID");
- }
-
- return (ACPI_CAST_PTR(char, acpi_gbl_event_types[event_id]));
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_get_type_name
- *
- * PARAMETERS: None.
- *
- * RETURN: Status
- *
- * DESCRIPTION: Translate a Type ID into a name string (Debug only)
- *
- ******************************************************************************/
-
-/*
- * Elements of acpi_gbl_ns_type_names below must match
- * one-to-one with values of acpi_object_type
- *
- * The type ACPI_TYPE_ANY (Untyped) is used as a "don't care" when searching;
- * when stored in a table it really means that we have thus far seen no
- * evidence to indicate what type is actually going to be stored for this entry.
- */
-static const char acpi_gbl_bad_type[] = "UNDEFINED";
-
-/* Printable names of the ACPI object types */
-
-static const char *acpi_gbl_ns_type_names[] = {
- /* 00 */ "Untyped",
- /* 01 */ "Integer",
- /* 02 */ "String",
- /* 03 */ "Buffer",
- /* 04 */ "Package",
- /* 05 */ "FieldUnit",
- /* 06 */ "Device",
- /* 07 */ "Event",
- /* 08 */ "Method",
- /* 09 */ "Mutex",
- /* 10 */ "Region",
- /* 11 */ "Power",
- /* 12 */ "Processor",
- /* 13 */ "Thermal",
- /* 14 */ "BufferField",
- /* 15 */ "DdbHandle",
- /* 16 */ "DebugObject",
- /* 17 */ "RegionField",
- /* 18 */ "BankField",
- /* 19 */ "IndexField",
- /* 20 */ "Reference",
- /* 21 */ "Alias",
- /* 22 */ "MethodAlias",
- /* 23 */ "Notify",
- /* 24 */ "AddrHandler",
- /* 25 */ "ResourceDesc",
- /* 26 */ "ResourceFld",
- /* 27 */ "Scope",
- /* 28 */ "Extra",
- /* 29 */ "Data",
- /* 30 */ "Invalid"
-};
-
-char *acpi_ut_get_type_name(acpi_object_type type)
-{
-
- if (type > ACPI_TYPE_INVALID) {
- return (ACPI_CAST_PTR(char, acpi_gbl_bad_type));
- }
-
- return (ACPI_CAST_PTR(char, acpi_gbl_ns_type_names[type]));
-}
-
-char *acpi_ut_get_object_type_name(union acpi_operand_object *obj_desc)
-{
-
- if (!obj_desc) {
- return ("[NULL Object Descriptor]");
- }
-
- return (acpi_ut_get_type_name(obj_desc->common.type));
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_get_node_name
- *
- * PARAMETERS: Object - A namespace node
- *
- * RETURN: Pointer to a string
- *
- * DESCRIPTION: Validate the node and return the node's ACPI name.
- *
- ******************************************************************************/
-
-char *acpi_ut_get_node_name(void *object)
-{
- struct acpi_namespace_node *node = (struct acpi_namespace_node *)object;
-
- /* Must return a string of exactly 4 characters == ACPI_NAME_SIZE */
-
- if (!object) {
- return ("NULL");
- }
-
- /* Check for Root node */
-
- if ((object == ACPI_ROOT_OBJECT) || (object == acpi_gbl_root_node)) {
- return ("\"\\\" ");
- }
-
- /* Descriptor must be a namespace node */
-
- if (ACPI_GET_DESCRIPTOR_TYPE(node) != ACPI_DESC_TYPE_NAMED) {
- return ("####");
- }
-
- /* Name must be a valid ACPI name */
-
- if (!acpi_ut_valid_acpi_name(node->name.integer)) {
- node->name.integer = acpi_ut_repair_name(node->name.ascii);
- }
-
- /* Return the name */
-
- return (node->name.ascii);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_get_descriptor_name
- *
- * PARAMETERS: Object - An ACPI object
- *
- * RETURN: Pointer to a string
- *
- * DESCRIPTION: Validate object and return the descriptor type
- *
- ******************************************************************************/
-
-/* Printable names of object descriptor types */
-
-static const char *acpi_gbl_desc_type_names[] = {
- /* 00 */ "Invalid",
- /* 01 */ "Cached",
- /* 02 */ "State-Generic",
- /* 03 */ "State-Update",
- /* 04 */ "State-Package",
- /* 05 */ "State-Control",
- /* 06 */ "State-RootParseScope",
- /* 07 */ "State-ParseScope",
- /* 08 */ "State-WalkScope",
- /* 09 */ "State-Result",
- /* 10 */ "State-Notify",
- /* 11 */ "State-Thread",
- /* 12 */ "Walk",
- /* 13 */ "Parser",
- /* 14 */ "Operand",
- /* 15 */ "Node"
-};
-
-char *acpi_ut_get_descriptor_name(void *object)
-{
-
- if (!object) {
- return ("NULL OBJECT");
- }
-
- if (ACPI_GET_DESCRIPTOR_TYPE(object) > ACPI_DESC_TYPE_MAX) {
- return (ACPI_CAST_PTR(char, acpi_gbl_bad_type));
- }
-
- return (ACPI_CAST_PTR(char,
- acpi_gbl_desc_type_names[ACPI_GET_DESCRIPTOR_TYPE
- (object)]));
-
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_get_reference_name
- *
- * PARAMETERS: Object - An ACPI reference object
- *
- * RETURN: Pointer to a string
- *
- * DESCRIPTION: Decode a reference object sub-type to a string.
- *
- ******************************************************************************/
-
-/* Printable names of reference object sub-types */
-
-static const char *acpi_gbl_ref_class_names[] = {
- /* 00 */ "Local",
- /* 01 */ "Argument",
- /* 02 */ "RefOf",
- /* 03 */ "Index",
- /* 04 */ "DdbHandle",
- /* 05 */ "Named Object",
- /* 06 */ "Debug"
-};
-
-const char *acpi_ut_get_reference_name(union acpi_operand_object *object)
-{
- if (!object)
- return "NULL Object";
-
- if (ACPI_GET_DESCRIPTOR_TYPE(object) != ACPI_DESC_TYPE_OPERAND)
- return "Not an Operand object";
-
- if (object->common.type != ACPI_TYPE_LOCAL_REFERENCE)
- return "Not a Reference object";
-
- if (object->reference.class > ACPI_REFCLASS_MAX)
- return "Unknown Reference class";
-
- return acpi_gbl_ref_class_names[object->reference.class];
-}
-
-#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
-/*
- * Strings and procedures used for debug only
- */
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_get_mutex_name
- *
- * PARAMETERS: mutex_id - The predefined ID for this mutex.
- *
- * RETURN: String containing the name of the mutex. Always returns a valid
- * pointer.
- *
- * DESCRIPTION: Translate a mutex ID into a name string (Debug only)
- *
- ******************************************************************************/
-
-char *acpi_ut_get_mutex_name(u32 mutex_id)
-{
-
- if (mutex_id > ACPI_MAX_MUTEX) {
- return ("Invalid Mutex ID");
- }
-
- return (acpi_gbl_mutex_names[mutex_id]);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_get_notify_name
- *
- * PARAMETERS: notify_value - Value from the Notify() request
- *
- * RETURN: String corresponding to the Notify Value.
- *
- * DESCRIPTION: Translate a Notify Value to a notify namestring.
- *
- ******************************************************************************/
-
-/* Names for Notify() values, used for debug output */
-
-static const char *acpi_gbl_notify_value_names[] = {
- "Bus Check",
- "Device Check",
- "Device Wake",
- "Eject Request",
- "Device Check Light",
- "Frequency Mismatch",
- "Bus Mode Mismatch",
- "Power Fault",
- "Capabilities Check",
- "Device PLD Check",
- "Reserved",
- "System Locality Update"
-};
-
-const char *acpi_ut_get_notify_name(u32 notify_value)
-{
-
- if (notify_value <= ACPI_NOTIFY_MAX) {
- return (acpi_gbl_notify_value_names[notify_value]);
- } else if (notify_value <= ACPI_MAX_SYS_NOTIFY) {
- return ("Reserved");
- } else { /* Greater or equal to 0x80 */
-
- return ("**Device Specific**");
- }
-}
-#endif
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_valid_object_type
- *
- * PARAMETERS: Type - Object type to be validated
- *
- * RETURN: TRUE if valid object type, FALSE otherwise
- *
- * DESCRIPTION: Validate an object type
- *
- ******************************************************************************/
-
-u8 acpi_ut_valid_object_type(acpi_object_type type)
-{
-
- if (type > ACPI_TYPE_LOCAL_MAX) {
-
- /* Note: Assumes all TYPEs are contiguous (external/local) */
-
- return (FALSE);
- }
-
- return (TRUE);
-}
-
-/*******************************************************************************
- *
* FUNCTION: acpi_ut_init_globals
*
* PARAMETERS: None
@@ -806,6 +323,7 @@ acpi_status acpi_ut_init_globals(void)
acpi_gbl_db_output_flags = ACPI_DB_CONSOLE_OUTPUT;
acpi_gbl_osi_data = 0;
acpi_gbl_osi_mutex = NULL;
+ acpi_gbl_reg_methods_executed = FALSE;
/* Hardware oriented */
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index b9ba04fc2b34..77fc76f8aea9 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3281,7 +3281,7 @@ static int set_geometry(unsigned int cmd, struct floppy_struct *g,
struct block_device *bdev = opened_bdev[cnt];
if (!bdev || ITYPE(drive_state[cnt].fd_device) != type)
continue;
- __invalidate_device(bdev);
+ __invalidate_device(bdev, true);
}
mutex_unlock(&open_lock);
} else {
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
index c195bfeade11..5feebe2800e9 100644
--- a/drivers/char/agp/intel-agp.h
+++ b/drivers/char/agp/intel-agp.h
@@ -130,6 +130,7 @@
#define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4)
#define I915_IFPADDR 0x60
+#define I830_HIC 0x70
/* Intel 965G registers */
#define I965_MSAC 0x62
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index fab3d3265adb..0d09b537bb9a 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -21,6 +21,7 @@
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/agp_backend.h>
+#include <linux/delay.h>
#include <asm/smp.h>
#include "agp.h"
#include "intel-agp.h"
@@ -70,12 +71,8 @@ static struct _intel_private {
u32 __iomem *gtt; /* I915G */
bool clear_fake_agp; /* on first access via agp, fill with scratch */
int num_dcache_entries;
- union {
- void __iomem *i9xx_flush_page;
- void *i8xx_flush_page;
- };
+ void __iomem *i9xx_flush_page;
char *i81x_gtt_table;
- struct page *i8xx_page;
struct resource ifp_resource;
int resource_valid;
struct page *scratch_page;
@@ -722,28 +719,6 @@ static int intel_fake_agp_fetch_size(void)
static void i830_cleanup(void)
{
- if (intel_private.i8xx_flush_page) {
- kunmap(intel_private.i8xx_flush_page);
- intel_private.i8xx_flush_page = NULL;
- }
-
- __free_page(intel_private.i8xx_page);
- intel_private.i8xx_page = NULL;
-}
-
-static void intel_i830_setup_flush(void)
-{
- /* return if we've already set the flush mechanism up */
- if (intel_private.i8xx_page)
- return;
-
- intel_private.i8xx_page = alloc_page(GFP_KERNEL);
- if (!intel_private.i8xx_page)
- return;
-
- intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page);
- if (!intel_private.i8xx_flush_page)
- i830_cleanup();
}
/* The chipset_flush interface needs to get data that has already been
@@ -758,14 +733,27 @@ static void intel_i830_setup_flush(void)
*/
static void i830_chipset_flush(void)
{
- unsigned int *pg = intel_private.i8xx_flush_page;
+ unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+
+ /* Forcibly evict everything from the CPU write buffers.
+ * clflush appears to be insufficient.
+ */
+ wbinvd_on_all_cpus();
+
+	/* We have only seen documentation for this magic bit on the 855GM;
+	 * we hope it exists on the other gen2 chipsets as well.
+	 *
+	 * Also works as advertised on my 845G.
+	 */
+ writel(readl(intel_private.registers+I830_HIC) | (1<<31),
+ intel_private.registers+I830_HIC);
- memset(pg, 0, 1024);
+ while (readl(intel_private.registers+I830_HIC) & (1<<31)) {
+ if (time_after(jiffies, timeout))
+ break;
- if (cpu_has_clflush)
- clflush_cache_range(pg, 1024);
- else if (wbinvd_on_all_cpus() != 0)
- printk(KERN_ERR "Timed out waiting for cache flush.\n");
+ udelay(50);
+ }
}
static void i830_write_entry(dma_addr_t addr, unsigned int entry,
@@ -849,8 +837,6 @@ static int i830_setup(void)
intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE;
- intel_i830_setup_flush();
-
return 0;
}
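
The rewritten i830_chipset_flush() above drops the dedicated flush page and instead flushes the CPU write buffers, sets the magic bit in I830_HIC, and polls for it to clear with a one-second cap, so missing or stuck hardware can no longer hang the machine. A rough userspace analogue of that bounded-poll pattern, using clock_gettime() in place of jiffies/time_after(); all names here are illustrative.

#include <stdbool.h>
#include <time.h>

/* Stand-in for reading the hardware status bit. */
static bool flush_still_pending(void)
{
	return false;	/* pretend the hardware finished immediately */
}

/* Poll until the bit clears or roughly one second has passed. */
static void wait_for_flush(void)
{
	struct timespec start, now;
	struct timespec pause = { 0, 50 * 1000 };	/* ~50us, like udelay(50) */

	clock_gettime(CLOCK_MONOTONIC, &start);

	while (flush_still_pending()) {
		clock_gettime(CLOCK_MONOTONIC, &now);
		if (now.tv_sec - start.tv_sec >= 1)	/* coarse whole-second cap */
			break;				/* give up rather than hang */
		nanosleep(&pause, NULL);
	}
}

int main(void)
{
	wait_for_flush();
	return 0;
}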
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
index 36e0fa161c2b..1f46f1cd9225 100644
--- a/drivers/char/tpm/tpm.c
+++ b/drivers/char/tpm/tpm.c
@@ -364,14 +364,12 @@ unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip,
tpm_protected_ordinal_duration[ordinal &
TPM_PROTECTED_ORDINAL_MASK];
- if (duration_idx != TPM_UNDEFINED) {
+ if (duration_idx != TPM_UNDEFINED)
duration = chip->vendor.duration[duration_idx];
- /* if duration is 0, it's because chip->vendor.duration wasn't */
- /* filled yet, so we set the lowest timeout just to give enough */
- /* time for tpm_get_timeouts() to succeed */
- return (duration <= 0 ? HZ : duration);
- } else
+ if (duration <= 0)
return 2 * 60 * HZ;
+ else
+ return duration;
}
EXPORT_SYMBOL_GPL(tpm_calc_ordinal_duration);
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 53120a72a48c..28d1d3c24d65 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -1012,7 +1012,8 @@ int drm_modeset_ctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_modeset_ctl *modeset = data;
- int crtc, ret = 0;
+ int ret = 0;
+ unsigned int crtc;
/* If drm_vblank_init() hasn't been called yet, just no-op */
if (!dev->num_crtcs)
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 22a32b9932c5..79a04fde69b5 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -184,7 +184,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
static bool
i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
{
- int tile_width;
+ int tile_width, tile_height;
/* Linear is always fine */
if (tiling_mode == I915_TILING_NONE)
@@ -215,6 +215,20 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
}
}
+ if (IS_GEN2(dev) ||
+ (tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)))
+ tile_height = 32;
+ else
+ tile_height = 8;
+ /* i8xx is strange: It has 2 interleaved rows of tiles, so needs an even
+ * number of tile rows. */
+ if (IS_GEN2(dev))
+ tile_height *= 2;
+
+ /* Size needs to be aligned to a full tile row */
+ if (size & (tile_height * stride - 1))
+ return false;
+
/* 965+ just needs multiples of tile width */
if (INTEL_INFO(dev)->gen >= 4) {
if (stride & (tile_width - 1))
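
The new i915_tiling_ok() checks above pick a tile height per generation and then require the object size to be a whole number of tile rows. As a worked example with assumed values: a tile height of 8 and a 4096-byte stride give a 32768-byte row, so a 131072-byte object passes while a 100000-byte one is rejected. A small sketch of that check, written with a modulo; the driver's mask form is equivalent when the row size is a power of two.

#include <stdbool.h>
#include <stdio.h>

/*
 * Illustrative only: check that an object size is a whole number of
 * tile rows.  The tile_height and stride values are made up.
 */
static bool size_is_tile_row_aligned(unsigned int size,
				     unsigned int tile_height,
				     unsigned int stride)
{
	unsigned int row = tile_height * stride;

	return (size % row) == 0;
}

int main(void)
{
	/* 8 rows of 4096 bytes -> 32768-byte rows */
	printf("%d\n", size_is_tile_row_aligned(131072, 8, 4096));	/* 1 */
	printf("%d\n", size_is_tile_row_aligned(100000, 8, 4096));	/* 0 */
	return 0;
}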
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 97f946dcc1aa..8a9e08bf1cf7 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -316,6 +316,8 @@ static void i915_hotplug_work_func(struct work_struct *work)
struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_encoder *encoder;
+ DRM_DEBUG_KMS("running encoder hotplug functions\n");
+
list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
if (encoder->hot_plug)
encoder->hot_plug(encoder);
@@ -1649,9 +1651,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
} else {
hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
- hotplug_mask |= SDE_AUX_MASK | SDE_FDI_MASK | SDE_TRANS_MASK;
- I915_WRITE(FDI_RXA_IMR, 0);
- I915_WRITE(FDI_RXB_IMR, 0);
+ hotplug_mask |= SDE_AUX_MASK;
}
dev_priv->pch_irq_mask = ~hotplug_mask;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 3b006536b3d2..e79b25bbee6c 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1630,19 +1630,19 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
wait_event(dev_priv->pending_flip_queue,
+ atomic_read(&dev_priv->mm.wedged) ||
atomic_read(&obj->pending_flip) == 0);
/* Big Hammer, we also need to ensure that any pending
* MI_WAIT_FOR_EVENT inside a user batch buffer on the
* current scanout is retired before unpinning the old
* framebuffer.
+ *
+ * This should only fail upon a hung GPU, in which case we
+ * can safely continue.
*/
ret = i915_gem_object_flush_gpu(obj, false);
- if (ret) {
- i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
+ (void) ret;
}
ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
@@ -2045,6 +2045,31 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
atomic_read(&obj->pending_flip) == 0);
}
+static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct intel_encoder *encoder;
+
+ /*
+ * If there's a non-PCH eDP on this crtc, it must be DP_A, and that
+ * must be driven by its own crtc; no sharing is possible.
+ */
+ list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
+ if (encoder->base.crtc != crtc)
+ continue;
+
+ switch (encoder->type) {
+ case INTEL_OUTPUT_EDP:
+ if (!intel_encoder_is_pch_edp(&encoder->base))
+ return false;
+ continue;
+ }
+ }
+
+ return true;
+}
+
static void ironlake_crtc_enable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -2053,6 +2078,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
u32 reg, temp;
+ bool is_pch_port = false;
if (intel_crtc->active)
return;
@@ -2066,7 +2092,56 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
}
- ironlake_fdi_enable(crtc);
+ is_pch_port = intel_crtc_driving_pch(crtc);
+
+ if (is_pch_port)
+ ironlake_fdi_enable(crtc);
+ else {
+ /* disable CPU FDI tx and PCH FDI rx */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
+ POSTING_READ(reg);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~(0x7 << 16);
+ temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
+ I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
+
+ POSTING_READ(reg);
+ udelay(100);
+
+ /* Ironlake workaround, disable clock pointer after downing FDI */
+ if (HAS_PCH_IBX(dev))
+			I915_WRITE(FDI_RX_CHICKEN(pipe),
+				   I915_READ(FDI_RX_CHICKEN(pipe)) &
+				   ~FDI_RX_PHASE_SYNC_POINTER_ENABLE);
+
+ /* still set train pattern 1 */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ I915_WRITE(reg, temp);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ if (HAS_PCH_CPT(dev)) {
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+ } else {
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ }
+ /* BPC in FDI rx is consistent with that in PIPECONF */
+ temp &= ~(0x07 << 16);
+ temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
+ I915_WRITE(reg, temp);
+
+ POSTING_READ(reg);
+ udelay(100);
+ }
/* Enable panel fitting for LVDS */
if (dev_priv->pch_pf_size &&
@@ -2100,6 +2175,10 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
intel_flush_display_plane(dev, plane);
}
+ /* Skip the PCH stuff if possible */
+ if (!is_pch_port)
+ goto done;
+
/* For PCH output, training FDI link */
if (IS_GEN6(dev))
gen6_fdi_link_train(crtc);
@@ -2184,7 +2263,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
I915_WRITE(reg, temp | TRANS_ENABLE);
if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
DRM_ERROR("failed to enable transcoder %d\n", pipe);
-
+done:
intel_crtc_load_lut(crtc);
intel_update_fbc(dev);
intel_crtc_update_cursor(crtc, true);
@@ -6496,7 +6575,7 @@ static void ironlake_disable_rc6(struct drm_device *dev)
POSTING_READ(RSTDBYCTL);
}
- ironlake_disable_rc6(dev);
+ ironlake_teardown_rc6(dev);
}
static int ironlake_setup_rc6(struct drm_device *dev)
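
intel_crtc_driving_pch() above walks the encoders attached to the CRTC and returns false as soon as it finds a CPU (non-PCH) eDP output, which is what lets ironlake_crtc_enable() skip FDI training and the rest of the PCH path. A stripped-down sketch of the same walk over a plain array; the encoder types and struct layout are invented for illustration.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

enum output_type { OUTPUT_HDMI, OUTPUT_EDP_CPU, OUTPUT_EDP_PCH };

struct encoder {
	enum output_type type;
	int crtc_id;		/* which CRTC this encoder drives */
};

/* Returns false if any attached output is a CPU (non-PCH) eDP panel. */
static bool crtc_driving_pch(const struct encoder *encoders, size_t n,
			     int crtc_id)
{
	size_t i;

	for (i = 0; i < n; i++) {
		if (encoders[i].crtc_id != crtc_id)
			continue;
		if (encoders[i].type == OUTPUT_EDP_CPU)
			return false;
	}
	return true;
}

int main(void)
{
	struct encoder e[] = {
		{ OUTPUT_HDMI, 0 },
		{ OUTPUT_EDP_CPU, 1 },
	};

	printf("crtc0 pch=%d crtc1 pch=%d\n",
	       crtc_driving_pch(e, 2, 0), crtc_driving_pch(e, 2, 1));
	return 0;
}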
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index d38a4d9f9b0b..a52184007f5f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -49,7 +49,10 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
DRM_ERROR("bo %p still attached to GEM object\n", bo);
nv10_mem_put_tile_region(dev, nvbo->tile, NULL);
- nouveau_vm_put(&nvbo->vma);
+ if (nvbo->vma.node) {
+ nouveau_vm_unmap(&nvbo->vma);
+ nouveau_vm_put(&nvbo->vma);
+ }
kfree(nvbo);
}
diff --git a/drivers/hwmon/ad7414.c b/drivers/hwmon/ad7414.c
index 86d822aa9bbf..d46c0c758ddf 100644
--- a/drivers/hwmon/ad7414.c
+++ b/drivers/hwmon/ad7414.c
@@ -242,6 +242,7 @@ static const struct i2c_device_id ad7414_id[] = {
{ "ad7414", 0 },
{}
};
+MODULE_DEVICE_TABLE(i2c, ad7414_id);
static struct i2c_driver ad7414_driver = {
.driver = {
diff --git a/drivers/hwmon/adt7411.c b/drivers/hwmon/adt7411.c
index f13c843a2964..5cc3e3784b42 100644
--- a/drivers/hwmon/adt7411.c
+++ b/drivers/hwmon/adt7411.c
@@ -334,6 +334,7 @@ static const struct i2c_device_id adt7411_id[] = {
{ "adt7411", 0 },
{ }
};
+MODULE_DEVICE_TABLE(i2c, adt7411_id);
static struct i2c_driver adt7411_driver = {
.driver = {
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 8a2f767f26d8..0ed7f6bc2a7f 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -216,7 +216,6 @@ static int linear_run (mddev_t *mddev)
if (md_check_no_bitmap(mddev))
return -EINVAL;
- mddev->queue->queue_lock = &mddev->queue->__queue_lock;
conf = linear_conf(mddev, mddev->raid_disks);
if (!conf)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 0cc30ecda4c1..818313e277e7 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -553,6 +553,9 @@ static mddev_t * mddev_find(dev_t unit)
{
mddev_t *mddev, *new = NULL;
+ if (unit && MAJOR(unit) != MD_MAJOR)
+ unit &= ~((1<<MdpMinorShift)-1);
+
retry:
spin_lock(&all_mddevs_lock);
@@ -4138,10 +4141,10 @@ array_size_store(mddev_t *mddev, const char *buf, size_t len)
}
mddev->array_sectors = sectors;
- set_capacity(mddev->gendisk, mddev->array_sectors);
- if (mddev->pers)
+ if (mddev->pers) {
+ set_capacity(mddev->gendisk, mddev->array_sectors);
revalidate_disk(mddev->gendisk);
-
+ }
return len;
}
@@ -4624,6 +4627,7 @@ static int do_md_run(mddev_t *mddev)
}
set_capacity(mddev->gendisk, mddev->array_sectors);
revalidate_disk(mddev->gendisk);
+ mddev->changed = 1;
kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
out:
return err;
@@ -4712,6 +4716,7 @@ static void md_clean(mddev_t *mddev)
mddev->sync_speed_min = mddev->sync_speed_max = 0;
mddev->recovery = 0;
mddev->in_sync = 0;
+ mddev->changed = 0;
mddev->degraded = 0;
mddev->safemode = 0;
mddev->bitmap_info.offset = 0;
@@ -4827,6 +4832,7 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
set_capacity(disk, 0);
mutex_unlock(&mddev->open_mutex);
+ mddev->changed = 1;
revalidate_disk(disk);
if (mddev->ro)
@@ -6011,7 +6017,7 @@ static int md_open(struct block_device *bdev, fmode_t mode)
atomic_inc(&mddev->openers);
mutex_unlock(&mddev->open_mutex);
- check_disk_size_change(mddev->gendisk, bdev);
+ check_disk_change(bdev);
out:
return err;
}
@@ -6026,6 +6032,21 @@ static int md_release(struct gendisk *disk, fmode_t mode)
return 0;
}
+
+static int md_media_changed(struct gendisk *disk)
+{
+ mddev_t *mddev = disk->private_data;
+
+ return mddev->changed;
+}
+
+static int md_revalidate(struct gendisk *disk)
+{
+ mddev_t *mddev = disk->private_data;
+
+ mddev->changed = 0;
+ return 0;
+}
static const struct block_device_operations md_fops =
{
.owner = THIS_MODULE,
@@ -6036,6 +6057,8 @@ static const struct block_device_operations md_fops =
.compat_ioctl = md_compat_ioctl,
#endif
.getgeo = md_getgeo,
+ .media_changed = md_media_changed,
+ .revalidate_disk= md_revalidate,
};
static int md_thread(void * arg)
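
The md.c changes above wire the new mddev->changed flag into the standard removable-media handshake: do_md_run()/do_md_stop() set it when the capacity changes, md_open() calls check_disk_change(), which asks md_media_changed() whether anything happened and then lets md_revalidate() clear the flag once the partition table has been reread. A condensed sketch of that handshake, with the block-layer plumbing reduced to direct calls and all names invented for illustration.

#include <stdbool.h>
#include <stdio.h>

struct fake_mddev {
	bool changed;
	long long sectors;
};

static void resize_array(struct fake_mddev *m, long long sectors)
{
	m->sectors = sectors;
	m->changed = true;	/* partition info may need rereading */
}

static bool media_changed(struct fake_mddev *m)
{
	return m->changed;
}

static void revalidate(struct fake_mddev *m)
{
	/* pretend we reread the partition table here */
	m->changed = false;
}

/* Very rough stand-in for check_disk_change() at open time. */
static void open_device(struct fake_mddev *m)
{
	if (media_changed(m))
		revalidate(m);
}

int main(void)
{
	struct fake_mddev m = { false, 0 };

	resize_array(&m, 2048);
	open_device(&m);
	printf("changed after open: %d\n", m.changed);	/* 0 */
	return 0;
}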
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 7e90b8593b2a..12215d437fcc 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -274,6 +274,8 @@ struct mddev_s
atomic_t active; /* general refcount */
atomic_t openers; /* number of active opens */
+ int changed; /* True if we might need to
+ * reread partition info */
int degraded; /* whether md should consider
* adding a spare
*/
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 6d7ddf32ef2e..3a62d440e27b 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -435,7 +435,6 @@ static int multipath_run (mddev_t *mddev)
* bookkeeping area. [whatever we allocate in multipath_run(),
* should be freed in multipath_stop()]
*/
- mddev->queue->queue_lock = &mddev->queue->__queue_lock;
conf = kzalloc(sizeof(multipath_conf_t), GFP_KERNEL);
mddev->private = conf;
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 637a96855edb..c0ac457f1218 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -361,7 +361,6 @@ static int raid0_run(mddev_t *mddev)
if (md_check_no_bitmap(mddev))
return -EINVAL;
blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
- mddev->queue->queue_lock = &mddev->queue->__queue_lock;
/* if private is not null, we are here after takeover */
if (mddev->private == NULL) {
@@ -670,6 +669,7 @@ static void *raid0_takeover_raid1(mddev_t *mddev)
mddev->new_layout = 0;
mddev->new_chunk_sectors = 128; /* by default set chunk size to 64k */
mddev->delta_disks = 1 - mddev->raid_disks;
+ mddev->raid_disks = 1;
/* make sure it will be not marked as dirty */
mddev->recovery_cp = MaxSector;
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index a23ffa397ba9..06cd712807d0 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -593,7 +593,10 @@ static int flush_pending_writes(conf_t *conf)
if (conf->pending_bio_list.head) {
struct bio *bio;
bio = bio_list_get(&conf->pending_bio_list);
+ /* Only take the spinlock to quiet a warning */
+ spin_lock(conf->mddev->queue->queue_lock);
blk_remove_plug(conf->mddev->queue);
+ spin_unlock(conf->mddev->queue->queue_lock);
spin_unlock_irq(&conf->device_lock);
/* flush any pending bitmap writes to
* disk before proceeding w/ I/O */
@@ -959,7 +962,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
atomic_inc(&r1_bio->remaining);
spin_lock_irqsave(&conf->device_lock, flags);
bio_list_add(&conf->pending_bio_list, mbio);
- blk_plug_device(mddev->queue);
+ blk_plug_device_unlocked(mddev->queue);
spin_unlock_irqrestore(&conf->device_lock, flags);
}
r1_bio_write_done(r1_bio, bio->bi_vcnt, behind_pages, behind_pages != NULL);
@@ -2021,7 +2024,6 @@ static int run(mddev_t *mddev)
if (IS_ERR(conf))
return PTR_ERR(conf);
- mddev->queue->queue_lock = &conf->device_lock;
list_for_each_entry(rdev, &mddev->disks, same_set) {
disk_stack_limits(mddev->gendisk, rdev->bdev,
rdev->data_offset << 9);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 3b607b28741b..747d061d8e05 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -662,7 +662,10 @@ static int flush_pending_writes(conf_t *conf)
if (conf->pending_bio_list.head) {
struct bio *bio;
bio = bio_list_get(&conf->pending_bio_list);
+ /* Spinlock only taken to quiet a warning */
+ spin_lock(conf->mddev->queue->queue_lock);
blk_remove_plug(conf->mddev->queue);
+ spin_unlock(conf->mddev->queue->queue_lock);
spin_unlock_irq(&conf->device_lock);
/* flush any pending bitmap writes to disk
* before proceeding w/ I/O */
@@ -971,7 +974,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
atomic_inc(&r10_bio->remaining);
spin_lock_irqsave(&conf->device_lock, flags);
bio_list_add(&conf->pending_bio_list, mbio);
- blk_plug_device(mddev->queue);
+ blk_plug_device_unlocked(mddev->queue);
spin_unlock_irqrestore(&conf->device_lock, flags);
}
@@ -2304,8 +2307,6 @@ static int run(mddev_t *mddev)
if (!conf)
goto out;
- mddev->queue->queue_lock = &conf->device_lock;
-
mddev->thread = conf->thread;
conf->thread = NULL;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 702812824195..78536fdbd87f 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5204,7 +5204,6 @@ static int run(mddev_t *mddev)
mddev->queue->backing_dev_info.congested_data = mddev;
mddev->queue->backing_dev_info.congested_fn = raid5_congested;
- mddev->queue->queue_lock = &conf->device_lock;
mddev->queue->unplug_fn = raid5_unplug_queue;
chunk_size = mddev->chunk_sectors << 9;
diff --git a/drivers/nfc/Kconfig b/drivers/nfc/Kconfig
index ffedfd492754..ea1580085347 100644
--- a/drivers/nfc/Kconfig
+++ b/drivers/nfc/Kconfig
@@ -3,7 +3,7 @@
#
menuconfig NFC_DEVICES
- bool "NFC devices"
+ bool "Near Field Communication (NFC) devices"
default n
---help---
You'll have to say Y if your computer contains an NFC device that
diff --git a/drivers/nfc/pn544.c b/drivers/nfc/pn544.c
index bae647264dd6..724f65d8f9e4 100644
--- a/drivers/nfc/pn544.c
+++ b/drivers/nfc/pn544.c
@@ -60,7 +60,7 @@ enum pn544_irq {
struct pn544_info {
struct miscdevice miscdev;
struct i2c_client *i2c_dev;
- struct regulator_bulk_data regs[2];
+ struct regulator_bulk_data regs[3];
enum pn544_state state;
wait_queue_head_t read_wait;
@@ -74,6 +74,7 @@ struct pn544_info {
static const char reg_vdd_io[] = "Vdd_IO";
static const char reg_vbat[] = "VBat";
+static const char reg_vsim[] = "VSim";
/* sysfs interface */
static ssize_t pn544_test(struct device *dev,
@@ -740,6 +741,7 @@ static int __devinit pn544_probe(struct i2c_client *client,
info->regs[0].supply = reg_vdd_io;
info->regs[1].supply = reg_vbat;
+ info->regs[2].supply = reg_vsim;
r = regulator_bulk_get(&client->dev, ARRAY_SIZE(info->regs),
info->regs);
if (r < 0)
diff --git a/drivers/pps/kapi.c b/drivers/pps/kapi.c
index cba1b43f7519..a4e8eb9fece6 100644
--- a/drivers/pps/kapi.c
+++ b/drivers/pps/kapi.c
@@ -168,7 +168,7 @@ void pps_event(struct pps_device *pps, struct pps_event_time *ts, int event,
{
unsigned long flags;
int captured = 0;
- struct pps_ktime ts_real;
+ struct pps_ktime ts_real = { .sec = 0, .nsec = 0, .flags = 0 };
/* check event type */
BUG_ON((event & (PPS_CAPTUREASSERT | PPS_CAPTURECLEAR)) == 0);
diff --git a/drivers/rapidio/rio-sysfs.c b/drivers/rapidio/rio-sysfs.c
index 76b41853a877..1269fbd2deca 100644
--- a/drivers/rapidio/rio-sysfs.c
+++ b/drivers/rapidio/rio-sysfs.c
@@ -77,9 +77,9 @@ rio_read_config(struct file *filp, struct kobject *kobj,
/* Several chips lock up trying to read undefined config space */
if (capable(CAP_SYS_ADMIN))
- size = 0x200000;
+ size = RIO_MAINT_SPACE_SZ;
- if (off > size)
+ if (off >= size)
return 0;
if (off + count > size) {
size -= off;
@@ -147,10 +147,10 @@ rio_write_config(struct file *filp, struct kobject *kobj,
loff_t init_off = off;
u8 *data = (u8 *) buf;
- if (off > 0x200000)
+ if (off >= RIO_MAINT_SPACE_SZ)
return 0;
- if (off + count > 0x200000) {
- size = 0x200000 - off;
+ if (off + count > RIO_MAINT_SPACE_SZ) {
+ size = RIO_MAINT_SPACE_SZ - off;
count = size;
}
@@ -200,7 +200,7 @@ static struct bin_attribute rio_config_attr = {
.name = "config",
.mode = S_IRUGO | S_IWUSR,
},
- .size = 0x200000,
+ .size = RIO_MAINT_SPACE_SZ,
.read = rio_read_config,
.write = rio_write_config,
};
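
The rio-sysfs change above replaces the bare 0x200000 with RIO_MAINT_SPACE_SZ and tightens the test from off > size to off >= size, so an access that starts exactly at the end of the maintenance window now returns 0 instead of reaching one byte past it. The clamping logic in isolation, with the window size assumed for the example and the helper name invented:

#include <stdio.h>

#define MAINT_SPACE_SZ 0x200000		/* assumed window size for the example */

/* Clamp (off, count) to the window [0, MAINT_SPACE_SZ). */
static size_t clamp_access(long long off, size_t count)
{
	if (off < 0 || off >= MAINT_SPACE_SZ)
		return 0;
	if ((unsigned long long)off + count > MAINT_SPACE_SZ)
		count = MAINT_SPACE_SZ - off;
	return count;
}

int main(void)
{
	printf("%zu\n", clamp_access(0x1FFFF0, 64));	/* 16 */
	printf("%zu\n", clamp_access(0x200000, 64));	/* 0  */
	return 0;
}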
diff --git a/drivers/regulator/mc13xxx-regulator-core.c b/drivers/regulator/mc13xxx-regulator-core.c
index f53d31b950d4..2bb5de1f2421 100644
--- a/drivers/regulator/mc13xxx-regulator-core.c
+++ b/drivers/regulator/mc13xxx-regulator-core.c
@@ -174,7 +174,7 @@ static int mc13xxx_regulator_get_voltage(struct regulator_dev *rdev)
dev_dbg(rdev_get_dev(rdev), "%s id: %d val: %d\n", __func__, id, val);
- BUG_ON(val < 0 || val > mc13xxx_regulators[id].desc.n_voltages);
+ BUG_ON(val > mc13xxx_regulators[id].desc.n_voltages);
return mc13xxx_regulators[id].voltages[val];
}
diff --git a/drivers/regulator/wm831x-dcdc.c b/drivers/regulator/wm831x-dcdc.c
index 8b0d2c4bde91..06df898842c0 100644
--- a/drivers/regulator/wm831x-dcdc.c
+++ b/drivers/regulator/wm831x-dcdc.c
@@ -120,6 +120,7 @@ static unsigned int wm831x_dcdc_get_mode(struct regulator_dev *rdev)
return REGULATOR_MODE_IDLE;
default:
BUG();
+ return -EINVAL;
}
}
diff --git a/drivers/rtc/rtc-at91sam9.c b/drivers/rtc/rtc-at91sam9.c
index c36749e4c926..5469c52cba3d 100644
--- a/drivers/rtc/rtc-at91sam9.c
+++ b/drivers/rtc/rtc-at91sam9.c
@@ -309,7 +309,7 @@ static const struct rtc_class_ops at91_rtc_ops = {
.read_alarm = at91_rtc_readalarm,
.set_alarm = at91_rtc_setalarm,
.proc = at91_rtc_proc,
- .alarm_irq_enabled = at91_rtc_alarm_irq_enable,
+ .alarm_irq_enable = at91_rtc_alarm_irq_enable,
};
/*
diff --git a/drivers/rtc/rtc-ds3232.c b/drivers/rtc/rtc-ds3232.c
index 23a9ee19764c..950735415a7c 100644
--- a/drivers/rtc/rtc-ds3232.c
+++ b/drivers/rtc/rtc-ds3232.c
@@ -1,7 +1,7 @@
/*
* RTC client/driver for the Maxim/Dallas DS3232 Real-Time Clock over I2C
*
- * Copyright (C) 2009-2010 Freescale Semiconductor.
+ * Copyright (C) 2009-2011 Freescale Semiconductor.
* Author: Jack Lan <jack.lan@freescale.com>
*
* This program is free software; you can redistribute it and/or modify it
@@ -141,9 +141,11 @@ static int ds3232_read_time(struct device *dev, struct rtc_time *time)
time->tm_hour = bcd2bin(hour);
}
- time->tm_wday = bcd2bin(week);
+	/* Day of the week is 0~6 in Linux but 1~7 in the RTC chip */
+ time->tm_wday = bcd2bin(week) - 1;
time->tm_mday = bcd2bin(day);
- time->tm_mon = bcd2bin(month & 0x7F);
+	/* Linux tm_mon is 0~11, while the RTC chip stores the month as 1~12 */
+ time->tm_mon = bcd2bin(month & 0x7F) - 1;
if (century)
add_century = 100;
@@ -162,9 +164,11 @@ static int ds3232_set_time(struct device *dev, struct rtc_time *time)
buf[0] = bin2bcd(time->tm_sec);
buf[1] = bin2bcd(time->tm_min);
buf[2] = bin2bcd(time->tm_hour);
- buf[3] = bin2bcd(time->tm_wday); /* Day of the week */
+	/* Day of the week is 0~6 in Linux but 1~7 in the RTC chip */
+ buf[3] = bin2bcd(time->tm_wday + 1);
buf[4] = bin2bcd(time->tm_mday); /* Date */
- buf[5] = bin2bcd(time->tm_mon);
+	/* Linux tm_mon is 0~11, while the RTC chip stores the month as 1~12 */
+ buf[5] = bin2bcd(time->tm_mon + 1);
if (time->tm_year >= 100) {
buf[5] |= 0x80;
buf[6] = bin2bcd(time->tm_year - 100);
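
The ds3232 fix above accounts for the chip storing the weekday as 1~7 and the month as 1~12 while struct rtc_time uses 0~6 and 0~11, so the driver now subtracts one on the read path and adds one on the write path. For instance, March (tm_mon == 2) is written as BCD 0x03 and a Wednesday (tm_wday == 3) as 0x04. A tiny round-trip sketch with minimal BCD helpers:

#include <stdio.h>

static unsigned int bin2bcd(unsigned int v) { return ((v / 10) << 4) | (v % 10); }
static unsigned int bcd2bin(unsigned int v) { return ((v >> 4) * 10) + (v & 0x0f); }

int main(void)
{
	unsigned int tm_mon = 2;	/* March in Linux terms (0~11) */
	unsigned int tm_wday = 3;	/* Wednesday (0~6) */

	/* write path: Linux ranges -> chip ranges */
	unsigned int reg_month = bin2bcd(tm_mon + 1);	/* 0x03 */
	unsigned int reg_wday  = bin2bcd(tm_wday + 1);	/* 0x04 */

	/* read path: chip ranges -> Linux ranges */
	printf("mon=%u wday=%u\n",
	       bcd2bin(reg_month) - 1, bcd2bin(reg_wday) - 1);	/* 2 3 */
	return 0;
}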
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index f7a5dba3ca23..bf7c687519ef 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -4,7 +4,6 @@
menuconfig THERMAL
tristate "Generic Thermal sysfs driver"
- depends on NET
help
Generic Thermal Sysfs driver offers a generic mechanism for
thermal management. Usually it's made up of one or more thermal
diff --git a/drivers/thermal/thermal_sys.c b/drivers/thermal/thermal_sys.c
index 7d0e63c79280..713b7ea4a607 100644
--- a/drivers/thermal/thermal_sys.c
+++ b/drivers/thermal/thermal_sys.c
@@ -62,20 +62,6 @@ static DEFINE_MUTEX(thermal_list_lock);
static unsigned int thermal_event_seqnum;
-static struct genl_family thermal_event_genl_family = {
- .id = GENL_ID_GENERATE,
- .name = THERMAL_GENL_FAMILY_NAME,
- .version = THERMAL_GENL_VERSION,
- .maxattr = THERMAL_GENL_ATTR_MAX,
-};
-
-static struct genl_multicast_group thermal_event_mcgrp = {
- .name = THERMAL_GENL_MCAST_GROUP_NAME,
-};
-
-static int genetlink_init(void);
-static void genetlink_exit(void);
-
static int get_idr(struct idr *idr, struct mutex *lock, int *id)
{
int err;
@@ -1225,6 +1211,18 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz)
EXPORT_SYMBOL(thermal_zone_device_unregister);
+#ifdef CONFIG_NET
+static struct genl_family thermal_event_genl_family = {
+ .id = GENL_ID_GENERATE,
+ .name = THERMAL_GENL_FAMILY_NAME,
+ .version = THERMAL_GENL_VERSION,
+ .maxattr = THERMAL_GENL_ATTR_MAX,
+};
+
+static struct genl_multicast_group thermal_event_mcgrp = {
+ .name = THERMAL_GENL_MCAST_GROUP_NAME,
+};
+
int generate_netlink_event(u32 orig, enum events event)
{
struct sk_buff *skb;
@@ -1301,6 +1299,15 @@ static int genetlink_init(void)
return result;
}
+static void genetlink_exit(void)
+{
+ genl_unregister_family(&thermal_event_genl_family);
+}
+#else /* !CONFIG_NET */
+static inline int genetlink_init(void) { return 0; }
+static inline void genetlink_exit(void) {}
+#endif /* !CONFIG_NET */
+
static int __init thermal_init(void)
{
int result = 0;
@@ -1316,11 +1323,6 @@ static int __init thermal_init(void)
return result;
}
-static void genetlink_exit(void)
-{
- genl_unregister_family(&thermal_event_genl_family);
-}
-
static void __exit thermal_exit(void)
{
class_unregister(&thermal_class);
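
Dropping the "depends on NET" works because the generic-netlink pieces are now compiled only under CONFIG_NET, with empty static inline stubs in the #else branch, so thermal_init() and thermal_exit() can keep calling genetlink_init()/genetlink_exit() unconditionally. A condensed, self-contained sketch of that stub pattern, with the netlink work replaced by a printf:

#include <stdio.h>

#ifdef CONFIG_NET
static int genetlink_init(void)
{
	printf("registering generic netlink family\n");
	return 0;
}

static void genetlink_exit(void)
{
	printf("unregistering generic netlink family\n");
}
#else /* !CONFIG_NET */
static inline int genetlink_init(void) { return 0; }
static inline void genetlink_exit(void) { }
#endif

int main(void)
{
	/* callers stay free of #ifdefs */
	if (genetlink_init() == 0)
		genetlink_exit();
	return 0;
}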
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index d041c6826e43..0f299b7aad60 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -2681,17 +2681,13 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
mutex_lock(&usb_address0_mutex);
- if (!udev->config && oldspeed == USB_SPEED_SUPER) {
- /* Don't reset USB 3.0 devices during an initial setup */
- usb_set_device_state(udev, USB_STATE_DEFAULT);
- } else {
- /* Reset the device; full speed may morph to high speed */
- /* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */
- retval = hub_port_reset(hub, port1, udev, delay);
- if (retval < 0) /* error or disconnect */
- goto fail;
- /* success, speed is known */
- }
+ /* Reset the device; full speed may morph to high speed */
+ /* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */
+ retval = hub_port_reset(hub, port1, udev, delay);
+ if (retval < 0) /* error or disconnect */
+ goto fail;
+ /* success, speed is known */
+
retval = -ENODEV;
if (oldspeed != USB_SPEED_UNKNOWN && oldspeed != udev->speed) {
diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c
index fcbf4abbf381..0231814a97a5 100644
--- a/drivers/usb/host/xhci-dbg.c
+++ b/drivers/usb/host/xhci-dbg.c
@@ -169,9 +169,10 @@ static void xhci_print_ports(struct xhci_hcd *xhci)
}
}
-void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int set_num)
+void xhci_print_ir_set(struct xhci_hcd *xhci, int set_num)
{
- void *addr;
+ struct xhci_intr_reg __iomem *ir_set = &xhci->run_regs->ir_set[set_num];
+ void __iomem *addr;
u32 temp;
u64 temp_64;
@@ -449,7 +450,7 @@ char *xhci_get_slot_state(struct xhci_hcd *xhci,
}
}
-void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx)
+static void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx)
{
/* Fields are 32 bits wide, DMA addresses are in bytes */
int field_size = 32 / 8;
@@ -488,7 +489,7 @@ void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx)
dbg_rsvd64(xhci, (u64 *)slot_ctx, dma);
}
-void xhci_dbg_ep_ctx(struct xhci_hcd *xhci,
+static void xhci_dbg_ep_ctx(struct xhci_hcd *xhci,
struct xhci_container_ctx *ctx,
unsigned int last_ep)
{
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 1d0f45f0e7a6..a9534396e85b 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -307,7 +307,7 @@ struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci,
/***************** Streams structures manipulation *************************/
-void xhci_free_stream_ctx(struct xhci_hcd *xhci,
+static void xhci_free_stream_ctx(struct xhci_hcd *xhci,
unsigned int num_stream_ctxs,
struct xhci_stream_ctx *stream_ctx, dma_addr_t dma)
{
@@ -335,7 +335,7 @@ void xhci_free_stream_ctx(struct xhci_hcd *xhci,
* The stream context array must be a power of 2, and can be as small as
* 64 bytes or as large as 1MB.
*/
-struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
+static struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
unsigned int num_stream_ctxs, dma_addr_t *dma,
gfp_t mem_flags)
{
@@ -1900,11 +1900,11 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
val &= DBOFF_MASK;
xhci_dbg(xhci, "// Doorbell array is located at offset 0x%x"
" from cap regs base addr\n", val);
- xhci->dba = (void *) xhci->cap_regs + val;
+ xhci->dba = (void __iomem *) xhci->cap_regs + val;
xhci_dbg_regs(xhci);
xhci_print_run_regs(xhci);
/* Set ir_set to interrupt register set 0 */
- xhci->ir_set = (void *) xhci->run_regs->ir_set;
+ xhci->ir_set = &xhci->run_regs->ir_set[0];
/*
* Event ring setup: Allocate a normal ring, but also setup
@@ -1961,7 +1961,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
/* Set the event ring dequeue address */
xhci_set_hc_event_deq(xhci);
xhci_dbg(xhci, "Wrote ERST address to ir_set 0.\n");
- xhci_print_ir_set(xhci, xhci->ir_set, 0);
+ xhci_print_ir_set(xhci, 0);
/*
* XXX: Might need to set the Interrupter Moderation Register to
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 3e8211c1ce5a..3289bf4832c9 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -474,8 +474,11 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
state->new_deq_seg = find_trb_seg(cur_td->start_seg,
dev->eps[ep_index].stopped_trb,
&state->new_cycle_state);
- if (!state->new_deq_seg)
- BUG();
+ if (!state->new_deq_seg) {
+ WARN_ON(1);
+ return;
+ }
+
/* Dig out the cycle state saved by the xHC during the stop ep cmd */
xhci_dbg(xhci, "Finding endpoint context\n");
ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
@@ -486,8 +489,10 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
state->new_deq_seg = find_trb_seg(state->new_deq_seg,
state->new_deq_ptr,
&state->new_cycle_state);
- if (!state->new_deq_seg)
- BUG();
+ if (!state->new_deq_seg) {
+ WARN_ON(1);
+ return;
+ }
trb = &state->new_deq_ptr->generic;
if ((trb->field[3] & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK) &&
@@ -2363,12 +2368,13 @@ static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
/* Scatter gather list entries may cross 64KB boundaries */
running_total = TRB_MAX_BUFF_SIZE -
- (sg_dma_address(sg) & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+ (sg_dma_address(sg) & (TRB_MAX_BUFF_SIZE - 1));
+ running_total &= TRB_MAX_BUFF_SIZE - 1;
if (running_total != 0)
num_trbs++;
/* How many more 64KB chunks to transfer, how many more TRBs? */
- while (running_total < sg_dma_len(sg)) {
+ while (running_total < sg_dma_len(sg) && running_total < temp) {
num_trbs++;
running_total += TRB_MAX_BUFF_SIZE;
}
@@ -2394,11 +2400,11 @@ static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
static void check_trb_math(struct urb *urb, int num_trbs, int running_total)
{
if (num_trbs != 0)
- dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
+ dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
"TRBs, %d left\n", __func__,
urb->ep->desc.bEndpointAddress, num_trbs);
if (running_total != urb->transfer_buffer_length)
- dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
+ dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
"queued %#x (%d), asked for %#x (%d)\n",
__func__,
urb->ep->desc.bEndpointAddress,
@@ -2533,8 +2539,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
sg = urb->sg;
addr = (u64) sg_dma_address(sg);
this_sg_len = sg_dma_len(sg);
- trb_buff_len = TRB_MAX_BUFF_SIZE -
- (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+ trb_buff_len = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
if (trb_buff_len > urb->transfer_buffer_length)
trb_buff_len = urb->transfer_buffer_length;
@@ -2572,7 +2577,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
(unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
(unsigned int) addr + trb_buff_len);
if (TRB_MAX_BUFF_SIZE -
- (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1)) < trb_buff_len) {
+ (addr & (TRB_MAX_BUFF_SIZE - 1)) < trb_buff_len) {
xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n");
xhci_dbg(xhci, "Next boundary at %#x, end dma = %#x\n",
(unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
@@ -2616,7 +2621,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
}
trb_buff_len = TRB_MAX_BUFF_SIZE -
- (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+ (addr & (TRB_MAX_BUFF_SIZE - 1));
trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
if (running_total + trb_buff_len > urb->transfer_buffer_length)
trb_buff_len =
@@ -2656,7 +2661,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
num_trbs = 0;
/* How much data is (potentially) left before the 64KB boundary? */
running_total = TRB_MAX_BUFF_SIZE -
- (urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+ (urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
+ running_total &= TRB_MAX_BUFF_SIZE - 1;
/* If there's some data on this 64KB chunk, or we have to send a
* zero-length transfer, we need at least one TRB
@@ -2700,8 +2706,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
/* How much data is in the first TRB? */
addr = (u64) urb->transfer_dma;
trb_buff_len = TRB_MAX_BUFF_SIZE -
- (urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
- if (urb->transfer_buffer_length < trb_buff_len)
+ (urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
+ if (trb_buff_len > urb->transfer_buffer_length)
trb_buff_len = urb->transfer_buffer_length;
first_trb = true;
@@ -2879,8 +2885,8 @@ static int count_isoc_trbs_needed(struct xhci_hcd *xhci,
addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
td_len = urb->iso_frame_desc[i].length;
- running_total = TRB_MAX_BUFF_SIZE -
- (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+ running_total = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
+ running_total &= TRB_MAX_BUFF_SIZE - 1;
if (running_total != 0)
num_trbs++;
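
The xhci-ring changes above switch the boundary arithmetic from (1 << TRB_MAX_BUFF_SHIFT) - 1 to TRB_MAX_BUFF_SIZE - 1 and, more importantly, mask running_total a second time so a buffer that starts exactly on a 64KB boundary counts zero bytes before the first boundary instead of a full 64KB. A worked version of the TRB count for a contiguous buffer; the constant mirrors the 64KB segment limit and the helper name is invented.

#include <stdint.h>
#include <stdio.h>

#define TRB_MAX_BUFF_SIZE (1U << 16)	/* 64KB per TRB */

/* How many TRBs does a contiguous buffer of 'len' bytes at 'addr' need? */
static unsigned int count_trbs(uint64_t addr, uint32_t len)
{
	unsigned int num_trbs = 0;
	uint32_t running_total;

	/* Bytes left before the first 64KB boundary; 0 if already aligned. */
	running_total = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
	running_total &= TRB_MAX_BUFF_SIZE - 1;
	if (running_total != 0 || len == 0)
		num_trbs++;

	while (running_total < len) {
		num_trbs++;
		running_total += TRB_MAX_BUFF_SIZE;
	}
	return num_trbs;
}

int main(void)
{
	printf("%u\n", count_trbs(0x10000, 0x20000));	/* 2: aligned start */
	printf("%u\n", count_trbs(0x0f000, 0x20000));	/* 3: crosses twice */
	return 0;
}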
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 34cf4e165877..2083fc2179b2 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -109,7 +109,7 @@ int xhci_halt(struct xhci_hcd *xhci)
/*
* Set the run bit and wait for the host to be running.
*/
-int xhci_start(struct xhci_hcd *xhci)
+static int xhci_start(struct xhci_hcd *xhci)
{
u32 temp;
int ret;
@@ -329,7 +329,7 @@ int xhci_init(struct usb_hcd *hcd)
#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
-void xhci_event_ring_work(unsigned long arg)
+static void xhci_event_ring_work(unsigned long arg)
{
unsigned long flags;
int temp;
@@ -473,7 +473,7 @@ int xhci_run(struct usb_hcd *hcd)
xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
xhci_writel(xhci, ER_IRQ_ENABLE(temp),
&xhci->ir_set->irq_pending);
- xhci_print_ir_set(xhci, xhci->ir_set, 0);
+ xhci_print_ir_set(xhci, 0);
if (NUM_TEST_NOOPS > 0)
doorbell = xhci_setup_one_noop(xhci);
@@ -528,7 +528,7 @@ void xhci_stop(struct usb_hcd *hcd)
temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
xhci_writel(xhci, ER_IRQ_DISABLE(temp),
&xhci->ir_set->irq_pending);
- xhci_print_ir_set(xhci, xhci->ir_set, 0);
+ xhci_print_ir_set(xhci, 0);
xhci_dbg(xhci, "cleaning up memory\n");
xhci_mem_cleanup(xhci);
@@ -755,7 +755,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
xhci_writel(xhci, ER_IRQ_DISABLE(temp),
&xhci->ir_set->irq_pending);
- xhci_print_ir_set(xhci, xhci->ir_set, 0);
+ xhci_print_ir_set(xhci, 0);
xhci_dbg(xhci, "cleaning up memory\n");
xhci_mem_cleanup(xhci);
@@ -857,7 +857,7 @@ unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
/* Returns 1 if the arguments are OK;
* returns 0 if this is a root hub; returns -EINVAL for NULL pointers.
*/
-int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
+static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev,
const char *func) {
struct xhci_hcd *xhci;
@@ -1693,7 +1693,7 @@ static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags));
}
-void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
+static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
unsigned int slot_id, unsigned int ep_index,
struct xhci_dequeue_state *deq_state)
{
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 7f236fd22015..7f127df6dd55 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1348,7 +1348,7 @@ static inline int xhci_link_trb_quirk(struct xhci_hcd *xhci)
}
/* xHCI debugging */
-void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int set_num);
+void xhci_print_ir_set(struct xhci_hcd *xhci, int set_num);
void xhci_print_registers(struct xhci_hcd *xhci);
void xhci_dbg_regs(struct xhci_hcd *xhci);
void xhci_print_run_regs(struct xhci_hcd *xhci);
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 54a8bd1047d6..c292d5c499e7 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -1864,6 +1864,7 @@ allocate_instance(struct device *dev,
INIT_LIST_HEAD(&musb->out_bulk);
hcd->uses_new_polling = 1;
+ hcd->has_tt = 1;
musb->vbuserr_retry = VBUSERR_RETRY_COUNT;
musb->a_wait_bcon = OTG_TIME_A_WAIT_BCON;
diff --git a/fs/afs/write.c b/fs/afs/write.c
index 15690bb1d3b5..789b3afb3423 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -140,6 +140,7 @@ int afs_write_begin(struct file *file, struct address_space *mapping,
candidate->first = candidate->last = index;
candidate->offset_first = from;
candidate->to_last = to;
+ INIT_LIST_HEAD(&candidate->link);
candidate->usage = 1;
candidate->state = AFS_WBACK_PENDING;
init_waitqueue_head(&candidate->waitq);
diff --git a/fs/aio.c b/fs/aio.c
index fc557a3be0a9..26869cde3953 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -239,15 +239,23 @@ static void __put_ioctx(struct kioctx *ctx)
call_rcu(&ctx->rcu_head, ctx_rcu_free);
}
-#define get_ioctx(kioctx) do { \
- BUG_ON(atomic_read(&(kioctx)->users) <= 0); \
- atomic_inc(&(kioctx)->users); \
-} while (0)
-#define put_ioctx(kioctx) do { \
- BUG_ON(atomic_read(&(kioctx)->users) <= 0); \
- if (unlikely(atomic_dec_and_test(&(kioctx)->users))) \
- __put_ioctx(kioctx); \
-} while (0)
+static inline void get_ioctx(struct kioctx *kioctx)
+{
+ BUG_ON(atomic_read(&kioctx->users) <= 0);
+ atomic_inc(&kioctx->users);
+}
+
+static inline int try_get_ioctx(struct kioctx *kioctx)
+{
+ return atomic_inc_not_zero(&kioctx->users);
+}
+
+static inline void put_ioctx(struct kioctx *kioctx)
+{
+ BUG_ON(atomic_read(&kioctx->users) <= 0);
+ if (unlikely(atomic_dec_and_test(&kioctx->users)))
+ __put_ioctx(kioctx);
+}
/* ioctx_alloc
* Allocates and initializes an ioctx. Returns an ERR_PTR if it failed.
@@ -601,8 +609,13 @@ static struct kioctx *lookup_ioctx(unsigned long ctx_id)
rcu_read_lock();
hlist_for_each_entry_rcu(ctx, n, &mm->ioctx_list, list) {
- if (ctx->user_id == ctx_id && !ctx->dead) {
- get_ioctx(ctx);
+ /*
+ * RCU protects us against accessing freed memory but
+ * we have to be careful not to get a reference when the
+ * reference count already dropped to 0 (ctx->dead test
+ * is unreliable because of races).
+ */
+ if (ctx->user_id == ctx_id && !ctx->dead && try_get_ioctx(ctx)){
ret = ctx;
break;
}
@@ -1629,6 +1642,23 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
goto out_put_req;
spin_lock_irq(&ctx->ctx_lock);
+ /*
+ * We could have raced with io_destroy() and are currently holding a
+ * reference to ctx which should be destroyed. We cannot submit IO
+ * since ctx gets freed as soon as io_submit() puts its reference. The
+ * check here is reliable: io_destroy() sets ctx->dead before waiting
+ * for outstanding IO and the barrier between these two is realized by
+ * unlock of mm->ioctx_lock and lock of ctx->ctx_lock. Analogously we
+ * increment ctx->reqs_active before checking for ctx->dead and the
+ * barrier is realized by unlock and lock of ctx->ctx_lock. Thus if we
+ * don't see ctx->dead set here, io_destroy() waits for our IO to
+ * finish.
+ */
+ if (ctx->dead) {
+ spin_unlock_irq(&ctx->ctx_lock);
+ ret = -EINVAL;
+ goto out_put_req;
+ }
aio_run_iocb(req);
if (!list_empty(&ctx->run_list)) {
/* drain the run list */
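The aio hunks turn the old get_ioctx/put_ioctx macros into inline functions and add try_get_ioctx(), built on atomic_inc_not_zero(), so the RCU lookup only takes a reference while the count is still non-zero. A userspace sketch of that "increment unless zero" pattern with C11 atomics follows; the struct and function names are stand-ins, not the kernel API:

#include <stdatomic.h>
#include <stdbool.h>

struct obj {
	atomic_int users;
};

/* Take a reference only if the object is still live (users > 0),
 * mirroring atomic_inc_not_zero(); returns false once the count
 * has already dropped to zero and the object is being freed. */
static bool try_get(struct obj *o)
{
	int old = atomic_load(&o->users);

	while (old > 0) {
		if (atomic_compare_exchange_weak(&o->users, &old, old + 1))
			return true;
	}
	return false;
}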
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 4fb8a3431531..889287019599 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -873,6 +873,11 @@ int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk)
ret = add_symlink(bdev->bd_part->holder_dir, &disk_to_dev(disk)->kobj);
if (ret)
goto out_del;
+ /*
+ * bdev could be deleted beneath us which would implicitly destroy
+ * the holder directory. Hold on to it.
+ */
+ kobject_get(bdev->bd_part->holder_dir);
list_add(&holder->list, &bdev->bd_holder_disks);
goto out_unlock;
@@ -909,6 +914,7 @@ void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk)
del_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj);
del_symlink(bdev->bd_part->holder_dir,
&disk_to_dev(disk)->kobj);
+ kobject_put(bdev->bd_part->holder_dir);
list_del_init(&holder->list);
kfree(holder);
}
@@ -922,14 +928,15 @@ EXPORT_SYMBOL_GPL(bd_unlink_disk_holder);
* flush_disk - invalidates all buffer-cache entries on a disk
*
* @bdev: struct block device to be flushed
+ * @kill_dirty: flag to guide handling of dirty inodes
*
* Invalidates all buffer-cache entries on a disk. It should be called
* when a disk has been changed -- either by a media change or online
* resize.
*/
-static void flush_disk(struct block_device *bdev)
+static void flush_disk(struct block_device *bdev, bool kill_dirty)
{
- if (__invalidate_device(bdev)) {
+ if (__invalidate_device(bdev, kill_dirty)) {
char name[BDEVNAME_SIZE] = "";
if (bdev->bd_disk)
@@ -966,7 +973,7 @@ void check_disk_size_change(struct gendisk *disk, struct block_device *bdev)
"%s: detected capacity change from %lld to %lld\n",
name, bdev_size, disk_size);
i_size_write(bdev->bd_inode, disk_size);
- flush_disk(bdev);
+ flush_disk(bdev, false);
}
}
EXPORT_SYMBOL(check_disk_size_change);
@@ -1019,7 +1026,7 @@ int check_disk_change(struct block_device *bdev)
if (!(events & DISK_EVENT_MEDIA_CHANGE))
return 0;
- flush_disk(bdev);
+ flush_disk(bdev, true);
if (bdops->revalidate_disk)
bdops->revalidate_disk(bdev->bd_disk);
return 1;
@@ -1600,7 +1607,7 @@ fail:
}
EXPORT_SYMBOL(lookup_bdev);
-int __invalidate_device(struct block_device *bdev)
+int __invalidate_device(struct block_device *bdev, bool kill_dirty)
{
struct super_block *sb = get_super(bdev);
int res = 0;
@@ -1613,7 +1620,7 @@ int __invalidate_device(struct block_device *bdev)
* hold).
*/
shrink_dcache_sb(sb);
- res = invalidate_inodes(sb);
+ res = invalidate_inodes(sb, kill_dirty);
drop_super(sb);
}
invalidate_bdev(bdev);
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 2c98b3af6052..6f820fa23df4 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -1254,6 +1254,7 @@ struct btrfs_root {
#define BTRFS_MOUNT_SPACE_CACHE (1 << 12)
#define BTRFS_MOUNT_CLEAR_CACHE (1 << 13)
#define BTRFS_MOUNT_USER_SUBVOL_RM_ALLOWED (1 << 14)
+#define BTRFS_MOUNT_ENOSPC_DEBUG (1 << 15)
#define btrfs_clear_opt(o, opt) ((o) &= ~BTRFS_MOUNT_##opt)
#define btrfs_set_opt(o, opt) ((o) |= BTRFS_MOUNT_##opt)
@@ -2218,6 +2219,8 @@ int btrfs_error_unpin_extent_range(struct btrfs_root *root,
u64 start, u64 end);
int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr,
u64 num_bytes);
+int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, u64 type);
/* ctree.c */
int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index f3c96fc01439..588ff9849873 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -5376,7 +5376,7 @@ again:
num_bytes, data, 1);
goto again;
}
- if (ret == -ENOSPC) {
+ if (ret == -ENOSPC && btrfs_test_opt(root, ENOSPC_DEBUG)) {
struct btrfs_space_info *sinfo;
sinfo = __find_space_info(root->fs_info, data);
@@ -8065,6 +8065,13 @@ out:
return ret;
}
+int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, u64 type)
+{
+ u64 alloc_flags = get_alloc_profile(root, type);
+ return do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, 1);
+}
+
/*
* helper to account the unused space of all the readonly block group in the
* list. takes mirrors into account.
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 92ac5192c518..fd3f172e94e6 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1433,12 +1433,13 @@ int extent_clear_unlock_delalloc(struct inode *inode,
*/
u64 count_range_bits(struct extent_io_tree *tree,
u64 *start, u64 search_end, u64 max_bytes,
- unsigned long bits)
+ unsigned long bits, int contig)
{
struct rb_node *node;
struct extent_state *state;
u64 cur_start = *start;
u64 total_bytes = 0;
+ u64 last = 0;
int found = 0;
if (search_end <= cur_start) {
@@ -1463,7 +1464,9 @@ u64 count_range_bits(struct extent_io_tree *tree,
state = rb_entry(node, struct extent_state, rb_node);
if (state->start > search_end)
break;
- if (state->end >= cur_start && (state->state & bits)) {
+ if (contig && found && state->start > last + 1)
+ break;
+ if (state->end >= cur_start && (state->state & bits) == bits) {
total_bytes += min(search_end, state->end) + 1 -
max(cur_start, state->start);
if (total_bytes >= max_bytes)
@@ -1472,6 +1475,9 @@ u64 count_range_bits(struct extent_io_tree *tree,
*start = state->start;
found = 1;
}
+ last = state->end;
+ } else if (contig && found) {
+ break;
}
node = rb_next(node);
if (!node)
@@ -2912,6 +2918,46 @@ out:
return sector;
}
+/*
+ * helper function for fiemap, which doesn't want to see any holes.
+ * This maps until we find something past 'last'
+ */
+static struct extent_map *get_extent_skip_holes(struct inode *inode,
+ u64 offset,
+ u64 last,
+ get_extent_t *get_extent)
+{
+ u64 sectorsize = BTRFS_I(inode)->root->sectorsize;
+ struct extent_map *em;
+ u64 len;
+
+ if (offset >= last)
+ return NULL;
+
+ while (1) {
+ len = last - offset;
+ if (len == 0)
+ break;
+ len = (len + sectorsize - 1) & ~(sectorsize - 1);
+ em = get_extent(inode, NULL, 0, offset, len, 0);
+ if (!em || IS_ERR(em))
+ return em;
+
+ /* if this isn't a hole return it */
+ if (!test_bit(EXTENT_FLAG_VACANCY, &em->flags) &&
+ em->block_start != EXTENT_MAP_HOLE) {
+ return em;
+ }
+
+ /* this is a hole, advance to the next extent */
+ offset = extent_map_end(em);
+ free_extent_map(em);
+ if (offset >= last)
+ break;
+ }
+ return NULL;
+}
+
int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
__u64 start, __u64 len, get_extent_t *get_extent)
{
@@ -2921,16 +2967,19 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
u32 flags = 0;
u32 found_type;
u64 last;
+ u64 last_for_get_extent = 0;
u64 disko = 0;
+ u64 isize = i_size_read(inode);
struct btrfs_key found_key;
struct extent_map *em = NULL;
struct extent_state *cached_state = NULL;
struct btrfs_path *path;
struct btrfs_file_extent_item *item;
int end = 0;
- u64 em_start = 0, em_len = 0;
+ u64 em_start = 0;
+ u64 em_len = 0;
+ u64 em_end = 0;
unsigned long emflags;
- int hole = 0;
if (len == 0)
return -EINVAL;
@@ -2940,6 +2989,10 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
return -ENOMEM;
path->leave_spinning = 1;
+ /*
+ * lookup the last file extent. We're not using i_size here
+ * because there might be preallocation past i_size
+ */
ret = btrfs_lookup_file_extent(NULL, BTRFS_I(inode)->root,
path, inode->i_ino, -1, 0);
if (ret < 0) {
@@ -2953,18 +3006,38 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
found_type = btrfs_key_type(&found_key);
- /* No extents, just return */
+ /* No extents, but there might be delalloc bits */
if (found_key.objectid != inode->i_ino ||
found_type != BTRFS_EXTENT_DATA_KEY) {
- btrfs_free_path(path);
- return 0;
+ /* have to trust i_size as the end */
+ last = (u64)-1;
+ last_for_get_extent = isize;
+ } else {
+ /*
+ * remember the start of the last extent. There are a
+ * bunch of different factors that go into the length of the
+ * extent, so it's much less complex to remember where it started
+ */
+ last = found_key.offset;
+ last_for_get_extent = last + 1;
}
- last = found_key.offset;
btrfs_free_path(path);
+ /*
+ * we might have some extents allocated but more delalloc past those
+ * extents. so, we trust isize unless the start of the last extent is
+ * beyond isize
+ */
+ if (last < isize) {
+ last = (u64)-1;
+ last_for_get_extent = isize;
+ }
+
lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0,
&cached_state, GFP_NOFS);
- em = get_extent(inode, NULL, 0, off, max - off, 0);
+
+ em = get_extent_skip_holes(inode, off, last_for_get_extent,
+ get_extent);
if (!em)
goto out;
if (IS_ERR(em)) {
@@ -2973,19 +3046,14 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
}
while (!end) {
- hole = 0;
- off = em->start + em->len;
+ off = extent_map_end(em);
if (off >= max)
end = 1;
- if (em->block_start == EXTENT_MAP_HOLE) {
- hole = 1;
- goto next;
- }
-
em_start = em->start;
em_len = em->len;
-
+ em_end = extent_map_end(em);
+ emflags = em->flags;
disko = 0;
flags = 0;
@@ -3004,37 +3072,29 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
flags |= FIEMAP_EXTENT_ENCODED;
-next:
- emflags = em->flags;
free_extent_map(em);
em = NULL;
- if (!end) {
- em = get_extent(inode, NULL, 0, off, max - off, 0);
- if (!em)
- goto out;
- if (IS_ERR(em)) {
- ret = PTR_ERR(em);
- goto out;
- }
- emflags = em->flags;
- }
-
- if (test_bit(EXTENT_FLAG_VACANCY, &emflags)) {
+ if ((em_start >= last) || em_len == (u64)-1 ||
+ (last == (u64)-1 && isize <= em_end)) {
flags |= FIEMAP_EXTENT_LAST;
end = 1;
}
- if (em_start == last) {
+ /* now scan forward to see if this is really the last extent. */
+ em = get_extent_skip_holes(inode, off, last_for_get_extent,
+ get_extent);
+ if (IS_ERR(em)) {
+ ret = PTR_ERR(em);
+ goto out;
+ }
+ if (!em) {
flags |= FIEMAP_EXTENT_LAST;
end = 1;
}
-
- if (!hole) {
- ret = fiemap_fill_next_extent(fieinfo, em_start, disko,
- em_len, flags);
- if (ret)
- goto out_free;
- }
+ ret = fiemap_fill_next_extent(fieinfo, em_start, disko,
+ em_len, flags);
+ if (ret)
+ goto out_free;
}
out_free:
free_extent_map(em);
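The rewritten extent_fiemap() loop above leans on get_extent_skip_holes() to step over holes and uses extent_map_end() for the next offset, so every mapping that reaches fiemap_fill_next_extent() is real data. A minimal standalone sketch of that skip-holes walk follows; the struct, the hole flag and the lookup callback are stand-ins rather than btrfs types:

#include <stddef.h>
#include <stdbool.h>

struct mapping {
	unsigned long long start;
	unsigned long long len;
	bool hole;
};

/* Walk mappings from 'off' up to 'last' and return the first one that
 * is not a hole; holes are skipped by advancing to their end offset. */
static struct mapping *first_non_hole(unsigned long long off,
				      unsigned long long last,
				      struct mapping *(*lookup)(unsigned long long off,
								unsigned long long len))
{
	while (off < last) {
		struct mapping *m = lookup(off, last - off);

		if (!m)
			return NULL;
		if (!m->hole)
			return m;		/* real extent: report this one */
		off = m->start + m->len;	/* hole: try the next mapping */
	}
	return NULL;
}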
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 7083cfafd061..9318dfefd59c 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -191,7 +191,7 @@ void extent_io_exit(void);
u64 count_range_bits(struct extent_io_tree *tree,
u64 *start, u64 search_end,
- u64 max_bytes, unsigned long bits);
+ u64 max_bytes, unsigned long bits, int contig);
void free_extent_state(struct extent_state *state);
int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index fb9bd7832b6d..0efdb65953c5 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -1913,7 +1913,7 @@ static int btrfs_clean_io_failures(struct inode *inode, u64 start)
private = 0;
if (count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
- (u64)-1, 1, EXTENT_DIRTY)) {
+ (u64)-1, 1, EXTENT_DIRTY, 0)) {
ret = get_state_private(&BTRFS_I(inode)->io_failure_tree,
start, &private_failure);
if (ret == 0) {
@@ -5280,6 +5280,128 @@ out:
return em;
}
+struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *page,
+ size_t pg_offset, u64 start, u64 len,
+ int create)
+{
+ struct extent_map *em;
+ struct extent_map *hole_em = NULL;
+ u64 range_start = start;
+ u64 end;
+ u64 found;
+ u64 found_end;
+ int err = 0;
+
+ em = btrfs_get_extent(inode, page, pg_offset, start, len, create);
+ if (IS_ERR(em))
+ return em;
+ if (em) {
+ /*
+ * if our em maps to a hole, there might
+ * actually be delalloc bytes behind it
+ */
+ if (em->block_start != EXTENT_MAP_HOLE)
+ return em;
+ else
+ hole_em = em;
+ }
+
+ /* check to see if we've wrapped (len == -1 or similar) */
+ end = start + len;
+ if (end < start)
+ end = (u64)-1;
+ else
+ end -= 1;
+
+ em = NULL;
+
+ /* ok, we didn't find anything, lets look for delalloc */
+ found = count_range_bits(&BTRFS_I(inode)->io_tree, &range_start,
+ end, len, EXTENT_DELALLOC, 1);
+ found_end = range_start + found;
+ if (found_end < range_start)
+ found_end = (u64)-1;
+
+ /*
+ * we didn't find anything useful, return
+ * the original results from get_extent()
+ */
+ if (range_start > end || found_end <= start) {
+ em = hole_em;
+ hole_em = NULL;
+ goto out;
+ }
+
+ /* adjust the range_start to make sure it doesn't
+ * go backwards from the start they passed in
+ */
+ range_start = max(start,range_start);
+ found = found_end - range_start;
+
+ if (found > 0) {
+ u64 hole_start = start;
+ u64 hole_len = len;
+
+ em = alloc_extent_map(GFP_NOFS);
+ if (!em) {
+ err = -ENOMEM;
+ goto out;
+ }
+ /*
+ * when btrfs_get_extent can't find anything it
+ * returns one huge hole
+ *
+ * make sure what it found really fits our range, and
+ * adjust to make sure it is based on the start from
+ * the caller
+ */
+ if (hole_em) {
+ u64 calc_end = extent_map_end(hole_em);
+
+ if (calc_end <= start || (hole_em->start > end)) {
+ free_extent_map(hole_em);
+ hole_em = NULL;
+ } else {
+ hole_start = max(hole_em->start, start);
+ hole_len = calc_end - hole_start;
+ }
+ }
+ em->bdev = NULL;
+ if (hole_em && range_start > hole_start) {
+ /* our hole starts before our delalloc, so we
+ * have to return just the parts of the hole
+ * that go until the delalloc starts
+ */
+ em->len = min(hole_len,
+ range_start - hole_start);
+ em->start = hole_start;
+ em->orig_start = hole_start;
+ /*
+ * don't adjust block start at all,
+ * it is fixed at EXTENT_MAP_HOLE
+ */
+ em->block_start = hole_em->block_start;
+ em->block_len = hole_len;
+ } else {
+ em->start = range_start;
+ em->len = found;
+ em->orig_start = range_start;
+ em->block_start = EXTENT_MAP_DELALLOC;
+ em->block_len = found;
+ }
+ } else if (hole_em) {
+ return hole_em;
+ }
+out:
+
+ free_extent_map(hole_em);
+ if (err) {
+ free_extent_map(em);
+ return ERR_PTR(err);
+ }
+ return em;
+}
+
static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
u64 start, u64 len)
{
@@ -6102,7 +6224,7 @@ out:
static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
__u64 start, __u64 len)
{
- return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent);
+ return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent_fiemap);
}
int btrfs_readpage(struct file *file, struct page *page)
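btrfs_get_extent_fiemap(), added above, looks for delalloc ranges behind a hole and then clamps what it found so the reported range never starts before the caller's offset, treating an overflowing end as "to the end of the address space". A small sketch of that clamping arithmetic with plain integers follows; the function and parameter names are illustrative only:

#include <stdint.h>

/* Clamp a found [*range_start, *range_start + found) run so it starts no
 * earlier than 'start'; a wrapped end is pinned to (u64)-1 as in the patch.
 * Returns the adjusted length. */
static uint64_t clamp_found_range(uint64_t start, uint64_t *range_start,
				  uint64_t found)
{
	uint64_t found_end = *range_start + found;

	if (found_end < *range_start)	/* overflow: run extends to the end */
		found_end = (uint64_t)-1;
	if (*range_start < start)	/* never go backwards from the caller */
		*range_start = start;
	return found_end - *range_start;
}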
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index be2d4f6aaa5e..5fdb2abc4fa7 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -1071,12 +1071,15 @@ static noinline int btrfs_ioctl_subvol_setflags(struct file *file,
if (copy_from_user(&flags, arg, sizeof(flags)))
return -EFAULT;
- if (flags & ~BTRFS_SUBVOL_CREATE_ASYNC)
+ if (flags & BTRFS_SUBVOL_CREATE_ASYNC)
return -EINVAL;
if (flags & ~BTRFS_SUBVOL_RDONLY)
return -EOPNOTSUPP;
+ if (!is_owner_or_cap(inode))
+ return -EACCES;
+
down_write(&root->fs_info->subvol_sem);
/* nothing to do */
@@ -1097,7 +1100,7 @@ static noinline int btrfs_ioctl_subvol_setflags(struct file *file,
goto out_reset;
}
- ret = btrfs_update_root(trans, root,
+ ret = btrfs_update_root(trans, root->fs_info->tree_root,
&root->root_key, &root->root_item);
btrfs_commit_transaction(trans, root);
diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c
index cc9b450399df..a178f5ebea78 100644
--- a/fs/btrfs/lzo.c
+++ b/fs/btrfs/lzo.c
@@ -280,6 +280,7 @@ static int lzo_decompress_biovec(struct list_head *ws,
unsigned long tot_out;
unsigned long tot_len;
char *buf;
+ bool may_late_unmap, need_unmap;
data_in = kmap(pages_in[0]);
tot_len = read_compress_length(data_in);
@@ -300,11 +301,13 @@ static int lzo_decompress_biovec(struct list_head *ws,
tot_in += in_len;
working_bytes = in_len;
+ may_late_unmap = need_unmap = false;
/* fast path: avoid using the working buffer */
if (in_page_bytes_left >= in_len) {
buf = data_in + in_offset;
bytes = in_len;
+ may_late_unmap = true;
goto cont;
}
@@ -329,14 +332,17 @@ cont:
if (working_bytes == 0 && tot_in >= tot_len)
break;
- kunmap(pages_in[page_in_index]);
- page_in_index++;
- if (page_in_index >= total_pages_in) {
+ if (page_in_index + 1 >= total_pages_in) {
ret = -1;
- data_in = NULL;
goto done;
}
- data_in = kmap(pages_in[page_in_index]);
+
+ if (may_late_unmap)
+ need_unmap = true;
+ else
+ kunmap(pages_in[page_in_index]);
+
+ data_in = kmap(pages_in[++page_in_index]);
in_page_bytes_left = PAGE_CACHE_SIZE;
in_offset = 0;
@@ -346,6 +352,8 @@ cont:
out_len = lzo1x_worst_compress(PAGE_CACHE_SIZE);
ret = lzo1x_decompress_safe(buf, in_len, workspace->buf,
&out_len);
+ if (need_unmap)
+ kunmap(pages_in[page_in_index - 1]);
if (ret != LZO_E_OK) {
printk(KERN_WARNING "btrfs decompress failed\n");
ret = -1;
@@ -363,8 +371,7 @@ cont:
break;
}
done:
- if (data_in)
- kunmap(pages_in[page_in_index]);
+ kunmap(pages_in[page_in_index]);
return ret;
}
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 0825e4ed9447..31ade5802ae8 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -3654,6 +3654,7 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
u32 item_size;
int ret;
int err = 0;
+ int progress = 0;
path = btrfs_alloc_path();
if (!path)
@@ -3666,9 +3667,10 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
}
while (1) {
+ progress++;
trans = btrfs_start_transaction(rc->extent_root, 0);
BUG_ON(IS_ERR(trans));
-
+restart:
if (update_backref_cache(trans, &rc->backref_cache)) {
btrfs_end_transaction(trans, rc->extent_root);
continue;
@@ -3781,6 +3783,15 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
}
}
}
+ if (trans && progress && err == -ENOSPC) {
+ ret = btrfs_force_chunk_alloc(trans, rc->extent_root,
+ rc->block_group->flags);
+ if (ret == 0) {
+ err = 0;
+ progress = 0;
+ goto restart;
+ }
+ }
btrfs_release_path(rc->extent_root, path);
clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY,
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index a004008f7d28..d39a9895d932 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -155,7 +155,8 @@ enum {
Opt_nossd, Opt_ssd_spread, Opt_thread_pool, Opt_noacl, Opt_compress,
Opt_compress_type, Opt_compress_force, Opt_compress_force_type,
Opt_notreelog, Opt_ratio, Opt_flushoncommit, Opt_discard,
- Opt_space_cache, Opt_clear_cache, Opt_user_subvol_rm_allowed, Opt_err,
+ Opt_space_cache, Opt_clear_cache, Opt_user_subvol_rm_allowed,
+ Opt_enospc_debug, Opt_err,
};
static match_table_t tokens = {
@@ -184,6 +185,7 @@ static match_table_t tokens = {
{Opt_space_cache, "space_cache"},
{Opt_clear_cache, "clear_cache"},
{Opt_user_subvol_rm_allowed, "user_subvol_rm_allowed"},
+ {Opt_enospc_debug, "enospc_debug"},
{Opt_err, NULL},
};
@@ -358,6 +360,9 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
case Opt_user_subvol_rm_allowed:
btrfs_set_opt(info->mount_opt, USER_SUBVOL_RM_ALLOWED);
break;
+ case Opt_enospc_debug:
+ btrfs_set_opt(info->mount_opt, ENOSPC_DEBUG);
+ break;
case Opt_err:
printk(KERN_INFO "btrfs: unrecognized mount option "
"'%s'\n", p);
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index af7dbca15276..dd13eb81ee40 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -1338,11 +1338,11 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
ret = btrfs_shrink_device(device, 0);
if (ret)
- goto error_brelse;
+ goto error_undo;
ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
if (ret)
- goto error_brelse;
+ goto error_undo;
device->in_fs_metadata = 0;
@@ -1416,6 +1416,13 @@ out:
mutex_unlock(&root->fs_info->volume_mutex);
mutex_unlock(&uuid_mutex);
return ret;
+error_undo:
+ if (device->writeable) {
+ list_add(&device->dev_alloc_list,
+ &root->fs_info->fs_devices->alloc_list);
+ root->fs_info->fs_devices->rw_devices++;
+ }
+ goto error_brelse;
}
/*
@@ -1633,7 +1640,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
device->dev_root = root->fs_info->dev_root;
device->bdev = bdev;
device->in_fs_metadata = 1;
- device->mode = 0;
+ device->mode = FMODE_EXCL;
set_blocksize(device->bdev, 4096);
if (seeding_dev) {
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 267d0ada4541..4a09af9e9a63 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -63,6 +63,13 @@
* cleanup path and it is also acquired by eventpoll_release_file()
* if a file has been pushed inside an epoll set and it is then
close()d without a previous call to epoll_ctl(EPOLL_CTL_DEL).
+ * It is also acquired when inserting an epoll fd onto another epoll
+ * fd. We do this so that we walk the epoll tree and ensure that this
+ * insertion does not create a cycle of epoll file descriptors, which
+ * could lead to deadlock. We need a global mutex to prevent two
+ * simultaneous inserts (A into B and B into A) from racing and
+ * constructing a cycle without either insert observing that it is
+ * going to.
* It is possible to drop the "ep->mtx" and to use the global
* mutex "epmutex" (together with "ep->lock") to have it working,
* but having "ep->mtx" will make the interface more scalable.
@@ -224,6 +231,9 @@ static long max_user_watches __read_mostly;
*/
static DEFINE_MUTEX(epmutex);
+/* Used to check for epoll file descriptor inclusion loops */
+static struct nested_calls poll_loop_ncalls;
+
/* Used for safe wake up implementation */
static struct nested_calls poll_safewake_ncalls;
@@ -1198,6 +1208,62 @@ retry:
return res;
}
+/**
+ * ep_loop_check_proc - Callback function to be passed to the @ep_call_nested()
+ * API, to verify that adding an epoll file inside another
+ * epoll structure, does not violate the constraints, in
+ * terms of closed loops, or too deep chains (which can
+ * result in excessive stack usage).
+ *
+ * @priv: Pointer to the epoll file to be currently checked.
+ * @cookie: Original cookie for this call. This is the top-of-the-chain epoll
+ * data structure pointer.
+ * @call_nests: Current depth of the @ep_call_nested() call stack.
+ *
+ * Returns: Returns zero if adding the epoll @file inside current epoll
+ * structure @ep does not violate the constraints, or -1 otherwise.
+ */
+static int ep_loop_check_proc(void *priv, void *cookie, int call_nests)
+{
+ int error = 0;
+ struct file *file = priv;
+ struct eventpoll *ep = file->private_data;
+ struct rb_node *rbp;
+ struct epitem *epi;
+
+ mutex_lock(&ep->mtx);
+ for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) {
+ epi = rb_entry(rbp, struct epitem, rbn);
+ if (unlikely(is_file_epoll(epi->ffd.file))) {
+ error = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
+ ep_loop_check_proc, epi->ffd.file,
+ epi->ffd.file->private_data, current);
+ if (error != 0)
+ break;
+ }
+ }
+ mutex_unlock(&ep->mtx);
+
+ return error;
+}
+
+/**
+ * ep_loop_check - Performs a check to verify that adding an epoll file (@file)
+ * another epoll file (represented by @ep) does not create
+ * closed loops or too deep chains.
+ *
+ * @ep: Pointer to the epoll private data structure.
+ * @file: Pointer to the epoll file to be checked.
+ *
+ * Returns: Returns zero if adding the epoll @file inside current epoll
+ * structure @ep does not violate the constraints, or -1 otherwise.
+ */
+static int ep_loop_check(struct eventpoll *ep, struct file *file)
+{
+ return ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
+ ep_loop_check_proc, file, ep, current);
+}
+
/*
* Open an eventpoll file descriptor.
*/
@@ -1246,6 +1312,7 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
struct epoll_event __user *, event)
{
int error;
+ int did_lock_epmutex = 0;
struct file *file, *tfile;
struct eventpoll *ep;
struct epitem *epi;
@@ -1287,6 +1354,25 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
*/
ep = file->private_data;
+ /*
+ * When we insert an epoll file descriptor, inside another epoll file
+ * descriptor, there is the chance of creating closed loops, which are
+ * better handled here than in more critical paths.
+ *
+ * We hold epmutex across the loop check and the insert in this case, in
+ * order to prevent two separate inserts from racing and each doing the
+ * insert "at the same time" such that ep_loop_check passes on both
+ * before either one does the insert, thereby creating a cycle.
+ */
+ if (unlikely(is_file_epoll(tfile) && op == EPOLL_CTL_ADD)) {
+ mutex_lock(&epmutex);
+ did_lock_epmutex = 1;
+ error = -ELOOP;
+ if (ep_loop_check(ep, tfile) != 0)
+ goto error_tgt_fput;
+ }
+
+
mutex_lock(&ep->mtx);
/*
@@ -1322,6 +1408,9 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
mutex_unlock(&ep->mtx);
error_tgt_fput:
+ if (unlikely(did_lock_epmutex))
+ mutex_unlock(&epmutex);
+
fput(tfile);
error_fput:
fput(file);
@@ -1441,6 +1530,12 @@ static int __init eventpoll_init(void)
EP_ITEM_COST;
BUG_ON(max_user_watches < 0);
+ /*
+ * Initialize the structure used to perform epoll file descriptor
+ * inclusion loops checks.
+ */
+ ep_nested_calls_init(&poll_loop_ncalls);
+
/* Initialize the structure used to perform safe poll wait head wake ups */
ep_nested_calls_init(&poll_safewake_ncalls);
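The eventpoll hunks add ep_loop_check(), a nested-call walk that rejects an EPOLL_CTL_ADD which would make an epoll file reachable from itself, and hold epmutex across both the check and the insert so two racing inserts cannot build a cycle between them. Assuming a kernel that carries this change, the rejected case can be demonstrated from userspace as follows (the second add is expected to fail with ELOOP):

#include <stdio.h>
#include <sys/epoll.h>

int main(void)
{
	int a = epoll_create1(0);
	int b = epoll_create1(0);
	struct epoll_event ev = { .events = EPOLLIN };

	ev.data.fd = b;
	epoll_ctl(a, EPOLL_CTL_ADD, b, &ev);		/* b inside a: allowed */

	ev.data.fd = a;
	if (epoll_ctl(b, EPOLL_CTL_ADD, a, &ev) < 0)	/* a inside b: a cycle */
		perror("epoll_ctl");			/* expected error: ELOOP */
	return 0;
}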
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index bfed8447ed80..83543b5ff941 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -1283,8 +1283,11 @@ static int fuse_do_setattr(struct dentry *entry, struct iattr *attr,
if (err)
return err;
- if ((attr->ia_valid & ATTR_OPEN) && fc->atomic_o_trunc)
- return 0;
+ if (attr->ia_valid & ATTR_OPEN) {
+ if (fc->atomic_o_trunc)
+ return 0;
+ file = NULL;
+ }
if (attr->ia_valid & ATTR_SIZE)
is_truncate = true;
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 95da1bc1c826..9e0832dbb1e3 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -86,18 +86,52 @@ struct fuse_file *fuse_file_get(struct fuse_file *ff)
return ff;
}
+static void fuse_release_async(struct work_struct *work)
+{
+ struct fuse_req *req;
+ struct fuse_conn *fc;
+ struct path path;
+
+ req = container_of(work, struct fuse_req, misc.release.work);
+ path = req->misc.release.path;
+ fc = get_fuse_conn(path.dentry->d_inode);
+
+ fuse_put_request(fc, req);
+ path_put(&path);
+}
+
static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req)
{
- path_put(&req->misc.release.path);
+ if (fc->destroy_req) {
+ /*
+ * If this is a fuseblk mount, then it's possible that
+ * releasing the path will result in releasing the
+ * super block and sending the DESTROY request. If
+ * the server is single threaded, this would hang.
+ * For this reason do the path_put() in a separate
+ * thread.
+ */
+ atomic_inc(&req->count);
+ INIT_WORK(&req->misc.release.work, fuse_release_async);
+ schedule_work(&req->misc.release.work);
+ } else {
+ path_put(&req->misc.release.path);
+ }
}
-static void fuse_file_put(struct fuse_file *ff)
+static void fuse_file_put(struct fuse_file *ff, bool sync)
{
if (atomic_dec_and_test(&ff->count)) {
struct fuse_req *req = ff->reserved_req;
- req->end = fuse_release_end;
- fuse_request_send_background(ff->fc, req);
+ if (sync) {
+ fuse_request_send(ff->fc, req);
+ path_put(&req->misc.release.path);
+ fuse_put_request(ff->fc, req);
+ } else {
+ req->end = fuse_release_end;
+ fuse_request_send_background(ff->fc, req);
+ }
kfree(ff);
}
}
@@ -219,8 +253,12 @@ void fuse_release_common(struct file *file, int opcode)
* Normally this will send the RELEASE request, however if
* some asynchronous READ or WRITE requests are outstanding,
* the sending will be delayed.
+ *
+ * Make the release synchronous if this is a fuseblk mount,
+ * synchronous RELEASE is allowed (and desirable) in this case
+ * because the server can be trusted not to screw up.
*/
- fuse_file_put(ff);
+ fuse_file_put(ff, ff->fc->destroy_req != NULL);
}
static int fuse_open(struct inode *inode, struct file *file)
@@ -558,7 +596,7 @@ static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req)
page_cache_release(page);
}
if (req->ff)
- fuse_file_put(req->ff);
+ fuse_file_put(req->ff, false);
}
static void fuse_send_readpages(struct fuse_req *req, struct file *file)
@@ -1137,7 +1175,7 @@ static ssize_t fuse_direct_write(struct file *file, const char __user *buf,
static void fuse_writepage_free(struct fuse_conn *fc, struct fuse_req *req)
{
__free_page(req->pages[0]);
- fuse_file_put(req->ff);
+ fuse_file_put(req->ff, false);
}
static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req)
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index ae5744a2f9e9..d4286947bc2c 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -21,6 +21,7 @@
#include <linux/rwsem.h>
#include <linux/rbtree.h>
#include <linux/poll.h>
+#include <linux/workqueue.h>
/** Max number of pages that can be used in a single read request */
#define FUSE_MAX_PAGES_PER_REQ 32
@@ -262,7 +263,10 @@ struct fuse_req {
/** Data for asynchronous requests */
union {
struct {
- struct fuse_release_in in;
+ union {
+ struct fuse_release_in in;
+ struct work_struct work;
+ };
struct path path;
} release;
struct fuse_init_in init_in;
diff --git a/fs/inode.c b/fs/inode.c
index 9c2b795ccc93..0647d80accf6 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -548,11 +548,14 @@ void evict_inodes(struct super_block *sb)
/**
* invalidate_inodes - attempt to free all inodes on a superblock
* @sb: superblock to operate on
+ * @kill_dirty: flag to guide handling of dirty inodes
*
* Attempts to free all inodes for a given superblock. If there were any
* busy inodes return a non-zero value, else zero.
+ * If @kill_dirty is set, discard dirty inodes too, otherwise treat
+ * them as busy.
*/
-int invalidate_inodes(struct super_block *sb)
+int invalidate_inodes(struct super_block *sb, bool kill_dirty)
{
int busy = 0;
struct inode *inode, *next;
@@ -564,6 +567,10 @@ int invalidate_inodes(struct super_block *sb)
list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE))
continue;
+ if (inode->i_state & I_DIRTY && !kill_dirty) {
+ busy = 1;
+ continue;
+ }
if (atomic_read(&inode->i_count)) {
busy = 1;
continue;
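invalidate_inodes() now takes a kill_dirty flag: dirty inodes count as busy and are kept unless the caller explicitly asks for them to be discarded, which is why check_disk_size_change() passes false through flush_disk() while check_disk_change() passes true. A standalone sketch of that keep-or-discard decision follows; the struct and field names are stand-ins for the real inode state bits:

#include <stdbool.h>

struct fake_inode {
	bool dirty;
	int refcount;
};

/* Returns nonzero ("busy") when the inode must be kept. Dirty inodes
 * are busy unless kill_dirty is set, mirroring the new I_DIRTY check. */
static int inode_is_busy(const struct fake_inode *inode, bool kill_dirty)
{
	if (inode->dirty && !kill_dirty)
		return 1;
	if (inode->refcount)
		return 1;
	return 0;
}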
diff --git a/fs/internal.h b/fs/internal.h
index 0663568b1247..9b976b57d7fe 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -112,4 +112,4 @@ extern void release_open_intent(struct nameidata *);
*/
extern int get_nr_dirty_inodes(void);
extern void evict_inodes(struct super_block *);
-extern int invalidate_inodes(struct super_block *);
+extern int invalidate_inodes(struct super_block *, bool);
diff --git a/fs/namespace.c b/fs/namespace.c
index 7b0b95371696..d1edf26025dc 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1244,7 +1244,7 @@ static int do_umount(struct vfsmount *mnt, int flags)
*/
br_write_lock(vfsmount_lock);
if (mnt_get_count(mnt) != 2) {
- br_write_lock(vfsmount_lock);
+ br_write_unlock(vfsmount_lock);
return -EBUSY;
}
br_write_unlock(vfsmount_lock);
diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h
index 43e56b97f9c0..6180da1e37e6 100644
--- a/fs/ocfs2/journal.h
+++ b/fs/ocfs2/journal.h
@@ -405,9 +405,9 @@ static inline int ocfs2_remove_extent_credits(struct super_block *sb)
ocfs2_quota_trans_credits(sb);
}
-/* data block for new dir/symlink, 2 for bitmap updates (bitmap fe +
- * bitmap block for the new bit) dx_root update for free list */
-#define OCFS2_DIR_LINK_ADDITIONAL_CREDITS (1 + 2 + 1)
+/* data block for new dir/symlink, allocation of directory block, dx_root
+ * update for free list */
+#define OCFS2_DIR_LINK_ADDITIONAL_CREDITS (1 + OCFS2_SUBALLOC_ALLOC + 1)
static inline int ocfs2_add_dir_index_credits(struct super_block *sb)
{
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index b5f9160e93e9..19ebc5aad391 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -3228,7 +3228,7 @@ static int ocfs2_make_clusters_writable(struct super_block *sb,
u32 num_clusters, unsigned int e_flags)
{
int ret, delete, index, credits = 0;
- u32 new_bit, new_len;
+ u32 new_bit, new_len, orig_num_clusters;
unsigned int set_len;
struct ocfs2_super *osb = OCFS2_SB(sb);
handle_t *handle;
@@ -3261,6 +3261,8 @@ static int ocfs2_make_clusters_writable(struct super_block *sb,
goto out;
}
+ orig_num_clusters = num_clusters;
+
while (num_clusters) {
ret = ocfs2_get_refcount_rec(ref_ci, context->ref_root_bh,
p_cluster, num_clusters,
@@ -3348,7 +3350,8 @@ static int ocfs2_make_clusters_writable(struct super_block *sb,
* in write-back mode.
*/
if (context->get_clusters == ocfs2_di_get_clusters) {
- ret = ocfs2_cow_sync_writeback(sb, context, cpos, num_clusters);
+ ret = ocfs2_cow_sync_writeback(sb, context, cpos,
+ orig_num_clusters);
if (ret)
mlog_errno(ret);
}
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 38f986d2447e..36c423fb0635 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -1316,7 +1316,7 @@ static int ocfs2_parse_options(struct super_block *sb,
struct mount_options *mopt,
int is_remount)
{
- int status;
+ int status, user_stack = 0;
char *p;
u32 tmp;
@@ -1459,6 +1459,15 @@ static int ocfs2_parse_options(struct super_block *sb,
memcpy(mopt->cluster_stack, args[0].from,
OCFS2_STACK_LABEL_LEN);
mopt->cluster_stack[OCFS2_STACK_LABEL_LEN] = '\0';
+ /*
+ * Open code the memcmp here as we don't have
+ * an osb to pass to
+ * ocfs2_userspace_stack().
+ */
+ if (memcmp(mopt->cluster_stack,
+ OCFS2_CLASSIC_CLUSTER_STACK,
+ OCFS2_STACK_LABEL_LEN))
+ user_stack = 1;
break;
case Opt_inode64:
mopt->mount_opt |= OCFS2_MOUNT_INODE64;
@@ -1514,13 +1523,16 @@ static int ocfs2_parse_options(struct super_block *sb,
}
}
- /* Ensure only one heartbeat mode */
- tmp = mopt->mount_opt & (OCFS2_MOUNT_HB_LOCAL | OCFS2_MOUNT_HB_GLOBAL |
- OCFS2_MOUNT_HB_NONE);
- if (hweight32(tmp) != 1) {
- mlog(ML_ERROR, "Invalid heartbeat mount options\n");
- status = 0;
- goto bail;
+ if (user_stack == 0) {
+ /* Ensure only one heartbeat mode */
+ tmp = mopt->mount_opt & (OCFS2_MOUNT_HB_LOCAL |
+ OCFS2_MOUNT_HB_GLOBAL |
+ OCFS2_MOUNT_HB_NONE);
+ if (hweight32(tmp) != 1) {
+ mlog(ML_ERROR, "Invalid heartbeat mount options\n");
+ status = 0;
+ goto bail;
+ }
}
status = 1;
diff --git a/fs/partitions/ldm.c b/fs/partitions/ldm.c
index 789c625c7aa5..b10e3540d5b7 100644
--- a/fs/partitions/ldm.c
+++ b/fs/partitions/ldm.c
@@ -251,6 +251,11 @@ static bool ldm_parse_vmdb (const u8 *data, struct vmdb *vm)
}
vm->vblk_size = get_unaligned_be32(data + 0x08);
+ if (vm->vblk_size == 0) {
+ ldm_error ("Illegal VBLK size");
+ return false;
+ }
+
vm->vblk_offset = get_unaligned_be32(data + 0x0C);
vm->last_vblk_seq = get_unaligned_be32(data + 0x04);
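The ldm.c hunk rejects a VBLK size of zero up front, since the size is later used as a divisor while walking the VMDB, so a malformed table now produces a clean parse error instead of a crash. A minimal sketch of validating a record size before dividing by it follows; the names and parameters are illustrative, not the partition parser's API:

#include <stdbool.h>
#include <stdint.h>

/* Reject a zero record size before it is ever used as a divisor. */
static bool records_in_region(uint32_t record_size, uint32_t region_len,
			      uint32_t *nr_records)
{
	if (record_size == 0)
		return false;			/* malformed metadata */
	*nr_records = region_len / record_size;
	return true;
}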
diff --git a/include/acpi/acoutput.h b/include/acpi/acoutput.h
index ef1cef77d32b..d7bd661bfae7 100644
--- a/include/acpi/acoutput.h
+++ b/include/acpi/acoutput.h
@@ -183,13 +183,19 @@
#if defined (ACPI_DEBUG_OUTPUT) || !defined (ACPI_NO_ERROR_MESSAGES)
/*
- * Module name is included in both debug and non-debug versions primarily for
- * error messages. The __FILE__ macro is not very useful for this, because it
- * often includes the entire pathname to the module
+ * The module name is used primarily for error and debug messages.
+ * The __FILE__ macro is not very useful for this, because it
+ * usually includes the entire pathname to the module making the
+ * debug output difficult to read.
*/
#define ACPI_MODULE_NAME(name) static const char ACPI_UNUSED_VAR _acpi_module_name[] = name;
#else
+/*
+ * For the no-debug and no-error-msg cases, we must at least define
+ * a null module name.
+ */
#define ACPI_MODULE_NAME(name)
+#define _acpi_module_name ""
#endif
/*
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index e46ec95a8ada..ec908540d346 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -47,7 +47,7 @@
/* Current ACPICA subsystem version in YYYYMMDD format */
-#define ACPI_CA_VERSION 0x20110112
+#define ACPI_CA_VERSION 0x20110211
#include "actypes.h"
#include "actbl.h"
diff --git a/include/acpi/actbl.h b/include/acpi/actbl.h
index 7e42bfee0e29..d41c94885211 100644
--- a/include/acpi/actbl.h
+++ b/include/acpi/actbl.h
@@ -343,4 +343,20 @@ struct acpi_table_desc {
#include <acpi/actbl1.h>
#include <acpi/actbl2.h>
+/*
+ * Sizes of the various flavors of FADT. We need to look closely
+ * at the FADT length because the version number essentially tells
+ * us nothing because of many BIOS bugs where the version does not
+ * match the expected length. In other words, the length of the
+ * FADT is the bottom line as to what the version really is.
+ *
+ * For reference, the values below are as follows:
+ * FADT V1 size: 0x74
+ * FADT V2 size: 0x84
+ * FADT V3+ size: 0xF4
+ */
+#define ACPI_FADT_V1_SIZE (u32) (ACPI_FADT_OFFSET (flags) + 4)
+#define ACPI_FADT_V2_SIZE (u32) (ACPI_FADT_OFFSET (reserved4[0]) + 3)
+#define ACPI_FADT_V3_SIZE (u32) (sizeof (struct acpi_table_fadt))
+
#endif /* __ACTBL_H__ */
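The new ACPI_FADT_V*_SIZE constants encode the rule stated in the comment above: the table length, not the revision byte, tells you which FADT layout the firmware really provided. A tiny sketch of classifying a FADT by its length field follows; the sizes are taken from the comment and the function is illustrative, not ACPICA code:

#include <stdint.h>

/* Map a FADT header length to the layout it can actually hold.
 * 0x74, 0x84 and 0xF4 are the V1, V2 and V3+ sizes listed above. */
static int fadt_layout_from_length(uint32_t length)
{
	if (length >= 0xF4)
		return 3;	/* V3+ layout */
	if (length >= 0x84)
		return 2;	/* V2 layout */
	if (length >= 0x74)
		return 1;	/* V1 layout */
	return 0;		/* too short to be a usable FADT */
}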
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 31b6188df221..b4bfe338ea0e 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -4,6 +4,8 @@
#ifndef __ASSEMBLY__
#ifdef CONFIG_MMU
+#include <linux/mm_types.h>
+
#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep,
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index fe29aadb129d..348843b80150 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -1101,7 +1101,7 @@ struct drm_device {
struct platform_device *platformdev; /**< Platform device structure */
struct drm_sg_mem *sg; /**< Scatter gather memory */
- int num_crtcs; /**< Number of CRTCs on this device */
+ unsigned int num_crtcs; /**< Number of CRTCs on this device */
void *dev_private; /**< device private data */
void *mm_private;
struct address_space *dev_mapping;
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 97d08d8a7de8..e38b50a4b9d2 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2140,7 +2140,7 @@ extern void check_disk_size_change(struct gendisk *disk,
struct block_device *bdev);
extern int revalidate_disk(struct gendisk *);
extern int check_disk_change(struct block_device *);
-extern int __invalidate_device(struct block_device *);
+extern int __invalidate_device(struct block_device *, bool);
extern int invalidate_partition(struct gendisk *, int);
#endif
unsigned long invalidate_mapping_pages(struct address_space *mapping,
diff --git a/include/linux/pm.h b/include/linux/pm.h
index dd9c7ab38270..21415cc91cbb 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -431,6 +431,8 @@ struct dev_pm_info {
struct list_head entry;
struct completion completion;
struct wakeup_source *wakeup;
+#else
+ unsigned int should_wakeup:1;
#endif
#ifdef CONFIG_PM_RUNTIME
struct timer_list suspend_timer;
diff --git a/include/linux/pm_wakeup.h b/include/linux/pm_wakeup.h
index 9cff00dd6b63..03a67db03d01 100644
--- a/include/linux/pm_wakeup.h
+++ b/include/linux/pm_wakeup.h
@@ -109,11 +109,6 @@ static inline bool device_can_wakeup(struct device *dev)
return dev->power.can_wakeup;
}
-static inline bool device_may_wakeup(struct device *dev)
-{
- return false;
-}
-
static inline struct wakeup_source *wakeup_source_create(const char *name)
{
return NULL;
@@ -134,24 +129,32 @@ static inline void wakeup_source_unregister(struct wakeup_source *ws) {}
static inline int device_wakeup_enable(struct device *dev)
{
- return -EINVAL;
+ dev->power.should_wakeup = true;
+ return 0;
}
static inline int device_wakeup_disable(struct device *dev)
{
+ dev->power.should_wakeup = false;
return 0;
}
-static inline int device_init_wakeup(struct device *dev, bool val)
+static inline int device_set_wakeup_enable(struct device *dev, bool enable)
{
- dev->power.can_wakeup = val;
- return val ? -EINVAL : 0;
+ dev->power.should_wakeup = enable;
+ return 0;
}
+static inline int device_init_wakeup(struct device *dev, bool val)
+{
+ device_set_wakeup_capable(dev, val);
+ device_set_wakeup_enable(dev, val);
+ return 0;
+}
-static inline int device_set_wakeup_enable(struct device *dev, bool enable)
+static inline bool device_may_wakeup(struct device *dev)
{
- return -EINVAL;
+ return dev->power.can_wakeup && dev->power.should_wakeup;
}
static inline void __pm_stay_awake(struct wakeup_source *ws) {}
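With wakeup support compiled out, the stubs above now track an explicit should_wakeup bit, so device_may_wakeup() reports "capable and enabled" instead of always returning false. A compact sketch of the resulting predicate follows, using plain booleans rather than the kernel's bitfields:

#include <stdbool.h>

struct power_state {
	bool can_wakeup;	/* capability, set by the driver */
	bool should_wakeup;	/* policy, toggled by enable/disable */
};

/* device_init_wakeup(dev, val) in the stubs sets both bits to val, so a
 * capable, enabled device now reports true even in this configuration. */
static bool may_wakeup(const struct power_state *p)
{
	return p->can_wakeup && p->should_wakeup;
}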
diff --git a/include/linux/rio_regs.h b/include/linux/rio_regs.h
index d63dcbaea169..9026b30238f3 100644
--- a/include/linux/rio_regs.h
+++ b/include/linux/rio_regs.h
@@ -14,10 +14,12 @@
#define LINUX_RIO_REGS_H
/*
- * In RapidIO, each device has a 2MB configuration space that is
+ * In RapidIO, each device has a 16MB configuration space that is
* accessed via maintenance transactions. Portions of configuration
* space are standardized and/or reserved.
*/
+#define RIO_MAINT_SPACE_SZ 0x1000000 /* 16MB of RapidIO maintenance space */
+
#define RIO_DEV_ID_CAR 0x00 /* [I] Device Identity CAR */
#define RIO_DEV_INFO_CAR 0x04 /* [I] Device Information CAR */
#define RIO_ASM_ID_CAR 0x08 /* [I] Assembly Identity CAR */
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index 8651556dbd52..d3ec89fb4122 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -172,6 +172,14 @@ void thermal_zone_device_update(struct thermal_zone_device *);
struct thermal_cooling_device *thermal_cooling_device_register(char *, void *,
const struct thermal_cooling_device_ops *);
void thermal_cooling_device_unregister(struct thermal_cooling_device *);
+
+#ifdef CONFIG_NET
extern int generate_netlink_event(u32 orig, enum events event);
+#else
+static inline int generate_netlink_event(u32 orig, enum events event)
+{
+ return 0;
+}
+#endif
#endif /* __THERMAL_H__ */
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 48b2761b5668..a3b5aff62606 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -600,4 +600,14 @@ int tick_broadcast_oneshot_active(void)
return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT;
}
+/*
+ * Check whether the broadcast device supports oneshot.
+ */
+bool tick_broadcast_oneshot_available(void)
+{
+ struct clock_event_device *bc = tick_broadcast_device.evtdev;
+
+ return bc ? bc->features & CLOCK_EVT_FEAT_ONESHOT : false;
+}
+
#endif
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 051bc80a0c43..ed228ef6f6b8 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -51,7 +51,11 @@ int tick_is_oneshot_available(void)
{
struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
- return dev && (dev->features & CLOCK_EVT_FEAT_ONESHOT);
+ if (!dev || !(dev->features & CLOCK_EVT_FEAT_ONESHOT))
+ return 0;
+ if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
+ return 1;
+ return tick_broadcast_oneshot_available();
}
/*
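tick_is_oneshot_available() now distinguishes a per-CPU clock event device that keeps running in deep idle from one flagged C3STOP, which can only be used in oneshot mode if the broadcast device can take over. A standalone sketch of that decision follows; the feature bits and names are stand-ins for the clockevents flags:

#include <stdbool.h>

#define FEAT_ONESHOT	0x1u	/* stand-in for CLOCK_EVT_FEAT_ONESHOT */
#define FEAT_C3STOP	0x2u	/* stand-in for CLOCK_EVT_FEAT_C3STOP */

/* Oneshot mode is usable if the local device supports it and either keeps
 * ticking in deep idle or a broadcast device is available to take over. */
static bool oneshot_available(unsigned int local_features,
			      bool broadcast_oneshot)
{
	if (!(local_features & FEAT_ONESHOT))
		return false;
	if (!(local_features & FEAT_C3STOP))
		return true;
	return broadcast_oneshot;
}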
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index 290eefbc1f60..f65d3a723a64 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -36,6 +36,7 @@ extern void tick_shutdown_broadcast_oneshot(unsigned int *cpup);
extern int tick_resume_broadcast_oneshot(struct clock_event_device *bc);
extern int tick_broadcast_oneshot_active(void);
extern void tick_check_oneshot_broadcast(int cpu);
+bool tick_broadcast_oneshot_available(void);
# else /* BROADCAST */
static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
{
@@ -46,6 +47,7 @@ static inline void tick_broadcast_switch_to_oneshot(void) { }
static inline void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { }
static inline int tick_broadcast_oneshot_active(void) { return 0; }
static inline void tick_check_oneshot_broadcast(int cpu) { }
+static inline bool tick_broadcast_oneshot_available(void) { return true; }
# endif /* !BROADCAST */
#else /* !ONESHOT */
@@ -76,6 +78,7 @@ static inline int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
return 0;
}
static inline int tick_broadcast_oneshot_active(void) { return 0; }
+static inline bool tick_broadcast_oneshot_available(void) { return false; }
#endif /* !TICK_ONESHOT */
/*
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index c47bbe11b804..93ca08b8a451 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -686,8 +686,10 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
/*
* Ensure that the address returned is DMA'ble
*/
- if (!dma_capable(dev, dev_addr, size))
- panic("map_single: bounce buffer is not DMA'ble");
+ if (!dma_capable(dev, dev_addr, size)) {
+ swiotlb_tbl_unmap_single(dev, map, size, dir);
+ dev_addr = swiotlb_virt_to_bus(dev, io_tlb_overflow_buffer);
+ }
return dev_addr;
}
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 368fc9d23610..49355a970be2 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1830,7 +1830,7 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
if (unlikely(pol->mode == MPOL_INTERLEAVE)) {
unsigned nid;
- nid = interleave_nid(pol, vma, addr, PAGE_SHIFT);
+ nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
mpol_cond_put(pol);
page = alloc_page_interleave(gfp, order, nid);
put_mems_allowed();
diff --git a/mm/migrate.c b/mm/migrate.c
index 766115253807..352de555626c 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1287,14 +1287,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
return -EPERM;
/* Find the mm_struct */
- read_lock(&tasklist_lock);
+ rcu_read_lock();
task = pid ? find_task_by_vpid(pid) : current;
if (!task) {
- read_unlock(&tasklist_lock);
+ rcu_read_unlock();
return -ESRCH;
}
mm = get_task_mm(task);
- read_unlock(&tasklist_lock);
+ rcu_read_unlock();
if (!mm)
return -EINVAL;
diff --git a/mm/mremap.c b/mm/mremap.c
index 9925b6391b80..1de98d492ddc 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -94,9 +94,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
*/
mapping = vma->vm_file->f_mapping;
spin_lock(&mapping->i_mmap_lock);
- if (new_vma->vm_truncate_count &&
- new_vma->vm_truncate_count != vma->vm_truncate_count)
- new_vma->vm_truncate_count = 0;
+ new_vma->vm_truncate_count = 0;
}
/*
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a873e61e312e..cdef1d4b4e47 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5376,10 +5376,9 @@ __count_immobile_pages(struct zone *zone, struct page *page, int count)
for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) {
unsigned long check = pfn + iter;
- if (!pfn_valid_within(check)) {
- iter++;
+ if (!pfn_valid_within(check))
continue;
- }
+
page = pfn_to_page(check);
if (!page_count(page)) {
if (PageBuddy(page))
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 07a458d72fa8..0341c5700e34 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1940,7 +1940,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
error = -EINVAL;
if (S_ISBLK(inode->i_mode)) {
- bdev = I_BDEV(inode);
+ bdev = bdgrab(I_BDEV(inode));
error = blkdev_get(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL,
sys_swapon);
if (error < 0) {
diff --git a/mm/truncate.c b/mm/truncate.c
index 49feb46e77b8..d64296be00d3 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -225,6 +225,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
next = start;
while (next <= end &&
pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
+ mem_cgroup_uncharge_start();
for (i = 0; i < pagevec_count(&pvec); i++) {
struct page *page = pvec.pages[i];
pgoff_t page_index = page->index;
@@ -247,6 +248,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
unlock_page(page);
}
pagevec_release(&pvec);
+ mem_cgroup_uncharge_end();
cond_resched();
}
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 17497d0cd8b9..6771ea70bfe7 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1841,16 +1841,28 @@ static inline bool should_continue_reclaim(struct zone *zone,
if (!(sc->reclaim_mode & RECLAIM_MODE_COMPACTION))
return false;
- /*
- * If we failed to reclaim and have scanned the full list, stop.
- * NOTE: Checking just nr_reclaimed would exit reclaim/compaction far
- * faster but obviously would be less likely to succeed
- * allocation. If this is desirable, use GFP_REPEAT to decide
- * if both reclaimed and scanned should be checked or just
- * reclaimed
- */
- if (!nr_reclaimed && !nr_scanned)
- return false;
+ /* Consider stopping depending on scan and reclaim activity */
+ if (sc->gfp_mask & __GFP_REPEAT) {
+ /*
+ * For __GFP_REPEAT allocations, stop reclaiming if the
+ * full LRU list has been scanned and we are still failing
+ * to reclaim pages. This full LRU scan is potentially
+ * expensive but a __GFP_REPEAT caller really wants to succeed
+ */
+ if (!nr_reclaimed && !nr_scanned)
+ return false;
+ } else {
+ /*
+ * For non-__GFP_REPEAT allocations which can presumably
+ * fail without consequence, stop if we failed to reclaim
+ * any pages from the last SWAP_CLUSTER_MAX number of
+ * pages that were scanned. This will return to the
+ * caller faster at the risk that reclaim/compaction and
+ * the resulting allocation attempt fail
+ */
+ if (!nr_reclaimed)
+ return false;
+ }
/*
* If we have not reclaimed enough pages for compaction and the
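The new comments spell out the reclaim policy: a __GFP_REPEAT caller keeps going until a full LRU scan reclaims nothing, while an ordinary caller stops as soon as one scan batch reclaims nothing. A standalone sketch of that decision follows; the flag value and names are illustrative, not the kernel's gfp definitions:

#include <stdbool.h>

#define GFP_REPEAT_FLAG 0x1u	/* stand-in for __GFP_REPEAT */

/* Should reclaim/compaction keep going, based on what the last pass did? */
static bool keep_reclaiming(unsigned int gfp_mask,
			    unsigned long nr_reclaimed,
			    unsigned long nr_scanned)
{
	if (gfp_mask & GFP_REPEAT_FLAG)
		/* persistent caller: stop only after a fruitless full scan */
		return !(nr_reclaimed == 0 && nr_scanned == 0);
	/* ordinary caller: stop as soon as a scan batch reclaims nothing */
	return nr_reclaimed != 0;
}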
diff --git a/net/rxrpc/ar-key.c b/net/rxrpc/ar-key.c
index 5ee16f0353fe..d763793d39de 100644
--- a/net/rxrpc/ar-key.c
+++ b/net/rxrpc/ar-key.c
@@ -89,11 +89,11 @@ static int rxrpc_instantiate_xdr_rxkad(struct key *key, const __be32 *xdr,
return ret;
plen -= sizeof(*token);
- token = kmalloc(sizeof(*token), GFP_KERNEL);
+ token = kzalloc(sizeof(*token), GFP_KERNEL);
if (!token)
return -ENOMEM;
- token->kad = kmalloc(plen, GFP_KERNEL);
+ token->kad = kzalloc(plen, GFP_KERNEL);
if (!token->kad) {
kfree(token);
return -ENOMEM;
@@ -731,10 +731,10 @@ static int rxrpc_instantiate(struct key *key, const void *data, size_t datalen)
goto error;
ret = -ENOMEM;
- token = kmalloc(sizeof(*token), GFP_KERNEL);
+ token = kzalloc(sizeof(*token), GFP_KERNEL);
if (!token)
goto error;
- token->kad = kmalloc(plen, GFP_KERNEL);
+ token->kad = kzalloc(plen, GFP_KERNEL);
if (!token->kad)
goto error_free;
diff --git a/sound/core/jack.c b/sound/core/jack.c
index 4902ae568730..53b53e97c896 100644
--- a/sound/core/jack.c
+++ b/sound/core/jack.c
@@ -141,6 +141,7 @@ int snd_jack_new(struct snd_card *card, const char *id, int type,
fail_input:
input_free_device(jack->input_dev);
+ kfree(jack->id);
kfree(jack);
return err;
}
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index dd7c5c12225d..4d5004e693f0 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -3114,6 +3114,8 @@ static struct snd_pci_quirk cxt5066_cfg_tbl[] = {
SND_PCI_QUIRK(0x1028, 0x0401, "Dell Vostro 1014", CXT5066_DELL_VOSTRO),
SND_PCI_QUIRK(0x1028, 0x0402, "Dell Vostro", CXT5066_DELL_VOSTRO),
SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD),
+ SND_PCI_QUIRK(0x1028, 0x050f, "Dell Inspiron", CXT5066_IDEAPAD),
+ SND_PCI_QUIRK(0x1028, 0x0510, "Dell Vostro", CXT5066_IDEAPAD),
SND_PCI_QUIRK(0x103c, 0x360b, "HP G60", CXT5066_HP_LAPTOP),
SND_PCI_QUIRK(0x1043, 0x13f3, "Asus A52J", CXT5066_ASUS),
SND_PCI_QUIRK(0x1043, 0x1643, "Asus K52JU", CXT5066_ASUS),
@@ -3937,6 +3939,8 @@ static struct hda_codec_preset snd_hda_preset_conexant[] = {
.patch = patch_cxt5066 },
{ .id = 0x14f15069, .name = "CX20585",
.patch = patch_cxt5066 },
+ { .id = 0x14f1506e, .name = "CX20590",
+ .patch = patch_cxt5066 },
{ .id = 0x14f15097, .name = "CX20631",
.patch = patch_conexant_auto },
{ .id = 0x14f15098, .name = "CX20632",
@@ -3963,6 +3967,7 @@ MODULE_ALIAS("snd-hda-codec-id:14f15066");
MODULE_ALIAS("snd-hda-codec-id:14f15067");
MODULE_ALIAS("snd-hda-codec-id:14f15068");
MODULE_ALIAS("snd-hda-codec-id:14f15069");
+MODULE_ALIAS("snd-hda-codec-id:14f1506e");
MODULE_ALIAS("snd-hda-codec-id:14f15097");
MODULE_ALIAS("snd-hda-codec-id:14f15098");
MODULE_ALIAS("snd-hda-codec-id:14f150a1");
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 9ea48b425d0b..bd7b123f6440 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -586,7 +586,12 @@ static hda_nid_t stac92hd83xxx_pin_nids[10] = {
0x0f, 0x10, 0x11, 0x1f, 0x20,
};
-static hda_nid_t stac92hd88xxx_pin_nids[10] = {
+static hda_nid_t stac92hd87xxx_pin_nids[6] = {
+ 0x0a, 0x0b, 0x0c, 0x0d,
+ 0x0f, 0x11,
+};
+
+static hda_nid_t stac92hd88xxx_pin_nids[8] = {
0x0a, 0x0b, 0x0c, 0x0d,
0x0f, 0x11, 0x1f, 0x20,
};
@@ -5430,12 +5435,13 @@ again:
switch (codec->vendor_id) {
case 0x111d76d1:
case 0x111d76d9:
+ case 0x111d76e5:
spec->dmic_nids = stac92hd87b_dmic_nids;
spec->num_dmics = stac92xx_connected_ports(codec,
stac92hd87b_dmic_nids,
STAC92HD87B_NUM_DMICS);
- spec->num_pins = ARRAY_SIZE(stac92hd88xxx_pin_nids);
- spec->pin_nids = stac92hd88xxx_pin_nids;
+ spec->num_pins = ARRAY_SIZE(stac92hd87xxx_pin_nids);
+ spec->pin_nids = stac92hd87xxx_pin_nids;
spec->mono_nid = 0;
spec->num_pwrs = 0;
break;
@@ -5443,6 +5449,7 @@ again:
case 0x111d7667:
case 0x111d7668:
case 0x111d7669:
+ case 0x111d76e3:
spec->num_dmics = stac92xx_connected_ports(codec,
stac92hd88xxx_dmic_nids,
STAC92HD88XXX_NUM_DMICS);
@@ -6387,6 +6394,8 @@ static struct hda_codec_preset snd_hda_preset_sigmatel[] = {
{ .id = 0x111d76cd, .name = "92HD89F2", .patch = patch_stac92hd73xx },
{ .id = 0x111d76ce, .name = "92HD89F1", .patch = patch_stac92hd73xx },
{ .id = 0x111d76e0, .name = "92HD91BXX", .patch = patch_stac92hd83xxx},
+ { .id = 0x111d76e3, .name = "92HD98BXX", .patch = patch_stac92hd83xxx},
+ { .id = 0x111d76e5, .name = "92HD99BXX", .patch = patch_stac92hd83xxx},
{ .id = 0x111d76e7, .name = "92HD90BXX", .patch = patch_stac92hd83xxx},
{} /* terminator */
};
diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
index a76c3260d941..63b0054200a8 100644
--- a/sound/pci/hda/patch_via.c
+++ b/sound/pci/hda/patch_via.c
@@ -567,7 +567,7 @@ static void via_auto_init_analog_input(struct hda_codec *codec)
hda_nid_t nid = cfg->inputs[i].pin;
if (spec->smart51_enabled && is_smart51_pins(spec, nid))
ctl = PIN_OUT;
- else if (i == AUTO_PIN_MIC)
+ else if (cfg->inputs[i].type == AUTO_PIN_MIC)
ctl = PIN_VREF50;
else
ctl = PIN_IN;
diff --git a/sound/soc/codecs/wm8903.h b/sound/soc/codecs/wm8903.h
index e8490f3edd03..e3ec2433b215 100644
--- a/sound/soc/codecs/wm8903.h
+++ b/sound/soc/codecs/wm8903.h
@@ -165,7 +165,7 @@ extern int wm8903_mic_detect(struct snd_soc_codec *codec,
#define WM8903_VMID_RES_50K 2
#define WM8903_VMID_RES_250K 3
-#define WM8903_VMID_RES_5K 4
+#define WM8903_VMID_RES_5K 6
/*
* R8 (0x08) - Analogue DAC 0
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
index a60b5dbf0154..ebaee5ca7434 100644
--- a/sound/soc/codecs/wm8994.c
+++ b/sound/soc/codecs/wm8994.c
@@ -3000,11 +3000,10 @@ static void wm8958_default_micdet(u16 status, void *data)
report |= SND_JACK_BTN_5;
done:
- snd_soc_jack_report(wm8994->micdet[0].jack,
+ snd_soc_jack_report(wm8994->micdet[0].jack, report,
SND_JACK_BTN_0 | SND_JACK_BTN_1 | SND_JACK_BTN_2 |
SND_JACK_BTN_3 | SND_JACK_BTN_4 | SND_JACK_BTN_5 |
- SND_JACK_MICROPHONE | SND_JACK_VIDEOOUT,
- report);
+ SND_JACK_MICROPHONE | SND_JACK_VIDEOOUT);
}
/**
diff --git a/sound/soc/codecs/wm_hubs.c b/sound/soc/codecs/wm_hubs.c
index 613df5db0b32..516892706063 100644
--- a/sound/soc/codecs/wm_hubs.c
+++ b/sound/soc/codecs/wm_hubs.c
@@ -674,6 +674,9 @@ SND_SOC_DAPM_OUTPUT("LINEOUT2N"),
};
static const struct snd_soc_dapm_route analogue_routes[] = {
+ { "MICBIAS1", NULL, "CLK_SYS" },
+ { "MICBIAS2", NULL, "CLK_SYS" },
+
{ "IN1L PGA", "IN1LP Switch", "IN1LP" },
{ "IN1L PGA", "IN1LN Switch", "IN1LN" },
diff --git a/sound/soc/imx/eukrea-tlv320.c b/sound/soc/imx/eukrea-tlv320.c
index e20c9e1457c0..1e9bccae4e80 100644
--- a/sound/soc/imx/eukrea-tlv320.c
+++ b/sound/soc/imx/eukrea-tlv320.c
@@ -79,7 +79,7 @@ static struct snd_soc_dai_link eukrea_tlv320_dai = {
.name = "tlv320aic23",
.stream_name = "TLV320AIC23",
.codec_dai_name = "tlv320aic23-hifi",
- .platform_name = "imx-pcm-audio.0",
+ .platform_name = "imx-fiq-pcm-audio.0",
.codec_name = "tlv320aic23-codec.0-001a",
.cpu_dai_name = "imx-ssi.0",
.ops = &eukrea_tlv320_snd_ops,
diff --git a/sound/soc/pxa/e740_wm9705.c b/sound/soc/pxa/e740_wm9705.c
index 28333e7d9c50..dc65650a6fa1 100644
--- a/sound/soc/pxa/e740_wm9705.c
+++ b/sound/soc/pxa/e740_wm9705.c
@@ -117,7 +117,7 @@ static struct snd_soc_dai_link e740_dai[] = {
{
.name = "AC97",
.stream_name = "AC97 HiFi",
- .cpu_dai_name = "pxa-ac97.0",
+ .cpu_dai_name = "pxa2xx-ac97",
.codec_dai_name = "wm9705-hifi",
.platform_name = "pxa-pcm-audio",
.codec_name = "wm9705-codec",
@@ -126,7 +126,7 @@ static struct snd_soc_dai_link e740_dai[] = {
{
.name = "AC97 Aux",
.stream_name = "AC97 Aux",
- .cpu_dai_name = "pxa-ac97.1",
+ .cpu_dai_name = "pxa2xx-ac97-aux",
.codec_dai_name = "wm9705-aux",
.platform_name = "pxa-pcm-audio",
.codec_name = "wm9705-codec",
diff --git a/sound/soc/pxa/e750_wm9705.c b/sound/soc/pxa/e750_wm9705.c
index 01bf31675c55..51897fcd911b 100644
--- a/sound/soc/pxa/e750_wm9705.c
+++ b/sound/soc/pxa/e750_wm9705.c
@@ -99,7 +99,7 @@ static struct snd_soc_dai_link e750_dai[] = {
{
.name = "AC97",
.stream_name = "AC97 HiFi",
- .cpu_dai_name = "pxa-ac97.0",
+ .cpu_dai_name = "pxa2xx-ac97",
.codec_dai_name = "wm9705-hifi",
.platform_name = "pxa-pcm-audio",
.codec_name = "wm9705-codec",
@@ -109,7 +109,7 @@ static struct snd_soc_dai_link e750_dai[] = {
{
.name = "AC97 Aux",
.stream_name = "AC97 Aux",
- .cpu_dai_name = "pxa-ac97.1",
+ .cpu_dai_name = "pxa2xx-ac97-aux",
.codec_dai_name ="wm9705-aux",
.platform_name = "pxa-pcm-audio",
.codec_name = "wm9705-codec",
diff --git a/sound/soc/pxa/e800_wm9712.c b/sound/soc/pxa/e800_wm9712.c
index c6a37c6ef23b..053ed208e59f 100644
--- a/sound/soc/pxa/e800_wm9712.c
+++ b/sound/soc/pxa/e800_wm9712.c
@@ -89,7 +89,7 @@ static struct snd_soc_dai_link e800_dai[] = {
{
.name = "AC97",
.stream_name = "AC97 HiFi",
- .cpu_dai_name = "pxa-ac97.0",
+ .cpu_dai_name = "pxa2xx-ac97",
.codec_dai_name = "wm9712-hifi",
.platform_name = "pxa-pcm-audio",
.codec_name = "wm9712-codec",
@@ -98,7 +98,7 @@ static struct snd_soc_dai_link e800_dai[] = {
{
.name = "AC97 Aux",
.stream_name = "AC97 Aux",
- .cpu_dai_name = "pxa-ac97.1",
+ .cpu_dai_name = "pxa2xx-ac97-aux",
.codec_dai_name ="wm9712-aux",
.platform_name = "pxa-pcm-audio",
.codec_name = "wm9712-codec",
diff --git a/sound/soc/pxa/em-x270.c b/sound/soc/pxa/em-x270.c
index fc22e6eefc98..b13a4252812d 100644
--- a/sound/soc/pxa/em-x270.c
+++ b/sound/soc/pxa/em-x270.c
@@ -37,7 +37,7 @@ static struct snd_soc_dai_link em_x270_dai[] = {
{
.name = "AC97",
.stream_name = "AC97 HiFi",
- .cpu_dai_name = "pxa-ac97.0",
+ .cpu_dai_name = "pxa2xx-ac97",
.codec_dai_name = "wm9712-hifi",
.platform_name = "pxa-pcm-audio",
.codec_name = "wm9712-codec",
@@ -45,7 +45,7 @@ static struct snd_soc_dai_link em_x270_dai[] = {
{
.name = "AC97 Aux",
.stream_name = "AC97 Aux",
- .cpu_dai_name = "pxa-ac97.1",
+ .cpu_dai_name = "pxa2xx-ac97-aux",
.codec_dai_name ="wm9712-aux",
.platform_name = "pxa-pcm-audio",
.codec_name = "wm9712-codec",
diff --git a/sound/soc/pxa/mioa701_wm9713.c b/sound/soc/pxa/mioa701_wm9713.c
index 0d70fc8c12bd..38ca6759907e 100644
--- a/sound/soc/pxa/mioa701_wm9713.c
+++ b/sound/soc/pxa/mioa701_wm9713.c
@@ -162,7 +162,7 @@ static struct snd_soc_dai_link mioa701_dai[] = {
{
.name = "AC97",
.stream_name = "AC97 HiFi",
- .cpu_dai_name = "pxa-ac97.0",
+ .cpu_dai_name = "pxa2xx-ac97",
.codec_dai_name = "wm9713-hifi",
.codec_name = "wm9713-codec",
.init = mioa701_wm9713_init,
@@ -172,7 +172,7 @@ static struct snd_soc_dai_link mioa701_dai[] = {
{
.name = "AC97 Aux",
.stream_name = "AC97 Aux",
- .cpu_dai_name = "pxa-ac97.1",
+ .cpu_dai_name = "pxa2xx-ac97-aux",
.codec_dai_name ="wm9713-aux",
.codec_name = "wm9713-codec",
.platform_name = "pxa-pcm-audio",
diff --git a/sound/soc/pxa/palm27x.c b/sound/soc/pxa/palm27x.c
index 857db96d4a4f..504e4004f004 100644
--- a/sound/soc/pxa/palm27x.c
+++ b/sound/soc/pxa/palm27x.c
@@ -132,7 +132,7 @@ static struct snd_soc_dai_link palm27x_dai[] = {
{
.name = "AC97 HiFi",
.stream_name = "AC97 HiFi",
- .cpu_dai_name = "pxa-ac97.0",
+ .cpu_dai_name = "pxa2xx-ac97",
.codec_dai_name = "wm9712-hifi",
.codec_name = "wm9712-codec",
.platform_name = "pxa-pcm-audio",
@@ -141,7 +141,7 @@ static struct snd_soc_dai_link palm27x_dai[] = {
{
.name = "AC97 Aux",
.stream_name = "AC97 Aux",
- .cpu_dai_name = "pxa-ac97.1",
+ .cpu_dai_name = "pxa2xx-ac97-aux",
.codec_dai_name = "wm9712-aux",
.codec_name = "wm9712-codec",
.platform_name = "pxa-pcm-audio",
diff --git a/sound/soc/pxa/tosa.c b/sound/soc/pxa/tosa.c
index f75804ef0897..4b6e5d608b42 100644
--- a/sound/soc/pxa/tosa.c
+++ b/sound/soc/pxa/tosa.c
@@ -219,7 +219,7 @@ static struct snd_soc_dai_link tosa_dai[] = {
{
.name = "AC97",
.stream_name = "AC97 HiFi",
- .cpu_dai_name = "pxa-ac97.0",
+ .cpu_dai_name = "pxa2xx-ac97",
.codec_dai_name = "wm9712-hifi",
.platform_name = "pxa-pcm-audio",
.codec_name = "wm9712-codec",
@@ -229,7 +229,7 @@ static struct snd_soc_dai_link tosa_dai[] = {
{
.name = "AC97 Aux",
.stream_name = "AC97 Aux",
- .cpu_dai_name = "pxa-ac97.1",
+ .cpu_dai_name = "pxa2xx-ac97-aux",
.codec_dai_name = "wm9712-aux",
.platform_name = "pxa-pcm-audio",
.codec_name = "wm9712-codec",
diff --git a/sound/soc/pxa/zylonite.c b/sound/soc/pxa/zylonite.c
index b222a7d72027..25bba108fea3 100644
--- a/sound/soc/pxa/zylonite.c
+++ b/sound/soc/pxa/zylonite.c
@@ -166,7 +166,7 @@ static struct snd_soc_dai_link zylonite_dai[] = {
.stream_name = "AC97 HiFi",
.codec_name = "wm9713-codec",
.platform_name = "pxa-pcm-audio",
- .cpu_dai_name = "pxa-ac97.0",
+ .cpu_dai_name = "pxa2xx-ac97",
.codec_name = "wm9713-hifi",
.init = zylonite_wm9713_init,
},
@@ -175,7 +175,7 @@ static struct snd_soc_dai_link zylonite_dai[] = {
.stream_name = "AC97 Aux",
.codec_name = "wm9713-codec",
.platform_name = "pxa-pcm-audio",
- .cpu_dai_name = "pxa-ac97.1",
+ .cpu_dai_name = "pxa2xx-ac97-aux",
.codec_name = "wm9713-aux",
},
{
diff --git a/sound/usb/card.c b/sound/usb/card.c
index 800f7cb4f251..c0f8270bc199 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -323,6 +323,7 @@ static int snd_usb_audio_create(struct usb_device *dev, int idx,
return -ENOMEM;
}
+ mutex_init(&chip->shutdown_mutex);
chip->index = idx;
chip->dev = dev;
chip->card = card;
@@ -531,6 +532,7 @@ static void snd_usb_audio_disconnect(struct usb_device *dev, void *ptr)
chip = ptr;
card = chip->card;
mutex_lock(&register_mutex);
+ mutex_lock(&chip->shutdown_mutex);
chip->shutdown = 1;
chip->num_interfaces--;
if (chip->num_interfaces <= 0) {
@@ -548,9 +550,11 @@ static void snd_usb_audio_disconnect(struct usb_device *dev, void *ptr)
snd_usb_mixer_disconnect(p);
}
usb_chip[chip->index] = NULL;
+ mutex_unlock(&chip->shutdown_mutex);
mutex_unlock(&register_mutex);
snd_card_free_when_closed(card);
} else {
+ mutex_unlock(&chip->shutdown_mutex);
mutex_unlock(&register_mutex);
}
}
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index 4132522ac90f..e3f680526cb5 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -361,6 +361,7 @@ static int snd_usb_hw_params(struct snd_pcm_substream *substream,
}
if (changed) {
+ mutex_lock(&subs->stream->chip->shutdown_mutex);
/* format changed */
snd_usb_release_substream_urbs(subs, 0);
/* influenced: period_bytes, channels, rate, format, */
@@ -368,6 +369,7 @@ static int snd_usb_hw_params(struct snd_pcm_substream *substream,
params_rate(hw_params),
snd_pcm_format_physical_width(params_format(hw_params)) *
params_channels(hw_params));
+ mutex_unlock(&subs->stream->chip->shutdown_mutex);
}
return ret;
@@ -385,8 +387,9 @@ static int snd_usb_hw_free(struct snd_pcm_substream *substream)
subs->cur_audiofmt = NULL;
subs->cur_rate = 0;
subs->period_bytes = 0;
- if (!subs->stream->chip->shutdown)
- snd_usb_release_substream_urbs(subs, 0);
+ mutex_lock(&subs->stream->chip->shutdown_mutex);
+ snd_usb_release_substream_urbs(subs, 0);
+ mutex_unlock(&subs->stream->chip->shutdown_mutex);
return snd_pcm_lib_free_vmalloc_buffer(substream);
}
diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h
index db3eb21627ee..6e66fffe87f5 100644
--- a/sound/usb/usbaudio.h
+++ b/sound/usb/usbaudio.h
@@ -36,6 +36,7 @@ struct snd_usb_audio {
struct snd_card *card;
u32 usb_id;
int shutdown;
+ struct mutex shutdown_mutex;
unsigned int txfr_quirk:1; /* Subframe boundaries on transfers */
int num_interfaces;
int num_suspended_intf;
diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c
index 746cf03cb05d..0ace786e83e0 100644
--- a/tools/perf/builtin-timechart.c
+++ b/tools/perf/builtin-timechart.c
@@ -264,9 +264,6 @@ pid_put_sample(int pid, int type, unsigned int cpu, u64 start, u64 end)
c->start_time = start;
if (p->start_time == 0 || p->start_time > start)
p->start_time = start;
-
- if (cpu > numcpus)
- numcpus = cpu;
}
#define MAX_CPUS 4096
@@ -511,6 +508,9 @@ static int process_sample_event(event_t *event __used,
if (!event_str)
return 0;
+ if (sample->cpu > numcpus)
+ numcpus = sample->cpu;
+
if (strcmp(event_str, "power:cpu_idle") == 0) {
struct power_processor_entry *ppe = (void *)te;
if (ppe->state == (u32)PWR_EVENT_EXIT)
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 32f4f1f2f6e4..df51560f16f7 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -585,6 +585,7 @@ int hist_entry__snprintf(struct hist_entry *self, char *s, size_t size,
{
struct sort_entry *se;
u64 period, total, period_sys, period_us, period_guest_sys, period_guest_us;
+ u64 nr_events;
const char *sep = symbol_conf.field_sep;
int ret;
@@ -593,6 +594,7 @@ int hist_entry__snprintf(struct hist_entry *self, char *s, size_t size,
if (pair_hists) {
period = self->pair ? self->pair->period : 0;
+ nr_events = self->pair ? self->pair->nr_events : 0;
total = pair_hists->stats.total_period;
period_sys = self->pair ? self->pair->period_sys : 0;
period_us = self->pair ? self->pair->period_us : 0;
@@ -600,6 +602,7 @@ int hist_entry__snprintf(struct hist_entry *self, char *s, size_t size,
period_guest_us = self->pair ? self->pair->period_guest_us : 0;
} else {
period = self->period;
+ nr_events = self->nr_events;
total = session_total;
period_sys = self->period_sys;
period_us = self->period_us;
@@ -640,9 +643,9 @@ int hist_entry__snprintf(struct hist_entry *self, char *s, size_t size,
if (symbol_conf.show_nr_samples) {
if (sep)
- ret += snprintf(s + ret, size - ret, "%c%" PRIu64, *sep, period);
+ ret += snprintf(s + ret, size - ret, "%c%" PRIu64, *sep, nr_events);
else
- ret += snprintf(s + ret, size - ret, "%11" PRIu64, period);
+ ret += snprintf(s + ret, size - ret, "%11" PRIu64, nr_events);
}
if (pair_hists) {
diff --git a/tools/perf/util/svghelper.c b/tools/perf/util/svghelper.c
index fb737fe9be91..96c866045d60 100644
--- a/tools/perf/util/svghelper.c
+++ b/tools/perf/util/svghelper.c
@@ -456,9 +456,9 @@ void svg_legenda(void)
return;
svg_legenda_box(0, "Running", "sample");
- svg_legenda_box(100, "Idle","rect.c1");
- svg_legenda_box(200, "Deeper Idle", "rect.c3");
- svg_legenda_box(350, "Deepest Idle", "rect.c6");
+ svg_legenda_box(100, "Idle","c1");
+ svg_legenda_box(200, "Deeper Idle", "c3");
+ svg_legenda_box(350, "Deepest Idle", "c6");
svg_legenda_box(550, "Sleeping", "process2");
svg_legenda_box(650, "Waiting for cpu", "waiting");
svg_legenda_box(800, "Blocked on IO", "blocked");