Diffstat (limited to '3.2.64/1059_linux-3.2.60.patch')
-rw-r--r--  3.2.64/1059_linux-3.2.60.patch  2964
1 files changed, 2964 insertions, 0 deletions
diff --git a/3.2.64/1059_linux-3.2.60.patch b/3.2.64/1059_linux-3.2.60.patch
new file mode 100644
index 0000000..c5a9389
--- /dev/null
+++ b/3.2.64/1059_linux-3.2.60.patch
@@ -0,0 +1,2964 @@
+diff --git a/Documentation/input/elantech.txt b/Documentation/input/elantech.txt
+index 5602eb7..e1ae127 100644
+--- a/Documentation/input/elantech.txt
++++ b/Documentation/input/elantech.txt
+@@ -504,9 +504,12 @@ byte 5:
+ * reg_10
+
+ bit 7 6 5 4 3 2 1 0
+- 0 0 0 0 0 0 0 A
++ 0 0 0 0 R F T A
+
+ A: 1 = enable absolute tracking
++ T: 1 = enable two finger mode auto correct
++ F: 1 = disable ABS Position Filter
++ R: 1 = enable real hardware resolution
+
+ 6.2 Native absolute mode 6 byte packet format
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
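The reg_10 bits documented above form a simple bitmask. A minimal sketch of that layout; the mask names are invented for illustration and are not taken from the driver source:

/* Illustrative names for the reg_10 bits described above. */
#define R10_ABSOLUTE		(1 << 0)	/* A: enable absolute tracking */
#define R10_TWO_FINGER_AC	(1 << 1)	/* T: enable two finger mode auto correct */
#define R10_NO_ABS_FILTER	(1 << 2)	/* F: disable ABS Position Filter */
#define R10_HW_RESOLUTION	(1 << 3)	/* R: enable real hardware resolution */

Under this reading, the 0x0b value the driver writes later in this patch is A | T | R, and its 0x03 fallback is A | T.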
+diff --git a/Makefile b/Makefile
+index 1be3414..317d5ea 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 59
++SUBLEVEL = 60
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/powerpc/lib/crtsavres.S b/arch/powerpc/lib/crtsavres.S
+index 1c893f0..21ecdf5 100644
+--- a/arch/powerpc/lib/crtsavres.S
++++ b/arch/powerpc/lib/crtsavres.S
+@@ -230,6 +230,87 @@ _GLOBAL(_rest32gpr_31_x)
+ mr 1,11
+ blr
+
++#ifdef CONFIG_ALTIVEC
++/* Called with r0 pointing just beyond the end of the vector save area. */
++
++_GLOBAL(_savevr_20)
++ li r11,-192
++ stvx vr20,r11,r0
++_GLOBAL(_savevr_21)
++ li r11,-176
++ stvx vr21,r11,r0
++_GLOBAL(_savevr_22)
++ li r11,-160
++ stvx vr22,r11,r0
++_GLOBAL(_savevr_23)
++ li r11,-144
++ stvx vr23,r11,r0
++_GLOBAL(_savevr_24)
++ li r11,-128
++ stvx vr24,r11,r0
++_GLOBAL(_savevr_25)
++ li r11,-112
++ stvx vr25,r11,r0
++_GLOBAL(_savevr_26)
++ li r11,-96
++ stvx vr26,r11,r0
++_GLOBAL(_savevr_27)
++ li r11,-80
++ stvx vr27,r11,r0
++_GLOBAL(_savevr_28)
++ li r11,-64
++ stvx vr28,r11,r0
++_GLOBAL(_savevr_29)
++ li r11,-48
++ stvx vr29,r11,r0
++_GLOBAL(_savevr_30)
++ li r11,-32
++ stvx vr30,r11,r0
++_GLOBAL(_savevr_31)
++ li r11,-16
++ stvx vr31,r11,r0
++ blr
++
++_GLOBAL(_restvr_20)
++ li r11,-192
++ lvx vr20,r11,r0
++_GLOBAL(_restvr_21)
++ li r11,-176
++ lvx vr21,r11,r0
++_GLOBAL(_restvr_22)
++ li r11,-160
++ lvx vr22,r11,r0
++_GLOBAL(_restvr_23)
++ li r11,-144
++ lvx vr23,r11,r0
++_GLOBAL(_restvr_24)
++ li r11,-128
++ lvx vr24,r11,r0
++_GLOBAL(_restvr_25)
++ li r11,-112
++ lvx vr25,r11,r0
++_GLOBAL(_restvr_26)
++ li r11,-96
++ lvx vr26,r11,r0
++_GLOBAL(_restvr_27)
++ li r11,-80
++ lvx vr27,r11,r0
++_GLOBAL(_restvr_28)
++ li r11,-64
++ lvx vr28,r11,r0
++_GLOBAL(_restvr_29)
++ li r11,-48
++ lvx vr29,r11,r0
++_GLOBAL(_restvr_30)
++ li r11,-32
++ lvx vr30,r11,r0
++_GLOBAL(_restvr_31)
++ li r11,-16
++ lvx vr31,r11,r0
++ blr
++
++#endif /* CONFIG_ALTIVEC */
++
+ #else /* CONFIG_PPC64 */
+
+ .globl _savegpr0_14
+@@ -353,6 +434,111 @@ _restgpr0_31:
+ mtlr r0
+ blr
+
++#ifdef CONFIG_ALTIVEC
++/* Called with r0 pointing just beyond the end of the vector save area. */
++
++.globl _savevr_20
++_savevr_20:
++ li r12,-192
++ stvx vr20,r12,r0
++.globl _savevr_21
++_savevr_21:
++ li r12,-176
++ stvx vr21,r12,r0
++.globl _savevr_22
++_savevr_22:
++ li r12,-160
++ stvx vr22,r12,r0
++.globl _savevr_23
++_savevr_23:
++ li r12,-144
++ stvx vr23,r12,r0
++.globl _savevr_24
++_savevr_24:
++ li r12,-128
++ stvx vr24,r12,r0
++.globl _savevr_25
++_savevr_25:
++ li r12,-112
++ stvx vr25,r12,r0
++.globl _savevr_26
++_savevr_26:
++ li r12,-96
++ stvx vr26,r12,r0
++.globl _savevr_27
++_savevr_27:
++ li r12,-80
++ stvx vr27,r12,r0
++.globl _savevr_28
++_savevr_28:
++ li r12,-64
++ stvx vr28,r12,r0
++.globl _savevr_29
++_savevr_29:
++ li r12,-48
++ stvx vr29,r12,r0
++.globl _savevr_30
++_savevr_30:
++ li r12,-32
++ stvx vr30,r12,r0
++.globl _savevr_31
++_savevr_31:
++ li r12,-16
++ stvx vr31,r12,r0
++ blr
++
++.globl _restvr_20
++_restvr_20:
++ li r12,-192
++ lvx vr20,r12,r0
++.globl _restvr_21
++_restvr_21:
++ li r12,-176
++ lvx vr21,r12,r0
++.globl _restvr_22
++_restvr_22:
++ li r12,-160
++ lvx vr22,r12,r0
++.globl _restvr_23
++_restvr_23:
++ li r12,-144
++ lvx vr23,r12,r0
++.globl _restvr_24
++_restvr_24:
++ li r12,-128
++ lvx vr24,r12,r0
++.globl _restvr_25
++_restvr_25:
++ li r12,-112
++ lvx vr25,r12,r0
++.globl _restvr_26
++_restvr_26:
++ li r12,-96
++ lvx vr26,r12,r0
++.globl _restvr_27
++_restvr_27:
++ li r12,-80
++ lvx vr27,r12,r0
++.globl _restvr_28
++_restvr_28:
++ li r12,-64
++ lvx vr28,r12,r0
++.globl _restvr_29
++_restvr_29:
++ li r12,-48
++ lvx vr29,r12,r0
++.globl _restvr_30
++_restvr_30:
++ li r12,-32
++ lvx vr30,r12,r0
++.globl _restvr_31
++_restvr_31:
++ li r12,-16
++ lvx vr31,r12,r0
++ blr
++
++#endif /* CONFIG_ALTIVEC */
++
+ #endif /* CONFIG_PPC64 */
+
+ #endif
+diff --git a/arch/x86/include/asm/hugetlb.h b/arch/x86/include/asm/hugetlb.h
+index 439a9ac..48fa391 100644
+--- a/arch/x86/include/asm/hugetlb.h
++++ b/arch/x86/include/asm/hugetlb.h
+@@ -51,6 +51,7 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+ static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep)
+ {
++ ptep_clear_flush(vma, addr, ptep);
+ }
+
+ static inline int huge_pte_none(pte_t pte)
+diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
+index 4ac4531..3e0ccbf 100644
+--- a/arch/x86/kernel/ldt.c
++++ b/arch/x86/kernel/ldt.c
+@@ -21,6 +21,8 @@
+ #include <asm/mmu_context.h>
+ #include <asm/syscalls.h>
+
++int sysctl_ldt16 = 0;
++
+ #ifdef CONFIG_SMP
+ static void flush_ldt(void *current_mm)
+ {
+@@ -235,7 +237,7 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
+ * IRET leaking the high bits of the kernel stack address.
+ */
+ #ifdef CONFIG_X86_64
+- if (!ldt_info.seg_32bit) {
++ if (!ldt_info.seg_32bit && !sysctl_ldt16) {
+ error = -EINVAL;
+ goto out_unlock;
+ }
+diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
+index 468d591..51bdc05 100644
+--- a/arch/x86/vdso/vdso32-setup.c
++++ b/arch/x86/vdso/vdso32-setup.c
+@@ -41,6 +41,7 @@ enum {
+ #ifdef CONFIG_X86_64
+ #define vdso_enabled sysctl_vsyscall32
+ #define arch_setup_additional_pages syscall32_setup_pages
++extern int sysctl_ldt16;
+ #endif
+
+ /*
+@@ -388,6 +389,13 @@ static ctl_table abi_table2[] = {
+ .mode = 0644,
+ .proc_handler = proc_dointvec
+ },
++ {
++ .procname = "ldt16",
++ .data = &sysctl_ldt16,
++ .maxlen = sizeof(int),
++ .mode = 0644,
++ .proc_handler = proc_dointvec
++ },
+ {}
+ };
+
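The two hunks above expose the new flag as /proc/sys/abi/ldt16 on x86-64. A minimal user-space sketch of flipping it, assuming a kernel with this patch applied (run as root):

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/abi/ldt16", "w");

	if (!f) {
		perror("/proc/sys/abi/ldt16");
		return 1;
	}
	fputs("1\n", f);	/* re-allow 16-bit LDT segments */
	return fclose(f) ? 1 : 0;
}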
+diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
+index 8176b82..3923064 100644
+--- a/drivers/acpi/ec.c
++++ b/drivers/acpi/ec.c
+@@ -70,6 +70,8 @@ enum ec_command {
+ #define ACPI_EC_DELAY 500 /* Wait 500ms max. during EC ops */
+ #define ACPI_EC_UDELAY_GLK 1000 /* Wait 1ms max. to get global lock */
+ #define ACPI_EC_MSI_UDELAY 550 /* Wait 550us for MSI EC */
++#define ACPI_EC_CLEAR_MAX 100 /* Maximum number of events to query
++ * when trying to clear the EC */
+
+ enum {
+ EC_FLAGS_QUERY_PENDING, /* Query is pending */
+@@ -123,6 +125,7 @@ EXPORT_SYMBOL(first_ec);
+ static int EC_FLAGS_MSI; /* Out-of-spec MSI controller */
+ static int EC_FLAGS_VALIDATE_ECDT; /* ASUStec ECDTs need to be validated */
+ static int EC_FLAGS_SKIP_DSDT_SCAN; /* Not all BIOS survive early DSDT scan */
++static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */
+
+ /* --------------------------------------------------------------------------
+ Transaction Management
+@@ -203,13 +206,13 @@ unlock:
+ spin_unlock_irqrestore(&ec->curr_lock, flags);
+ }
+
+-static int acpi_ec_sync_query(struct acpi_ec *ec);
++static int acpi_ec_sync_query(struct acpi_ec *ec, u8 *data);
+
+ static int ec_check_sci_sync(struct acpi_ec *ec, u8 state)
+ {
+ if (state & ACPI_EC_FLAG_SCI) {
+ if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags))
+- return acpi_ec_sync_query(ec);
++ return acpi_ec_sync_query(ec, NULL);
+ }
+ return 0;
+ }
+@@ -449,6 +452,27 @@ int ec_transaction(u8 command,
+
+ EXPORT_SYMBOL(ec_transaction);
+
++/*
++ * Process _Q events that might have accumulated in the EC.
++ * Run with locked ec mutex.
++ */
++static void acpi_ec_clear(struct acpi_ec *ec)
++{
++ int i, status;
++ u8 value = 0;
++
++ for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) {
++ status = acpi_ec_sync_query(ec, &value);
++ if (status || !value)
++ break;
++ }
++
++ if (unlikely(i == ACPI_EC_CLEAR_MAX))
++ pr_warn("Warning: Maximum of %d stale EC events cleared\n", i);
++ else
++ pr_info("%d stale EC events cleared\n", i);
++}
++
+ void acpi_ec_block_transactions(void)
+ {
+ struct acpi_ec *ec = first_ec;
+@@ -472,6 +496,10 @@ void acpi_ec_unblock_transactions(void)
+ mutex_lock(&ec->lock);
+ /* Allow transactions to be carried out again */
+ clear_bit(EC_FLAGS_BLOCKED, &ec->flags);
++
++ if (EC_FLAGS_CLEAR_ON_RESUME)
++ acpi_ec_clear(ec);
++
+ mutex_unlock(&ec->lock);
+ }
+
+@@ -561,13 +589,18 @@ static void acpi_ec_run(void *cxt)
+ kfree(handler);
+ }
+
+-static int acpi_ec_sync_query(struct acpi_ec *ec)
++static int acpi_ec_sync_query(struct acpi_ec *ec, u8 *data)
+ {
+ u8 value = 0;
+ int status;
+ struct acpi_ec_query_handler *handler, *copy;
+- if ((status = acpi_ec_query_unlocked(ec, &value)))
++
++ status = acpi_ec_query_unlocked(ec, &value);
++ if (data)
++ *data = value;
++ if (status)
+ return status;
++
+ list_for_each_entry(handler, &ec->list, node) {
+ if (value == handler->query_bit) {
+ /* have custom handler for this bit */
+@@ -590,7 +623,7 @@ static void acpi_ec_gpe_query(void *ec_cxt)
+ if (!ec)
+ return;
+ mutex_lock(&ec->lock);
+- acpi_ec_sync_query(ec);
++ acpi_ec_sync_query(ec, NULL);
+ mutex_unlock(&ec->lock);
+ }
+
+@@ -828,6 +861,13 @@ static int acpi_ec_add(struct acpi_device *device)
+
+ /* EC is fully operational, allow queries */
+ clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
++
++ /* Clear stale _Q events if hardware might require that */
++ if (EC_FLAGS_CLEAR_ON_RESUME) {
++ mutex_lock(&ec->lock);
++ acpi_ec_clear(ec);
++ mutex_unlock(&ec->lock);
++ }
+ return ret;
+ }
+
+@@ -929,6 +969,30 @@ static int ec_enlarge_storm_threshold(const struct dmi_system_id *id)
+ return 0;
+ }
+
++/*
++ * On some hardware it is necessary to clear events accumulated by the EC during
++ * sleep. If too many events accumulate, these ECs stop reporting GPEs until
++ * they are manually polled (e.g. Samsung Series 5/9 notebooks).
++ *
++ * https://bugzilla.kernel.org/show_bug.cgi?id=44161
++ *
++ * Ideally, the EC should also be instructed NOT to accumulate events during
++ * sleep (which Windows seems to do somehow), but the interface to control this
++ * behaviour is not known at this time.
++ *
++ * Models known to be affected are Samsung 530Uxx/535Uxx/540Uxx/550Pxx/900Xxx,
++ * however it is very likely that other Samsung models are affected.
++ *
++ * On systems which don't accumulate _Q events during sleep, this extra check
++ * should be harmless.
++ */
++static int ec_clear_on_resume(const struct dmi_system_id *id)
++{
++ pr_debug("Detected system needing EC poll on resume.\n");
++ EC_FLAGS_CLEAR_ON_RESUME = 1;
++ return 0;
++}
++
+ static struct dmi_system_id __initdata ec_dmi_table[] = {
+ {
+ ec_skip_dsdt_scan, "Compal JFL92", {
+@@ -968,6 +1032,9 @@ static struct dmi_system_id __initdata ec_dmi_table[] = {
+ ec_validate_ecdt, "ASUS hardware", {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTek Computer Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "L4R"),}, NULL},
++ {
++ ec_clear_on_resume, "Samsung hardware", {
++ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD.")}, NULL},
+ {},
+ };
+
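acpi_ec_clear() above is a bounded drain loop: query while events are pending, but give up after ACPI_EC_CLEAR_MAX iterations so misbehaving firmware cannot stall the kernel indefinitely. A self-contained model of the same pattern, with a fake event queue and illustrative names:

#include <stdio.h>

#define CLEAR_MAX 100

static int pending = 142;		/* pretend 142 stale events are queued */

static int query(unsigned char *val)	/* returns 0 on success, fills *val */
{
	*val = pending ? (unsigned char)pending-- : 0;
	return 0;
}

int main(void)
{
	unsigned char value;
	int i;

	for (i = 0; i < CLEAR_MAX; i++)
		if (query(&value) || !value)
			break;		/* error, or queue drained */

	if (i == CLEAR_MAX)
		printf("gave up after clearing %d events\n", i);
	else
		printf("%d stale events cleared\n", i);
	return 0;
}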
+diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
+index f8f41e0..89b30f3 100644
+--- a/drivers/atm/ambassador.c
++++ b/drivers/atm/ambassador.c
+@@ -802,7 +802,7 @@ static void fill_rx_pool (amb_dev * dev, unsigned char pool,
+ }
+ // cast needed as there is no %? for pointer differences
+ PRINTD (DBG_SKB, "allocated skb at %p, head %p, area %li",
+- skb, skb->head, (long) (skb_end_pointer(skb) - skb->head));
++ skb, skb->head, (long) skb_end_offset(skb));
+ rx.handle = virt_to_bus (skb);
+ rx.host_address = cpu_to_be32 (virt_to_bus (skb->data));
+ if (rx_give (dev, &rx, pool))
+diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
+index b0e75ce..81845fa 100644
+--- a/drivers/atm/idt77252.c
++++ b/drivers/atm/idt77252.c
+@@ -1258,7 +1258,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
+ tail = readl(SAR_REG_RAWCT);
+
+ pci_dma_sync_single_for_cpu(card->pcidev, IDT77252_PRV_PADDR(queue),
+- skb_end_pointer(queue) - queue->head - 16,
++ skb_end_offset(queue) - 16,
+ PCI_DMA_FROMDEVICE);
+
+ while (head != tail) {
+diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
+index 3539f9b..6fe003a 100644
+--- a/drivers/bluetooth/ath3k.c
++++ b/drivers/bluetooth/ath3k.c
+@@ -81,6 +81,7 @@ static struct usb_device_id ath3k_table[] = {
+ { USB_DEVICE(0x04CA, 0x3004) },
+ { USB_DEVICE(0x04CA, 0x3005) },
+ { USB_DEVICE(0x04CA, 0x3006) },
++ { USB_DEVICE(0x04CA, 0x3007) },
+ { USB_DEVICE(0x04CA, 0x3008) },
+ { USB_DEVICE(0x13d3, 0x3362) },
+ { USB_DEVICE(0x0CF3, 0xE004) },
+@@ -123,6 +124,7 @@ static struct usb_device_id ath3k_blist_tbl[] = {
+ { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index f18b5a2..dddcb1d 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -152,6 +152,7 @@ static struct usb_device_id blacklist_table[] = {
+ { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
+diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c
+index 7e2d54b..9b8d231 100644
+--- a/drivers/crypto/caam/error.c
++++ b/drivers/crypto/caam/error.c
+@@ -16,9 +16,13 @@
+ char *tmp; \
+ \
+ tmp = kmalloc(sizeof(format) + max_alloc, GFP_ATOMIC); \
+- sprintf(tmp, format, param); \
+- strcat(str, tmp); \
+- kfree(tmp); \
++ if (likely(tmp)) { \
++ sprintf(tmp, format, param); \
++ strcat(str, tmp); \
++ kfree(tmp); \
++ } else { \
++ strcat(str, "kmalloc failure in SPRINTFCAT"); \
++ } \
+ }
+
+ static void report_jump_idx(u32 status, char *outstr)
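The SPRINTFCAT fix above applies the standard rule that an allocation result must be checked before use; the old macro passed the kmalloc() return value to sprintf() unconditionally. A user-space analogue of the corrected pattern (names and sizes are illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Append printf-formatted text to str, tolerating allocation failure. */
static void sprintfcat(char *str, const char *fmt, unsigned param, size_t max)
{
	char *tmp = malloc(strlen(fmt) + max);

	if (tmp) {
		sprintf(tmp, fmt, param);
		strcat(str, tmp);
		free(tmp);
	} else {
		strcat(str, "allocation failure in sprintfcat");
	}
}

int main(void)
{
	char out[64] = "status: ";

	sprintfcat(out, "0x%08x", 0xdeadbeefu, 16);
	puts(out);	/* status: 0xdeadbeef */
	return 0;
}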
+diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
+index 9a353c2..9b01145 100644
+--- a/drivers/dma/mv_xor.c
++++ b/drivers/dma/mv_xor.c
+@@ -218,12 +218,10 @@ static void mv_set_mode(struct mv_xor_chan *chan,
+
+ static void mv_chan_activate(struct mv_xor_chan *chan)
+ {
+- u32 activation;
+-
+ dev_dbg(chan->device->common.dev, " activate chan.\n");
+- activation = __raw_readl(XOR_ACTIVATION(chan));
+- activation |= 0x1;
+- __raw_writel(activation, XOR_ACTIVATION(chan));
++
++ /* writel ensures all descriptors are flushed before activation */
++ writel(BIT(0), XOR_ACTIVATION(chan));
+ }
+
+ static char mv_chan_is_busy(struct mv_xor_chan *chan)
+diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
+index 3df56c7..5ee8cca 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
++++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
+@@ -332,9 +332,6 @@ bool nouveau_acpi_rom_supported(struct pci_dev *pdev)
+ acpi_status status;
+ acpi_handle dhandle, rom_handle;
+
+- if (!nouveau_dsm_priv.dsm_detected && !nouveau_dsm_priv.optimus_detected)
+- return false;
+-
+ dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
+ if (!dhandle)
+ return false;
+diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
+index d306cc8..ccf324b 100644
+--- a/drivers/gpu/drm/radeon/radeon_bios.c
++++ b/drivers/gpu/drm/radeon/radeon_bios.c
+@@ -173,6 +173,20 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev)
+ }
+ }
+
++ if (!found) {
++ while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) {
++ dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
++ if (!dhandle)
++ continue;
++
++ status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
++ if (!ACPI_FAILURE(status)) {
++ found = true;
++ break;
++ }
++ }
++ }
++
+ if (!found)
+ return false;
+
+diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
+index f3ae607..3e35bbe 100644
+--- a/drivers/gpu/drm/radeon/radeon_object.c
++++ b/drivers/gpu/drm/radeon/radeon_object.c
+@@ -513,22 +513,30 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
+ rbo = container_of(bo, struct radeon_bo, tbo);
+ radeon_bo_check_tiling(rbo, 0, 0);
+ rdev = rbo->rdev;
+- if (bo->mem.mem_type == TTM_PL_VRAM) {
+- size = bo->mem.num_pages << PAGE_SHIFT;
+- offset = bo->mem.start << PAGE_SHIFT;
+- if ((offset + size) > rdev->mc.visible_vram_size) {
+- /* hurrah the memory is not visible ! */
+- radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
+- rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
+- r = ttm_bo_validate(bo, &rbo->placement, false, true, false);
+- if (unlikely(r != 0))
+- return r;
+- offset = bo->mem.start << PAGE_SHIFT;
+- /* this should not happen */
+- if ((offset + size) > rdev->mc.visible_vram_size)
+- return -EINVAL;
+- }
++ if (bo->mem.mem_type != TTM_PL_VRAM)
++ return 0;
++
++ size = bo->mem.num_pages << PAGE_SHIFT;
++ offset = bo->mem.start << PAGE_SHIFT;
++ if ((offset + size) <= rdev->mc.visible_vram_size)
++ return 0;
++
++ /* hurrah the memory is not visible ! */
++ radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
++ rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
++ r = ttm_bo_validate(bo, &rbo->placement, false, true, false);
++ if (unlikely(r == -ENOMEM)) {
++ radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
++ return ttm_bo_validate(bo, &rbo->placement, false, true, false);
++ } else if (unlikely(r != 0)) {
++ return r;
+ }
++
++ offset = bo->mem.start << PAGE_SHIFT;
++ /* this should never happen */
++ if ((offset + size) > rdev->mc.visible_vram_size)
++ return -EINVAL;
++
+ return 0;
+ }
+
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+index 40932fb..84ba033 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+@@ -558,14 +558,36 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
+ } *cmd;
+ int ret;
+ struct vmw_resource *res;
++ SVGA3dCmdSurfaceDMASuffix *suffix;
++ uint32_t bo_size;
+
+ cmd = container_of(header, struct vmw_dma_cmd, header);
++ suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->dma +
++ header->size - sizeof(*suffix));
++
++ /* Make sure device and verifier stays in sync. */
++ if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
++ DRM_ERROR("Invalid DMA suffix size.\n");
++ return -EINVAL;
++ }
++
+ ret = vmw_translate_guest_ptr(dev_priv, sw_context,
+ &cmd->dma.guest.ptr,
+ &vmw_bo);
+ if (unlikely(ret != 0))
+ return ret;
+
++ /* Make sure DMA doesn't cross BO boundaries. */
++ bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
++ if (unlikely(cmd->dma.guest.ptr.offset > bo_size)) {
++ DRM_ERROR("Invalid DMA offset.\n");
++ return -EINVAL;
++ }
++
++ bo_size -= cmd->dma.guest.ptr.offset;
++ if (unlikely(suffix->maximumOffset > bo_size))
++ suffix->maximumOffset = bo_size;
++
+ bo = &vmw_bo->base;
+ ret = vmw_user_surface_lookup_handle(dev_priv, sw_context->tfile,
+ cmd->dma.host.sid, &srf);
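The new checks above follow a common validation shape: reject a start offset past the end of the buffer object, then clamp the remaining extent to what is actually left. A stand-alone sketch of that bounds logic; the function and variable names are invented for illustration:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

static int clamp_dma_range(uint32_t bo_size, uint32_t offset,
			   uint32_t *max_offset)
{
	if (offset > bo_size)
		return -EINVAL;	/* DMA would start past the buffer */

	bo_size -= offset;
	if (*max_offset > bo_size)
		*max_offset = bo_size;	/* silently clamp, as the patch does */
	return 0;
}

int main(void)
{
	uint32_t max = 5000;

	if (clamp_dma_range(4096, 1024, &max) == 0)
		printf("clamped maximumOffset: %u\n", (unsigned)max);	/* 3072 */
	return 0;
}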
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index ca2b3e6..ccc89b0 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -678,6 +678,13 @@
+ #define USB_DEVICE_ID_SYMBOL_SCANNER_1 0x0800
+ #define USB_DEVICE_ID_SYMBOL_SCANNER_2 0x1300
+
++#define USB_VENDOR_ID_SYNAPTICS 0x06cb
++#define USB_DEVICE_ID_SYNAPTICS_LTS1 0x0af8
++#define USB_DEVICE_ID_SYNAPTICS_LTS2 0x1d10
++#define USB_DEVICE_ID_SYNAPTICS_HD 0x0ac3
++#define USB_DEVICE_ID_SYNAPTICS_QUAD_HD 0x1ac3
++#define USB_DEVICE_ID_SYNAPTICS_TP_V103 0x5710
++
+ #define USB_VENDOR_ID_THRUSTMASTER 0x044f
+
+ #define USB_VENDOR_ID_TOPSEED 0x0766
+diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
+index f98fbad..71c2582 100644
+--- a/drivers/hid/usbhid/hid-quirks.c
++++ b/drivers/hid/usbhid/hid-quirks.c
+@@ -100,6 +100,11 @@ static const struct hid_blacklist {
+ { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS, HID_QUIRK_MULTI_INPUT },
+ { USB_VENDOR_ID_SIGMA_MICRO, USB_DEVICE_ID_SIGMA_MICRO_KEYBOARD, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_DUOSENSE, HID_QUIRK_NO_INIT_REPORTS },
++ { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_LTS1, HID_QUIRK_NO_INIT_REPORTS },
++ { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_LTS2, HID_QUIRK_NO_INIT_REPORTS },
++ { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_HD, HID_QUIRK_NO_INIT_REPORTS },
++ { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_QUAD_HD, HID_QUIRK_NO_INIT_REPORTS },
++ { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_TP_V103, HID_QUIRK_NO_INIT_REPORTS },
+
+ { 0, 0 }
+ };
+diff --git a/drivers/hwmon/emc1403.c b/drivers/hwmon/emc1403.c
+index cd2a6e4..7da08ac 100644
+--- a/drivers/hwmon/emc1403.c
++++ b/drivers/hwmon/emc1403.c
+@@ -159,7 +159,7 @@ static ssize_t store_hyst(struct device *dev,
+ if (retval < 0)
+ goto fail;
+
+- hyst = val - retval * 1000;
++ hyst = retval * 1000 - val;
+ hyst = DIV_ROUND_CLOSEST(hyst, 1000);
+ if (hyst < 0 || hyst > 255) {
+ retval = -ERANGE;
+@@ -290,7 +290,7 @@ static int emc1403_detect(struct i2c_client *client,
+ }
+
+ id = i2c_smbus_read_byte_data(client, THERMAL_REVISION_REG);
+- if (id != 0x01)
++ if (id < 0x01 || id > 0x04)
+ return -ENODEV;
+
+ return 0;
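The store_hyst() fix above corrects inverted arithmetic: the register holds how far below the limit the hysteresis point sits, in whole degrees, so the value is retval * 1000 - val rather than val - retval * 1000 (which went negative and was rejected by the range check). A worked example of the corrected calculation:

#include <stdio.h>

/* limit: limit register in degrees C; val: requested hysteresis point
 * in millidegrees C, as written to the sysfs attribute. */
static int hyst_reg(int limit, long val)
{
	long hyst = limit * 1000 - val;

	return (int)((hyst + 500) / 1000);	/* DIV_ROUND_CLOSEST(hyst, 1000) */
}

int main(void)
{
	/* 85 C limit with the trip point releasing at 75.0 C -> register 10 */
	printf("%d\n", hyst_reg(85, 75000));
	return 0;
}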
+diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
+index 3c2812f..aadb398 100644
+--- a/drivers/i2c/busses/i2c-designware-core.c
++++ b/drivers/i2c/busses/i2c-designware-core.c
+@@ -346,6 +346,9 @@ static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
+ ic_con &= ~DW_IC_CON_10BITADDR_MASTER;
+ dw_writel(dev, ic_con, DW_IC_CON);
+
++ /* enforce disabled interrupts (due to HW issues) */
++ i2c_dw_disable_int(dev);
++
+ /* Enable the adapter */
+ dw_writel(dev, 1, DW_IC_ENABLE);
+
+diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
+index 4c17180..7d6d2b7 100644
+--- a/drivers/i2c/busses/i2c-s3c2410.c
++++ b/drivers/i2c/busses/i2c-s3c2410.c
+@@ -1082,10 +1082,10 @@ static int s3c24xx_i2c_resume(struct device *dev)
+ struct platform_device *pdev = to_platform_device(dev);
+ struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev);
+
+- i2c->suspended = 0;
+ clk_enable(i2c->clk);
+ s3c24xx_i2c_init(i2c);
+ clk_disable(i2c->clk);
++ i2c->suspended = 0;
+
+ return 0;
+ }
+diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
+index e2a9867..342a059 100644
+--- a/drivers/input/mouse/elantech.c
++++ b/drivers/input/mouse/elantech.c
+@@ -11,6 +11,7 @@
+ */
+
+ #include <linux/delay.h>
++#include <linux/dmi.h>
+ #include <linux/slab.h>
+ #include <linux/module.h>
+ #include <linux/input.h>
+@@ -783,7 +784,11 @@ static int elantech_set_absolute_mode(struct psmouse *psmouse)
+ break;
+
+ case 3:
+- etd->reg_10 = 0x0b;
++ if (etd->set_hw_resolution)
++ etd->reg_10 = 0x0b;
++ else
++ etd->reg_10 = 0x03;
++
+ if (elantech_write_reg(psmouse, 0x10, etd->reg_10))
+ rc = -1;
+
+@@ -1206,6 +1211,22 @@ static int elantech_reconnect(struct psmouse *psmouse)
+ }
+
+ /*
++ * Some hw_version 3 models go into error state when we try to set bit 3 of r10
++ */
++static const struct dmi_system_id no_hw_res_dmi_table[] = {
++#if defined(CONFIG_DMI) && defined(CONFIG_X86)
++ {
++ /* Gigabyte U2442 */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "U2442"),
++ },
++ },
++#endif
++ { }
++};
++
++/*
+ * determine hardware version and set some properties according to it.
+ */
+ static int elantech_set_properties(struct elantech_data *etd)
+@@ -1254,6 +1275,9 @@ static int elantech_set_properties(struct elantech_data *etd)
+ etd->reports_pressure = true;
+ }
+
++ /* Enable real hardware resolution on hw_version 3 ? */
++ etd->set_hw_resolution = !dmi_check_system(no_hw_res_dmi_table);
++
+ return 0;
+ }
+
+diff --git a/drivers/input/mouse/elantech.h b/drivers/input/mouse/elantech.h
+index 9e5f1aa..3569bed 100644
+--- a/drivers/input/mouse/elantech.h
++++ b/drivers/input/mouse/elantech.h
+@@ -128,6 +128,7 @@ struct elantech_data {
+ bool paritycheck;
+ bool jumpy_cursor;
+ bool reports_pressure;
++ bool set_hw_resolution;
+ unsigned char hw_version;
+ unsigned int fw_version;
+ unsigned int single_finger_reports;
+diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
+index 886c191..8a39807 100644
+--- a/drivers/input/mouse/synaptics.c
++++ b/drivers/input/mouse/synaptics.c
+@@ -1394,6 +1394,14 @@ static const struct dmi_system_id min_max_dmi_table[] __initconst = {
+ .driver_data = (int []){1232, 5710, 1156, 4696},
+ },
+ {
++ /* Lenovo ThinkPad Edge E431 */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad Edge E431"),
++ },
++ .driver_data = (int []){1024, 5022, 2508, 4832},
++ },
++ {
+ /* Lenovo ThinkPad T431s */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 2d0544c..db4b4a8 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -8122,7 +8122,8 @@ static int md_notify_reboot(struct notifier_block *this,
+ if (mddev_trylock(mddev)) {
+ if (mddev->pers)
+ __md_stop_writes(mddev);
+- mddev->safemode = 2;
++ if (mddev->persistent)
++ mddev->safemode = 2;
+ mddev_unlock(mddev);
+ }
+ need_delay = 1;
+diff --git a/drivers/media/media-device.c b/drivers/media/media-device.c
+index 6edc9ba..298703f 100644
+--- a/drivers/media/media-device.c
++++ b/drivers/media/media-device.c
+@@ -90,6 +90,7 @@ static long media_device_enum_entities(struct media_device *mdev,
+ struct media_entity *ent;
+ struct media_entity_desc u_ent;
+
++ memset(&u_ent, 0, sizeof(u_ent));
+ if (copy_from_user(&u_ent.id, &uent->id, sizeof(u_ent.id)))
+ return -EFAULT;
+
+diff --git a/drivers/media/video/ov7670.c b/drivers/media/video/ov7670.c
+index 8aa0585..17125d9 100644
+--- a/drivers/media/video/ov7670.c
++++ b/drivers/media/video/ov7670.c
+@@ -937,7 +937,7 @@ static int ov7670_enum_framesizes(struct v4l2_subdev *sd,
+ * windows that fall outside that.
+ */
+ for (i = 0; i < N_WIN_SIZES; i++) {
+- struct ov7670_win_size *win = &ov7670_win_sizes[index];
++ struct ov7670_win_size *win = &ov7670_win_sizes[i];
+ if (info->min_width && win->width < info->min_width)
+ continue;
+ if (info->min_height && win->height < info->min_height)
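The one-character ov7670 fix above (index -> i) is the classic stale-index loop bug: the loop variable advances, but the body keeps reading a single fixed element, so every enumerated frame size reported the same window. A minimal reproduction of the pattern:

#include <stdio.h>

int main(void)
{
	int widths[] = { 160, 320, 640 };
	int index = 0;	/* stale variable, as in the buggy driver */
	int i;

	for (i = 0; i < 3; i++)
		printf("buggy=%d fixed=%d\n", widths[index], widths[i]);
	return 0;
}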
+diff --git a/drivers/media/video/v4l2-compat-ioctl32.c b/drivers/media/video/v4l2-compat-ioctl32.c
+index c68531b..2671959 100644
+--- a/drivers/media/video/v4l2-compat-ioctl32.c
++++ b/drivers/media/video/v4l2-compat-ioctl32.c
+@@ -178,6 +178,9 @@ struct v4l2_create_buffers32 {
+
+ static int __get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)
+ {
++ if (get_user(kp->type, &up->type))
++ return -EFAULT;
++
+ switch (kp->type) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+@@ -208,17 +211,16 @@ static int __get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __us
+
+ static int get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)
+ {
+- if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_format32)) ||
+- get_user(kp->type, &up->type))
+- return -EFAULT;
++ if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_format32)))
++ return -EFAULT;
+ return __get_v4l2_format32(kp, up);
+ }
+
+ static int get_v4l2_create32(struct v4l2_create_buffers *kp, struct v4l2_create_buffers32 __user *up)
+ {
+ if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_create_buffers32)) ||
+- copy_from_user(kp, up, offsetof(struct v4l2_create_buffers32, format.fmt)))
+- return -EFAULT;
++ copy_from_user(kp, up, offsetof(struct v4l2_create_buffers32, format)))
++ return -EFAULT;
+ return __get_v4l2_format32(&kp->format, &up->format);
+ }
+
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 1bf36ac..5af2a8f 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -4914,6 +4914,7 @@ static int __init bonding_init(void)
+ out:
+ return res;
+ err:
++ bond_destroy_debugfs();
+ rtnl_link_unregister(&bond_link_ops);
+ err_link:
+ unregister_pernet_subsys(&bond_net_ops);
+diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c
+index 2c7f503..5192f86 100644
+--- a/drivers/net/can/sja1000/peak_pci.c
++++ b/drivers/net/can/sja1000/peak_pci.c
+@@ -39,9 +39,9 @@ MODULE_LICENSE("GPL v2");
+ #define DRV_NAME "peak_pci"
+
+ struct peak_pci_chan {
+- void __iomem *cfg_base; /* Common for all channels */
+- struct net_device *next_dev; /* Chain of network devices */
+- u16 icr_mask; /* Interrupt mask for fast ack */
++ void __iomem *cfg_base; /* Common for all channels */
++ struct net_device *prev_dev; /* Chain of network devices */
++ u16 icr_mask; /* Interrupt mask for fast ack */
+ };
+
+ #define PEAK_PCI_CAN_CLOCK (16000000 / 2)
+@@ -98,7 +98,7 @@ static int __devinit peak_pci_probe(struct pci_dev *pdev,
+ {
+ struct sja1000_priv *priv;
+ struct peak_pci_chan *chan;
+- struct net_device *dev, *dev0 = NULL;
++ struct net_device *dev, *prev_dev;
+ void __iomem *cfg_base, *reg_base;
+ u16 sub_sys_id, icr;
+ int i, err, channels;
+@@ -196,18 +196,14 @@ static int __devinit peak_pci_probe(struct pci_dev *pdev,
+ }
+
+ /* Create chain of SJA1000 devices */
+- if (i == 0)
+- dev0 = dev;
+- else
+- chan->next_dev = dev;
++ chan->prev_dev = pci_get_drvdata(pdev);
++ pci_set_drvdata(pdev, dev);
+
+ dev_info(&pdev->dev,
+ "%s at reg_base=0x%p cfg_base=0x%p irq=%d\n",
+ dev->name, priv->reg_base, chan->cfg_base, dev->irq);
+ }
+
+- pci_set_drvdata(pdev, dev0);
+-
+ /* Enable interrupts */
+ writew(icr, cfg_base + PITA_ICR + 2);
+
+@@ -217,12 +213,13 @@ failure_remove_channels:
+ /* Disable interrupts */
+ writew(0x0, cfg_base + PITA_ICR + 2);
+
+- for (dev = dev0; dev; dev = chan->next_dev) {
+- unregister_sja1000dev(dev);
+- free_sja1000dev(dev);
++ for (dev = pci_get_drvdata(pdev); dev; dev = prev_dev) {
+ priv = netdev_priv(dev);
+ chan = priv->priv;
+- dev = chan->next_dev;
++ prev_dev = chan->prev_dev;
++
++ unregister_sja1000dev(dev);
++ free_sja1000dev(dev);
+ }
+
+ pci_iounmap(pdev, reg_base);
+@@ -241,7 +238,7 @@ failure_disable_pci:
+
+ static void __devexit peak_pci_remove(struct pci_dev *pdev)
+ {
+- struct net_device *dev = pci_get_drvdata(pdev); /* First device */
++ struct net_device *dev = pci_get_drvdata(pdev); /* Last device */
+ struct sja1000_priv *priv = netdev_priv(dev);
+ struct peak_pci_chan *chan = priv->priv;
+ void __iomem *cfg_base = chan->cfg_base;
+@@ -252,10 +249,12 @@ static void __devexit peak_pci_remove(struct pci_dev *pdev)
+
+ /* Loop over all registered devices */
+ while (1) {
++ struct net_device *prev_dev = chan->prev_dev;
++
+ dev_info(&pdev->dev, "removing device %s\n", dev->name);
+ unregister_sja1000dev(dev);
+ free_sja1000dev(dev);
+- dev = chan->next_dev;
++ dev = prev_dev;
+ if (!dev)
+ break;
+ priv = netdev_priv(dev);
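The peak_pci rework above replaces the forward next_dev chain with a prev_dev chain anchored in the PCI drvdata pointer: probe pushes each new channel onto the front, and remove pops channels newest-first until the chain is empty. A user-space sketch of that LIFO idiom, with illustrative types:

#include <stdio.h>
#include <stdlib.h>

struct chan {
	int id;
	struct chan *prev;	/* chain of devices, newest first */
};

int main(void)
{
	struct chan *drvdata = NULL;	/* stands in for pci drvdata */
	int i;

	for (i = 0; i < 3; i++) {	/* probe: push each channel */
		struct chan *c = malloc(sizeof(*c));

		if (!c)
			return 1;
		c->id = i;
		c->prev = drvdata;
		drvdata = c;
	}

	while (drvdata) {		/* remove: pop newest first */
		struct chan *prev = drvdata->prev;

		printf("removing channel %d\n", drvdata->id);
		free(drvdata);
		drvdata = prev;
	}
	return 0;
}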
+diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
+index c77c462..2615433 100644
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -10656,7 +10656,9 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
+ if (tg3_flag(tp, MAX_RXPEND_64) &&
+ tp->rx_pending > 63)
+ tp->rx_pending = 63;
+- tp->rx_jumbo_pending = ering->rx_jumbo_pending;
++
++ if (tg3_flag(tp, JUMBO_RING_ENABLE))
++ tp->rx_jumbo_pending = ering->rx_jumbo_pending;
+
+ for (i = 0; i < tp->irq_max; i++)
+ tp->napi[i].tx_pending = ering->tx_pending;
+diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
+index 301b39e..b74cdf6 100644
+--- a/drivers/net/macvlan.c
++++ b/drivers/net/macvlan.c
+@@ -236,11 +236,9 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
+ const struct macvlan_dev *vlan = netdev_priv(dev);
+ const struct macvlan_port *port = vlan->port;
+ const struct macvlan_dev *dest;
+- __u8 ip_summed = skb->ip_summed;
+
+ if (vlan->mode == MACVLAN_MODE_BRIDGE) {
+ const struct ethhdr *eth = (void *)skb->data;
+- skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ /* send to other bridge ports directly */
+ if (is_multicast_ether_addr(eth->h_dest)) {
+@@ -258,7 +256,6 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
+ }
+
+ xmit_world:
+- skb->ip_summed = ip_summed;
+ skb->dev = vlan->lowerdev;
+ return dev_queue_xmit(skb);
+ }
+@@ -394,8 +391,10 @@ static void macvlan_change_rx_flags(struct net_device *dev, int change)
+ struct macvlan_dev *vlan = netdev_priv(dev);
+ struct net_device *lowerdev = vlan->lowerdev;
+
+- if (change & IFF_ALLMULTI)
+- dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1);
++ if (dev->flags & IFF_UP) {
++ if (change & IFF_ALLMULTI)
++ dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1);
++ }
+ }
+
+ static void macvlan_set_multicast_list(struct net_device *dev)
+diff --git a/drivers/net/wimax/i2400m/usb-rx.c b/drivers/net/wimax/i2400m/usb-rx.c
+index e325768..b78ee67 100644
+--- a/drivers/net/wimax/i2400m/usb-rx.c
++++ b/drivers/net/wimax/i2400m/usb-rx.c
+@@ -277,7 +277,7 @@ retry:
+ d_printf(1, dev, "RX: size changed to %d, received %d, "
+ "copied %d, capacity %ld\n",
+ rx_size, read_size, rx_skb->len,
+- (long) (skb_end_pointer(new_skb) - new_skb->head));
++ (long) skb_end_offset(new_skb));
+ goto retry;
+ }
+ /* In most cases, it happens due to the hardware scheduling a
+diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
+index 5c38281..1d4c579 100644
+--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
++++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
+@@ -651,20 +651,18 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
+ bss_conf->bssid);
+
+ /*
+- * Update the beacon. This is only required on USB devices. PCI
+- * devices fetch beacons periodically.
+- */
+- if (changes & BSS_CHANGED_BEACON && rt2x00_is_usb(rt2x00dev))
+- rt2x00queue_update_beacon(rt2x00dev, vif);
+-
+- /*
+ * Start/stop beaconing.
+ */
+ if (changes & BSS_CHANGED_BEACON_ENABLED) {
+ if (!bss_conf->enable_beacon && intf->enable_beacon) {
+- rt2x00queue_clear_beacon(rt2x00dev, vif);
+ rt2x00dev->intf_beaconing--;
+ intf->enable_beacon = false;
++ /*
++ * Clear beacon in the H/W for this vif. This is needed
++ * to disable beaconing on this particular interface
++ * and keep it running on other interfaces.
++ */
++ rt2x00queue_clear_beacon(rt2x00dev, vif);
+
+ if (rt2x00dev->intf_beaconing == 0) {
+ /*
+@@ -675,11 +673,15 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
+ rt2x00queue_stop_queue(rt2x00dev->bcn);
+ mutex_unlock(&intf->beacon_skb_mutex);
+ }
+-
+-
+ } else if (bss_conf->enable_beacon && !intf->enable_beacon) {
+ rt2x00dev->intf_beaconing++;
+ intf->enable_beacon = true;
++ /*
++ * Upload beacon to the H/W. This is only required on
++ * USB devices. PCI devices fetch beacons periodically.
++ */
++ if (rt2x00_is_usb(rt2x00dev))
++ rt2x00queue_update_beacon(rt2x00dev, vif);
+
+ if (rt2x00dev->intf_beaconing == 1) {
+ /*
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
+index d3920da..79fc4b7 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
+@@ -1158,12 +1158,23 @@ int rtl92cu_hw_init(struct ieee80211_hw *hw)
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ int err = 0;
+ static bool iqk_initialized;
++ unsigned long flags;
++
++ /* As this function can take a very long time (up to 350 ms)
++ * and can be called with irqs disabled, reenable the irqs
++ * to let the other devices continue being serviced.
++ *
++ * It is safe to do so since our own interrupts will only be enabled
++ * in a subsequent step.
++ */
++ local_save_flags(flags);
++ local_irq_enable();
+
+ rtlhal->hw_type = HARDWARE_TYPE_RTL8192CU;
+ err = _rtl92cu_init_mac(hw);
+ if (err) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("init mac failed!\n"));
+- return err;
++ goto exit;
+ }
+ err = rtl92c_download_fw(hw);
+ if (err) {
+@@ -1171,7 +1182,7 @@ int rtl92cu_hw_init(struct ieee80211_hw *hw)
+ ("Failed to download FW. Init HW without FW now..\n"));
+ err = 1;
+ rtlhal->fw_ready = false;
+- return err;
++ goto exit;
+ } else {
+ rtlhal->fw_ready = true;
+ }
+@@ -1212,6 +1223,8 @@ int rtl92cu_hw_init(struct ieee80211_hw *hw)
+ _update_mac_setting(hw);
+ rtl92c_dm_init(hw);
+ _dump_registers(hw);
++exit:
++ local_irq_restore(flags);
+ return err;
+ }
+
+diff --git a/drivers/pci/hotplug/shpchp_ctrl.c b/drivers/pci/hotplug/shpchp_ctrl.c
+index 3ffc1b2..b888675 100644
+--- a/drivers/pci/hotplug/shpchp_ctrl.c
++++ b/drivers/pci/hotplug/shpchp_ctrl.c
+@@ -285,8 +285,8 @@ static int board_added(struct slot *p_slot)
+ return WRONG_BUS_FREQUENCY;
+ }
+
+- bsp = ctrl->pci_dev->bus->cur_bus_speed;
+- msp = ctrl->pci_dev->bus->max_bus_speed;
++ bsp = ctrl->pci_dev->subordinate->cur_bus_speed;
++ msp = ctrl->pci_dev->subordinate->max_bus_speed;
+
+ /* Check if there are other slots or devices on the same bus */
+ if (!list_empty(&ctrl->pci_dev->subordinate->devices))
+diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
+index 8e6c4fa..2a8d6aa 100644
+--- a/drivers/platform/x86/thinkpad_acpi.c
++++ b/drivers/platform/x86/thinkpad_acpi.c
+@@ -3405,7 +3405,7 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
+ /* Do not issue duplicate brightness change events to
+ * userspace. tpacpi_detect_brightness_capabilities() must have
+ * been called before this point */
+- if (tp_features.bright_acpimode && acpi_video_backlight_support()) {
++ if (acpi_video_backlight_support()) {
+ pr_info("This ThinkPad has standard ACPI backlight "
+ "brightness control, supported by the ACPI "
+ "video driver\n");
+diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+index 987c6d6..01780a9 100644
+--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
++++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+@@ -8166,7 +8166,6 @@ _scsih_suspend(struct pci_dev *pdev, pm_message_t state)
+
+ mpt2sas_base_free_resources(ioc);
+ pci_save_state(pdev);
+- pci_disable_device(pdev);
+ pci_set_power_state(pdev, device_state);
+ return 0;
+ }
+diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c
+index 2542c37..c5da0d2 100644
+--- a/drivers/staging/octeon/ethernet-tx.c
++++ b/drivers/staging/octeon/ethernet-tx.c
+@@ -344,7 +344,7 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
+ }
+ if (unlikely
+ (skb->truesize !=
+- sizeof(*skb) + skb_end_pointer(skb) - skb->head)) {
++ sizeof(*skb) + skb_end_offset(skb))) {
+ /*
+ printk("TX buffer truesize has been changed\n");
+ */
+diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
+index 7b97e7e..443547b 100644
+--- a/drivers/tty/hvc/hvc_console.c
++++ b/drivers/tty/hvc/hvc_console.c
+@@ -190,7 +190,7 @@ static struct tty_driver *hvc_console_device(struct console *c, int *index)
+ return hvc_driver;
+ }
+
+-static int __init hvc_console_setup(struct console *co, char *options)
++static int hvc_console_setup(struct console *co, char *options)
+ {
+ if (co->index < 0 || co->index >= MAX_NR_HVC_CONSOLES)
+ return -ENODEV;
+diff --git a/drivers/usb/storage/shuttle_usbat.c b/drivers/usb/storage/shuttle_usbat.c
+index 0b00091..ff8aeee 100644
+--- a/drivers/usb/storage/shuttle_usbat.c
++++ b/drivers/usb/storage/shuttle_usbat.c
+@@ -1846,7 +1846,7 @@ static int usbat_probe(struct usb_interface *intf,
+ us->transport_name = "Shuttle USBAT";
+ us->transport = usbat_flash_transport;
+ us->transport_reset = usb_stor_CB_reset;
+- us->max_lun = 1;
++ us->max_lun = 0;
+
+ result = usb_stor_probe2(us);
+ return result;
+diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
+index 08711bc..49d222d 100644
+--- a/drivers/usb/storage/unusual_devs.h
++++ b/drivers/usb/storage/unusual_devs.h
+@@ -226,6 +226,20 @@ UNUSUAL_DEV( 0x0421, 0x0495, 0x0370, 0x0370,
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_MAX_SECTORS_64 ),
+
++/* Reported by Daniele Forsi <dforsi@gmail.com> */
++UNUSUAL_DEV( 0x0421, 0x04b9, 0x0350, 0x0350,
++ "Nokia",
++ "5300",
++ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
++ US_FL_MAX_SECTORS_64 ),
++
++/* Patch submitted by Victor A. Santos <victoraur.santos@gmail.com> */
++UNUSUAL_DEV( 0x0421, 0x05af, 0x0742, 0x0742,
++ "Nokia",
++ "305",
++ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
++ US_FL_MAX_SECTORS_64),
++
+ /* Patch submitted by Mikhail Zolotaryov <lebon@lebon.org.ua> */
+ UNUSUAL_DEV( 0x0421, 0x06aa, 0x1110, 0x1110,
+ "Nokia",
+diff --git a/drivers/video/tgafb.c b/drivers/video/tgafb.c
+index ac2cf6d..3b15bca 100644
+--- a/drivers/video/tgafb.c
++++ b/drivers/video/tgafb.c
+@@ -192,6 +192,8 @@ tgafb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
+
+ if (var->xres_virtual != var->xres || var->yres_virtual != var->yres)
+ return -EINVAL;
++ if (var->xres * var->yres * (var->bits_per_pixel >> 3) > info->fix.smem_len)
++ return -EINVAL;
+ if (var->nonstd)
+ return -EINVAL;
+ if (1000000000 / var->pixclock > TGA_PLL_MAX_FREQ)
+@@ -272,6 +274,7 @@ tgafb_set_par(struct fb_info *info)
+ par->yres = info->var.yres;
+ par->pll_freq = pll_freq = 1000000000 / info->var.pixclock;
+ par->bits_per_pixel = info->var.bits_per_pixel;
++ info->fix.line_length = par->xres * (par->bits_per_pixel >> 3);
+
+ tga_type = par->tga_type;
+
+@@ -1318,6 +1321,7 @@ tgafb_init_fix(struct fb_info *info)
+ int tga_bus_tc = TGA_BUS_TC(par->dev);
+ u8 tga_type = par->tga_type;
+ const char *tga_type_name = NULL;
++ unsigned memory_size;
+
+ switch (tga_type) {
+ case TGA_TYPE_8PLANE:
+@@ -1325,21 +1329,25 @@ tgafb_init_fix(struct fb_info *info)
+ tga_type_name = "Digital ZLXp-E1";
+ if (tga_bus_tc)
+ tga_type_name = "Digital ZLX-E1";
++ memory_size = 2097152;
+ break;
+ case TGA_TYPE_24PLANE:
+ if (tga_bus_pci)
+ tga_type_name = "Digital ZLXp-E2";
+ if (tga_bus_tc)
+ tga_type_name = "Digital ZLX-E2";
++ memory_size = 8388608;
+ break;
+ case TGA_TYPE_24PLUSZ:
+ if (tga_bus_pci)
+ tga_type_name = "Digital ZLXp-E3";
+ if (tga_bus_tc)
+ tga_type_name = "Digital ZLX-E3";
++ memory_size = 16777216;
+ break;
+ default:
+ tga_type_name = "Unknown";
++ memory_size = 16777216;
+ break;
+ }
+
+@@ -1351,9 +1359,8 @@ tgafb_init_fix(struct fb_info *info)
+ ? FB_VISUAL_PSEUDOCOLOR
+ : FB_VISUAL_DIRECTCOLOR);
+
+- info->fix.line_length = par->xres * (par->bits_per_pixel >> 3);
+ info->fix.smem_start = (size_t) par->tga_fb_base;
+- info->fix.smem_len = info->fix.line_length * par->yres;
++ info->fix.smem_len = memory_size;
+ info->fix.mmio_start = (size_t) par->tga_regs_base;
+ info->fix.mmio_len = 512;
+
+@@ -1478,6 +1485,9 @@ tgafb_register(struct device *dev)
+ modedb_tga = &modedb_tc;
+ modedbsize_tga = 1;
+ }
++
++ tgafb_init_fix(info);
++
+ ret = fb_find_mode(&info->var, info,
+ mode_option ? mode_option : mode_option_tga,
+ modedb_tga, modedbsize_tga, NULL,
+@@ -1495,7 +1505,6 @@ tgafb_register(struct device *dev)
+ }
+
+ tgafb_set_par(info);
+- tgafb_init_fix(info);
+
+ if (register_framebuffer(info) < 0) {
+ printk(KERN_ERR "tgafb: Could not register framebuffer\n");
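The tgafb changes above size smem_len from the board type instead of the current mode, then reject any mode that would not fit: xres * yres * bytes-per-pixel must not exceed the adapter memory. A worked example against the 2097152-byte (2 MiB) 8-plane board size set in this patch:

#include <stdio.h>

int main(void)
{
	unsigned smem_len = 2097152;	/* 8-plane board, per the patch */
	unsigned modes[][3] = { { 1280, 1024, 8 }, { 2048, 1536, 8 } };
	unsigned i;

	for (i = 0; i < 2; i++) {
		unsigned need = modes[i][0] * modes[i][1] * (modes[i][2] >> 3);

		printf("%ux%u@%ubpp needs %u bytes: %s\n",
		       modes[i][0], modes[i][1], modes[i][2], need,
		       need > smem_len ? "rejected (-EINVAL)" : "fits");
	}
	return 0;
}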
+diff --git a/fs/nfsd/nfs4acl.c b/fs/nfsd/nfs4acl.c
+index 9c51aff..435a9be1 100644
+--- a/fs/nfsd/nfs4acl.c
++++ b/fs/nfsd/nfs4acl.c
+@@ -373,8 +373,10 @@ sort_pacl(struct posix_acl *pacl)
+ * by uid/gid. */
+ int i, j;
+
+- if (pacl->a_count <= 4)
+- return; /* no users or groups */
++ /* no users or groups */
++ if (!pacl || pacl->a_count <= 4)
++ return;
++
+ i = 1;
+ while (pacl->a_entries[i].e_tag == ACL_USER)
+ i++;
+@@ -498,13 +500,12 @@ posix_state_to_acl(struct posix_acl_state *state, unsigned int flags)
+
+ /*
+ * ACLs with no ACEs are treated differently in the inheritable
+- * and effective cases: when there are no inheritable ACEs, we
+- * set a zero-length default posix acl:
++ * and effective cases: when there are no inheritable ACEs, we
++ * call ->set_acl with a NULL ACL structure.
+ */
+- if (state->empty && (flags & NFS4_ACL_TYPE_DEFAULT)) {
+- pacl = posix_acl_alloc(0, GFP_KERNEL);
+- return pacl ? pacl : ERR_PTR(-ENOMEM);
+- }
++ if (state->empty && (flags & NFS4_ACL_TYPE_DEFAULT))
++ return NULL;
++
+ /*
+ * When there are no effective ACEs, the following will end
+ * up setting a 3-element effective posix ACL with all
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index 4cef99f..b2e0a55 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -986,6 +986,18 @@ static struct nfs4_client *alloc_client(struct xdr_netobj name)
+ }
+ memcpy(clp->cl_name.data, name.data, name.len);
+ clp->cl_name.len = name.len;
++ INIT_LIST_HEAD(&clp->cl_sessions);
++ idr_init(&clp->cl_stateids);
++ atomic_set(&clp->cl_refcount, 0);
++ clp->cl_cb_state = NFSD4_CB_UNKNOWN;
++ INIT_LIST_HEAD(&clp->cl_idhash);
++ INIT_LIST_HEAD(&clp->cl_strhash);
++ INIT_LIST_HEAD(&clp->cl_openowners);
++ INIT_LIST_HEAD(&clp->cl_delegations);
++ INIT_LIST_HEAD(&clp->cl_lru);
++ INIT_LIST_HEAD(&clp->cl_callbacks);
++ spin_lock_init(&clp->cl_lock);
++ rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
+ return clp;
+ }
+
+@@ -999,6 +1011,7 @@ free_client(struct nfs4_client *clp)
+ list_del(&ses->se_perclnt);
+ nfsd4_put_session(ses);
+ }
++ rpc_destroy_wait_queue(&clp->cl_cb_waitq);
+ if (clp->cl_cred.cr_group_info)
+ put_group_info(clp->cl_cred.cr_group_info);
+ kfree(clp->cl_principal);
+@@ -1163,7 +1176,6 @@ static struct nfs4_client *create_client(struct xdr_netobj name, char *recdir,
+ if (clp == NULL)
+ return NULL;
+
+- INIT_LIST_HEAD(&clp->cl_sessions);
+
+ princ = svc_gss_principal(rqstp);
+ if (princ) {
+@@ -1174,21 +1186,10 @@ static struct nfs4_client *create_client(struct xdr_netobj name, char *recdir,
+ }
+ }
+
+- idr_init(&clp->cl_stateids);
+ memcpy(clp->cl_recdir, recdir, HEXDIR_LEN);
+- atomic_set(&clp->cl_refcount, 0);
+- clp->cl_cb_state = NFSD4_CB_UNKNOWN;
+- INIT_LIST_HEAD(&clp->cl_idhash);
+- INIT_LIST_HEAD(&clp->cl_strhash);
+- INIT_LIST_HEAD(&clp->cl_openowners);
+- INIT_LIST_HEAD(&clp->cl_delegations);
+- INIT_LIST_HEAD(&clp->cl_lru);
+- INIT_LIST_HEAD(&clp->cl_callbacks);
+- spin_lock_init(&clp->cl_lock);
+ INIT_WORK(&clp->cl_cb_null.cb_work, nfsd4_do_callback_rpc);
+ clp->cl_time = get_seconds();
+ clear_bit(0, &clp->cl_cb_slot_busy);
+- rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
+ copy_verf(clp, verf);
+ rpc_copy_addr((struct sockaddr *) &clp->cl_addr, sa);
+ clp->cl_flavor = rqstp->rq_flavor;
+@@ -3375,9 +3376,16 @@ out:
+ static __be32
+ nfsd4_free_lock_stateid(struct nfs4_ol_stateid *stp)
+ {
+- if (check_for_locks(stp->st_file, lockowner(stp->st_stateowner)))
++ struct nfs4_lockowner *lo = lockowner(stp->st_stateowner);
++
++ if (check_for_locks(stp->st_file, lo))
+ return nfserr_locks_held;
+- release_lock_stateid(stp);
++ /*
++ * Currently there's a 1-1 lock stateid<->lockowner
++ * correspondence, and we have to delete the lockowner when we
++ * delete the lock stateid:
++ */
++ unhash_lockowner(lo);
+ return nfs_ok;
+ }
+
+@@ -3812,6 +3820,10 @@ static bool same_lockowner_ino(struct nfs4_lockowner *lo, struct inode *inode, c
+
+ if (!same_owner_str(&lo->lo_owner, owner, clid))
+ return false;
++ if (list_empty(&lo->lo_owner.so_stateids)) {
++ WARN_ON_ONCE(1);
++ return false;
++ }
+ lst = list_first_entry(&lo->lo_owner.so_stateids,
+ struct nfs4_ol_stateid, st_perstateowner);
+ return lst->st_file->fi_inode == inode;
+diff --git a/fs/posix_acl.c b/fs/posix_acl.c
+index cea4623..6c70ab2 100644
+--- a/fs/posix_acl.c
++++ b/fs/posix_acl.c
+@@ -155,6 +155,12 @@ posix_acl_equiv_mode(const struct posix_acl *acl, umode_t *mode_p)
+ umode_t mode = 0;
+ int not_equiv = 0;
+
++ /*
++ * A null ACL can always be presented as mode bits.
++ */
++ if (!acl)
++ return 0;
++
+ FOREACH_ACL_ENTRY(pa, acl, pe) {
+ switch (pa->e_tag) {
+ case ACL_USER_OBJ:
+diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
+index 26eafce..a3ebb09 100644
+--- a/include/linux/ftrace.h
++++ b/include/linux/ftrace.h
+@@ -260,6 +260,7 @@ extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);
+ extern int ftrace_arch_read_dyn_info(char *buf, int size);
+
+ extern int skip_trace(unsigned long ip);
++extern void ftrace_module_init(struct module *mod);
+
+ extern void ftrace_disable_daemon(void);
+ extern void ftrace_enable_daemon(void);
+@@ -272,6 +273,7 @@ static inline void ftrace_set_filter(unsigned char *buf, int len, int reset)
+ static inline void ftrace_disable_daemon(void) { }
+ static inline void ftrace_enable_daemon(void) { }
+ static inline void ftrace_release_mod(struct module *mod) {}
++static inline void ftrace_module_init(struct module *mod) {}
+ static inline int register_ftrace_command(struct ftrace_func_command *cmd)
+ {
+ return -EINVAL;
+diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
+index e6796c1..f93d8c1 100644
+--- a/include/linux/kvm_host.h
++++ b/include/linux/kvm_host.h
+@@ -95,7 +95,6 @@ struct kvm_async_pf {
+ unsigned long addr;
+ struct kvm_arch_async_pf arch;
+ struct page *page;
+- bool done;
+ };
+
+ void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index 13bd6d0..c445e52 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -617,11 +617,21 @@ static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
+ {
+ return skb->head + skb->end;
+ }
++
++static inline unsigned int skb_end_offset(const struct sk_buff *skb)
++{
++ return skb->end;
++}
+ #else
+ static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
+ {
+ return skb->end;
+ }
++
++static inline unsigned int skb_end_offset(const struct sk_buff *skb)
++{
++ return skb->end - skb->head;
++}
+ #endif
+
+ /* Internal */
+@@ -2549,7 +2559,7 @@ static inline bool skb_is_recycleable(const struct sk_buff *skb, int skb_size)
+ return false;
+
+ skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD);
+- if (skb_end_pointer(skb) - skb->head < skb_size)
++ if (skb_end_offset(skb) < skb_size)
+ return false;
+
+ if (skb_shared(skb) || skb_cloned(skb))
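The new skb_end_offset() helper above hides the two representations of skb->end: with NET_SKBUFF_DATA_USES_OFFSET it already stores an offset, otherwise it is a pointer and skb->head must be subtracted. A user-space model of why both variants return the same number (the struct layouts are illustrative, not the real sk_buff):

#include <stdio.h>

struct skb_off { unsigned char *head; unsigned int end; };	/* offset form */
struct skb_ptr { unsigned char *head; unsigned char *end; };	/* pointer form */

static unsigned int end_offset_off(const struct skb_off *skb)
{
	return skb->end;				/* already an offset */
}

static unsigned int end_offset_ptr(const struct skb_ptr *skb)
{
	return (unsigned int)(skb->end - skb->head);	/* derive the offset */
}

int main(void)
{
	unsigned char buf[256];
	struct skb_off a = { buf, 192 };
	struct skb_ptr b = { buf, buf + 192 };

	printf("%u %u\n", end_offset_off(&a), end_offset_ptr(&b));	/* 192 192 */
	return 0;
}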
+diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
+index 5e91b72..4913dac 100644
+--- a/include/net/ip6_route.h
++++ b/include/net/ip6_route.h
+@@ -34,6 +34,11 @@ struct route_info {
+ #define RT6_LOOKUP_F_SRCPREF_PUBLIC 0x00000010
+ #define RT6_LOOKUP_F_SRCPREF_COA 0x00000020
+
++/* We do not (yet?) support IPv6 jumbograms (RFC 2675)
++ * Unlike IPv4, hdr->seg_len doesn't include the IPv6 header
++ */
++#define IP6_MAX_MTU (0xFFFF + sizeof(struct ipv6hdr))
++
+ /*
+ * rt6_srcprefs2flags() and rt6_flags2srcprefs() translate
+ * between IPV6_ADDR_PREFERENCES socket option values
+diff --git a/include/trace/events/module.h b/include/trace/events/module.h
+index 1619327..ca298c7 100644
+--- a/include/trace/events/module.h
++++ b/include/trace/events/module.h
+@@ -78,7 +78,7 @@ DECLARE_EVENT_CLASS(module_refcnt,
+
+ TP_fast_assign(
+ __entry->ip = ip;
+- __entry->refcnt = __this_cpu_read(mod->refptr->incs) + __this_cpu_read(mod->refptr->decs);
++ __entry->refcnt = __this_cpu_read(mod->refptr->incs) - __this_cpu_read(mod->refptr->decs);
+ __assign_str(name, mod->name);
+ ),
+
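The sign fix above matters because module reference counting keeps two per-cpu counters that only ever grow: incs and decs. The live refcount is their difference; adding them, as the old tracepoint did, yields a number that can only increase. A tiny demonstration:

#include <stdio.h>

int main(void)
{
	unsigned long incs = 0, decs = 0;

	incs++; incs++;		/* two module_get() */
	decs++;			/* one module_put() */

	printf("broken (incs + decs): %lu\n", incs + decs);	/* 3 */
	printf("fixed  (incs - decs): %lu\n", incs - decs);	/* 1 */
	return 0;
}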
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index b15b4f7..1d1edcb 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -4899,6 +4899,9 @@ struct swevent_htable {
+
+ /* Recursion avoidance in each contexts */
+ int recursion[PERF_NR_CONTEXTS];
++
++ /* Keeps track of cpu being initialized/exited */
++ bool online;
+ };
+
+ static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);
+@@ -5141,8 +5144,14 @@ static int perf_swevent_add(struct perf_event *event, int flags)
+ hwc->state = !(flags & PERF_EF_START);
+
+ head = find_swevent_head(swhash, event);
+- if (WARN_ON_ONCE(!head))
++ if (!head) {
++ /*
++ * We can race with cpu hotplug code. Do not
++ * WARN if the cpu just got unplugged.
++ */
++ WARN_ON_ONCE(swhash->online);
+ return -EINVAL;
++ }
+
+ hlist_add_head_rcu(&event->hlist_entry, head);
+
+@@ -6301,6 +6310,9 @@ SYSCALL_DEFINE5(perf_event_open,
+ if (attr.freq) {
+ if (attr.sample_freq > sysctl_perf_event_sample_rate)
+ return -EINVAL;
++ } else {
++ if (attr.sample_period & (1ULL << 63))
++ return -EINVAL;
+ }
+
+ /*
+@@ -7078,6 +7090,7 @@ static void __cpuinit perf_event_init_cpu(int cpu)
+ struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
+
+ mutex_lock(&swhash->hlist_mutex);
++ swhash->online = true;
+ if (swhash->hlist_refcount > 0) {
+ struct swevent_hlist *hlist;
+
+@@ -7135,6 +7148,7 @@ static void perf_event_exit_cpu(int cpu)
+ perf_event_exit_cpu_context(cpu);
+
+ mutex_lock(&swhash->hlist_mutex);
++ swhash->online = false;
+ swevent_hlist_release(swhash);
+ mutex_unlock(&swhash->hlist_mutex);
+ }
+diff --git a/kernel/futex.c b/kernel/futex.c
+index 8888815..1bb37d0 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -588,6 +588,55 @@ void exit_pi_state_list(struct task_struct *curr)
+ raw_spin_unlock_irq(&curr->pi_lock);
+ }
+
++/*
++ * We need to check the following states:
++ *
++ * Waiter | pi_state | pi->owner | uTID | uODIED | ?
++ *
++ * [1] NULL | --- | --- | 0 | 0/1 | Valid
++ * [2] NULL | --- | --- | >0 | 0/1 | Valid
++ *
++ * [3] Found | NULL | -- | Any | 0/1 | Invalid
++ *
++ * [4] Found | Found | NULL | 0 | 1 | Valid
++ * [5] Found | Found | NULL | >0 | 1 | Invalid
++ *
++ * [6] Found | Found | task | 0 | 1 | Valid
++ *
++ * [7] Found | Found | NULL | Any | 0 | Invalid
++ *
++ * [8] Found | Found | task | ==taskTID | 0/1 | Valid
++ * [9] Found | Found | task | 0 | 0 | Invalid
++ * [10] Found | Found | task | !=taskTID | 0/1 | Invalid
++ *
++ * [1] Indicates that the kernel can acquire the futex atomically. We
++ * came here due to a stale FUTEX_WAITERS/FUTEX_OWNER_DIED bit.
++ *
++ * [2] Valid, if TID does not belong to a kernel thread. If no matching
++ * thread is found then it indicates that the owner TID has died.
++ *
++ * [3] Invalid. The waiter is queued on a non PI futex
++ *
++ * [4] Valid state after exit_robust_list(), which sets the user space
++ * value to FUTEX_WAITERS | FUTEX_OWNER_DIED.
++ *
++ * [5] The user space value got manipulated between exit_robust_list()
++ * and exit_pi_state_list()
++ *
++ * [6] Valid state after exit_pi_state_list() which sets the new owner in
++ * the pi_state but cannot access the user space value.
++ *
++ * [7] pi_state->owner can only be NULL when the OWNER_DIED bit is set.
++ *
++ * [8] Owner and user space value match
++ *
++ * [9] There is no transient state which sets the user space TID to 0
++ * except exit_robust_list(), but this is indicated by the
++ * FUTEX_OWNER_DIED bit. See [4]
++ *
++ * [10] There is no transient state which leaves owner and user space
++ * TID out of sync.
++ */
+ static int
+ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
+ union futex_key *key, struct futex_pi_state **ps)
+@@ -603,12 +652,13 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
+ plist_for_each_entry_safe(this, next, head, list) {
+ if (match_futex(&this->key, key)) {
+ /*
+- * Another waiter already exists - bump up
+- * the refcount and return its pi_state:
++ * Sanity check the waiter before increasing
++ * the refcount and attaching to it.
+ */
+ pi_state = this->pi_state;
+ /*
+- * Userspace might have messed up non-PI and PI futexes
++ * Userspace might have messed up non-PI and
++ * PI futexes [3]
+ */
+ if (unlikely(!pi_state))
+ return -EINVAL;
+@@ -616,34 +666,70 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
+ WARN_ON(!atomic_read(&pi_state->refcount));
+
+ /*
+- * When pi_state->owner is NULL then the owner died
+- * and another waiter is on the fly. pi_state->owner
+- * is fixed up by the task which acquires
+- * pi_state->rt_mutex.
+- *
+- * We do not check for pid == 0 which can happen when
+- * the owner died and robust_list_exit() cleared the
+- * TID.
++ * Handle the owner died case:
+ */
+- if (pid && pi_state->owner) {
++ if (uval & FUTEX_OWNER_DIED) {
++ /*
++ * exit_pi_state_list sets owner to NULL and
++ * wakes the topmost waiter. The task which
++ * acquires the pi_state->rt_mutex will fix up
++ * the owner.
++ */
++ if (!pi_state->owner) {
++ /*
++ * No pi state owner, but the user
++ * space TID is not 0. Inconsistent
++ * state. [5]
++ */
++ if (pid)
++ return -EINVAL;
++ /*
++ * Take a ref on the state and
++ * return. [4]
++ */
++ goto out_state;
++ }
++
+ /*
+- * Bail out if user space manipulated the
+- * futex value.
++ * If TID is 0, then either the dying owner
++ * has not yet executed exit_pi_state_list()
++ * or some waiter acquired the rtmutex in the
++ * pi state, but did not yet fix up the TID in
++ * user space.
++ *
++ * Take a ref on the state and return. [6]
+ */
+- if (pid != task_pid_vnr(pi_state->owner))
++ if (!pid)
++ goto out_state;
++ } else {
++ /*
++ * If the owner died bit is not set,
++ * then the pi_state must have an
++ * owner. [7]
++ */
++ if (!pi_state->owner)
+ return -EINVAL;
+ }
+
++ /*
++ * Bail out if user space manipulated the
++ * futex value. If pi state exists then the
++ * owner TID must be the same as the user
++ * space TID. [9/10]
++ */
++ if (pid != task_pid_vnr(pi_state->owner))
++ return -EINVAL;
++
++ out_state:
+ atomic_inc(&pi_state->refcount);
+ *ps = pi_state;
+-
+ return 0;
+ }
+ }
+
+ /*
+ * We are the first waiter - try to look up the real owner and attach
+- * the new pi_state to it, but bail out when TID = 0
++ * the new pi_state to it, but bail out when TID = 0 [1]
+ */
+ if (!pid)
+ return -ESRCH;
+@@ -651,6 +737,11 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
+ if (!p)
+ return -ESRCH;
+
++ if (!p->mm) {
++ put_task_struct(p);
++ return -EPERM;
++ }
++
+ /*
+ * We need to look at the task state flags to figure out,
+ * whether the task is exiting. To protect against the do_exit
+@@ -671,6 +762,9 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
+ return ret;
+ }
+
++ /*
++ * No existing pi state. First waiter. [2]
++ */
+ pi_state = alloc_pi_state();
+
+ /*
+@@ -742,10 +836,18 @@ retry:
+ return -EDEADLK;
+
+ /*
+- * Surprise - we got the lock. Just return to userspace:
++ * Surprise - we got the lock, but we do not trust user space at all.
+ */
+- if (unlikely(!curval))
+- return 1;
++ if (unlikely(!curval)) {
++ /*
++ * We verify whether there is kernel state for this
++ * futex. If not, we can safely assume, that the 0 ->
++ * TID transition is correct. If state exists, we do
++ * not bother to fixup the user space state as it was
++ * corrupted already.
++ */
++ return futex_top_waiter(hb, key) ? -EINVAL : 1;
++ }
+
+ uval = curval;
+
+@@ -875,6 +977,7 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
+ struct task_struct *new_owner;
+ struct futex_pi_state *pi_state = this->pi_state;
+ u32 uninitialized_var(curval), newval;
++ int ret = 0;
+
+ if (!pi_state)
+ return -EINVAL;
+@@ -898,23 +1001,19 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
+ new_owner = this->task;
+
+ /*
+- * We pass it to the next owner. (The WAITERS bit is always
+- * kept enabled while there is PI state around. We must also
+- * preserve the owner died bit.)
++ * We pass it to the next owner. The WAITERS bit is always
++ * kept enabled while there is PI state around. We cleanup the
++ * owner died bit, because we are the owner.
+ */
+- if (!(uval & FUTEX_OWNER_DIED)) {
+- int ret = 0;
++ newval = FUTEX_WAITERS | task_pid_vnr(new_owner);
+
+- newval = FUTEX_WAITERS | task_pid_vnr(new_owner);
+-
+- if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
+- ret = -EFAULT;
+- else if (curval != uval)
+- ret = -EINVAL;
+- if (ret) {
+- raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
+- return ret;
+- }
++ if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
++ ret = -EFAULT;
++ else if (curval != uval)
++ ret = -EINVAL;
++ if (ret) {
++ raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
++ return ret;
+ }
+
+ raw_spin_lock_irq(&pi_state->owner->pi_lock);
+@@ -1193,7 +1292,7 @@ void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
+ *
+ * Returns:
+ * 0 - failed to acquire the lock atomically
+- * 1 - acquired the lock
++ * >0 - acquired the lock, return value is vpid of the top_waiter
+ * <0 - error
+ */
+ static int futex_proxy_trylock_atomic(u32 __user *pifutex,
+@@ -1204,7 +1303,7 @@ static int futex_proxy_trylock_atomic(u32 __user *pifutex,
+ {
+ struct futex_q *top_waiter = NULL;
+ u32 curval;
+- int ret;
++ int ret, vpid;
+
+ if (get_futex_value_locked(&curval, pifutex))
+ return -EFAULT;
+@@ -1232,11 +1331,13 @@ static int futex_proxy_trylock_atomic(u32 __user *pifutex,
+ * the contended case or if set_waiters is 1. The pi_state is returned
+ * in ps in contended cases.
+ */
++ vpid = task_pid_vnr(top_waiter->task);
+ ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
+ set_waiters);
+- if (ret == 1)
++ if (ret == 1) {
+ requeue_pi_wake_futex(top_waiter, key2, hb2);
+-
++ return vpid;
++ }
+ return ret;
+ }
+
+@@ -1268,10 +1369,16 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
+ struct futex_hash_bucket *hb1, *hb2;
+ struct plist_head *head1;
+ struct futex_q *this, *next;
+- u32 curval2;
+
+ if (requeue_pi) {
+ /*
++ * Requeue PI only works on two distinct uaddrs. This
++ * check is only valid for private futexes. See below.
++ */
++ if (uaddr1 == uaddr2)
++ return -EINVAL;
++
++ /*
+ * requeue_pi requires a pi_state, try to allocate it now
+ * without any locks in case it fails.
+ */
+@@ -1309,6 +1416,15 @@ retry:
+ if (unlikely(ret != 0))
+ goto out_put_key1;
+
++ /*
++ * The check above which compares uaddrs is not sufficient for
++ * shared futexes. We need to compare the keys:
++ */
++ if (requeue_pi && match_futex(&key1, &key2)) {
++ ret = -EINVAL;
++ goto out_put_keys;
++ }
++
+ hb1 = hash_futex(&key1);
+ hb2 = hash_futex(&key2);
+
+@@ -1354,16 +1470,25 @@ retry_private:
+ * At this point the top_waiter has either taken uaddr2 or is
+ * waiting on it. If the former, then the pi_state will not
+ * exist yet, look it up one more time to ensure we have a
+- * reference to it.
++ * reference to it. If the lock was taken, ret contains the
++ * vpid of the top waiter task.
+ */
+- if (ret == 1) {
++ if (ret > 0) {
+ WARN_ON(pi_state);
+ drop_count++;
+ task_count++;
+- ret = get_futex_value_locked(&curval2, uaddr2);
+- if (!ret)
+- ret = lookup_pi_state(curval2, hb2, &key2,
+- &pi_state);
++ /*
++ * If we acquired the lock, then the user
++ * space value of uaddr2 should be vpid. It
++ * cannot be changed by the top waiter as it
++ * is blocked on hb2 lock if it tries to do
++ * so. If something fiddled with it behind our
++ * back the pi state lookup might unearth
++ * it. So we rather use the known value than
++ * rereading and handing potential crap to
++ * lookup_pi_state.
++ */
++ ret = lookup_pi_state(ret, hb2, &key2, &pi_state);
+ }
+
+ switch (ret) {
+@@ -2133,9 +2258,10 @@ retry:
+ /*
+ * To avoid races, try to do the TID -> 0 atomic transition
+ * again. If it succeeds then we can return without waking
+- * anyone else up:
++ * anyone else up. We only try this if neither the waiters nor
++ * the owner died bit is set.
+ */
+- if (!(uval & FUTEX_OWNER_DIED) &&
++ if (!(uval & ~FUTEX_TID_MASK) &&
+ cmpxchg_futex_value_locked(&uval, uaddr, vpid, 0))
+ goto pi_faulted;
+ /*
+@@ -2167,11 +2293,9 @@ retry:
+ /*
+ * No waiters - kernel unlocks the futex:
+ */
+- if (!(uval & FUTEX_OWNER_DIED)) {
+- ret = unlock_futex_pi(uaddr, uval);
+- if (ret == -EFAULT)
+- goto pi_faulted;
+- }
++ ret = unlock_futex_pi(uaddr, uval);
++ if (ret == -EFAULT)
++ goto pi_faulted;
+
+ out_unlock:
+ spin_unlock(&hb->lock);
+@@ -2331,6 +2455,15 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+ if (ret)
+ goto out_key2;
+
++ /*
++ * The check above which compares uaddrs is not sufficient for
++ * shared futexes. We need to compare the keys:
++ */
++ if (match_futex(&q.key, &key2)) {
++ ret = -EINVAL;
++ goto out_put_keys;
++ }
++
+ /* Queue the futex_q, drop the hb lock, wait for wakeup. */
+ futex_wait_queue_me(hb, &q, to);
+
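The futex hunks above harden lookup_pi_state() against inconsistent user space state and reject requeueing a PI futex onto itself, first by comparing uaddr1/uaddr2 and then, since that is insufficient for shared futexes, by comparing the hashed keys. A minimal sketch of a userspace probe for the new -EINVAL behavior, assuming the Linux futex(2) ABI with FUTEX_CMP_REQUEUE_PI; names and argument values are illustrative, and an unpatched kernel may fail differently:

#include <errno.h>
#include <linux/futex.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	uint32_t futex_word = 0;

	/* FUTEX_CMP_REQUEUE_PI with uaddr1 == uaddr2: a patched
	 * kernel rejects this before touching any pi_state. */
	long ret = syscall(SYS_futex, &futex_word, FUTEX_CMP_REQUEUE_PI,
			   1, NULL, &futex_word, 0);
	if (ret == -1 && errno == EINVAL)
		printf("same-futex requeue rejected as expected\n");
	return 0;
}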
+diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
+index 60f7e32..20e88af 100644
+--- a/kernel/hrtimer.c
++++ b/kernel/hrtimer.c
+@@ -232,6 +232,11 @@ again:
+ goto again;
+ }
+ timer->base = new_base;
++ } else {
++ if (cpu != this_cpu && hrtimer_check_target(timer, new_base)) {
++ cpu = this_cpu;
++ goto again;
++ }
+ }
+ return new_base;
+ }
+@@ -567,6 +572,23 @@ hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
+
+ cpu_base->expires_next.tv64 = expires_next.tv64;
+
++ /*
++ * If a hang was detected in the last timer interrupt then we
++ * leave the hang delay active in the hardware. We want the
++ * system to make progress. That also prevents the following
++ * scenario:
++ * T1 expires 50ms from now
++ * T2 expires 5s from now
++ *
++ * T1 is removed, so this code is called and would reprogram
++ * the hardware to 5s from now. Any hrtimer_start after that
++ * will not reprogram the hardware due to hang_detected being
++ * set. So we'd effectively block all timers until the T2 event
++ * fires.
++ */
++ if (cpu_base->hang_detected)
++ return;
++
+ if (cpu_base->expires_next.tv64 != KTIME_MAX)
+ tick_program_event(cpu_base->expires_next, 1);
+ }
+@@ -958,11 +980,8 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
+ /* Remove an active timer from the queue: */
+ ret = remove_hrtimer(timer, base);
+
+- /* Switch the timer base, if necessary: */
+- new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);
+-
+ if (mode & HRTIMER_MODE_REL) {
+- tim = ktime_add_safe(tim, new_base->get_time());
++ tim = ktime_add_safe(tim, base->get_time());
+ /*
+ * CONFIG_TIME_LOW_RES is a temporary way for architectures
+ * to signal that they simply return xtime in
+@@ -977,6 +996,9 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
+
+ hrtimer_set_expires_range_ns(timer, tim, delta_ns);
+
++ /* Switch the timer base, if necessary: */
++ new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);
++
+ timer_stats_hrtimer_set_start_info(timer);
+
+ leftmost = enqueue_hrtimer(timer, new_base);
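The hrtimer hunks make two related fixes: __hrtimer_start_range_ns() now sets the expiry before calling switch_hrtimer_base(), since the base-switch decision (hrtimer_check_target() in the new else branch above) needs a valid expiry, and hrtimer_force_reprogram() refuses to reprogram the hardware while hang_detected is set. The T1/T2 scenario from the comment, condensed into a standalone sketch with illustrative types rather than the kernel API:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct cpu_base_sketch {
	int64_t expires_next_ns;
	bool hang_detected;
};

static void force_reprogram_sketch(struct cpu_base_sketch *base,
				   int64_t next_ns)
{
	base->expires_next_ns = next_ns;

	/* Leave the hang delay programmed: hrtimer_start will not
	 * reprogram while hang_detected is set, so moving the event
	 * out to next_ns here could stall every earlier timer. */
	if (base->hang_detected)
		return;

	printf("program hardware event for %lld ns\n", (long long)next_ns);
}

int main(void)
{
	struct cpu_base_sketch base = { .hang_detected = true };

	force_reprogram_sketch(&base, 5000000000LL); /* T2, 5s out: skipped */
	base.hang_detected = false;
	force_reprogram_sketch(&base, 50000000LL);   /* T1, 50ms: programmed */
	return 0;
}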
+diff --git a/kernel/module.c b/kernel/module.c
+index 65362d9..95ecd9f 100644
+--- a/kernel/module.c
++++ b/kernel/module.c
+@@ -2888,6 +2888,9 @@ static struct module *load_module(void __user *umod,
+ /* This has to be done once we're sure module name is unique. */
+ dynamic_debug_setup(info.debug, info.num_debug);
+
++ /* Ftrace init must be called in the MODULE_STATE_UNFORMED state */
++ ftrace_module_init(mod);
++
+ /* Find duplicate symbols */
+ err = verify_export_symbols(mod);
+ if (err < 0)
+diff --git a/kernel/sched_cpupri.c b/kernel/sched_cpupri.c
+index a86cf9d..1f4afdd 100644
+--- a/kernel/sched_cpupri.c
++++ b/kernel/sched_cpupri.c
+@@ -68,8 +68,7 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p,
+ int idx = 0;
+ int task_pri = convert_prio(p->prio);
+
+- if (task_pri >= MAX_RT_PRIO)
+- return 0;
++ BUG_ON(task_pri >= CPUPRI_NR_PRIORITIES);
+
+ for (idx = 0; idx < task_pri; idx++) {
+ struct cpupri_vec *vec = &cp->pri_to_cpu[idx];
+diff --git a/kernel/timer.c b/kernel/timer.c
+index f8b05a4..349953e 100644
+--- a/kernel/timer.c
++++ b/kernel/timer.c
+@@ -769,7 +769,7 @@ unsigned long apply_slack(struct timer_list *timer, unsigned long expires)
+
+ bit = find_last_bit(&mask, BITS_PER_LONG);
+
+- mask = (1 << bit) - 1;
++ mask = (1UL << bit) - 1;
+
+ expires_limit = expires_limit & ~(mask);
+
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index a65fa36..dcbafed 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -3542,16 +3542,11 @@ static void ftrace_init_module(struct module *mod,
+ ftrace_process_locs(mod, start, end);
+ }
+
+-static int ftrace_module_notify_enter(struct notifier_block *self,
+- unsigned long val, void *data)
++void ftrace_module_init(struct module *mod)
+ {
+- struct module *mod = data;
+-
+- if (val == MODULE_STATE_COMING)
+- ftrace_init_module(mod, mod->ftrace_callsites,
+- mod->ftrace_callsites +
+- mod->num_ftrace_callsites);
+- return 0;
++ ftrace_init_module(mod, mod->ftrace_callsites,
++ mod->ftrace_callsites +
++ mod->num_ftrace_callsites);
+ }
+
+ static int ftrace_module_notify_exit(struct notifier_block *self,
+@@ -3565,11 +3560,6 @@ static int ftrace_module_notify_exit(struct notifier_block *self,
+ return 0;
+ }
+ #else
+-static int ftrace_module_notify_enter(struct notifier_block *self,
+- unsigned long val, void *data)
+-{
+- return 0;
+-}
+ static int ftrace_module_notify_exit(struct notifier_block *self,
+ unsigned long val, void *data)
+ {
+@@ -3577,11 +3567,6 @@ static int ftrace_module_notify_exit(struct notifier_block *self,
+ }
+ #endif /* CONFIG_MODULES */
+
+-struct notifier_block ftrace_module_enter_nb = {
+- .notifier_call = ftrace_module_notify_enter,
+- .priority = INT_MAX, /* Run before anything that can use kprobes */
+-};
+-
+ struct notifier_block ftrace_module_exit_nb = {
+ .notifier_call = ftrace_module_notify_exit,
+ .priority = INT_MIN, /* Run after anything that can remove kprobes */
+@@ -3618,10 +3603,6 @@ void __init ftrace_init(void)
+ __start_mcount_loc,
+ __stop_mcount_loc);
+
+- ret = register_module_notifier(&ftrace_module_enter_nb);
+- if (ret)
+- pr_warning("Failed to register trace ftrace module enter notifier\n");
+-
+ ret = register_module_notifier(&ftrace_module_exit_nb);
+ if (ret)
+ pr_warning("Failed to register trace ftrace module exit notifier\n");
+diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
+index 41b25a0..088fbc5 100644
+--- a/kernel/tracepoint.c
++++ b/kernel/tracepoint.c
+@@ -638,6 +638,9 @@ static int tracepoint_module_coming(struct module *mod)
+ struct tp_module *tp_mod, *iter;
+ int ret = 0;
+
++ if (!mod->num_tracepoints)
++ return 0;
++
+ /*
+ * We skip modules that taint the kernel, especially those with different
+ * module headers (for forced load), to make sure we don't cause a crash.
+@@ -681,6 +684,9 @@ static int tracepoint_module_going(struct module *mod)
+ {
+ struct tp_module *pos;
+
++ if (!mod->num_tracepoints)
++ return 0;
++
+ mutex_lock(&tracepoints_mutex);
+ tracepoint_update_probe_range(mod->tracepoints_ptrs,
+ mod->tracepoints_ptrs + mod->num_tracepoints);
+diff --git a/mm/memory-failure.c b/mm/memory-failure.c
+index 96c4bcf..51901b1 100644
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -1033,15 +1033,16 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
+ return 0;
+ } else if (PageHuge(hpage)) {
+ /*
+- * Check "just unpoisoned", "filter hit", and
+- * "race with other subpage."
++ * Check "filter hit" and "race with other subpage."
+ */
+ lock_page(hpage);
+- if (!PageHWPoison(hpage)
+- || (hwpoison_filter(p) && TestClearPageHWPoison(p))
+- || (p != hpage && TestSetPageHWPoison(hpage))) {
+- atomic_long_sub(nr_pages, &mce_bad_pages);
+- return 0;
++ if (PageHWPoison(hpage)) {
++ if ((hwpoison_filter(p) && TestClearPageHWPoison(p))
++ || (p != hpage && TestSetPageHWPoison(hpage))) {
++ atomic_long_sub(nr_pages, &mce_bad_pages);
++ unlock_page(hpage);
++ return 0;
++ }
+ }
+ set_page_hwpoison_huge_page(hpage);
+ res = dequeue_hwpoisoned_huge_page(hpage);
+@@ -1093,6 +1094,8 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
+ */
+ if (!PageHWPoison(p)) {
+ printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn);
++ atomic_long_sub(nr_pages, &mce_bad_pages);
++ put_page(hpage);
+ res = 0;
+ goto out;
+ }
+diff --git a/mm/page-writeback.c b/mm/page-writeback.c
+index b5cd796..d2ac057 100644
+--- a/mm/page-writeback.c
++++ b/mm/page-writeback.c
+@@ -559,7 +559,7 @@ static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
+ * => fast response on large errors; small oscillation near setpoint
+ */
+ setpoint = (freerun + limit) / 2;
+- x = div_s64((setpoint - dirty) << RATELIMIT_CALC_SHIFT,
++ x = div64_s64(((s64)setpoint - (s64)dirty) << RATELIMIT_CALC_SHIFT,
+ limit - setpoint + 1);
+ pos_ratio = x;
+ pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
+@@ -625,7 +625,7 @@ static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
+ x_intercept = bdi_setpoint + span;
+
+ if (bdi_dirty < x_intercept - span / 4) {
+- pos_ratio = div_u64(pos_ratio * (x_intercept - bdi_dirty),
++ pos_ratio = div64_u64(pos_ratio * (x_intercept - bdi_dirty),
+ x_intercept - bdi_setpoint + 1);
+ } else
+ pos_ratio /= 4;
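Both bdi_position_ratio() hunks swap 32-bit-divisor helpers for full 64-bit ones: div_s64() and div_u64() take an s32/u32 divisor, so any divisor wider than 32 bits is silently truncated, and the added (s64) casts keep setpoint - dirty from being evaluated as a huge unsigned value when dirty exceeds setpoint. The truncation half, illustrated standalone with plain C types standing in for the kernel helpers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t dividend = 1000000000000LL; /* arbitrary large value */
	int64_t divisor  = 0x100000001LL;   /* needs 33 bits */

	int64_t div_s64_like   = dividend / (int32_t)divisor; /* divisor truncated to 1 */
	int64_t div64_s64_like = dividend / divisor;

	printf("truncated=%lld correct=%lld\n",
	       (long long)div_s64_like, (long long)div64_s64_like);
	return 0;
}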
+diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
+index aa12649..4d99d42 100644
+--- a/net/bluetooth/hci_conn.c
++++ b/net/bluetooth/hci_conn.c
+@@ -610,14 +610,17 @@ static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
+ if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
+ struct hci_cp_auth_requested cp;
+
+- /* encrypt must be pending if auth is also pending */
+- set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend);
+-
+ cp.handle = cpu_to_le16(conn->handle);
+ hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
+ sizeof(cp), &cp);
++
++ /* If we're already encrypted, set the REAUTH_PEND flag,
++ * otherwise set the ENCRYPT_PEND.
++ */
+ if (conn->key_type != 0xff)
+ set_bit(HCI_CONN_REAUTH_PEND, &conn->pend);
++ else
++ set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend);
+ }
+
+ return 0;
+diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
+index cbf9ccd..99a48a3 100644
+--- a/net/bridge/br_netlink.c
++++ b/net/bridge/br_netlink.c
+@@ -211,11 +211,26 @@ static int br_validate(struct nlattr *tb[], struct nlattr *data[])
+ return 0;
+ }
+
++static int br_dev_newlink(struct net *src_net, struct net_device *dev,
++ struct nlattr *tb[], struct nlattr *data[])
++{
++ struct net_bridge *br = netdev_priv(dev);
++
++ if (tb[IFLA_ADDRESS]) {
++ spin_lock_bh(&br->lock);
++ br_stp_change_bridge_id(br, nla_data(tb[IFLA_ADDRESS]));
++ spin_unlock_bh(&br->lock);
++ }
++
++ return register_netdevice(dev);
++}
++
+ struct rtnl_link_ops br_link_ops __read_mostly = {
+ .kind = "bridge",
+ .priv_size = sizeof(struct net_bridge),
+ .setup = br_dev_setup,
+ .validate = br_validate,
++ .newlink = br_dev_newlink,
+ .dellink = br_dev_delete,
+ };
+
+diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
+index 5864cc4..45f93f8 100644
+--- a/net/bridge/netfilter/ebtables.c
++++ b/net/bridge/netfilter/ebtables.c
+@@ -1044,10 +1044,9 @@ static int do_replace_finish(struct net *net, struct ebt_replace *repl,
+ if (repl->num_counters &&
+ copy_to_user(repl->counters, counterstmp,
+ repl->num_counters * sizeof(struct ebt_counter))) {
+- ret = -EFAULT;
++ /* Silent error, can't fail, new table is already in place */
++ net_warn_ratelimited("ebtables: counters copy to user failed while replacing table\n");
+ }
+- else
+- ret = 0;
+
+ /* decrease module count and free resources */
+ EBT_ENTRY_ITERATE(table->entries, table->entries_size,
+diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
+index ad5b708..20ba2d5 100644
+--- a/net/ceph/messenger.c
++++ b/net/ceph/messenger.c
+@@ -284,6 +284,37 @@ static int ceph_tcp_sendmsg(struct socket *sock, struct kvec *iov,
+ return r;
+ }
+
++static int __ceph_tcp_sendpage(struct socket *sock, struct page *page,
++ int offset, size_t size, bool more)
++{
++ int flags = MSG_DONTWAIT | MSG_NOSIGNAL | (more ? MSG_MORE : MSG_EOR);
++ int ret;
++
++ ret = kernel_sendpage(sock, page, offset, size, flags);
++ if (ret == -EAGAIN)
++ ret = 0;
++
++ return ret;
++}
++
++static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
++ int offset, size_t size, bool more)
++{
++ int ret;
++ struct kvec iov;
++
++ /* sendpage cannot properly handle pages with page_count == 0,
++ * we need to fall back to sendmsg if that's the case */
++ if (page_count(page) >= 1)
++ return __ceph_tcp_sendpage(sock, page, offset, size, more);
++
++ iov.iov_base = kmap(page) + offset;
++ iov.iov_len = size;
++ ret = ceph_tcp_sendmsg(sock, &iov, 1, size, more);
++ kunmap(page);
++
++ return ret;
++}
+
+ /*
+ * Shutdown/close the socket for the given connection.
+@@ -851,18 +882,14 @@ static int write_partial_msg_pages(struct ceph_connection *con)
+ cpu_to_le32(crc32c(tmpcrc, base, len));
+ con->out_msg_pos.did_page_crc = 1;
+ }
+- ret = kernel_sendpage(con->sock, page,
++ ret = ceph_tcp_sendpage(con->sock, page,
+ con->out_msg_pos.page_pos + page_shift,
+- len,
+- MSG_DONTWAIT | MSG_NOSIGNAL |
+- MSG_MORE);
++ len, 1);
+
+ if (crc &&
+ (msg->pages || msg->pagelist || msg->bio || in_trail))
+ kunmap(page);
+
+- if (ret == -EAGAIN)
+- ret = 0;
+ if (ret <= 0)
+ goto out;
+
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 7bcf37d..854da15 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -3648,6 +3648,7 @@ static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
+ skb->vlan_tci = 0;
+ skb->dev = napi->dev;
+ skb->skb_iif = 0;
++ skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
+
+ napi->skb = skb;
+ }
+diff --git a/net/core/filter.c b/net/core/filter.c
+index 5dea452..9c88080 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -320,6 +320,8 @@ load_b:
+
+ if (skb_is_nonlinear(skb))
+ return 0;
++ if (skb->len < sizeof(struct nlattr))
++ return 0;
+ if (A > skb->len - sizeof(struct nlattr))
+ return 0;
+
+@@ -336,11 +338,13 @@ load_b:
+
+ if (skb_is_nonlinear(skb))
+ return 0;
++ if (skb->len < sizeof(struct nlattr))
++ return 0;
+ if (A > skb->len - sizeof(struct nlattr))
+ return 0;
+
+ nla = (struct nlattr *)&skb->data[A];
+- if (nla->nla_len > A - skb->len)
++ if (nla->nla_len > skb->len - A)
+ return 0;
+
+ nla = nla_find_nested(nla, X);
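The SKF_AD_NLATTR / SKF_AD_NLATTR_NEST fixes close two holes in the BPF ancillary loads: a packet shorter than a netlink attribute header made "skb->len - sizeof(struct nlattr)" underflow, and the old nested test "nla->nla_len > A - skb->len" compared against an underflowed value instead of the bytes remaining after offset A. The corrected bounds logic as a standalone predicate, with a stub attribute type and a little-endian toy packet (names illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct nlattr_sketch {
	uint16_t nla_len;
	uint16_t nla_type;
};

static bool nlattr_fits(const uint8_t *data, uint32_t skb_len, uint32_t A)
{
	const struct nlattr_sketch *nla;

	if (skb_len < sizeof(*nla))     /* new check: tiny packets */
		return false;
	if (A > skb_len - sizeof(*nla)) /* header must fit at offset A */
		return false;

	nla = (const struct nlattr_sketch *)(data + A);
	return nla->nla_len <= skb_len - A; /* fixed payload bound */
}

int main(void)
{
	uint8_t pkt[8] = { 12, 0, 1, 0 }; /* claims nla_len = 12, only 8 left */

	printf("fits: %d\n", nlattr_fits(pkt, sizeof(pkt), 0)); /* prints 0 */
	return 0;
}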
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index 5b7d5f2..7beaf10 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -744,7 +744,8 @@ static inline int rtnl_vfinfo_size(const struct net_device *dev,
+ return 0;
+ }
+
+-static size_t rtnl_port_size(const struct net_device *dev)
++static size_t rtnl_port_size(const struct net_device *dev,
++ u32 ext_filter_mask)
+ {
+ size_t port_size = nla_total_size(4) /* PORT_VF */
+ + nla_total_size(PORT_PROFILE_MAX) /* PORT_PROFILE */
+@@ -760,7 +761,8 @@ static size_t rtnl_port_size(const struct net_device *dev)
+ size_t port_self_size = nla_total_size(sizeof(struct nlattr))
+ + port_size;
+
+- if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent)
++ if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
++ !(ext_filter_mask & RTEXT_FILTER_VF))
+ return 0;
+ if (dev_num_vf(dev->dev.parent))
+ return port_self_size + vf_ports_size +
+@@ -791,7 +793,7 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev,
+ + nla_total_size(ext_filter_mask
+ & RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */
+ + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
+- + rtnl_port_size(dev) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
++ + rtnl_port_size(dev, ext_filter_mask) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
+ + rtnl_link_get_size(dev) /* IFLA_LINKINFO */
+ + rtnl_link_get_af_size(dev); /* IFLA_AF_SPEC */
+ }
+@@ -851,11 +853,13 @@ static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev)
+ return 0;
+ }
+
+-static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev)
++static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev,
++ u32 ext_filter_mask)
+ {
+ int err;
+
+- if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent)
++ if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
++ !(ext_filter_mask & RTEXT_FILTER_VF))
+ return 0;
+
+ err = rtnl_port_self_fill(skb, dev);
+@@ -1002,7 +1006,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
+ nla_nest_end(skb, vfinfo);
+ }
+
+- if (rtnl_port_fill(skb, dev))
++ if (rtnl_port_fill(skb, dev, ext_filter_mask))
+ goto nla_put_failure;
+
+ if (dev->rtnl_link_ops) {
+@@ -1057,6 +1061,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
+ struct hlist_node *node;
+ struct nlattr *tb[IFLA_MAX+1];
+ u32 ext_filter_mask = 0;
++ int err;
+
+ s_h = cb->args[0];
+ s_idx = cb->args[1];
+@@ -1077,11 +1082,17 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
+ hlist_for_each_entry_rcu(dev, node, head, index_hlist) {
+ if (idx < s_idx)
+ goto cont;
+- if (rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
+- NETLINK_CB(cb->skb).pid,
+- cb->nlh->nlmsg_seq, 0,
+- NLM_F_MULTI,
+- ext_filter_mask) <= 0)
++ err = rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
++ NETLINK_CB(cb->skb).pid,
++ cb->nlh->nlmsg_seq, 0,
++ NLM_F_MULTI,
++ ext_filter_mask);
++ /* If we ran out of room on the first message,
++ * we're in trouble
++ */
++ WARN_ON((err == -EMSGSIZE) && (skb->len == 0));
++
++ if (err <= 0)
+ goto out;
+
+ nl_dump_check_consistent(cb, nlmsg_hdr(skb));
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 8ac4a0f..9204d9b 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -743,7 +743,7 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
+ struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
+ {
+ int headerlen = skb_headroom(skb);
+- unsigned int size = (skb_end_pointer(skb) - skb->head) + skb->data_len;
++ unsigned int size = skb_end_offset(skb) + skb->data_len;
+ struct sk_buff *n = alloc_skb(size, gfp_mask);
+
+ if (!n)
+@@ -843,7 +843,7 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
+ {
+ int i;
+ u8 *data;
+- int size = nhead + (skb_end_pointer(skb) - skb->head) + ntail;
++ int size = nhead + skb_end_offset(skb) + ntail;
+ long off;
+ bool fastpath;
+
+@@ -2642,14 +2642,13 @@ struct sk_buff *skb_segment(struct sk_buff *skb, u32 features)
+ if (unlikely(!nskb))
+ goto err;
+
+- hsize = skb_end_pointer(nskb) - nskb->head;
++ hsize = skb_end_offset(nskb);
+ if (skb_cow_head(nskb, doffset + headroom)) {
+ kfree_skb(nskb);
+ goto err;
+ }
+
+- nskb->truesize += skb_end_pointer(nskb) - nskb->head -
+- hsize;
++ nskb->truesize += skb_end_offset(nskb) - hsize;
+ skb_release_head_state(nskb);
+ __skb_push(nskb, doffset);
+ } else {
+@@ -3197,12 +3196,14 @@ EXPORT_SYMBOL(__skb_warn_lro_forwarding);
+ unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
+ {
+ const struct skb_shared_info *shinfo = skb_shinfo(skb);
+- unsigned int hdr_len;
+
+ if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
+- hdr_len = tcp_hdrlen(skb);
+- else
+- hdr_len = sizeof(struct udphdr);
+- return hdr_len + shinfo->gso_size;
++ return tcp_hdrlen(skb) + shinfo->gso_size;
++
++ /* UFO sets gso_size to the size of the fragmentation
++ * payload, i.e. the size of the L4 (UDP) header is already
++ * accounted for.
++ */
++ return shinfo->gso_size;
+ }
+ EXPORT_SYMBOL_GPL(skb_gso_transport_seglen);
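These skbuff hunks replace open-coded "skb_end_pointer(skb) - skb->head" arithmetic with skb_end_offset(), which the napi_reuse_skb() truesize fix above also relies on. The helper itself is introduced outside the quoted hunks; it presumably matches the upstream definition in include/linux/skbuff.h, reconstructed here as a sketch:

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned int skb_end_offset(const struct sk_buff *skb)
{
	return skb->end;
}
#else
static inline unsigned int skb_end_offset(const struct sk_buff *skb)
{
	return skb->end - skb->head;
}
#endif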
+diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
+index d01f9c6..76da979 100644
+--- a/net/ipv4/fib_semantics.c
++++ b/net/ipv4/fib_semantics.c
+@@ -752,13 +752,13 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
+ fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL);
+ if (fi == NULL)
+ goto failure;
++ fib_info_cnt++;
+ if (cfg->fc_mx) {
+ fi->fib_metrics = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
+ if (!fi->fib_metrics)
+ goto failure;
+ } else
+ fi->fib_metrics = (u32 *) dst_default_metrics;
+- fib_info_cnt++;
+
+ fi->fib_net = hold_net(net);
+ fi->fib_protocol = cfg->fc_protocol;
+diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
+index e0d9f02..7593f3a 100644
+--- a/net/ipv4/ip_forward.c
++++ b/net/ipv4/ip_forward.c
+@@ -42,12 +42,12 @@
+ static bool ip_may_fragment(const struct sk_buff *skb)
+ {
+ return unlikely((ip_hdr(skb)->frag_off & htons(IP_DF)) == 0) ||
+- !skb->local_df;
++ skb->local_df;
+ }
+
+ static bool ip_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
+ {
+- if (skb->len <= mtu || skb->local_df)
++ if (skb->len <= mtu)
+ return false;
+
+ if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
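The ip_forward.c change fixes an inverted flag test: skb->local_df means fragmentation is locally allowed even when the packet carries DF (tunnels set it, for example), so ip_may_fragment() must return true when DF is clear or local_df is set, and ip_exceeds_mtu() no longer lets local_df mask a genuine MTU overrun. The intended truth table, as a toy model:

#include <stdbool.h>
#include <stdio.h>

/* Fixed predicate: may fragment if DF is clear, or if local_df
 * explicitly allows local fragmentation. The old code had the
 * sense of local_df inverted. */
static bool ip_may_fragment_model(bool df_set, bool local_df)
{
	return !df_set || local_df;
}

int main(void)
{
	printf("DF clear            -> %d (fragment ok)\n",
	       ip_may_fragment_model(false, false));
	printf("DF set, local_df    -> %d (fragment ok)\n",
	       ip_may_fragment_model(true, true));
	printf("DF set, no local_df -> %d (must not fragment)\n",
	       ip_may_fragment_model(true, false));
	return 0;
}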
+diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
+index fd7a3f6..bcb6e61 100644
+--- a/net/ipv4/netfilter/arp_tables.c
++++ b/net/ipv4/netfilter/arp_tables.c
+@@ -1039,8 +1039,10 @@ static int __do_replace(struct net *net, const char *name,
+
+ xt_free_table_info(oldinfo);
+ if (copy_to_user(counters_ptr, counters,
+- sizeof(struct xt_counters) * num_counters) != 0)
+- ret = -EFAULT;
++ sizeof(struct xt_counters) * num_counters) != 0) {
++ /* Silent error, can't fail, new table is already in place */
++ net_warn_ratelimited("arptables: counters copy to user failed while replacing table\n");
++ }
+ vfree(counters);
+ xt_table_unlock(t);
+ return ret;
+diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
+index 24e556e..f98a1cf 100644
+--- a/net/ipv4/netfilter/ip_tables.c
++++ b/net/ipv4/netfilter/ip_tables.c
+@@ -1227,8 +1227,10 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
+
+ xt_free_table_info(oldinfo);
+ if (copy_to_user(counters_ptr, counters,
+- sizeof(struct xt_counters) * num_counters) != 0)
+- ret = -EFAULT;
++ sizeof(struct xt_counters) * num_counters) != 0) {
++ /* Silent error, can't fail, new table is already in place */
++ net_warn_ratelimited("iptables: counters copy to user failed while replacing table\n");
++ }
+ vfree(counters);
+ xt_table_unlock(t);
+ return ret;
+diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
+index 00975b6..d495d4b 100644
+--- a/net/ipv4/ping.c
++++ b/net/ipv4/ping.c
+@@ -203,26 +203,33 @@ static int ping_init_sock(struct sock *sk)
+ struct net *net = sock_net(sk);
+ gid_t group = current_egid();
+ gid_t range[2];
+- struct group_info *group_info = get_current_groups();
+- int i, j, count = group_info->ngroups;
++ struct group_info *group_info;
++ int i, j, count;
++ int ret = 0;
+
+ inet_get_ping_group_range_net(net, range, range+1);
+ if (range[0] <= group && group <= range[1])
+ return 0;
+
++ group_info = get_current_groups();
++ count = group_info->ngroups;
+ for (i = 0; i < group_info->nblocks; i++) {
+ int cp_count = min_t(int, NGROUPS_PER_BLOCK, count);
+
+ for (j = 0; j < cp_count; j++) {
+ group = group_info->blocks[i][j];
+ if (range[0] <= group && group <= range[1])
+- return 0;
++ goto out_release_group;
+ }
+
+ count -= cp_count;
+ }
+
+- return -EACCES;
++ ret = -EACCES;
++
++out_release_group:
++ put_group_info(group_info);
++ return ret;
+ }
+
+ static void ping_close(struct sock *sk, long timeout)
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index 6768ce2..6526110 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -2142,7 +2142,7 @@ static int __mkroute_input(struct sk_buff *skb,
+ struct in_device *out_dev;
+ unsigned int flags = 0;
+ __be32 spec_dst;
+- u32 itag;
++ u32 itag = 0;
+
+ /* get a working reference to the output device */
+ out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res));
+diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
+index b78eac2..ed3d6d4 100644
+--- a/net/ipv4/tcp_cubic.c
++++ b/net/ipv4/tcp_cubic.c
+@@ -406,7 +406,7 @@ static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt_us)
+ ratio -= ca->delayed_ack >> ACK_RATIO_SHIFT;
+ ratio += cnt;
+
+- ca->delayed_ack = min(ratio, ACK_RATIO_LIMIT);
++ ca->delayed_ack = clamp(ratio, 1U, ACK_RATIO_LIMIT);
+ }
+
+ /* Some calls are for duplicates without timestamps */
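The CUBIC hunk is a divide-by-zero guard: delayed_ack is later used as a divisor when bictcp_update() scales cnt, so the decaying ratio must never reach 0. clamp(ratio, 1U, ACK_RATIO_LIMIT) keeps the old upper bound and adds a floor of 1; the kernel macro reduces to the obvious comparisons, modeled standalone below (the limit value is illustrative, not taken from tcp_cubic.c):

#include <stdio.h>

#define ACK_RATIO_LIMIT_SKETCH 512U /* illustrative upper bound */

static unsigned int clamp_u32(unsigned int val, unsigned int lo,
			      unsigned int hi)
{
	if (val < lo)
		return lo;
	if (val > hi)
		return hi;
	return val;
}

int main(void)
{
	unsigned int decayed = 0; /* ratio that has decayed to zero */

	/* old code: min(0, LIMIT) == 0, and a later division blows up */
	printf("clamped: %u\n", clamp_u32(decayed, 1U, ACK_RATIO_LIMIT_SKETCH));
	return 0;
}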
+diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
+index 94874b0..2e752b2 100644
+--- a/net/ipv6/netfilter/ip6_tables.c
++++ b/net/ipv6/netfilter/ip6_tables.c
+@@ -1249,8 +1249,10 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
+
+ xt_free_table_info(oldinfo);
+ if (copy_to_user(counters_ptr, counters,
+- sizeof(struct xt_counters) * num_counters) != 0)
+- ret = -EFAULT;
++ sizeof(struct xt_counters) * num_counters) != 0) {
++ /* Silent error, can't fail, new table is already in place */
++ net_warn_ratelimited("ip6tables: counters copy to user failed while replacing table\n");
++ }
+ vfree(counters);
+ xt_table_unlock(t);
+ return ret;
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 39e11f9..782f67a 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -1056,7 +1056,7 @@ static unsigned int ip6_mtu(const struct dst_entry *dst)
+ unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
+
+ if (mtu)
+- return mtu;
++ goto out;
+
+ mtu = IPV6_MIN_MTU;
+
+@@ -1066,7 +1066,8 @@ static unsigned int ip6_mtu(const struct dst_entry *dst)
+ mtu = idev->cnf.mtu6;
+ rcu_read_unlock();
+
+- return mtu;
++out:
++ return min_t(unsigned int, mtu, IP6_MAX_MTU);
+ }
+
+ static struct dst_entry *icmp6_dst_gc_list;
+diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
+index 969cd3e..e0f0934 100644
+--- a/net/l2tp/l2tp_ppp.c
++++ b/net/l2tp/l2tp_ppp.c
+@@ -772,9 +772,9 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
+ session->deref = pppol2tp_session_sock_put;
+
+ /* If PMTU discovery was enabled, use the MTU that was discovered */
+- dst = sk_dst_get(sk);
++ dst = sk_dst_get(tunnel->sock);
+ if (dst != NULL) {
+- u32 pmtu = dst_mtu(__sk_dst_get(sk));
++ u32 pmtu = dst_mtu(__sk_dst_get(tunnel->sock));
+ if (pmtu != 0)
+ session->mtu = session->mru = pmtu -
+ PPPOL2TP_HEADER_OVERHEAD;
+diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
+index e051398..d067ed1 100644
+--- a/net/sched/act_mirred.c
++++ b/net/sched/act_mirred.c
+@@ -201,13 +201,12 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a,
+ out:
+ if (err) {
+ m->tcf_qstats.overlimits++;
+- /* should we be asking for packet to be dropped?
+- * may make sense for redirect case only
+- */
+- retval = TC_ACT_SHOT;
+- } else {
++ if (m->tcfm_eaction != TCA_EGRESS_MIRROR)
++ retval = TC_ACT_SHOT;
++ else
++ retval = m->tcf_action;
++ } else
+ retval = m->tcf_action;
+- }
+ spin_unlock(&m->tcf_lock);
+
+ return retval;
+diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
+index 6f6ad86..de35e01 100644
+--- a/net/sctp/protocol.c
++++ b/net/sctp/protocol.c
+@@ -528,8 +528,13 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
+ continue;
+ if ((laddr->state == SCTP_ADDR_SRC) &&
+ (AF_INET == laddr->a.sa.sa_family)) {
+- fl4->saddr = laddr->a.v4.sin_addr.s_addr;
+ fl4->fl4_sport = laddr->a.v4.sin_port;
++ flowi4_update_output(fl4,
++ asoc->base.sk->sk_bound_dev_if,
++ RT_CONN_FLAGS(asoc->base.sk),
++ daddr->v4.sin_addr.s_addr,
++ laddr->a.v4.sin_addr.s_addr);
++
+ rt = ip_route_output_key(&init_net, fl4);
+ if (!IS_ERR(rt)) {
+ dst = &rt->dst;
+diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
+index 619228d..dc5748f 100644
+--- a/scripts/mod/modpost.c
++++ b/scripts/mod/modpost.c
+@@ -569,12 +569,16 @@ static int ignore_undef_symbol(struct elf_info *info, const char *symname)
+ if (strncmp(symname, "_restgpr_", sizeof("_restgpr_") - 1) == 0 ||
+ strncmp(symname, "_savegpr_", sizeof("_savegpr_") - 1) == 0 ||
+ strncmp(symname, "_rest32gpr_", sizeof("_rest32gpr_") - 1) == 0 ||
+- strncmp(symname, "_save32gpr_", sizeof("_save32gpr_") - 1) == 0)
++ strncmp(symname, "_save32gpr_", sizeof("_save32gpr_") - 1) == 0 ||
++ strncmp(symname, "_restvr_", sizeof("_restvr_") - 1) == 0 ||
++ strncmp(symname, "_savevr_", sizeof("_savevr_") - 1) == 0)
+ return 1;
+ if (info->hdr->e_machine == EM_PPC64)
+ /* Special register function linked on all modules during final link of .ko */
+ if (strncmp(symname, "_restgpr0_", sizeof("_restgpr0_") - 1) == 0 ||
+- strncmp(symname, "_savegpr0_", sizeof("_savegpr0_") - 1) == 0)
++ strncmp(symname, "_savegpr0_", sizeof("_savegpr0_") - 1) == 0 ||
++ strncmp(symname, "_restvr_", sizeof("_restvr_") - 1) == 0 ||
++ strncmp(symname, "_savevr_", sizeof("_savevr_") - 1) == 0)
+ return 1;
+ /* Do not ignore this symbol */
+ return 0;
+diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
+index 74268b4..bdd2c0d 100644
+--- a/virt/kvm/async_pf.c
++++ b/virt/kvm/async_pf.c
+@@ -75,7 +75,6 @@ static void async_pf_execute(struct work_struct *work)
+ spin_lock(&vcpu->async_pf.lock);
+ list_add_tail(&apf->link, &vcpu->async_pf.done);
+ apf->page = page;
+- apf->done = true;
+ spin_unlock(&vcpu->async_pf.lock);
+
+ /*
+@@ -88,7 +87,7 @@ static void async_pf_execute(struct work_struct *work)
+ if (waitqueue_active(&vcpu->wq))
+ wake_up_interruptible(&vcpu->wq);
+
+- mmdrop(mm);
++ mmput(mm);
+ kvm_put_kvm(vcpu->kvm);
+ }
+
+@@ -99,10 +98,12 @@ void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
+ struct kvm_async_pf *work =
+ list_entry(vcpu->async_pf.queue.next,
+ typeof(*work), queue);
+- cancel_work_sync(&work->work);
+ list_del(&work->queue);
+- if (!work->done) /* work was canceled */
++ if (cancel_work_sync(&work->work)) {
++ mmput(work->mm);
++ kvm_put_kvm(vcpu->kvm); /* == work->vcpu->kvm */
+ kmem_cache_free(async_pf_cache, work);
++ }
+ }
+
+ spin_lock(&vcpu->async_pf.lock);
+@@ -163,13 +164,12 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
+ return 0;
+
+ work->page = NULL;
+- work->done = false;
+ work->vcpu = vcpu;
+ work->gva = gva;
+ work->addr = gfn_to_hva(vcpu->kvm, gfn);
+ work->arch = *arch;
+ work->mm = current->mm;
+- atomic_inc(&work->mm->mm_count);
++ atomic_inc(&work->mm->mm_users);
+ kvm_get_kvm(work->vcpu->kvm);
+
+ /* this can't really happen otherwise gfn_to_pfn_async
+@@ -187,7 +187,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
+ return 1;
+ retry_sync:
+ kvm_put_kvm(work->vcpu->kvm);
+- mmdrop(work->mm);
++ mmput(work->mm);
+ kmem_cache_free(async_pf_cache, work);
+ return 0;
+ }
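The async_pf hunks swap refcounting primitives with different guarantees: mm_count (mmdrop()) only keeps struct mm_struct allocated, while mm_users (mmput()) keeps the address space alive, which the worker needs when it resolves the fault with get_user_pages(). The flush path likewise now frees a work item, and drops its mm and kvm references, only when cancel_work_sync() reports the work was actually cancelled; otherwise the executed worker already owns that cleanup. A toy model of the ownership rule:

#include <stdbool.h>
#include <stdio.h>

struct work_model {
	bool ran; /* async_pf_execute() already executed */
	int refs; /* stands in for the mm_users + kvm references */
};

/* Models cancel_work_sync(): true only if execution was prevented. */
static bool cancel_work_sync_model(struct work_model *w)
{
	return !w->ran;
}

static void flush_one(struct work_model *w)
{
	if (cancel_work_sync_model(w)) {
		w->refs--; /* mmput() + kvm_put_kvm(), then free the item */
		printf("flusher freed item, refs=%d\n", w->refs);
	} else {
		printf("worker ran, it drops the refs itself\n");
	}
}

int main(void)
{
	struct work_model queued = { .ran = false, .refs = 1 };
	struct work_model done   = { .ran = true,  .refs = 0 };

	flush_one(&queued);
	flush_one(&done);
	return 0;
}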