author     Max Krummenacher <max.krummenacher@toradex.com>  2020-10-10 13:30:49 +0200
committer  Max Krummenacher <max.krummenacher@toradex.com>  2020-10-10 13:30:49 +0200
commit     566575659eec6b0a72de46dd1c3ae8fc221a90a5 (patch)
tree       fed807b5725b3592752bb19005c9eea9dbcd387d /include/linux
parent     f1442a59da02a0b5ef648925f2f274a3e64999cc (diff)
parent     18f617d6f398c264e3172532a5d3c656f17cecfa (diff)
Merge tag 'v4.4.238' into toradex_vf_4.4-next
This is the 4.4.238 stable release

Signed-off-by: Max Krummenacher <max.krummenacher@toradex.com>
Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/libata.h   | 13
-rw-r--r--  include/linux/mtd/map.h  |  2
-rw-r--r--  include/linux/seqlock.h  | 11
-rw-r--r--  include/linux/skbuff.h   | 16
4 files changed, 31 insertions, 11 deletions
diff --git a/include/linux/libata.h b/include/linux/libata.h
index af561d33221d..ec49344f7555 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -500,6 +500,7 @@ enum hsm_task_states {
};
enum ata_completion_errors {
+ AC_ERR_OK = 0, /* no error */
AC_ERR_DEV = (1 << 0), /* device reported error */
AC_ERR_HSM = (1 << 1), /* host state machine violation */
AC_ERR_TIMEOUT = (1 << 2), /* timeout */
@@ -896,9 +897,9 @@ struct ata_port_operations {
/*
* Command execution
*/
- int  (*qc_defer)(struct ata_queued_cmd *qc);
- int  (*check_atapi_dma)(struct ata_queued_cmd *qc);
- void (*qc_prep)(struct ata_queued_cmd *qc);
+ int (*qc_defer)(struct ata_queued_cmd *qc);
+ int (*check_atapi_dma)(struct ata_queued_cmd *qc);
+ enum ata_completion_errors (*qc_prep)(struct ata_queued_cmd *qc);
unsigned int (*qc_issue)(struct ata_queued_cmd *qc);
bool (*qc_fill_rtf)(struct ata_queued_cmd *qc);
@@ -1190,7 +1191,7 @@ extern int ata_xfer_mode2shift(unsigned long xfer_mode);
extern const char *ata_mode_string(unsigned long xfer_mask);
extern unsigned long ata_id_xfermask(const u16 *id);
extern int ata_std_qc_defer(struct ata_queued_cmd *qc);
-extern void ata_noop_qc_prep(struct ata_queued_cmd *qc);
+extern enum ata_completion_errors ata_noop_qc_prep(struct ata_queued_cmd *qc);
extern void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
unsigned int n_elem);
extern unsigned int ata_dev_classify(const struct ata_taskfile *tf);
@@ -1881,9 +1882,9 @@ extern const struct ata_port_operations ata_bmdma_port_ops;
.sg_tablesize = LIBATA_MAX_PRD, \
.dma_boundary = ATA_DMA_BOUNDARY
-extern void ata_bmdma_qc_prep(struct ata_queued_cmd *qc);
+extern enum ata_completion_errors ata_bmdma_qc_prep(struct ata_queued_cmd *qc);
extern unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc);
-extern void ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc);
+extern enum ata_completion_errors ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc);
extern unsigned int ata_bmdma_port_intr(struct ata_port *ap,
struct ata_queued_cmd *qc);
extern irqreturn_t ata_bmdma_interrupt(int irq, void *dev_instance);
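[Editor's note] For context on the hunks above: with the new signature, a qc_prep hook reports preparation failures instead of returning void. A minimal sketch, assuming hypothetical driver names foo_qc_prep and foo_fill_prd (none of this is part of the commit):

#include <linux/libata.h>

static int foo_fill_prd(struct ata_queued_cmd *qc);	/* hypothetical helper */

static enum ata_completion_errors foo_qc_prep(struct ata_queued_cmd *qc)
{
	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return AC_ERR_OK;		/* nothing to prepare */

	if (foo_fill_prd(qc))
		return AC_ERR_SYSTEM;		/* propagate the setup failure */

	return AC_ERR_OK;			/* the new success value added above */
}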
diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h
index 676d3d2a1a0a..d8bae7cb86f3 100644
--- a/include/linux/mtd/map.h
+++ b/include/linux/mtd/map.h
@@ -307,7 +307,7 @@ void map_destroy(struct mtd_info *mtd);
({ \
int i, ret = 1; \
for (i = 0; i < map_words(map); i++) { \
- if (((val1).x[i] & (val2).x[i]) != (val2).x[i]) { \
+ if (((val1).x[i] & (val2).x[i]) != (val3).x[i]) { \
ret = 0; \
break; \
} \
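[Editor's note] The one-character fix matters whenever a caller passes a val3 distinct from val2, since map_word_andequal(map, val1, val2, val3) is meant to test (val1 & val2) == val3 word by word; the old code ignored val3 entirely. A hedged caller sketch, with hypothetical mask and pattern values:

#include <linux/mtd/map.h>

/* Sketch only: check that 'status', under 'mask', equals 'want'.
 * Before this fix the macro never consulted its fourth argument and
 * instead tested whether all 'mask' bits were set in 'status'. */
static bool foo_status_matches(struct map_info *map)
{
	map_word status = map_read(map, 0);	/* existing accessor */
	map_word mask   = { .x = { 0x00ff } };	/* hypothetical values */
	map_word want   = { .x = { 0x0055 } };

	return map_word_andequal(map, status, mask, want);
}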
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index e0582106ef4f..a10f36378417 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -242,6 +242,13 @@ static inline void raw_write_seqcount_end(seqcount_t *s)
* usual consistency guarantee. It is one wmb cheaper, because we can
* collapse the two back-to-back wmb()s.
*
+ * Note that writes surrounding the barrier should be declared atomic (e.g.
+ * via WRITE_ONCE): a) to ensure the writes become visible to other threads
+ * atomically, avoiding compiler optimizations; b) to document which writes are
+ * meant to propagate to the reader critical section. This is necessary because
+ * neither the writes before nor those after the barrier are enclosed in a
+ * seq-writer critical section that makes readers aware of ongoing writes.
+ *
* seqcount_t seq;
* bool X = true, Y = false;
*
@@ -261,11 +268,11 @@ static inline void raw_write_seqcount_end(seqcount_t *s)
*
* void write(void)
* {
- * Y = true;
+ * WRITE_ONCE(Y, true);
*
* raw_write_seqcount_barrier(seq);
*
- * X = false;
+ * WRITE_ONCE(X, false);
* }
*/
static inline void raw_write_seqcount_barrier(seqcount_t *s)
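[Editor's note] The write() example above pairs with a reader that brackets its loads in a seqcount retry loop. A reader-side sketch, assuming the same hypothetical seq, X and Y globals from the comment (not part of this hunk):

void read(void)
{
	unsigned start;
	bool x, y;

	do {
		start = read_seqcount_begin(&seq);
		x = READ_ONCE(X);	/* pairs with WRITE_ONCE in write() */
		y = READ_ONCE(Y);
	} while (read_seqcount_retry(&seq, start));

	/* the barrier guarantees x and y are never both false here */
}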
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 735ff1525f48..95feb153fe9a 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1439,6 +1439,18 @@ static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
}
/**
+ * skb_queue_len_lockless - get queue length
+ * @list_: list to measure
+ *
+ * Return the length of an &sk_buff queue.
+ * This variant can be used in lockless contexts.
+ */
+static inline __u32 skb_queue_len_lockless(const struct sk_buff_head *list_)
+{
+ return READ_ONCE(list_->qlen);
+}
+
+/**
* __skb_queue_head_init - initialize non-spinlock portions of sk_buff_head
* @list: queue to initialize
*
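[Editor's note] The new helper is the load side of a READ_ONCE/WRITE_ONCE pair; the matching store lands in __skb_unlink() in the next hunk. A hypothetical lockless caller might look like this sketch:

#include <linux/skbuff.h>

/* Sketch only: opportunistic depth check without taking list->lock.
 * The result may be stale by the time it is used, but the annotated
 * load can be neither torn nor refetched by the compiler. */
static bool foo_queue_is_full(const struct sk_buff_head *q, __u32 limit)
{
	return skb_queue_len_lockless(q) >= limit;
}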
@@ -1641,7 +1653,7 @@ static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
struct sk_buff *next, *prev;
- list->qlen--;
+ WRITE_ONCE(list->qlen, list->qlen - 1);
next = skb->next;
prev = skb->prev;
skb->next = skb->prev = NULL;
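[Editor's note] Taken together with skb_queue_len_lockless() above, the annotated decrement makes the unsynchronized read well-defined. A comment-style sketch of the racing pair (hypothetical CPUs):

/* The store/load pair this annotation creates:
 *
 *	CPU0, under list->lock:			CPU1, lockless:
 *	WRITE_ONCE(list->qlen,			n = READ_ONCE(list->qlen);
 *		   list->qlen - 1);
 *
 * CPU1 observes either the old or the new length; never a torn or
 * compiler-refetched value. */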
@@ -2651,7 +2663,7 @@ static inline int skb_padto(struct sk_buff *skb, unsigned int len)
* is untouched. Otherwise it is extended. Returns zero on
* success. The skb is freed on error.
*/
-static inline int skb_put_padto(struct sk_buff *skb, unsigned int len)
+static inline int __must_check skb_put_padto(struct sk_buff *skb, unsigned int len)
{
unsigned int size = skb->len;
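[Editor's note] Since skb_put_padto() frees the skb on failure, ignoring its result can lead to a use-after-free; the __must_check annotation turns that into a compile-time warning. A hypothetical transmit-path caller:

#include <linux/netdevice.h>
#include <linux/etherdevice.h>

/* Sketch only: pad a frame to the Ethernet minimum before handing it
 * to hardware; the return value must now be checked. */
static netdev_tx_t foo_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (skb_put_padto(skb, ETH_ZLEN)) {
		dev->stats.tx_dropped++;	/* skb already freed; do not touch it */
		return NETDEV_TX_OK;
	}

	/* ... queue the padded skb to the hardware ... */
	return NETDEV_TX_OK;
}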