Mirror of https://github.com/MiSTer-devel/Linux-Kernel_MiSTer.git (synced 2026-04-19 03:04:24 +00:00)

Tag: v5.15.1
@@ -249,7 +249,7 @@ void acpi_table_print_madt_entry (struct acpi_subtable_header *madt);
/* the following numa functions are architecture-dependent */
void acpi_numa_slit_init (struct acpi_table_slit *slit);

#if defined(CONFIG_X86) || defined(CONFIG_IA64)
#if defined(CONFIG_X86) || defined(CONFIG_IA64) || defined(CONFIG_LOONGARCH)
void acpi_numa_processor_affinity_init (struct acpi_srat_cpu_affinity *pa);
#else
static inline void
@@ -1380,13 +1380,11 @@ static inline int find_acpi_cpu_cache_topology(unsigned int cpu, int level)
#endif

#ifdef CONFIG_ACPI
extern int acpi_platform_notify(struct device *dev, enum kobject_action action);
extern void acpi_device_notify(struct device *dev);
extern void acpi_device_notify_remove(struct device *dev);
#else
static inline int
acpi_platform_notify(struct device *dev, enum kobject_action action)
{
        return 0;
}
static inline void acpi_device_notify(struct device *dev) { }
static inline void acpi_device_notify_remove(struct device *dev) { }
#endif

#endif /*_LINUX_ACPI_H*/
@@ -321,10 +321,20 @@ asmlinkage unsigned long __arm_smccc_sve_check(unsigned long x0);
 * from register 0 to 3 on return from the SMC instruction. An optional
 * quirk structure provides vendor specific behavior.
 */
#ifdef CONFIG_HAVE_ARM_SMCCC
asmlinkage void __arm_smccc_smc(unsigned long a0, unsigned long a1,
                        unsigned long a2, unsigned long a3, unsigned long a4,
                        unsigned long a5, unsigned long a6, unsigned long a7,
                        struct arm_smccc_res *res, struct arm_smccc_quirk *quirk);
#else
static inline void __arm_smccc_smc(unsigned long a0, unsigned long a1,
                        unsigned long a2, unsigned long a3, unsigned long a4,
                        unsigned long a5, unsigned long a6, unsigned long a7,
                        struct arm_smccc_res *res, struct arm_smccc_quirk *quirk)
{
        *res = (struct arm_smccc_res){};
}
#endif

/**
 * __arm_smccc_hvc() - make HVC calls
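The new !CONFIG_HAVE_ARM_SMCCC stub zero-fills the result structure, so common code can issue SMCCC calls unconditionally and simply sees an all-zero response where the conveyance is absent. A minimal caller sketch, assuming only the public arm-smccc API (the function ID is illustrative, not from this diff):

        static bool my_feature_present(u32 fn_id)
        {
                struct arm_smccc_res res;

                /* arm_smccc_smc() expands to __arm_smccc_smc(..., NULL quirk) */
                arm_smccc_smc(fn_id, 0, 0, 0, 0, 0, 0, 0, &res);
                return res.a0 == SMCCC_RET_SUCCESS;
        }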
@@ -77,9 +77,8 @@
        __ret; \
})

#include <linux/atomic-arch-fallback.h>
#include <asm-generic/atomic-instrumented.h>

#include <asm-generic/atomic-long.h>
#include <linux/atomic/atomic-arch-fallback.h>
#include <linux/atomic/atomic-long.h>
#include <linux/atomic/atomic-instrumented.h>

#endif /* _LINUX_ATOMIC_H */
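The generated fallback, long and instrumented atomic headers move out of asm-generic into the new include/linux/atomic/ directory (the two new files listed below). Callers are unaffected: they keep including <linux/atomic.h>, which pulls the pieces in. A trivial sketch, assuming nothing beyond the public API (my_refs is illustrative):

        #include <linux/atomic.h>

        static atomic_t my_refs = ATOMIC_INIT(0);

        static void my_get(void)
        {
                atomic_inc(&my_refs);
        }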
include/linux/atomic/atomic-instrumented.h: new file, 1915 lines (diff suppressed because it is too large)
include/linux/atomic/atomic-long.h: new file, 1014 lines (diff suppressed because it is too large)
@@ -116,6 +116,7 @@ struct bdi_writeback {
        struct list_head b_dirty_time;  /* time stamps are dirty */
        spinlock_t list_lock;           /* protects the b_* lists */

        atomic_t writeback_inodes;      /* number of inodes under writeback */
        struct percpu_counter stat[NR_WB_STAT_ITEMS];

        unsigned long congested;        /* WB_[a]sync_congested flags */
@@ -142,6 +143,7 @@ struct bdi_writeback {
        spinlock_t work_lock;           /* protects work_list & dwork scheduling */
        struct list_head work_list;
        struct delayed_work dwork;      /* work item used for writeback */
        struct delayed_work bw_dwork;   /* work item used for bandwidth estimate */

        unsigned long dirty_sleep;      /* last wait */
@@ -143,7 +143,7 @@ static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
        sb = inode->i_sb;
#ifdef CONFIG_BLOCK
        if (sb_is_blkdev_sb(sb))
                return I_BDEV(inode)->bd_bdi;
                return I_BDEV(inode)->bd_disk->bdi;
#endif
        return sb->s_bdi;
}
@@ -288,6 +288,17 @@ static inline struct bdi_writeback *inode_to_wb(const struct inode *inode)
        return inode->i_wb;
}

static inline struct bdi_writeback *inode_to_wb_wbc(
                                struct inode *inode,
                                struct writeback_control *wbc)
{
        /*
         * If wbc does not have inode attached, it means cgroup writeback was
         * disabled when wbc started. Just use the default wb in that case.
         */
        return wbc->wb ? wbc->wb : &inode_to_bdi(inode)->wb;
}

/**
 * unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction
 * @inode: target inode
@@ -366,6 +377,14 @@ static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
        return &inode_to_bdi(inode)->wb;
}

static inline struct bdi_writeback *inode_to_wb_wbc(
                                struct inode *inode,
                                struct writeback_control *wbc)
{
        return inode_to_wb(inode);
}

static inline struct bdi_writeback *
unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
{
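inode_to_wb_wbc() picks the writeback context the wbc is actually charged against, falling back to the bdi-embedded wb when cgroup writeback is off; the !CONFIG_CGROUP_WRITEBACK variant above always takes the fallback. A hedged usage sketch (the function name is illustrative, only inode_to_wb_wbc() and wb_stat() come from this header):

        static s64 wbc_wb_writeback(struct inode *inode,
                                    struct writeback_control *wbc)
        {
                /* Read one counter of the wb this wbc targets. */
                struct bdi_writeback *wb = inode_to_wb_wbc(inode, wbc);

                return wb_stat(wb, WB_WRITEBACK);
        }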
@@ -5,7 +5,6 @@
#ifndef __LINUX_BIO_H
#define __LINUX_BIO_H

#include <linux/highmem.h>
#include <linux/mempool.h>
#include <linux/ioprio.h>
/* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */
@@ -375,7 +374,7 @@ static inline void bip_set_seed(struct bio_integrity_payload *bip,

#endif /* CONFIG_BLK_DEV_INTEGRITY */

extern void bio_trim(struct bio *bio, int offset, int size);
void bio_trim(struct bio *bio, sector_t offset, sector_t size);
extern struct bio *bio_split(struct bio *bio, int sectors,
                             gfp_t gfp, struct bio_set *bs);

@@ -401,6 +400,7 @@ static inline struct bio *bio_next_split(struct bio *bio, int sectors,
enum {
        BIOSET_NEED_BVECS = BIT(0),
        BIOSET_NEED_RESCUER = BIT(1),
        BIOSET_PERCPU_CACHE = BIT(2),
};
extern int bioset_init(struct bio_set *, unsigned int, unsigned int, int flags);
extern void bioset_exit(struct bio_set *);
@@ -409,6 +409,8 @@ extern int bioset_init_from_src(struct bio_set *bs, struct bio_set *src);

struct bio *bio_alloc_bioset(gfp_t gfp, unsigned short nr_iovecs,
                struct bio_set *bs);
struct bio *bio_alloc_kiocb(struct kiocb *kiocb, unsigned short nr_vecs,
                struct bio_set *bs);
struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned short nr_iovecs);
extern void bio_put(struct bio *);

@@ -519,47 +521,6 @@ static inline void bio_clone_blkg_association(struct bio *dst,
                                              struct bio *src) { }
#endif /* CONFIG_BLK_CGROUP */

#ifdef CONFIG_HIGHMEM
/*
 * remember never ever reenable interrupts between a bvec_kmap_irq and
 * bvec_kunmap_irq!
 */
static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
{
        unsigned long addr;

        /*
         * might not be a highmem page, but the preempt/irq count
         * balancing is a lot nicer this way
         */
        local_irq_save(*flags);
        addr = (unsigned long) kmap_atomic(bvec->bv_page);

        BUG_ON(addr & ~PAGE_MASK);

        return (char *) addr + bvec->bv_offset;
}

static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
{
        unsigned long ptr = (unsigned long) buffer & PAGE_MASK;

        kunmap_atomic((void *) ptr);
        local_irq_restore(*flags);
}

#else
static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
{
        return page_address(bvec->bv_page) + bvec->bv_offset;
}

static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
{
        *flags = 0;
}
#endif

/*
 * BIO list management for use by remapping drivers (e.g. DM or MD) and loop.
 *
@@ -699,6 +660,11 @@ struct bio_set {
        struct kmem_cache *bio_slab;
        unsigned int front_pad;

        /*
         * per-cpu bio alloc cache
         */
        struct bio_alloc_cache __percpu *cache;

        mempool_t bio_pool;
        mempool_t bvec_pool;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
@@ -715,6 +681,11 @@
        struct bio_list rescue_list;
        struct work_struct rescue_work;
        struct workqueue_struct *rescue_workqueue;

        /*
         * Hot un-plug notifier for the per-cpu cache, if used
         */
        struct hlist_node cpuhp_dead;
};

static inline bool bioset_initialized(struct bio_set *bs)
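BIOSET_PERCPU_CACHE and the new cache/cpuhp_dead fields back bio_alloc_kiocb(), which serves polled-I/O allocations from a per-cpu front cache. A sketch under stated assumptions (my_bio_set, my_init and my_alloc are illustrative, not part of this diff):

        static struct bio_set my_bio_set;

        static int my_init(void)
        {
                /* Opt in to the per-cpu cache at bio_set creation. */
                return bioset_init(&my_bio_set, 64, 0,
                                   BIOSET_NEED_BVECS | BIOSET_PERCPU_CACHE);
        }

        static struct bio *my_alloc(struct kiocb *iocb)
        {
                /* Only polled (HIPRI) kiocbs actually hit the cache. */
                return bio_alloc_kiocb(iocb, 1, &my_bio_set);
        }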
@@ -227,6 +227,12 @@ unsigned int bitmap_ord_to_pos(const unsigned long *bitmap, unsigned int ord, un
int bitmap_print_to_pagebuf(bool list, char *buf,
                                   const unsigned long *maskp, int nmaskbits);

extern int bitmap_print_bitmask_to_buf(char *buf, const unsigned long *maskp,
                int nmaskbits, loff_t off, size_t count);

extern int bitmap_print_list_to_buf(char *buf, const unsigned long *maskp,
                int nmaskbits, loff_t off, size_t count);

#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1)))
#define BITMAP_LAST_WORD_MASK(nbits) (~0UL >> (-(nbits) & (BITS_PER_LONG - 1)))
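Unlike bitmap_print_to_pagebuf(), the two new *_to_buf() helpers take an offset and length, so a very large mask can be emitted through a sysfs bin_attribute in PAGE_SIZE chunks. A hedged sketch of such a read handler (my_mask and the function name are illustrative):

        static const struct cpumask *my_mask;

        static ssize_t my_mask_read(struct file *file, struct kobject *kobj,
                                    struct bin_attribute *attr, char *buf,
                                    loff_t off, size_t count)
        {
                return bitmap_print_bitmask_to_buf(buf, cpumask_bits(my_mask),
                                                   nr_cpu_ids, off, count);
        }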
@@ -4,6 +4,7 @@

#include <asm/types.h>
#include <linux/bits.h>
#include <linux/typecheck.h>

#include <uapi/linux/kernel.h>

@@ -253,6 +254,55 @@ static __always_inline void __assign_bit(long nr, volatile unsigned long *addr,
                __clear_bit(nr, addr);
}

/**
 * __ptr_set_bit - Set bit in a pointer's value
 * @nr: the bit to set
 * @addr: the address of the pointer variable
 *
 * Example:
 *      void *p = foo();
 *      __ptr_set_bit(bit, &p);
 */
#define __ptr_set_bit(nr, addr)                         \
        ({                                              \
                typecheck_pointer(*(addr));             \
                __set_bit(nr, (unsigned long *)(addr)); \
        })

/**
 * __ptr_clear_bit - Clear bit in a pointer's value
 * @nr: the bit to clear
 * @addr: the address of the pointer variable
 *
 * Example:
 *      void *p = foo();
 *      __ptr_clear_bit(bit, &p);
 */
#define __ptr_clear_bit(nr, addr)                         \
        ({                                                \
                typecheck_pointer(*(addr));               \
                __clear_bit(nr, (unsigned long *)(addr)); \
        })

/**
 * __ptr_test_bit - Test bit in a pointer's value
 * @nr: the bit to test
 * @addr: the address of the pointer variable
 *
 * Example:
 *      void *p = foo();
 *      if (__ptr_test_bit(bit, &p)) {
 *              ...
 *      } else {
 *              ...
 *      }
 */
#define __ptr_test_bit(nr, addr)                       \
        ({                                             \
                typecheck_pointer(*(addr));            \
                test_bit(nr, (unsigned long *)(addr)); \
        })

#ifdef __KERNEL__

#ifndef set_mask_bits
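The __ptr_*_bit() macros treat a pointer variable's own value as a small bit array, with typecheck_pointer() rejecting non-pointer arguments at compile time. A compact sketch of the low-bit pointer-tagging idiom the kerneldoc examples hint at (MY_TAG and the helpers are illustrative; bit 0 is only free if the pointee is suitably aligned):

        #define MY_TAG 0

        static void *tag(void *p)
        {
                __ptr_set_bit(MY_TAG, &p);
                return p;
        }

        static bool tagged(void *p)
        {
                return __ptr_test_bit(MY_TAG, &p);
        }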
@@ -152,8 +152,8 @@ typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);
typedef size_t (blkcg_pol_stat_pd_fn)(struct blkg_policy_data *pd, char *buf,
                                      size_t size);
typedef bool (blkcg_pol_stat_pd_fn)(struct blkg_policy_data *pd,
                                    struct seq_file *s);

struct blkcg_policy {
        int plid;
@@ -404,7 +404,13 @@ enum {
        BLK_MQ_F_STACKING = 1 << 2,
        BLK_MQ_F_TAG_HCTX_SHARED = 1 << 3,
        BLK_MQ_F_BLOCKING = 1 << 5,
        /* Do not allow an I/O scheduler to be configured. */
        BLK_MQ_F_NO_SCHED = 1 << 6,
        /*
         * Select 'none' during queue registration in case of a single hwq
         * or shared hwqs instead of 'mq-deadline'.
         */
        BLK_MQ_F_NO_SCHED_BY_DEFAULT = 1 << 7,
        BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
        BLK_MQ_F_ALLOC_POLICY_BITS = 1,

@@ -426,18 +432,14 @@ enum {
        ((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \
                << BLK_MQ_F_ALLOC_POLICY_START_BIT)

struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata,
                struct lock_class_key *lkclass);
#define blk_mq_alloc_disk(set, queuedata)                               \
({                                                                      \
        static struct lock_class_key __key;                             \
        struct gendisk *__disk = __blk_mq_alloc_disk(set, queuedata);   \
                                                                        \
        if (!IS_ERR(__disk))                                            \
                lockdep_init_map(&__disk->lockdep_map,                  \
                        "(bio completion)", &__key, 0);                 \
        __disk;                                                         \
        __blk_mq_alloc_disk(set, queuedata, &__key);                    \
})
struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set,
                void *queuedata);
struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
                struct request_queue *q);
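With the lockdep key now created inside the blk_mq_alloc_disk() macro and handed down, only the __blk_mq_alloc_disk() signature changed; driver callers stay as they were. A hedged caller sketch (my_dev and my_probe are illustrative):

        static int my_probe(struct my_dev *md)
        {
                struct gendisk *disk = blk_mq_alloc_disk(&md->tag_set, md);

                if (IS_ERR(disk))
                        return PTR_ERR(disk);
                md->disk = disk;
                return 0;
        }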
@@ -34,14 +34,10 @@ struct block_device {
        void *                  bd_holder;
        int                     bd_holders;
        bool                    bd_write_holder;
#ifdef CONFIG_SYSFS
        struct list_head        bd_holder_disks;
#endif
        struct kobject          *bd_holder_dir;
        u8                      bd_partno;
        spinlock_t              bd_size_lock; /* for bd_inode->i_size updates */
        struct gendisk *        bd_disk;
        struct backing_dev_info *bd_bdi;

        /* The counter of freeze processes */
        int                     bd_fsfreeze_count;
@@ -281,6 +277,7 @@ struct bio {
};

#define BIO_RESET_BYTES         offsetof(struct bio, bi_max_vecs)
#define BIO_MAX_SECTORS         (UINT_MAX >> SECTOR_SHIFT)

/*
 * bio flags
@@ -301,6 +298,7 @@ enum {
        BIO_TRACKED,            /* set if bio goes through the rq_qos path */
        BIO_REMAPPED,
        BIO_ZONE_WRITE_LOCKED,  /* Owns a zoned device zone write lock */
        BIO_PERCPU_CACHE,       /* can participate in per-cpu alloc cache */
        BIO_FLAG_LAST
};
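With SECTOR_SHIFT = 9, BIO_MAX_SECTORS works out to UINT_MAX >> 9 = 8388607 sectors, the largest sector count whose byte size (8388607 * 512 = 4294966784, just under 4 GiB) still fits the 32-bit bi_size field. The bio_trim() switch from int to sector_t in the bio.h hunk above is consistent with offsets of this magnitude.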
@@ -11,14 +11,12 @@
#include <linux/minmax.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/backing-dev-defs.h>
#include <linux/wait.h>
#include <linux/mempool.h>
#include <linux/pfn.h>
#include <linux/bio.h>
#include <linux/stringify.h>
#include <linux/gfp.h>
#include <linux/bsg.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/percpu-refcount.h>
@@ -28,14 +26,11 @@
#include <linux/sbitmap.h>

struct module;
struct scsi_ioctl_command;

struct request_queue;
struct elevator_queue;
struct blk_trace;
struct request;
struct sg_io_hdr;
struct bsg_job;
struct blkcg_gq;
struct blk_flush_queue;
struct pr_ops;
@@ -275,9 +270,6 @@ enum blk_queue_state {
#define BLK_TAG_ALLOC_FIFO 0 /* allocate starting from 0 */
#define BLK_TAG_ALLOC_RR 1 /* allocate starting from last allocated tag */

#define BLK_SCSI_MAX_CMDS       (256)
#define BLK_SCSI_CMD_PER_LONG   (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))

/*
 * Zoned block device models (zoned limit).
 *
@@ -398,8 +390,6 @@ struct request_queue {
        struct blk_mq_hw_ctx    **queue_hw_ctx;
        unsigned int            nr_hw_queues;

        struct backing_dev_info *backing_dev_info;

        /*
         * The queue owner gets to use this for whatever they like.
         * ll_rw_blk doesn't touch it.
@@ -424,6 +414,8 @@ struct request_queue {

        spinlock_t              queue_lock;

        struct gendisk          *disk;

        /*
         * queue kobject
         */
@@ -506,11 +498,6 @@ struct request_queue {
        unsigned int            max_active_zones;
#endif /* CONFIG_BLK_DEV_ZONED */

        /*
         * sg stuff
         */
        unsigned int            sg_timeout;
        unsigned int            sg_reserved_size;
        int                     node;
        struct mutex            debugfs_mutex;
#ifdef CONFIG_BLK_DEV_IO_TRACE
@@ -537,10 +524,6 @@ struct request_queue {

        int                     mq_freeze_depth;

#if defined(CONFIG_BLK_DEV_BSG)
        struct bsg_class_device bsg_dev;
#endif

#ifdef CONFIG_BLK_DEV_THROTTLING
        /* Throttle data */
        struct throtl_data *td;
@@ -664,8 +647,6 @@ extern void blk_clear_pm_only(struct request_queue *q);
        dma_map_page_attrs(dev, (bv)->bv_page, (bv)->bv_offset, (bv)->bv_len, \
        (dir), (attrs))

#define queue_to_disk(q)        (dev_to_disk(kobj_to_dev((q)->kobj.parent)))

static inline bool queue_is_mq(struct request_queue *q)
{
        return q->mq_ops;
@@ -888,16 +869,6 @@ extern blk_status_t blk_insert_cloned_request(struct request_queue *q,
                                     struct request *rq);
int blk_rq_append_bio(struct request *rq, struct bio *bio);
extern void blk_queue_split(struct bio **);
extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
                              unsigned int, void __user *);
extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
                          unsigned int, void __user *);
extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
                         struct scsi_ioctl_command __user *);
extern int get_sg_io_hdr(struct sg_io_hdr *hdr, const void __user *argp);
extern int put_sg_io_hdr(const struct sg_io_hdr *hdr, void __user *argp);

extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags);
extern void blk_queue_exit(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);
@@ -941,6 +912,10 @@ static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
#define SECTOR_SIZE (1 << SECTOR_SHIFT)
#endif

#define PAGE_SECTORS_SHIFT      (PAGE_SHIFT - SECTOR_SHIFT)
#define PAGE_SECTORS            (1 << PAGE_SECTORS_SHIFT)
#define SECTOR_MASK             (PAGE_SECTORS - 1)

/*
 * blk_rq_pos()                 : the current sector
 * blk_rq_bytes()               : bytes left in the entire request
@@ -1139,7 +1114,7 @@ void blk_queue_zone_write_granularity(struct request_queue *q,
                                      unsigned int size);
extern void blk_queue_alignment_offset(struct request_queue *q,
                                       unsigned int alignment);
void blk_queue_update_readahead(struct request_queue *q);
void disk_update_readahead(struct gendisk *disk);
extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
@@ -1346,8 +1321,6 @@ static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
                                    gfp_mask, 0);
}

extern int blk_verify_command(unsigned char *cmd, fmode_t mode);

static inline bool bdev_is_partition(struct block_device *bdev)
{
        return bdev->bd_partno;
@@ -1376,6 +1349,11 @@ static inline unsigned int queue_max_sectors(const struct request_queue *q)
        return q->limits.max_sectors;
}

static inline unsigned int queue_max_bytes(struct request_queue *q)
{
        return min_t(unsigned int, queue_max_sectors(q), INT_MAX >> 9) << 9;
}

static inline unsigned int queue_max_hw_sectors(const struct request_queue *q)
{
        return q->limits.max_hw_sectors;
@@ -1871,6 +1849,13 @@ struct block_device_operations {
        char *(*devnode)(struct gendisk *disk, umode_t *mode);
        struct module *owner;
        const struct pr_ops *pr_ops;

        /*
         * Special callback for probing GPT entry at a given sector.
         * Needed by Android devices, used by GPT scanner and MMC blk
         * driver.
         */
        int (*alternative_gpt_sector)(struct gendisk *disk, sector_t *sector);
};

#ifdef CONFIG_COMPAT
@@ -2000,8 +1985,6 @@ void blkdev_put_no_open(struct block_device *bdev);
struct block_device *bdev_alloc(struct gendisk *disk, u8 partno);
void bdev_add(struct block_device *bdev, dev_t dev);
struct block_device *I_BDEV(struct inode *inode);
struct block_device *bdgrab(struct block_device *bdev);
void bdput(struct block_device *);
int truncate_bdev_range(struct block_device *bdev, fmode_t mode, loff_t lstart,
                loff_t lend);
@@ -110,7 +110,7 @@ static inline __init bool xbc_node_is_leaf(struct xbc_node *node)
}

/* Tree-based key-value access APIs */
struct xbc_node * __init xbc_node_find_child(struct xbc_node *parent,
struct xbc_node * __init xbc_node_find_subkey(struct xbc_node *parent,
                                             const char *key);

const char * __init xbc_node_find_value(struct xbc_node *parent,
@@ -148,7 +148,7 @@ xbc_find_value(const char *key, struct xbc_node **vnode)
 */
static inline struct xbc_node * __init xbc_find_node(const char *key)
{
        return xbc_node_find_child(NULL, key);
        return xbc_node_find_subkey(NULL, key);
}

/**
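The rename to xbc_node_find_subkey() makes explicit that the lookup walks key (subkey) nodes, not value nodes. A hedged sketch of the tree-based API (the key strings are illustrative):

        static int __init my_bootconfig_query(void)
        {
                struct xbc_node *node = xbc_find_node("ftrace.event");
                const char *val;

                if (!node)
                        return -ENOENT;
                val = xbc_node_find_value(node, "enable", NULL);
                return val ? 0 : -ENOENT;
        }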
@@ -23,22 +23,73 @@ struct ctl_table_header;
struct task_struct;

#ifdef CONFIG_CGROUP_BPF

extern struct static_key_false cgroup_bpf_enabled_key[MAX_BPF_ATTACH_TYPE];
#define cgroup_bpf_enabled(type) static_branch_unlikely(&cgroup_bpf_enabled_key[type])

#define BPF_CGROUP_STORAGE_NEST_MAX 8

struct bpf_cgroup_storage_info {
        struct task_struct *task;
        struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE];
enum cgroup_bpf_attach_type {
        CGROUP_BPF_ATTACH_TYPE_INVALID = -1,
        CGROUP_INET_INGRESS = 0,
        CGROUP_INET_EGRESS,
        CGROUP_INET_SOCK_CREATE,
        CGROUP_SOCK_OPS,
        CGROUP_DEVICE,
        CGROUP_INET4_BIND,
        CGROUP_INET6_BIND,
        CGROUP_INET4_CONNECT,
        CGROUP_INET6_CONNECT,
        CGROUP_INET4_POST_BIND,
        CGROUP_INET6_POST_BIND,
        CGROUP_UDP4_SENDMSG,
        CGROUP_UDP6_SENDMSG,
        CGROUP_SYSCTL,
        CGROUP_UDP4_RECVMSG,
        CGROUP_UDP6_RECVMSG,
        CGROUP_GETSOCKOPT,
        CGROUP_SETSOCKOPT,
        CGROUP_INET4_GETPEERNAME,
        CGROUP_INET6_GETPEERNAME,
        CGROUP_INET4_GETSOCKNAME,
        CGROUP_INET6_GETSOCKNAME,
        CGROUP_INET_SOCK_RELEASE,
        MAX_CGROUP_BPF_ATTACH_TYPE
};

/* For each cpu, permit maximum BPF_CGROUP_STORAGE_NEST_MAX number of tasks
 * to use bpf cgroup storage simultaneously.
 */
DECLARE_PER_CPU(struct bpf_cgroup_storage_info,
                bpf_cgroup_storage_info[BPF_CGROUP_STORAGE_NEST_MAX]);
#define CGROUP_ATYPE(type) \
        case BPF_##type: return type

static inline enum cgroup_bpf_attach_type
to_cgroup_bpf_attach_type(enum bpf_attach_type attach_type)
{
        switch (attach_type) {
        CGROUP_ATYPE(CGROUP_INET_INGRESS);
        CGROUP_ATYPE(CGROUP_INET_EGRESS);
        CGROUP_ATYPE(CGROUP_INET_SOCK_CREATE);
        CGROUP_ATYPE(CGROUP_SOCK_OPS);
        CGROUP_ATYPE(CGROUP_DEVICE);
        CGROUP_ATYPE(CGROUP_INET4_BIND);
        CGROUP_ATYPE(CGROUP_INET6_BIND);
        CGROUP_ATYPE(CGROUP_INET4_CONNECT);
        CGROUP_ATYPE(CGROUP_INET6_CONNECT);
        CGROUP_ATYPE(CGROUP_INET4_POST_BIND);
        CGROUP_ATYPE(CGROUP_INET6_POST_BIND);
        CGROUP_ATYPE(CGROUP_UDP4_SENDMSG);
        CGROUP_ATYPE(CGROUP_UDP6_SENDMSG);
        CGROUP_ATYPE(CGROUP_SYSCTL);
        CGROUP_ATYPE(CGROUP_UDP4_RECVMSG);
        CGROUP_ATYPE(CGROUP_UDP6_RECVMSG);
        CGROUP_ATYPE(CGROUP_GETSOCKOPT);
        CGROUP_ATYPE(CGROUP_SETSOCKOPT);
        CGROUP_ATYPE(CGROUP_INET4_GETPEERNAME);
        CGROUP_ATYPE(CGROUP_INET6_GETPEERNAME);
        CGROUP_ATYPE(CGROUP_INET4_GETSOCKNAME);
        CGROUP_ATYPE(CGROUP_INET6_GETSOCKNAME);
        CGROUP_ATYPE(CGROUP_INET_SOCK_RELEASE);
        default:
                return CGROUP_BPF_ATTACH_TYPE_INVALID;
        }
}

#undef CGROUP_ATYPE

extern struct static_key_false cgroup_bpf_enabled_key[MAX_CGROUP_BPF_ATTACH_TYPE];
#define cgroup_bpf_enabled(atype) static_branch_unlikely(&cgroup_bpf_enabled_key[atype])

#define for_each_cgroup_storage_type(stype) \
        for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++)
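to_cgroup_bpf_attach_type() maps the UAPI bpf_attach_type onto the dense kernel-internal index used to size the per-cgroup arrays, returning CGROUP_BPF_ATTACH_TYPE_INVALID for non-cgroup attach types. A hedged sketch of the pattern an attach path would follow (my_attach_check, cgrp and attr are illustrative):

        static int my_attach_check(struct cgroup *cgrp, const union bpf_attr *attr)
        {
                enum cgroup_bpf_attach_type atype;

                atype = to_cgroup_bpf_attach_type(attr->attach_type);
                if (atype == CGROUP_BPF_ATTACH_TYPE_INVALID)
                        return -EINVAL;
                /* Index the per-cgroup program list with the dense index. */
                return list_empty(&cgrp->bpf.progs[atype]) ? 0 : -EBUSY;
        }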
@@ -80,15 +131,15 @@ struct bpf_prog_array;

struct cgroup_bpf {
        /* array of effective progs in this cgroup */
        struct bpf_prog_array __rcu *effective[MAX_BPF_ATTACH_TYPE];
        struct bpf_prog_array __rcu *effective[MAX_CGROUP_BPF_ATTACH_TYPE];

        /* attached progs to this cgroup and attach flags
         * when flags == 0 or BPF_F_ALLOW_OVERRIDE the progs list will
         * have either zero or one element
         * when BPF_F_ALLOW_MULTI the list can have up to BPF_CGROUP_MAX_PROGS
         */
        struct list_head progs[MAX_BPF_ATTACH_TYPE];
        u32 flags[MAX_BPF_ATTACH_TYPE];
        struct list_head progs[MAX_CGROUP_BPF_ATTACH_TYPE];
        u32 flags[MAX_CGROUP_BPF_ATTACH_TYPE];

        /* list of cgroup shared storages */
        struct list_head storages;
@@ -128,28 +179,28 @@ int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,

int __cgroup_bpf_run_filter_skb(struct sock *sk,
                                struct sk_buff *skb,
                                enum bpf_attach_type type);
                                enum cgroup_bpf_attach_type atype);

int __cgroup_bpf_run_filter_sk(struct sock *sk,
                               enum bpf_attach_type type);
                               enum cgroup_bpf_attach_type atype);

int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
                                      struct sockaddr *uaddr,
                                      enum bpf_attach_type type,
                                      enum cgroup_bpf_attach_type atype,
                                      void *t_ctx,
                                      u32 *flags);

int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
                                     struct bpf_sock_ops_kern *sock_ops,
                                     enum bpf_attach_type type);
                                     enum cgroup_bpf_attach_type atype);

int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
                                      short access, enum bpf_attach_type type);
                                      short access, enum cgroup_bpf_attach_type atype);

int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
                                   struct ctl_table *table, int write,
                                   char **buf, size_t *pcount, loff_t *ppos,
                                   enum bpf_attach_type type);
                                   enum cgroup_bpf_attach_type atype);

int __cgroup_bpf_run_filter_setsockopt(struct sock *sock, int *level,
                                       int *optname, char __user *optval,
@@ -172,44 +223,6 @@ static inline enum bpf_cgroup_storage_type cgroup_storage_type(
        return BPF_CGROUP_STORAGE_SHARED;
}

static inline int bpf_cgroup_storage_set(struct bpf_cgroup_storage
                                         *storage[MAX_BPF_CGROUP_STORAGE_TYPE])
{
        enum bpf_cgroup_storage_type stype;
        int i, err = 0;

        preempt_disable();
        for (i = 0; i < BPF_CGROUP_STORAGE_NEST_MAX; i++) {
                if (unlikely(this_cpu_read(bpf_cgroup_storage_info[i].task) != NULL))
                        continue;

                this_cpu_write(bpf_cgroup_storage_info[i].task, current);
                for_each_cgroup_storage_type(stype)
                        this_cpu_write(bpf_cgroup_storage_info[i].storage[stype],
                                       storage[stype]);
                goto out;
        }
        err = -EBUSY;
        WARN_ON_ONCE(1);

out:
        preempt_enable();
        return err;
}

static inline void bpf_cgroup_storage_unset(void)
{
        int i;

        for (i = BPF_CGROUP_STORAGE_NEST_MAX - 1; i >= 0; i--) {
                if (likely(this_cpu_read(bpf_cgroup_storage_info[i].task) != current))
                        continue;

                this_cpu_write(bpf_cgroup_storage_info[i].task, NULL);
                return;
        }
}

struct bpf_cgroup_storage *
cgroup_storage_lookup(struct bpf_cgroup_storage_map *map,
                      void *key, bool locked);
@@ -230,9 +243,9 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb)                             \
({                                                                            \
        int __ret = 0;                                                        \
        if (cgroup_bpf_enabled(BPF_CGROUP_INET_INGRESS))                      \
        if (cgroup_bpf_enabled(CGROUP_INET_INGRESS))                          \
                __ret = __cgroup_bpf_run_filter_skb(sk, skb,                  \
                                                    BPF_CGROUP_INET_INGRESS); \
                                                    CGROUP_INET_INGRESS);     \
                                                                              \
        __ret;                                                                \
})
@@ -240,54 +253,54 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb)                               \
({                                                                             \
        int __ret = 0;                                                         \
        if (cgroup_bpf_enabled(BPF_CGROUP_INET_EGRESS) && sk && sk == skb->sk) { \
        if (cgroup_bpf_enabled(CGROUP_INET_EGRESS) && sk && sk == skb->sk) {   \
                typeof(sk) __sk = sk_to_full_sk(sk);                           \
                if (sk_fullsock(__sk))                                         \
                        __ret = __cgroup_bpf_run_filter_skb(__sk, skb,         \
                                                      BPF_CGROUP_INET_EGRESS); \
                                                      CGROUP_INET_EGRESS);     \
        }                                                                      \
        __ret;                                                                 \
})

#define BPF_CGROUP_RUN_SK_PROG(sk, type)                                       \
#define BPF_CGROUP_RUN_SK_PROG(sk, atype)                                      \
({                                                                             \
        int __ret = 0;                                                         \
        if (cgroup_bpf_enabled(type)) {                                        \
                __ret = __cgroup_bpf_run_filter_sk(sk, type);                  \
        if (cgroup_bpf_enabled(atype)) {                                       \
                __ret = __cgroup_bpf_run_filter_sk(sk, atype);                 \
        }                                                                      \
        __ret;                                                                 \
})

#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk)                                      \
        BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_CREATE)
        BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET_SOCK_CREATE)

#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk)                              \
        BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_RELEASE)
        BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET_SOCK_RELEASE)

#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk)                                \
        BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET4_POST_BIND)
        BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET4_POST_BIND)

#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk)                                \
        BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET6_POST_BIND)
        BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET6_POST_BIND)

#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, type)                                \
#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, atype)                               \
({                                                                             \
        u32 __unused_flags;                                                    \
        int __ret = 0;                                                         \
        if (cgroup_bpf_enabled(type))                                          \
                __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type,     \
        if (cgroup_bpf_enabled(atype))                                         \
                __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, atype,    \
                                                          NULL,                \
                                                          &__unused_flags);    \
        __ret;                                                                 \
})

#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type, t_ctx)                    \
#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, atype, t_ctx)                   \
({                                                                             \
        u32 __unused_flags;                                                    \
        int __ret = 0;                                                         \
        if (cgroup_bpf_enabled(type)) {                                        \
        if (cgroup_bpf_enabled(atype)) {                                       \
                lock_sock(sk);                                                 \
                __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type,     \
                __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, atype,    \
                                                          t_ctx,               \
                                                          &__unused_flags);    \
                release_sock(sk);                                              \
@@ -300,13 +313,13 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
 * (at bit position 0) is to indicate CAP_NET_BIND_SERVICE capability check
 * should be bypassed (BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE).
 */
#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, type, bind_flags)        \
#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, atype, bind_flags)       \
({                                                                             \
        u32 __flags = 0;                                                       \
        int __ret = 0;                                                         \
        if (cgroup_bpf_enabled(type)) {                                        \
        if (cgroup_bpf_enabled(atype)) {                                       \
                lock_sock(sk);                                                 \
                __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type,     \
                __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, atype,    \
                                                          NULL, &__flags);     \
                release_sock(sk);                                              \
                if (__flags & BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE)            \
@@ -316,33 +329,33 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
})

#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk)                                     \
        ((cgroup_bpf_enabled(BPF_CGROUP_INET4_CONNECT) ||                      \
          cgroup_bpf_enabled(BPF_CGROUP_INET6_CONNECT)) &&                     \
        ((cgroup_bpf_enabled(CGROUP_INET4_CONNECT) ||                          \
          cgroup_bpf_enabled(CGROUP_INET6_CONNECT)) &&                         \
         (sk)->sk_prot->pre_connect)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr)                           \
        BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_CONNECT)
        BPF_CGROUP_RUN_SA_PROG(sk, uaddr, CGROUP_INET4_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr)                           \
        BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_CONNECT)
        BPF_CGROUP_RUN_SA_PROG(sk, uaddr, CGROUP_INET6_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr)                      \
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET4_CONNECT, NULL)
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_INET4_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr)                      \
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET6_CONNECT, NULL)
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_INET6_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx)                \
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_SENDMSG, t_ctx)
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP4_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx)                \
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_SENDMSG, t_ctx)
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP6_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr)                       \
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_RECVMSG, NULL)
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP4_RECVMSG, NULL)

#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr)                       \
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_RECVMSG, NULL)
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP6_RECVMSG, NULL)

/* The SOCK_OPS"_SK" macro should be used when sock_ops->sk is not a
 * fullsock and its parent fullsock cannot be traced by
@@ -362,33 +375,33 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
#define BPF_CGROUP_RUN_PROG_SOCK_OPS_SK(sock_ops, sk)                          \
({                                                                             \
        int __ret = 0;                                                         \
        if (cgroup_bpf_enabled(BPF_CGROUP_SOCK_OPS))                           \
        if (cgroup_bpf_enabled(CGROUP_SOCK_OPS))                               \
                __ret = __cgroup_bpf_run_filter_sock_ops(sk,                   \
                                                         sock_ops,             \
                                                         BPF_CGROUP_SOCK_OPS); \
                                                         CGROUP_SOCK_OPS);     \
        __ret;                                                                 \
})

#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops)                                 \
({                                                                             \
        int __ret = 0;                                                         \
        if (cgroup_bpf_enabled(BPF_CGROUP_SOCK_OPS) && (sock_ops)->sk) {       \
        if (cgroup_bpf_enabled(CGROUP_SOCK_OPS) && (sock_ops)->sk) {           \
                typeof(sk) __sk = sk_to_full_sk((sock_ops)->sk);               \
                if (__sk && sk_fullsock(__sk))                                 \
                        __ret = __cgroup_bpf_run_filter_sock_ops(__sk,         \
                                                                 sock_ops,     \
                                                         BPF_CGROUP_SOCK_OPS); \
                                                         CGROUP_SOCK_OPS);     \
        }                                                                      \
        __ret;                                                                 \
})

#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access)          \
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(atype, major, minor, access)         \
({                                                                             \
        int __ret = 0;                                                         \
        if (cgroup_bpf_enabled(BPF_CGROUP_DEVICE))                             \
                __ret = __cgroup_bpf_check_dev_permission(type, major, minor,  \
        if (cgroup_bpf_enabled(CGROUP_DEVICE))                                 \
                __ret = __cgroup_bpf_check_dev_permission(atype, major, minor, \
                                                          access,              \
                                                          BPF_CGROUP_DEVICE);  \
                                                          CGROUP_DEVICE);      \
                                                                               \
        __ret;                                                                 \
})
@@ -397,10 +410,10 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
#define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos)        \
({                                                                             \
        int __ret = 0;                                                         \
        if (cgroup_bpf_enabled(BPF_CGROUP_SYSCTL))                             \
        if (cgroup_bpf_enabled(CGROUP_SYSCTL))                                 \
                __ret = __cgroup_bpf_run_filter_sysctl(head, table, write,     \
                                                       buf, count, pos,        \
                                                       BPF_CGROUP_SYSCTL);     \
                                                       CGROUP_SYSCTL);         \
        __ret;                                                                 \
})

@@ -408,7 +421,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
                                       kernel_optval)                          \
({                                                                             \
        int __ret = 0;                                                         \
        if (cgroup_bpf_enabled(BPF_CGROUP_SETSOCKOPT))                         \
        if (cgroup_bpf_enabled(CGROUP_SETSOCKOPT))                             \
                __ret = __cgroup_bpf_run_filter_setsockopt(sock, level,        \
                                                           optname, optval,    \
                                                           optlen,             \
@@ -419,7 +432,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen)                               \
({                                                                             \
        int __ret = 0;                                                         \
        if (cgroup_bpf_enabled(BPF_CGROUP_GETSOCKOPT))                         \
        if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT))                             \
                get_user(__ret, optlen);                                       \
        __ret;                                                                 \
})
@@ -428,7 +441,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
                                       max_optlen, retval)                     \
({                                                                             \
        int __ret = retval;                                                    \
        if (cgroup_bpf_enabled(BPF_CGROUP_GETSOCKOPT))                         \
        if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT))                             \
                if (!(sock)->sk_prot->bpf_bypass_getsockopt ||                 \
                    !INDIRECT_CALL_INET_1((sock)->sk_prot->bpf_bypass_getsockopt, \
                                          tcp_bpf_bypass_getsockopt,           \
@@ -443,7 +456,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
                                            optlen, retval)                    \
({                                                                             \
        int __ret = retval;                                                    \
        if (cgroup_bpf_enabled(BPF_CGROUP_GETSOCKOPT))                         \
        if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT))                             \
                __ret = __cgroup_bpf_run_filter_getsockopt_kern(               \
                        sock, level, optname, optval, optlen, retval);         \
        __ret;                                                                 \
@@ -487,9 +500,6 @@ static inline int cgroup_bpf_prog_query(const union bpf_attr *attr,
        return -EINVAL;
}

static inline int bpf_cgroup_storage_set(
        struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]) { return 0; }
static inline void bpf_cgroup_storage_unset(void) {}
static inline int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux,
                                            struct bpf_map *map) { return 0; }
static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
@@ -505,14 +515,14 @@ static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
        return 0;
}

#define cgroup_bpf_enabled(type) (0)
#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type, t_ctx) ({ 0; })
#define cgroup_bpf_enabled(atype) (0)
#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, atype, t_ctx) ({ 0; })
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, type, flags) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, atype, flags) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) ({ 0; })
@@ -524,7 +534,7 @@ static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type,major,minor,access) ({ 0; })
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(atype, major, minor, access) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SYSCTL(head,table,write,buf,count,pos) ({ 0; })
#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, \
@@ -168,6 +168,7 @@ struct bpf_map {
        u32 max_entries;
        u32 map_flags;
        int spin_lock_off; /* >=0 valid offset, <0 error */
        int timer_off; /* >=0 valid offset, <0 error */
        u32 id;
        int numa_node;
        u32 btf_key_type_id;
@@ -197,30 +198,53 @@ static inline bool map_value_has_spin_lock(const struct bpf_map *map)
        return map->spin_lock_off >= 0;
}

static inline void check_and_init_map_lock(struct bpf_map *map, void *dst)
static inline bool map_value_has_timer(const struct bpf_map *map)
{
        if (likely(!map_value_has_spin_lock(map)))
                return;
        *(struct bpf_spin_lock *)(dst + map->spin_lock_off) =
                (struct bpf_spin_lock){};
        return map->timer_off >= 0;
}

/* copy everything but bpf_spin_lock */
static inline void check_and_init_map_value(struct bpf_map *map, void *dst)
{
        if (unlikely(map_value_has_spin_lock(map)))
                *(struct bpf_spin_lock *)(dst + map->spin_lock_off) =
                        (struct bpf_spin_lock){};
        if (unlikely(map_value_has_timer(map)))
                *(struct bpf_timer *)(dst + map->timer_off) =
                        (struct bpf_timer){};
}

/* copy everything but bpf_spin_lock and bpf_timer. There could be one of each. */
static inline void copy_map_value(struct bpf_map *map, void *dst, void *src)
{
        if (unlikely(map_value_has_spin_lock(map))) {
                u32 off = map->spin_lock_off;
        u32 s_off = 0, s_sz = 0, t_off = 0, t_sz = 0;

                memcpy(dst, src, off);
                memcpy(dst + off + sizeof(struct bpf_spin_lock),
                       src + off + sizeof(struct bpf_spin_lock),
                       map->value_size - off - sizeof(struct bpf_spin_lock));
        if (unlikely(map_value_has_spin_lock(map))) {
                s_off = map->spin_lock_off;
                s_sz = sizeof(struct bpf_spin_lock);
        } else if (unlikely(map_value_has_timer(map))) {
                t_off = map->timer_off;
                t_sz = sizeof(struct bpf_timer);
        }

        if (unlikely(s_sz || t_sz)) {
                if (s_off < t_off || !s_sz) {
                        swap(s_off, t_off);
                        swap(s_sz, t_sz);
                }
                memcpy(dst, src, t_off);
                memcpy(dst + t_off + t_sz,
                       src + t_off + t_sz,
                       s_off - t_off - t_sz);
                memcpy(dst + s_off + s_sz,
                       src + s_off + s_sz,
                       map->value_size - s_off - s_sz);
        } else {
                memcpy(dst, src, map->value_size);
        }
}
void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
                           bool lock_src);
void bpf_timer_cancel_and_free(void *timer);
int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size);

struct bpf_offload_dev;
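A worked example of the rewritten copy_map_value(): with value_size = 24 and spin_lock_off = 8 (no timer), s_off = 8 and s_sz = 4 (sizeof(struct bpf_spin_lock)), and the swap is skipped because s_off is not below t_off and s_sz is non-zero. The three memcpy() calls then copy bytes [0, 8) and [12, 24) while leaving the lock word in dst untouched; the first memcpy of t_off = 0 bytes is a no-op.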
@@ -314,6 +338,7 @@ enum bpf_arg_type {
        ARG_PTR_TO_FUNC,        /* pointer to a bpf program function */
        ARG_PTR_TO_STACK_OR_NULL,       /* pointer to stack or NULL */
        ARG_PTR_TO_CONST_STR,   /* pointer to a null terminated read-only string */
        ARG_PTR_TO_TIMER,       /* pointer to bpf_timer */
        __BPF_ARG_TYPE_MAX,
};

@@ -553,6 +578,12 @@ struct btf_func_model {
 * programs only. Should not be used with normal calls and indirect calls.
 */
#define BPF_TRAMP_F_SKIP_FRAME          BIT(2)
/* Store IP address of the caller on the trampoline stack,
 * so it's available for trampoline's programs.
 */
#define BPF_TRAMP_F_IP_ARG              BIT(3)
/* Return the return value of fentry prog. Only used by bpf_struct_ops. */
#define BPF_TRAMP_F_RET_FENTRY_RET      BIT(4)

/* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50
 * bytes on x86. Pick a number to fit into BPF_IMAGE_SIZE / 2
@@ -898,8 +929,11 @@ struct bpf_array_aux {
         * stored in the map to make sure that all callers and callees have
         * the same prog type and JITed flag.
         */
        enum bpf_prog_type type;
        bool jited;
        struct {
                spinlock_t lock;
                enum bpf_prog_type type;
                bool jited;
        } owner;
        /* Programs with direct jumps into programs part of this array. */
        struct list_head poke_progs;
        struct bpf_map *map;
@@ -1073,7 +1107,7 @@ u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
/* an array of programs to be executed under rcu_lock.
 *
 * Typical usage:
 * ret = BPF_PROG_RUN_ARRAY(&bpf_prog_array, ctx, BPF_PROG_RUN);
 * ret = BPF_PROG_RUN_ARRAY(&bpf_prog_array, ctx, bpf_prog_run);
 *
 * the structure returned by bpf_prog_array_alloc() should be populated
 * with program pointers and the last pointer must be NULL.
@@ -1084,7 +1118,10 @@ u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
 */
struct bpf_prog_array_item {
        struct bpf_prog *prog;
        struct bpf_cgroup_storage *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
        union {
                struct bpf_cgroup_storage *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
                u64 bpf_cookie;
        };
};

struct bpf_prog_array {
@@ -1110,73 +1147,133 @@ int bpf_prog_array_copy_info(struct bpf_prog_array *array,
int bpf_prog_array_copy(struct bpf_prog_array *old_array,
                        struct bpf_prog *exclude_prog,
                        struct bpf_prog *include_prog,
                        u64 bpf_cookie,
                        struct bpf_prog_array **new_array);

struct bpf_run_ctx {};

struct bpf_cg_run_ctx {
        struct bpf_run_ctx run_ctx;
        const struct bpf_prog_array_item *prog_item;
};

struct bpf_trace_run_ctx {
        struct bpf_run_ctx run_ctx;
        u64 bpf_cookie;
};

static inline struct bpf_run_ctx *bpf_set_run_ctx(struct bpf_run_ctx *new_ctx)
{
        struct bpf_run_ctx *old_ctx = NULL;

#ifdef CONFIG_BPF_SYSCALL
        old_ctx = current->bpf_ctx;
        current->bpf_ctx = new_ctx;
#endif
        return old_ctx;
}

static inline void bpf_reset_run_ctx(struct bpf_run_ctx *old_ctx)
{
#ifdef CONFIG_BPF_SYSCALL
        current->bpf_ctx = old_ctx;
#endif
}

/* BPF program asks to bypass CAP_NET_BIND_SERVICE in bind. */
#define BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE                    (1 << 0)
/* BPF program asks to set CN on the packet. */
#define BPF_RET_SET_CN                                          (1 << 0)

/* For BPF_PROG_RUN_ARRAY_FLAGS and __BPF_PROG_RUN_ARRAY,
 * if bpf_cgroup_storage_set() failed, the rest of programs
 * will not execute. This should be a really rare scenario
 * as it requires BPF_CGROUP_STORAGE_NEST_MAX number of
 * preemptions all between bpf_cgroup_storage_set() and
 * bpf_cgroup_storage_unset() on the same cpu.
 */
#define BPF_PROG_RUN_ARRAY_FLAGS(array, ctx, func, ret_flags)           \
        ({                                                              \
                struct bpf_prog_array_item *_item;                      \
                struct bpf_prog *_prog;                                 \
                struct bpf_prog_array *_array;                          \
                u32 _ret = 1;                                           \
                u32 func_ret;                                           \
                migrate_disable();                                      \
                rcu_read_lock();                                        \
                _array = rcu_dereference(array);                        \
                _item = &_array->items[0];                              \
                while ((_prog = READ_ONCE(_item->prog))) {              \
                        if (unlikely(bpf_cgroup_storage_set(_item->cgroup_storage))) \
                                break;                                  \
                        func_ret = func(_prog, ctx);                    \
                        _ret &= (func_ret & 1);                         \
                        *(ret_flags) |= (func_ret >> 1);                \
                        bpf_cgroup_storage_unset();                     \
                        _item++;                                        \
                }                                                       \
                rcu_read_unlock();                                      \
                migrate_enable();                                       \
                _ret;                                                   \
        })
typedef u32 (*bpf_prog_run_fn)(const struct bpf_prog *prog, const void *ctx);

#define __BPF_PROG_RUN_ARRAY(array, ctx, func, check_non_null, set_cg_storage) \
        ({                                                              \
                struct bpf_prog_array_item *_item;                      \
                struct bpf_prog *_prog;                                 \
                struct bpf_prog_array *_array;                          \
                u32 _ret = 1;                                           \
                migrate_disable();                                      \
                rcu_read_lock();                                        \
                _array = rcu_dereference(array);                        \
                if (unlikely(check_non_null && !_array))                \
                        goto _out;                                      \
                _item = &_array->items[0];                              \
                while ((_prog = READ_ONCE(_item->prog))) {              \
                        if (!set_cg_storage) {                          \
                                _ret &= func(_prog, ctx);               \
                        } else {                                        \
                                if (unlikely(bpf_cgroup_storage_set(_item->cgroup_storage))) \
                                        break;                          \
                                _ret &= func(_prog, ctx);               \
                                bpf_cgroup_storage_unset();             \
                        }                                               \
                        _item++;                                        \
                }                                                       \
_out:                                                                   \
                rcu_read_unlock();                                      \
                migrate_enable();                                       \
                _ret;                                                   \
        })
static __always_inline u32
BPF_PROG_RUN_ARRAY_CG_FLAGS(const struct bpf_prog_array __rcu *array_rcu,
                            const void *ctx, bpf_prog_run_fn run_prog,
                            u32 *ret_flags)
{
        const struct bpf_prog_array_item *item;
        const struct bpf_prog *prog;
        const struct bpf_prog_array *array;
        struct bpf_run_ctx *old_run_ctx;
        struct bpf_cg_run_ctx run_ctx;
        u32 ret = 1;
        u32 func_ret;

        migrate_disable();
        rcu_read_lock();
        array = rcu_dereference(array_rcu);
        item = &array->items[0];
        old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
        while ((prog = READ_ONCE(item->prog))) {
                run_ctx.prog_item = item;
                func_ret = run_prog(prog, ctx);
                ret &= (func_ret & 1);
                *(ret_flags) |= (func_ret >> 1);
                item++;
        }
        bpf_reset_run_ctx(old_run_ctx);
        rcu_read_unlock();
        migrate_enable();
        return ret;
}

static __always_inline u32
BPF_PROG_RUN_ARRAY_CG(const struct bpf_prog_array __rcu *array_rcu,
                      const void *ctx, bpf_prog_run_fn run_prog)
{
        const struct bpf_prog_array_item *item;
        const struct bpf_prog *prog;
        const struct bpf_prog_array *array;
        struct bpf_run_ctx *old_run_ctx;
        struct bpf_cg_run_ctx run_ctx;
        u32 ret = 1;

        migrate_disable();
        rcu_read_lock();
        array = rcu_dereference(array_rcu);
        item = &array->items[0];
        old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
        while ((prog = READ_ONCE(item->prog))) {
                run_ctx.prog_item = item;
                ret &= run_prog(prog, ctx);
                item++;
        }
        bpf_reset_run_ctx(old_run_ctx);
        rcu_read_unlock();
        migrate_enable();
        return ret;
}

static __always_inline u32
BPF_PROG_RUN_ARRAY(const struct bpf_prog_array __rcu *array_rcu,
                   const void *ctx, bpf_prog_run_fn run_prog)
{
        const struct bpf_prog_array_item *item;
        const struct bpf_prog *prog;
        const struct bpf_prog_array *array;
        struct bpf_run_ctx *old_run_ctx;
        struct bpf_trace_run_ctx run_ctx;
        u32 ret = 1;

        migrate_disable();
        rcu_read_lock();
        array = rcu_dereference(array_rcu);
        if (unlikely(!array))
                goto out;
        old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
        item = &array->items[0];
        while ((prog = READ_ONCE(item->prog))) {
                run_ctx.bpf_cookie = item->bpf_cookie;
                ret &= run_prog(prog, ctx);
                item++;
        }
        bpf_reset_run_ctx(old_run_ctx);
out:
        rcu_read_unlock();
        migrate_enable();
        return ret;
}
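The trace-side runner threads a bpf_trace_run_ctx through current->bpf_ctx so a helper can recover the per-program cookie installed at link-creation time; the cgroup runners do the same with bpf_cg_run_ctx and prog_item. A hedged sketch of how a helper can reach back into the run context, mirroring the bpf_set_run_ctx()/bpf_reset_run_ctx() pairing above (the function name is illustrative, and this is only safe while a tracing program set up the context):

        static u64 my_current_cookie(void)
        {
                struct bpf_trace_run_ctx *run_ctx;

                /* run_ctx is the first member, so container_of() just
                 * rebases the pointer stored in current->bpf_ctx. */
                run_ctx = container_of(current->bpf_ctx,
                                       struct bpf_trace_run_ctx, run_ctx);
                return run_ctx->bpf_cookie;
        }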
/* To be used by __cgroup_bpf_run_filter_skb for EGRESS BPF progs
 * so BPF programs can request cwr for TCP packets.
@@ -1205,7 +1302,7 @@ _out: \
                u32 _flags = 0;                         \
                bool _cn;                               \
                u32 _ret;                               \
                _ret = BPF_PROG_RUN_ARRAY_FLAGS(array, ctx, func, &_flags);     \
                _ret = BPF_PROG_RUN_ARRAY_CG_FLAGS(array, ctx, func, &_flags);  \
                _cn = _flags & BPF_RET_SET_CN;          \
                if (_ret)                               \
                        _ret = (_cn ? NET_XMIT_CN : NET_XMIT_SUCCESS);  \
@@ -1214,12 +1311,6 @@ _out: \
                _ret;                                   \
        })

#define BPF_PROG_RUN_ARRAY(array, ctx, func)            \
        __BPF_PROG_RUN_ARRAY(array, ctx, func, false, true)

#define BPF_PROG_RUN_ARRAY_CHECK(array, ctx, func)      \
        __BPF_PROG_RUN_ARRAY(array, ctx, func, true, false)

#ifdef CONFIG_BPF_SYSCALL
DECLARE_PER_CPU(int, bpf_prog_active);
extern struct mutex bpf_stats_enabled_mutex;
@@ -1398,6 +1489,9 @@ typedef void (*bpf_iter_show_fdinfo_t) (const struct bpf_iter_aux_info *aux,
                                        struct seq_file *seq);
typedef int (*bpf_iter_fill_link_info_t)(const struct bpf_iter_aux_info *aux,
                                         struct bpf_link_info *info);
typedef const struct bpf_func_proto *
(*bpf_iter_get_func_proto_t)(enum bpf_func_id func_id,
                             const struct bpf_prog *prog);

enum bpf_iter_feature {
        BPF_ITER_RESCHED = BIT(0),
@@ -1410,6 +1504,7 @@ struct bpf_iter_reg {
        bpf_iter_detach_target_t detach_target;
        bpf_iter_show_fdinfo_t show_fdinfo;
        bpf_iter_fill_link_info_t fill_link_info;
        bpf_iter_get_func_proto_t get_func_proto;
        u32 ctx_arg_info_size;
        u32 feature;
        struct bpf_ctx_arg_aux ctx_arg_info[BPF_ITER_CTX_ARG_MAX];
@@ -1432,6 +1527,8 @@ struct bpf_iter__bpf_map_elem {
int bpf_iter_reg_target(const struct bpf_iter_reg *reg_info);
void bpf_iter_unreg_target(const struct bpf_iter_reg *reg_info);
bool bpf_iter_prog_supported(struct bpf_prog *prog);
const struct bpf_func_proto *
bpf_iter_get_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog);
int bpf_iter_link_attach(const union bpf_attr *attr, bpfptr_t uattr, struct bpf_prog *prog);
int bpf_iter_new_fd(struct bpf_link *link);
bool bpf_link_is_iter(struct bpf_link *link);
@@ -1509,12 +1606,12 @@ int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
                           struct bpf_prog *xdp_prog, struct bpf_map *map,
                           bool exclude_ingress);
bool dev_map_can_have_prog(struct bpf_map *map);

void __cpu_map_flush(void);
int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp,
                    struct net_device *dev_rx);
bool cpu_map_prog_allowed(struct bpf_map *map);
int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu,
                             struct sk_buff *skb);

/* Return map's numa specified by userspace */
static inline int bpf_map_attr_numa_node(const union bpf_attr *attr)
@@ -1711,6 +1808,12 @@ static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu,
        return 0;
}

static inline int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu,
                                           struct sk_buff *skb)
{
        return -EOPNOTSUPP;
}

static inline bool cpu_map_prog_allowed(struct bpf_map *map)
{
        return false;
@@ -1852,6 +1955,12 @@ void bpf_map_offload_map_free(struct bpf_map *map);
int bpf_prog_test_run_syscall(struct bpf_prog *prog,
                              const union bpf_attr *kattr,
                              union bpf_attr __user *uattr);

int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog);
int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype);
int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value, u64 flags);
void sock_map_unhash(struct sock *sk);
void sock_map_close(struct sock *sk, long timeout);
#else
static inline int bpf_prog_offload_init(struct bpf_prog *prog,
                                        union bpf_attr *attr)
@@ -1884,24 +1993,6 @@ static inline int bpf_prog_test_run_syscall(struct bpf_prog *prog,
{
        return -ENOTSUPP;
}
#endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */

#if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL)
int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog);
int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype);
int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value, u64 flags);
void sock_map_unhash(struct sock *sk);
void sock_map_close(struct sock *sk, long timeout);

void bpf_sk_reuseport_detach(struct sock *sk);
int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key,
                                       void *value);
int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key,
                                       void *value, u64 map_flags);
#else
static inline void bpf_sk_reuseport_detach(struct sock *sk)
{
}

#ifdef CONFIG_BPF_SYSCALL
static inline int sock_map_get_from_fd(const union bpf_attr *attr,
@@ -1921,7 +2012,21 @@ static inline int sock_map_update_elem_sys(struct bpf_map *map, void *key, void
{
        return -EOPNOTSUPP;
}
#endif /* CONFIG_BPF_SYSCALL */
#endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */

#if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL)
void bpf_sk_reuseport_detach(struct sock *sk);
int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key,
                                       void *value);
int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key,
                                       void *value, u64 map_flags);
#else
static inline void bpf_sk_reuseport_detach(struct sock *sk)
{
}

#ifdef CONFIG_BPF_SYSCALL
static inline int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map,
                                                     void *key, void *value)
{
@@ -1998,9 +2103,8 @@ extern const struct bpf_func_proto bpf_task_storage_get_proto;
extern const struct bpf_func_proto bpf_task_storage_delete_proto;
extern const struct bpf_func_proto bpf_for_each_map_elem_proto;
extern const struct bpf_func_proto bpf_btf_find_by_name_kind_proto;

const struct bpf_func_proto *bpf_tracing_func_proto(
        enum bpf_func_id func_id, const struct bpf_prog *prog);
extern const struct bpf_func_proto bpf_sk_setsockopt_proto;
extern const struct bpf_func_proto bpf_sk_getsockopt_proto;

const struct bpf_func_proto *tracing_prog_func_proto(
  enum bpf_func_id func_id, const struct bpf_prog *prog);
@@ -101,14 +101,14 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_STACK_TRACE, stack_trace_map_ops)
|
||||
#endif
|
||||
BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY_OF_MAPS, array_of_maps_map_ops)
|
||||
BPF_MAP_TYPE(BPF_MAP_TYPE_HASH_OF_MAPS, htab_of_maps_map_ops)
|
||||
#ifdef CONFIG_NET
|
||||
BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP, dev_map_ops)
|
||||
BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP_HASH, dev_map_hash_ops)
|
||||
BPF_MAP_TYPE(BPF_MAP_TYPE_SK_STORAGE, sk_storage_map_ops)
|
||||
#ifdef CONFIG_BPF_LSM
|
||||
BPF_MAP_TYPE(BPF_MAP_TYPE_INODE_STORAGE, inode_storage_map_ops)
|
||||
#endif
|
||||
BPF_MAP_TYPE(BPF_MAP_TYPE_TASK_STORAGE, task_storage_map_ops)
|
||||
#ifdef CONFIG_NET
|
||||
BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP, dev_map_ops)
|
||||
BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP_HASH, dev_map_hash_ops)
|
||||
BPF_MAP_TYPE(BPF_MAP_TYPE_SK_STORAGE, sk_storage_map_ops)
|
||||
BPF_MAP_TYPE(BPF_MAP_TYPE_CPUMAP, cpu_map_ops)
|
||||
#if defined(CONFIG_XDP_SOCKETS)
|
||||
BPF_MAP_TYPE(BPF_MAP_TYPE_XSKMAP, xsk_map_ops)
|
||||
@@ -136,3 +136,6 @@ BPF_LINK_TYPE(BPF_LINK_TYPE_ITER, iter)
|
||||
BPF_LINK_TYPE(BPF_LINK_TYPE_NETNS, netns)
|
||||
BPF_LINK_TYPE(BPF_LINK_TYPE_XDP, xdp)
|
||||
#endif
|
||||
#ifdef CONFIG_PERF_EVENTS
|
||||
BPF_LINK_TYPE(BPF_LINK_TYPE_PERF_EVENT, perf)
|
||||
#endif
|
||||
|
||||
@@ -53,7 +53,14 @@ struct bpf_reg_state {
|
||||
/* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE |
|
||||
* PTR_TO_MAP_VALUE_OR_NULL
|
||||
*/
|
||||
struct bpf_map *map_ptr;
|
||||
struct {
|
||||
struct bpf_map *map_ptr;
|
||||
/* To distinguish map lookups from outer map
|
||||
* the map_uid is non-zero for registers
|
||||
* pointing to inner maps.
|
||||
*/
|
||||
u32 map_uid;
|
||||
};
|
||||
|
||||
/* for PTR_TO_BTF_ID */
|
||||
struct {
|
||||
@@ -201,12 +208,19 @@ struct bpf_func_state {
|
||||
* zero == main subprog
|
||||
*/
|
||||
u32 subprogno;
|
||||
/* Every bpf_timer_start will increment async_entry_cnt.
|
||||
* It's used to distinguish:
|
||||
* void foo(void) { for(;;); }
|
||||
* void foo(void) { bpf_timer_set_callback(,foo); }
|
||||
*/
|
||||
u32 async_entry_cnt;
|
||||
bool in_callback_fn;
|
||||
bool in_async_callback_fn;
|
||||
|
||||
/* The following fields should be last. See copy_func_state() */
|
||||
int acquired_refs;
|
||||
struct bpf_reference_state *refs;
|
||||
int allocated_stack;
|
||||
bool in_callback_fn;
|
||||
struct bpf_stack_state *stack;
|
||||
};
|
||||
|
||||
@@ -392,6 +406,7 @@ struct bpf_subprog_info {
|
||||
bool has_tail_call;
|
||||
bool tail_call_reachable;
|
||||
bool has_ld_abs;
|
||||
bool is_async_cb;
|
||||
};
|
||||
|
||||
/* single container for all structs
|
||||
|
||||
@@ -62,9 +62,17 @@ static inline int copy_to_bpfptr_offset(bpfptr_t dst, size_t offset,
|
||||
return copy_to_sockptr_offset((sockptr_t) dst, offset, src, size);
|
||||
}
|
||||
|
||||
static inline void *memdup_bpfptr(bpfptr_t src, size_t len)
|
||||
static inline void *kvmemdup_bpfptr(bpfptr_t src, size_t len)
|
||||
{
|
||||
return memdup_sockptr((sockptr_t) src, len);
|
||||
void *p = kvmalloc(len, GFP_USER | __GFP_NOWARN);
|
||||
|
||||
if (!p)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
if (copy_from_bpfptr(p, src, len)) {
|
||||
kvfree(p);
|
||||
return ERR_PTR(-EFAULT);
|
||||
}
|
||||
return p;
|
||||
}
|
||||
|
||||
static inline long strncpy_from_bpfptr(char *dst, bpfptr_t src, size_t count)
|
||||
|
||||
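Usage sketch (editor's note, not part of the commit): kvmemdup_bpfptr() returns an ERR_PTR() on failure, so a caller checks it with IS_ERR() and releases the buffer with kvfree(). The helper name below is hypothetical.

static int copy_user_blob(bpfptr_t uptr, size_t len)
{
	void *buf = kvmemdup_bpfptr(uptr, len);	/* kvmalloc + copy_from_bpfptr */

	if (IS_ERR(buf))
		return PTR_ERR(buf);
	/* ... consume buf ... */
	kvfree(buf);
	return 0;
}
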
@@ -12,6 +12,7 @@
#include <linux/blkdev.h>
#include <scsi/scsi_request.h>

struct bsg_job;
struct request;
struct device;
struct scatterlist;

@@ -4,36 +4,16 @@

#include <uapi/linux/bsg.h>

struct request;
struct bsg_device;
struct device;
struct request_queue;

#ifdef CONFIG_BLK_DEV_BSG
struct bsg_ops {
int (*check_proto)(struct sg_io_v4 *hdr);
int (*fill_hdr)(struct request *rq, struct sg_io_v4 *hdr,
fmode_t mode);
int (*complete_rq)(struct request *rq, struct sg_io_v4 *hdr);
void (*free_rq)(struct request *rq);
};
typedef int (bsg_sg_io_fn)(struct request_queue *, struct sg_io_v4 *hdr,
fmode_t mode, unsigned int timeout);

struct bsg_class_device {
struct device *class_dev;
int minor;
struct request_queue *queue;
const struct bsg_ops *ops;
};
struct bsg_device *bsg_register_queue(struct request_queue *q,
struct device *parent, const char *name,
bsg_sg_io_fn *sg_io_fn);
void bsg_unregister_queue(struct bsg_device *bcd);

int bsg_register_queue(struct request_queue *q, struct device *parent,
const char *name, const struct bsg_ops *ops);
int bsg_scsi_register_queue(struct request_queue *q, struct device *parent);
void bsg_unregister_queue(struct request_queue *q);
#else
static inline int bsg_scsi_register_queue(struct request_queue *q,
struct device *parent)
{
return 0;
}
static inline void bsg_unregister_queue(struct request_queue *q)
{
}
#endif /* CONFIG_BLK_DEV_BSG */
#endif /* _LINUX_BSG_H */

@@ -99,6 +99,7 @@ bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s,
const struct btf_member *m,
u32 expected_offset, u32 expected_size);
int btf_find_spin_lock(const struct btf *btf, const struct btf_type *t);
int btf_find_timer(const struct btf *btf, const struct btf_type *t);
bool btf_type_is_void(const struct btf_type *t);
s32 btf_find_by_name_kind(const struct btf *btf, const char *name, u8 kind);
const struct btf_type *btf_type_skip_modifiers(const struct btf *btf,

@@ -82,6 +82,9 @@ __BTF_ID_LIST(name, globl)
#define BTF_ID_LIST_SINGLE(name, prefix, typename) \
BTF_ID_LIST(name) \
BTF_ID(prefix, typename)
#define BTF_ID_LIST_GLOBAL_SINGLE(name, prefix, typename) \
BTF_ID_LIST_GLOBAL(name) \
BTF_ID(prefix, typename)

/*
* The BTF_ID_UNUSED macro defines 4 zero bytes.

@@ -148,6 +151,7 @@ extern struct btf_id_set name;
#define BTF_ID_UNUSED
#define BTF_ID_LIST_GLOBAL(name) u32 name[1];
#define BTF_ID_LIST_SINGLE(name, prefix, typename) static u32 name[1];
#define BTF_ID_LIST_GLOBAL_SINGLE(name, prefix, typename) u32 name[1];
#define BTF_SET_START(name) static struct btf_id_set name = { 0 };
#define BTF_SET_START_GLOBAL(name) static struct btf_id_set name = { 0 };
#define BTF_SET_END(name)

@@ -172,7 +176,8 @@ extern struct btf_id_set name;
BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP_TW, tcp_timewait_sock) \
BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP6, tcp6_sock) \
BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP, udp_sock) \
BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP6, udp6_sock)
BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP6, udp6_sock) \
BTF_SOCK_TYPE(BTF_SOCK_TYPE_UNIX, unix_sock)

enum {
#define BTF_SOCK_TYPE(name, str) name,

@@ -184,4 +189,6 @@ MAX_BTF_SOCK_TYPE,
extern u32 btf_sock_ids[];
#endif

extern u32 btf_task_struct_ids[];

#endif

@@ -194,7 +194,7 @@ void __breadahead_gfp(struct block_device *, sector_t block, unsigned int size,
struct buffer_head *__bread_gfp(struct block_device *,
sector_t block, unsigned size, gfp_t gfp);
void invalidate_bh_lrus(void);
void invalidate_bh_lrus_cpu(int cpu);
void invalidate_bh_lrus_cpu(void);
bool has_bh_in_lru(int cpu, void *dummy);
struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
void free_buffer_head(struct buffer_head * bh);

@@ -408,8 +408,8 @@ static inline int inode_has_buffers(struct inode *inode) { return 0; }
static inline void invalidate_inode_buffers(struct inode *inode) {}
static inline int remove_inode_buffers(struct inode *inode) { return 1; }
static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; }
static inline void invalidate_bh_lrus_cpu(int cpu) {}
static inline bool has_bh_in_lru(int cpu, void *dummy) { return 0; }
static inline void invalidate_bh_lrus_cpu(void) {}
static inline bool has_bh_in_lru(int cpu, void *dummy) { return false; }
#define buffer_heads_over_limit 0

#endif /* CONFIG_BLOCK */

@@ -4,9 +4,10 @@
*
* Copyright (C) 2001 Ming Lei <ming.lei@canonical.com>
*/
#ifndef __LINUX_BVEC_ITER_H
#define __LINUX_BVEC_ITER_H
#ifndef __LINUX_BVEC_H
#define __LINUX_BVEC_H

#include <linux/highmem.h>
#include <linux/bug.h>
#include <linux/errno.h>
#include <linux/limits.h>

@@ -183,4 +184,61 @@ static inline void bvec_advance(const struct bio_vec *bvec,
}
}

#endif /* __LINUX_BVEC_ITER_H */
/**
* bvec_kmap_local - map a bvec into the kernel virtual address space
* @bvec: bvec to map
*
* Must be called on single-page bvecs only. Call kunmap_local on the returned
* address to unmap.
*/
static inline void *bvec_kmap_local(struct bio_vec *bvec)
{
return kmap_local_page(bvec->bv_page) + bvec->bv_offset;
}

/**
* memcpy_from_bvec - copy data from a bvec
* @bvec: bvec to copy from
*
* Must be called on single-page bvecs only.
*/
static inline void memcpy_from_bvec(char *to, struct bio_vec *bvec)
{
memcpy_from_page(to, bvec->bv_page, bvec->bv_offset, bvec->bv_len);
}

/**
* memcpy_to_bvec - copy data to a bvec
* @bvec: bvec to copy to
*
* Must be called on single-page bvecs only.
*/
static inline void memcpy_to_bvec(struct bio_vec *bvec, const char *from)
{
memcpy_to_page(bvec->bv_page, bvec->bv_offset, from, bvec->bv_len);
}

/**
* memzero_bvec - zero all data in a bvec
* @bvec: bvec to zero
*
* Must be called on single-page bvecs only.
*/
static inline void memzero_bvec(struct bio_vec *bvec)
{
memzero_page(bvec->bv_page, bvec->bv_offset, bvec->bv_len);
}

/**
* bvec_virt - return the virtual address for a bvec
* @bvec: bvec to return the virtual address for
*
* Note: the caller must ensure that @bvec->bv_page is not a highmem page.
*/
static inline void *bvec_virt(struct bio_vec *bvec)
{
WARN_ON_ONCE(PageHighMem(bvec->bv_page));
return page_address(bvec->bv_page) + bvec->bv_offset;
}

#endif /* __LINUX_BVEC_H */

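Usage sketch (editor's note, not from the commit): the new single-page bvec helpers pair bvec_kmap_local() with kunmap_local(). A hypothetical checksum loop:

static u32 bvec_sum(struct bio_vec *bv)
{
	u8 *p = bvec_kmap_local(bv);	/* maps bv->bv_page at bv_offset */
	u32 sum = 0;
	unsigned int i;

	for (i = 0; i < bv->bv_len; i++)
		sum += p[i];
	kunmap_local(p);
	return sum;
}
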
@@ -79,24 +79,6 @@ struct cpu_cacheinfo {
bool cpu_map_populated;
};

/*
* Helpers to make sure "func" is executed on the cpu whose cache
* attributes are being detected
*/
#define DEFINE_SMP_CALL_CACHE_FUNCTION(func) \
static inline void _##func(void *ret) \
{ \
int cpu = smp_processor_id(); \
*(int *)ret = __##func(cpu); \
} \
\
int func(unsigned int cpu) \
{ \
int ret; \
smp_call_function_single(cpu, _##func, &ret, true); \
return ret; \
}

struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu);
int init_cache_level(unsigned int cpu);
int populate_cache_leaves(unsigned int cpu);

@@ -37,7 +37,7 @@
* quanta, from when the bit is sent on the TX pin to when it is
* received on the RX pin of the transmitter. Possible options:
*
* O: automatic mode. The controller dynamically measure @tdcv
* 0: automatic mode. The controller dynamically measures @tdcv
* for each transmitted CAN FD frame.
*
* Other values: manual mode. Use the fixed provided value.

@@ -45,7 +45,7 @@
* @tdco: Transmitter Delay Compensation Offset. Offset value, in time
* quanta, defining the distance between the start of the bit
* reception on the RX pin of the transceiver and the SSP
* position such as SSP = @tdcv + @tdco.
* position such that SSP = @tdcv + @tdco.
*
* If @tdco is zero, then TDC is disabled and both @tdcv and
* @tdcf should be ignored.

@@ -32,6 +32,12 @@ enum can_mode {
CAN_MODE_SLEEP
};

enum can_termination_gpio {
CAN_TERMINATION_GPIO_DISABLED = 0,
CAN_TERMINATION_GPIO_ENABLED,
CAN_TERMINATION_GPIO_MAX,
};

/*
* CAN common private data
*/

@@ -55,6 +61,8 @@ struct can_priv {
unsigned int termination_const_cnt;
const u16 *termination_const;
u16 termination;
struct gpio_desc *termination_gpio;
u16 termination_gpio_ohms[CAN_TERMINATION_GPIO_MAX];

enum can_state state;

23
include/linux/can/platform/flexcan.h
Normal file
@@ -0,0 +1,23 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2021 Angelo Dureghello <angelo@kernel-space.org>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/

#ifndef _CAN_PLATFORM_FLEXCAN_H
#define _CAN_PLATFORM_FLEXCAN_H

struct flexcan_platform_data {
u32 clock_frequency;
u8 clk_src;
};

#endif /* _CAN_PLATFORM_FLEXCAN_H */

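Editor's note, a hypothetical board-file sketch: the new header only carries clock setup for the flexcan driver, typically attached through a platform_device's dev.platform_data; the values below are illustrative.

#include <linux/can/platform/flexcan.h>

static struct flexcan_platform_data mcf_flexcan_pdata = {
	.clock_frequency = 120000000,	/* peripheral clock in Hz */
	.clk_src = 0,			/* SoC-specific clock source select */
};
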
@@ -20,6 +20,7 @@ struct can_rx_offload {
bool drop);

struct sk_buff_head skb_queue;
struct sk_buff_head skb_irq_queue;
u32 skb_queue_len_max;

unsigned int mb_first;

@@ -48,14 +49,11 @@ unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload,
unsigned int *frame_len_ptr);
int can_rx_offload_queue_tail(struct can_rx_offload *offload,
struct sk_buff *skb);
void can_rx_offload_irq_finish(struct can_rx_offload *offload);
void can_rx_offload_threaded_irq_finish(struct can_rx_offload *offload);
void can_rx_offload_del(struct can_rx_offload *offload);
void can_rx_offload_enable(struct can_rx_offload *offload);

static inline void can_rx_offload_schedule(struct can_rx_offload *offload)
{
napi_schedule(&offload->napi);
}

static inline void can_rx_offload_disable(struct can_rx_offload *offload)
{
napi_disable(&offload->napi);

@@ -86,11 +86,13 @@ struct cdrom_device_ops {
/* play stuff */
int (*audio_ioctl) (struct cdrom_device_info *,unsigned int, void *);

/* driver specifications */
const int capability; /* capability flags */
/* handle uniform packets for scsi type devices (scsi,atapi) */
int (*generic_packet) (struct cdrom_device_info *,
struct packet_command *);
int (*read_cdda_bpc)(struct cdrom_device_info *cdi, void __user *ubuf,
u32 lba, u32 nframes, u8 *last_sense);
/* driver specifications */
const int capability; /* capability flags */
};

int cdrom_multisession(struct cdrom_device_info *cdi,

@@ -299,6 +299,7 @@ enum {
CEPH_SESSION_FLUSHMSG_ACK,
CEPH_SESSION_FORCE_RO,
CEPH_SESSION_REJECT,
CEPH_SESSION_REQUEST_FLUSH_MDLOG,
};

extern const char *ceph_session_op_name(int op);

@@ -752,107 +752,54 @@ static inline void cgroup_threadgroup_change_end(struct task_struct *tsk) {}
* sock_cgroup_data is embedded at sock->sk_cgrp_data and contains
* per-socket cgroup information except for memcg association.
*
* On legacy hierarchies, net_prio and net_cls controllers directly set
* attributes on each sock which can then be tested by the network layer.
* On the default hierarchy, each sock is associated with the cgroup it was
* created in and the networking layer can match the cgroup directly.
*
* To avoid carrying all three cgroup related fields separately in sock,
* sock_cgroup_data overloads (prioidx, classid) and the cgroup pointer.
* On boot, sock_cgroup_data records the cgroup that the sock was created
* in so that cgroup2 matches can be made; however, once either net_prio or
* net_cls starts being used, the area is overridden to carry prioidx and/or
* classid. The two modes are distinguished by whether the lowest bit is
* set. Clear bit indicates cgroup pointer while set bit prioidx and
* classid.
*
* While userland may start using net_prio or net_cls at any time, once
* either is used, cgroup2 matching no longer works. There is no reason to
* mix the two and this is in line with how legacy and v2 compatibility is
* handled. On mode switch, cgroup references which are already being
* pointed to by socks may be leaked. While this can be remedied by adding
* synchronization around sock_cgroup_data, given that the number of leaked
* cgroups is bound and highly unlikely to be high, this seems to be the
* better trade-off.
* On legacy hierarchies, net_prio and net_cls controllers directly
* set attributes on each sock which can then be tested by the network
* layer. On the default hierarchy, each sock is associated with the
* cgroup it was created in and the networking layer can match the
* cgroup directly.
*/
struct sock_cgroup_data {
union {
#ifdef __LITTLE_ENDIAN
struct {
u8 is_data : 1;
u8 no_refcnt : 1;
u8 unused : 6;
u8 padding;
u16 prioidx;
u32 classid;
} __packed;
#else
struct {
u32 classid;
u16 prioidx;
u8 padding;
u8 unused : 6;
u8 no_refcnt : 1;
u8 is_data : 1;
} __packed;
struct cgroup *cgroup; /* v2 */
#ifdef CONFIG_CGROUP_NET_CLASSID
u32 classid; /* v1 */
#endif
#ifdef CONFIG_CGROUP_NET_PRIO
u16 prioidx; /* v1 */
#endif
u64 val;
};
};

/*
* There's a theoretical window where the following accessors race with
* updaters and return part of the previous pointer as the prioidx or
* classid. Such races are short-lived and the result isn't critical.
*/
static inline u16 sock_cgroup_prioidx(const struct sock_cgroup_data *skcd)
{
/* fallback to 1 which is always the ID of the root cgroup */
return (skcd->is_data & 1) ? skcd->prioidx : 1;
#ifdef CONFIG_CGROUP_NET_PRIO
return READ_ONCE(skcd->prioidx);
#else
return 1;
#endif
}

static inline u32 sock_cgroup_classid(const struct sock_cgroup_data *skcd)
{
/* fallback to 0 which is the unconfigured default classid */
return (skcd->is_data & 1) ? skcd->classid : 0;
#ifdef CONFIG_CGROUP_NET_CLASSID
return READ_ONCE(skcd->classid);
#else
return 0;
#endif
}

/*
* If invoked concurrently, the updaters may clobber each other. The
* caller is responsible for synchronization.
*/
static inline void sock_cgroup_set_prioidx(struct sock_cgroup_data *skcd,
u16 prioidx)
{
struct sock_cgroup_data skcd_buf = {{ .val = READ_ONCE(skcd->val) }};

if (sock_cgroup_prioidx(&skcd_buf) == prioidx)
return;

if (!(skcd_buf.is_data & 1)) {
skcd_buf.val = 0;
skcd_buf.is_data = 1;
}

skcd_buf.prioidx = prioidx;
WRITE_ONCE(skcd->val, skcd_buf.val); /* see sock_cgroup_ptr() */
#ifdef CONFIG_CGROUP_NET_PRIO
WRITE_ONCE(skcd->prioidx, prioidx);
#endif
}

static inline void sock_cgroup_set_classid(struct sock_cgroup_data *skcd,
u32 classid)
{
struct sock_cgroup_data skcd_buf = {{ .val = READ_ONCE(skcd->val) }};

if (sock_cgroup_classid(&skcd_buf) == classid)
return;

if (!(skcd_buf.is_data & 1)) {
skcd_buf.val = 0;
skcd_buf.is_data = 1;
}

skcd_buf.classid = classid;
WRITE_ONCE(skcd->val, skcd_buf.val); /* see sock_cgroup_ptr() */
#ifdef CONFIG_CGROUP_NET_CLASSID
WRITE_ONCE(skcd->classid, classid);
#endif
}

#else /* CONFIG_SOCK_CGROUP_DATA */

@@ -829,33 +829,13 @@ static inline void cgroup_account_cputime_field(struct task_struct *task,
*/
#ifdef CONFIG_SOCK_CGROUP_DATA

#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
extern spinlock_t cgroup_sk_update_lock;
#endif

void cgroup_sk_alloc_disable(void);
void cgroup_sk_alloc(struct sock_cgroup_data *skcd);
void cgroup_sk_clone(struct sock_cgroup_data *skcd);
void cgroup_sk_free(struct sock_cgroup_data *skcd);

static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd)
{
#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
unsigned long v;

/*
* @skcd->val is 64bit but the following is safe on 32bit too as we
* just need the lower ulong to be written and read atomically.
*/
v = READ_ONCE(skcd->val);

if (v & 3)
return &cgrp_dfl_root.cgrp;

return (struct cgroup *)(unsigned long)v ?: &cgrp_dfl_root.cgrp;
#else
return (struct cgroup *)(unsigned long)skcd->val;
#endif
return skcd->cgroup;
}

#else /* CONFIG_CGROUP_DATA */

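Editor's note, a sketch only (assumes CONFIG_SOCK_CGROUP_DATA and a valid struct sock): after this rework the v2 cgroup pointer and the v1 prioidx/classid no longer share bits, so all three can simply be read through the accessors.

static void inspect_sk_cgroup(struct sock *sk)
{
	struct sock_cgroup_data *skcd = &sk->sk_cgrp_data;

	pr_debug("prioidx=%u classid=%u cgroup=%p\n",
		 sock_cgroup_prioidx(skcd), sock_cgroup_classid(skcd),
		 sock_cgroup_ptr(skcd));
}
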
@@ -342,7 +342,7 @@ struct clk_fixed_rate {
unsigned long flags;
};

#define CLK_FIXED_RATE_PARENT_ACCURACY BIT(0)
#define CLK_FIXED_RATE_PARENT_ACCURACY BIT(0)

extern const struct clk_ops clk_fixed_rate_ops;
struct clk_hw *__clk_hw_register_fixed_rate(struct device *dev,

@@ -1001,6 +1001,12 @@ struct clk_hw *devm_clk_hw_register_fixed_factor(struct device *dev,
* CLK_FRAC_DIVIDER_BIG_ENDIAN - By default little endian register accesses are
* used for the divider register. Setting this flag makes the register
* accesses big endian.
* CLK_FRAC_DIVIDER_POWER_OF_TWO_PS - By default the resulting fraction might
* be saturated and the caller will get quite far from the good enough
* approximation. Instead the caller may require, by setting this flag,
* to shift left by a few bits in case, when the asked one is quite small
* to satisfy the desired range of denominator. It assumes that on the
* caller's side the power-of-two capable prescaler exists.
*/
struct clk_fractional_divider {
struct clk_hw hw;

@@ -1022,8 +1028,8 @@ struct clk_fractional_divider {

#define CLK_FRAC_DIVIDER_ZERO_BASED BIT(0)
#define CLK_FRAC_DIVIDER_BIG_ENDIAN BIT(1)
#define CLK_FRAC_DIVIDER_POWER_OF_TWO_PS BIT(2)

extern const struct clk_ops clk_fractional_divider_ops;
struct clk *clk_register_fractional_divider(struct device *dev,
const char *name, const char *parent_name, unsigned long flags,
void __iomem *reg, u8 mshift, u8 mwidth, u8 nshift, u8 nwidth,

@@ -1069,9 +1075,9 @@ struct clk_multiplier {

#define to_clk_multiplier(_hw) container_of(_hw, struct clk_multiplier, hw)

#define CLK_MULTIPLIER_ZERO_BYPASS BIT(0)
#define CLK_MULTIPLIER_ZERO_BYPASS BIT(0)
#define CLK_MULTIPLIER_ROUND_CLOSEST BIT(1)
#define CLK_MULTIPLIER_BIG_ENDIAN BIT(2)
#define CLK_MULTIPLIER_BIG_ENDIAN BIT(2)

extern const struct clk_ops clk_multiplier_ops;

@@ -137,6 +137,32 @@
#define AT91_PMC_PLLADIV2_ON (1 << 12)
#define AT91_PMC_H32MXDIV BIT(24)

#define AT91_PMC_MCR_V2 0x30 /* Master Clock Register [SAMA7G5 only] */
#define AT91_PMC_MCR_V2_ID_MSK (0xF)
#define AT91_PMC_MCR_V2_ID(_id) ((_id) & AT91_PMC_MCR_V2_ID_MSK)
#define AT91_PMC_MCR_V2_CMD (1 << 7)
#define AT91_PMC_MCR_V2_DIV (7 << 8)
#define AT91_PMC_MCR_V2_DIV1 (0 << 8)
#define AT91_PMC_MCR_V2_DIV2 (1 << 8)
#define AT91_PMC_MCR_V2_DIV4 (2 << 8)
#define AT91_PMC_MCR_V2_DIV8 (3 << 8)
#define AT91_PMC_MCR_V2_DIV16 (4 << 8)
#define AT91_PMC_MCR_V2_DIV32 (5 << 8)
#define AT91_PMC_MCR_V2_DIV64 (6 << 8)
#define AT91_PMC_MCR_V2_DIV3 (7 << 8)
#define AT91_PMC_MCR_V2_CSS (0x1F << 16)
#define AT91_PMC_MCR_V2_CSS_MD_SLCK (0 << 16)
#define AT91_PMC_MCR_V2_CSS_TD_SLCK (1 << 16)
#define AT91_PMC_MCR_V2_CSS_MAINCK (2 << 16)
#define AT91_PMC_MCR_V2_CSS_MCK0 (3 << 16)
#define AT91_PMC_MCR_V2_CSS_SYSPLL (5 << 16)
#define AT91_PMC_MCR_V2_CSS_DDRPLL (6 << 16)
#define AT91_PMC_MCR_V2_CSS_IMGPLL (7 << 16)
#define AT91_PMC_MCR_V2_CSS_BAUDPLL (8 << 16)
#define AT91_PMC_MCR_V2_CSS_AUDIOPLL (9 << 16)
#define AT91_PMC_MCR_V2_CSS_ETHPLL (10 << 16)
#define AT91_PMC_MCR_V2_EN (1 << 28)

#define AT91_PMC_XTALF 0x34 /* Main XTAL Frequency Register [SAMA7G5 only] */

#define AT91_PMC_USB 0x38 /* USB Clock Register [some SAM9 only] */

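Editor's note, an illustrative composition of an MCR_V2 write using the new defines (the master clock ID and ratio below are made up):

u32 mcr = AT91_PMC_MCR_V2_ID(3) | AT91_PMC_MCR_V2_CSS_SYSPLL |
	  AT91_PMC_MCR_V2_DIV2 | AT91_PMC_MCR_V2_CMD | AT91_PMC_MCR_V2_EN;
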
@@ -1,46 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Parsing command line, get the partitions information.
*
* Written by Cai Zhiyong <caizhiyong@huawei.com>
*
*/
#ifndef CMDLINEPARSEH
#define CMDLINEPARSEH

#include <linux/blkdev.h>
#include <linux/fs.h>
#include <linux/slab.h>

/* partition flags */
#define PF_RDONLY 0x01 /* Device is read only */
#define PF_POWERUP_LOCK 0x02 /* Always locked after reset */

struct cmdline_subpart {
char name[BDEVNAME_SIZE]; /* partition name, such as 'rootfs' */
sector_t from;
sector_t size;
int flags;
struct cmdline_subpart *next_subpart;
};

struct cmdline_parts {
char name[BDEVNAME_SIZE]; /* block device, such as 'mmcblk0' */
unsigned int nr_subparts;
struct cmdline_subpart *subpart;
struct cmdline_parts *next_parts;
};

void cmdline_parts_free(struct cmdline_parts **parts);

int cmdline_parts_parse(struct cmdline_parts **parts, const char *cmdline);

struct cmdline_parts *cmdline_parts_find(struct cmdline_parts *parts,
const char *bdev);

int cmdline_parts_set(struct cmdline_parts *parts, sector_t disk_size,
int slot,
int (*add_part)(int, struct cmdline_subpart *, void *),
void *param);

#endif /* CMDLINEPARSEH */
@@ -84,6 +84,8 @@ static inline unsigned long compact_gap(unsigned int order)
extern unsigned int sysctl_compaction_proactiveness;
extern int sysctl_compaction_handler(struct ctl_table *table, int write,
void *buffer, size_t *length, loff_t *ppos);
extern int compaction_proactiveness_sysctl_handler(struct ctl_table *table,
int write, void *buffer, size_t *length, loff_t *ppos);
extern int sysctl_extfrag_threshold;
extern int sysctl_compact_unevictable_allowed;

@@ -20,11 +20,8 @@
#include <linux/unistd.h>

#include <asm/compat.h>

#ifdef CONFIG_COMPAT
#include <asm/siginfo.h>
#include <asm/signal.h>
#endif

#ifdef CONFIG_ARCH_HAS_SYSCALL_WRAPPER
/*
@@ -95,8 +92,6 @@ struct compat_iovec {
compat_size_t iov_len;
};

#ifdef CONFIG_COMPAT

#ifndef compat_user_stack_pointer
#define compat_user_stack_pointer() current_user_stack_pointer()
#endif
@@ -131,9 +126,11 @@ struct compat_tms {

#define _COMPAT_NSIG_WORDS (_COMPAT_NSIG / _COMPAT_NSIG_BPW)

#ifndef compat_sigset_t
typedef struct {
compat_sigset_word sig[_COMPAT_NSIG_WORDS];
} compat_sigset_t;
#endif

int set_compat_user_sigmask(const compat_sigset_t __user *umask,
size_t sigsetsize);
@@ -384,6 +381,7 @@ struct compat_keyctl_kdf_params {
__u32 __spare[8];
};

struct compat_stat;
struct compat_statfs;
struct compat_statfs64;
struct compat_old_linux_dirent;
@@ -397,14 +395,6 @@ struct compat_kexec_segment;
struct compat_mq_attr;
struct compat_msgbuf;

#define BITS_PER_COMPAT_LONG (8*sizeof(compat_long_t))

#define BITS_TO_COMPAT_LONGS(bits) DIV_ROUND_UP(bits, BITS_PER_COMPAT_LONG)

long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask,
unsigned long bitmap_size);
long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask,
unsigned long bitmap_size);
void copy_siginfo_to_external32(struct compat_siginfo *to,
const struct kernel_siginfo *from);
int copy_siginfo_from_user32(kernel_siginfo_t *to,
@@ -428,7 +418,7 @@ put_compat_sigset(compat_sigset_t __user *compat, const sigset_t *set,
unsigned int size)
{
/* size <= sizeof(compat_sigset_t) <= sizeof(sigset_t) */
#ifdef __BIG_ENDIAN
#if defined(__BIG_ENDIAN) && defined(CONFIG_64BIT)
compat_sigset_t v;
switch (_NSIG_WORDS) {
case 4: v.sig[7] = (set->sig[3] >> 32); v.sig[6] = set->sig[3];
@@ -521,8 +511,6 @@ extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request,

struct epoll_event; /* fortunately, this one is fixed-layout */

extern void __user *compat_alloc_user_space(unsigned long len);

int compat_restore_altstack(const compat_stack_t __user *uss);
int __compat_save_altstack(compat_stack_t __user *, unsigned long);
#define unsafe_compat_save_altstack(uss, sp, label) do { \
@@ -809,26 +797,6 @@ asmlinkage long compat_sys_execve(const char __user *filename, const compat_uptr
/* mm/fadvise.c: No generic prototype for fadvise64_64 */

/* mm/, CONFIG_MMU only */
asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
compat_ulong_t mode,
compat_ulong_t __user *nmask,
compat_ulong_t maxnode, compat_ulong_t flags);
asmlinkage long compat_sys_get_mempolicy(int __user *policy,
compat_ulong_t __user *nmask,
compat_ulong_t maxnode,
compat_ulong_t addr,
compat_ulong_t flags);
asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
compat_ulong_t maxnode);
asmlinkage long compat_sys_migrate_pages(compat_pid_t pid,
compat_ulong_t maxnode, const compat_ulong_t __user *old_nodes,
const compat_ulong_t __user *new_nodes);
asmlinkage long compat_sys_move_pages(pid_t pid, compat_ulong_t nr_pages,
__u32 __user *pages,
const int __user *nodes,
int __user *status,
int flags);

asmlinkage long compat_sys_rt_tgsigqueueinfo(compat_pid_t tgid,
compat_pid_t pid, int sig,
struct compat_siginfo __user *uinfo);
@@ -929,17 +897,6 @@ asmlinkage long compat_sys_socketcall(int call, u32 __user *args);

#endif /* CONFIG_ARCH_HAS_SYSCALL_WRAPPER */

/*
* For most but not all architectures, "am I in a compat syscall?" and
* "am I a compat task?" are the same question. For architectures on which
* they aren't the same question, arch code can override in_compat_syscall.
*/

#ifndef in_compat_syscall
static inline bool in_compat_syscall(void) { return is_compat_task(); }
#endif

/**
* ns_to_old_timeval32 - Compat version of ns_to_timeval
* @nsec: the nanoseconds value to be converted
@@ -969,6 +926,17 @@ int kcompat_sys_statfs64(const char __user * pathname, compat_size_t sz,
int kcompat_sys_fstatfs64(unsigned int fd, compat_size_t sz,
struct compat_statfs64 __user * buf);

#ifdef CONFIG_COMPAT

/*
* For most but not all architectures, "am I in a compat syscall?" and
* "am I a compat task?" are the same question. For architectures on which
* they aren't the same question, arch code can override in_compat_syscall.
*/
#ifndef in_compat_syscall
static inline bool in_compat_syscall(void) { return is_compat_task(); }
#endif

#else /* !CONFIG_COMPAT */

#define is_compat_task() (0)
@@ -978,6 +946,15 @@ static inline bool in_compat_syscall(void) { return false; }

#endif /* CONFIG_COMPAT */

#define BITS_PER_COMPAT_LONG (8*sizeof(compat_long_t))

#define BITS_TO_COMPAT_LONGS(bits) DIV_ROUND_UP(bits, BITS_PER_COMPAT_LONG)

long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask,
unsigned long bitmap_size);
long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask,
unsigned long bitmap_size);

/*
* Some legacy ABIs like the i386 one use less than natural alignment for 64-bit
* types, and will need special compat treatment for that. Most architectures

@@ -62,19 +62,6 @@
#define __no_sanitize_coverage
#endif

/*
* Not all versions of clang implement the type-generic versions
* of the builtin overflow checkers. Fortunately, clang implements
* __has_builtin allowing us to avoid awkward version
* checks. Unfortunately, we don't know which version of gcc clang
* pretends to be, so the macro may or may not be defined.
*/
#if __has_builtin(__builtin_mul_overflow) && \
__has_builtin(__builtin_add_overflow) && \
__has_builtin(__builtin_sub_overflow)
#define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
#endif

#if __has_feature(shadow_call_stack)
# define __noscs __attribute__((__no_sanitize__("shadow-call-stack")))
#endif

@@ -43,9 +43,6 @@

#define __compiletime_object_size(obj) __builtin_object_size(obj, 0)

#define __compiletime_warning(message) __attribute__((__warning__(message)))
#define __compiletime_error(message) __attribute__((__error__(message)))

#if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__)
#define __latent_entropy __attribute__((latent_entropy))
#endif
@@ -98,10 +95,8 @@

#if GCC_VERSION >= 70000
#define KASAN_ABI_VERSION 5
#elif GCC_VERSION >= 50000
#else
#define KASAN_ABI_VERSION 4
#elif GCC_VERSION >= 40902
#define KASAN_ABI_VERSION 3
#endif

#if __has_attribute(__no_sanitize_address__)
@@ -128,10 +123,6 @@
#define __no_sanitize_coverage
#endif

#if GCC_VERSION >= 50100
#define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
#endif

/*
* Turn individual warnings and errors on and off locally, depending
* on version.

@@ -188,6 +188,8 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
(typeof(ptr)) (__ptr + (off)); })
#endif

#define absolute_pointer(val) RELOC_HIDE((void *)(val), 0)

#ifndef OPTIMIZER_HIDE_VAR
/* Make the optimizer believe the variable can be manipulated arbitrarily. */
#define OPTIMIZER_HIDE_VAR(var) \

@@ -20,26 +20,6 @@
* Provide links to the documentation of each supported compiler, if it exists.
*/

/*
* __has_attribute is supported on gcc >= 5, clang >= 2.9 and icc >= 17.
* In the meantime, to support gcc < 5, we implement __has_attribute
* by hand.
*/
#ifndef __has_attribute
# define __has_attribute(x) __GCC4_has_attribute_##x
# define __GCC4_has_attribute___assume_aligned__ 1
# define __GCC4_has_attribute___copy__ 0
# define __GCC4_has_attribute___designated_init__ 0
# define __GCC4_has_attribute___externally_visible__ 1
# define __GCC4_has_attribute___no_caller_saved_registers__ 0
# define __GCC4_has_attribute___noclone__ 1
# define __GCC4_has_attribute___no_profile_instrument_function__ 0
# define __GCC4_has_attribute___nonstring__ 0
# define __GCC4_has_attribute___no_sanitize_address__ 1
# define __GCC4_has_attribute___no_sanitize_undefined__ 1
# define __GCC4_has_attribute___fallthrough__ 0
#endif

/*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-alias-function-attribute
*/
@@ -74,7 +54,6 @@
* compiler should see some alignment anyway, when the return value is
* massaged by 'flags = ptr & 3; ptr &= ~3;').
*
* Optional: only supported since gcc >= 4.9
* Optional: not supported by icc
*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-assume_005faligned-function-attribute
@@ -137,6 +116,17 @@
# define __designated_init
#endif

/*
* Optional: only supported since clang >= 14.0
*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-error-function-attribute
*/
#if __has_attribute(__error__)
# define __compiletime_error(msg) __attribute__((__error__(msg)))
#else
# define __compiletime_error(msg)
#endif

/*
* Optional: not supported by clang
*
@@ -298,6 +288,17 @@
*/
#define __must_check __attribute__((__warn_unused_result__))

/*
* Optional: only supported since clang >= 14.0
*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-warning-function-attribute
*/
#if __has_attribute(__warning__)
# define __compiletime_warning(msg) __attribute__((__warning__(msg)))
#else
# define __compiletime_warning(msg)
#endif

/*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-weak-function-attribute
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-weak-variable-attribute

@@ -294,12 +294,6 @@ struct ftrace_likely_data {
#ifndef __compiletime_object_size
# define __compiletime_object_size(obj) -1
#endif
#ifndef __compiletime_warning
# define __compiletime_warning(message)
#endif
#ifndef __compiletime_error
# define __compiletime_error(message)
#endif

#ifdef __OPTIMIZE__
# define __compiletime_assert(condition, msg, prefix, suffix) \

@@ -220,6 +220,10 @@ struct coresight_sysfs_link {
* @nr_links: number of sysfs links created to other components from this
* device. These will appear in the "connections" group.
* @has_conns_grp: Have added a "connections" group for sysfs links.
* @feature_csdev_list: List of complex feature programming added to the device.
* @config_csdev_list: List of system configurations added to the device.
* @cscfg_csdev_lock: Protect the lists of configurations and features.
* @active_cscfg_ctxt: Context information for current active system configuration.
*/
struct coresight_device {
struct coresight_platform_data *pdata;
@@ -241,6 +245,11 @@ struct coresight_device {
int nr_links;
bool has_conns_grp;
bool ect_enabled; /* true only if associated ect device is enabled */
/* system configuration and feature lists */
struct list_head feature_csdev_list;
struct list_head config_csdev_list;
spinlock_t cscfg_csdev_lock;
void *active_cscfg_ctxt;
};

/*

@@ -162,15 +162,15 @@ struct counter_count_ext {
void *priv;
};

enum counter_count_function {
COUNTER_COUNT_FUNCTION_INCREASE = 0,
COUNTER_COUNT_FUNCTION_DECREASE,
COUNTER_COUNT_FUNCTION_PULSE_DIRECTION,
COUNTER_COUNT_FUNCTION_QUADRATURE_X1_A,
COUNTER_COUNT_FUNCTION_QUADRATURE_X1_B,
COUNTER_COUNT_FUNCTION_QUADRATURE_X2_A,
COUNTER_COUNT_FUNCTION_QUADRATURE_X2_B,
COUNTER_COUNT_FUNCTION_QUADRATURE_X4
enum counter_function {
COUNTER_FUNCTION_INCREASE = 0,
COUNTER_FUNCTION_DECREASE,
COUNTER_FUNCTION_PULSE_DIRECTION,
COUNTER_FUNCTION_QUADRATURE_X1_A,
COUNTER_FUNCTION_QUADRATURE_X1_B,
COUNTER_FUNCTION_QUADRATURE_X2_A,
COUNTER_FUNCTION_QUADRATURE_X2_B,
COUNTER_FUNCTION_QUADRATURE_X4
};

/**
@@ -192,7 +192,7 @@ struct counter_count {
const char *name;

size_t function;
const enum counter_count_function *functions_list;
const enum counter_function *functions_list;
size_t num_functions;

struct counter_synapse *synapses;
@@ -290,16 +290,16 @@ struct counter_device_state {
const struct attribute_group **groups;
};

enum counter_signal_value {
COUNTER_SIGNAL_LOW = 0,
COUNTER_SIGNAL_HIGH
enum counter_signal_level {
COUNTER_SIGNAL_LEVEL_LOW,
COUNTER_SIGNAL_LEVEL_HIGH,
};

/**
* struct counter_ops - Callbacks from driver
* @signal_read: optional read callback for Signal attribute. The read
* value of the respective Signal should be passed back via
* the val parameter.
* level of the respective Signal should be passed back via
* the level parameter.
* @count_read: optional read callback for Count attribute. The read
* value of the respective Count should be passed back via
* the val parameter.
@@ -324,7 +324,7 @@ enum counter_signal_value {
struct counter_ops {
int (*signal_read)(struct counter_device *counter,
struct counter_signal *signal,
enum counter_signal_value *val);
enum counter_signal_level *level);
int (*count_read)(struct counter_device *counter,
struct counter_count *count, unsigned long *val);
int (*count_write)(struct counter_device *counter,

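Editor's note, a hypothetical driver-side table using the renamed enum; an array like this is what a struct counter_count's functions_list points at after the change:

static const enum counter_function quad_functions[] = {
	COUNTER_FUNCTION_QUADRATURE_X1_A,
	COUNTER_FUNCTION_QUADRATURE_X2_A,
	COUNTER_FUNCTION_QUADRATURE_X4,
};
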
@@ -143,12 +143,6 @@ static inline int remove_cpu(unsigned int cpu) { return -EPERM; }
static inline void smp_shutdown_nonboot_cpus(unsigned int primary_cpu) { }
#endif /* !CONFIG_HOTPLUG_CPU */

/* Wrappers which go away once all code is converted */
static inline void cpu_hotplug_begin(void) { cpus_write_lock(); }
static inline void cpu_hotplug_done(void) { cpus_write_unlock(); }
static inline void get_online_cpus(void) { cpus_read_lock(); }
static inline void put_online_cpus(void) { cpus_read_unlock(); }

#ifdef CONFIG_PM_SLEEP_SMP
extern int freeze_secondary_cpus(int primary);
extern void thaw_secondary_cpus(void);

@@ -9,10 +9,14 @@
#define _LINUX_CPUFREQ_H

#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/completion.h>
#include <linux/kobject.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pm_opp.h>
#include <linux/pm_qos.h>
#include <linux/spinlock.h>
#include <linux/sysfs.h>

@@ -365,14 +369,17 @@ struct cpufreq_driver {
int (*suspend)(struct cpufreq_policy *policy);
int (*resume)(struct cpufreq_policy *policy);

/* Will be called after the driver is fully initialized */
void (*ready)(struct cpufreq_policy *policy);

struct freq_attr **attr;

/* platform specific boost support code */
bool boost_enabled;
int (*set_boost)(struct cpufreq_policy *policy, int state);

/*
* Set by drivers that want to register with the energy model after the
* policy is properly initialized, but before the governor is started.
*/
void (*register_em)(struct cpufreq_policy *policy);
};

/* flags */

@@ -995,6 +1002,55 @@ static inline int cpufreq_table_count_valid_entries(const struct cpufreq_policy

return count;
}

static inline int parse_perf_domain(int cpu, const char *list_name,
const char *cell_name)
{
struct device_node *cpu_np;
struct of_phandle_args args;
int ret;

cpu_np = of_cpu_device_node_get(cpu);
if (!cpu_np)
return -ENODEV;

ret = of_parse_phandle_with_args(cpu_np, list_name, cell_name, 0,
&args);
if (ret < 0)
return ret;

of_node_put(cpu_np);

return args.args[0];
}

static inline int of_perf_domain_get_sharing_cpumask(int pcpu, const char *list_name,
const char *cell_name, struct cpumask *cpumask)
{
int target_idx;
int cpu, ret;

ret = parse_perf_domain(pcpu, list_name, cell_name);
if (ret < 0)
return ret;

target_idx = ret;
cpumask_set_cpu(pcpu, cpumask);

for_each_possible_cpu(cpu) {
if (cpu == pcpu)
continue;

ret = parse_perf_domain(pcpu, list_name, cell_name);
if (ret < 0)
continue;

if (target_idx == ret)
cpumask_set_cpu(cpu, cpumask);
}

return target_idx;
}
#else
static inline int cpufreq_boost_trigger_state(int state)
{
@@ -1014,6 +1070,12 @@ static inline bool policy_has_boost_freq(struct cpufreq_policy *policy)
{
return false;
}

static inline int of_perf_domain_get_sharing_cpumask(int pcpu, const char *list_name,
const char *cell_name, struct cpumask *cpumask)
{
return -EOPNOTSUPP;
}
#endif

#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
@@ -1035,7 +1097,6 @@ void arch_set_freq_scale(const struct cpumask *cpus,
{
}
#endif

/* the following are really really optional */
extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs;
extern struct freq_attr cpufreq_freq_attr_scaling_boost_freqs;
@@ -1046,4 +1107,10 @@ unsigned int cpufreq_generic_get(unsigned int cpu);
void cpufreq_generic_init(struct cpufreq_policy *policy,
struct cpufreq_frequency_table *table,
unsigned int transition_latency);

static inline void cpufreq_register_em_with_opp(struct cpufreq_policy *policy)
{
dev_pm_opp_of_register_em(get_cpu_device(policy->cpu),
policy->related_cpus);
}
#endif /* _LINUX_CPUFREQ_H */

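Editor's note, a sketch of a driver opting in to the new ->register_em() hook with the cpufreq_register_em_with_opp() helper added above; the driver name and the elided callbacks are placeholders:

static struct cpufreq_driver example_cpufreq_driver = {
	.name		= "example",
	/* .init, .target_index, ... elided */
	.register_em	= cpufreq_register_em_with_opp,
};
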
@@ -22,8 +22,42 @@
|
||||
* AP_ACTIVE AP_ACTIVE
|
||||
*/
|
||||
|
||||
/*
|
||||
* CPU hotplug states. The state machine invokes the installed state
|
||||
* startup callbacks sequentially from CPUHP_OFFLINE + 1 to CPUHP_ONLINE
|
||||
* during a CPU online operation. During a CPU offline operation the
|
||||
* installed teardown callbacks are invoked in the reverse order from
|
||||
* CPU_ONLINE - 1 down to CPUHP_OFFLINE.
|
||||
*
|
||||
* The state space has three sections: PREPARE, STARTING and ONLINE.
|
||||
*
|
||||
* PREPARE: The callbacks are invoked on a control CPU before the
|
||||
* hotplugged CPU is started up or after the hotplugged CPU has died.
|
||||
*
|
||||
* STARTING: The callbacks are invoked on the hotplugged CPU from the low level
|
||||
* hotplug startup/teardown code with interrupts disabled.
|
||||
*
|
||||
* ONLINE: The callbacks are invoked on the hotplugged CPU from the per CPU
|
||||
* hotplug thread with interrupts and preemption enabled.
|
||||
*
|
||||
* Adding explicit states to this enum is only necessary when:
|
||||
*
|
||||
* 1) The state is within the STARTING section
|
||||
*
|
||||
* 2) The state has ordering constraints vs. other states in the
|
||||
* same section.
|
||||
*
|
||||
* If neither #1 nor #2 apply, please use the dynamic state space when
|
||||
* setting up a state by using CPUHP_PREPARE_DYN or CPUHP_PREPARE_ONLINE
|
||||
* for the @state argument of the setup function.
|
||||
*
|
||||
* See Documentation/core-api/cpu_hotplug.rst for further information and
|
||||
* examples.
|
||||
*/
|
||||
enum cpuhp_state {
|
||||
CPUHP_INVALID = -1,
|
||||
|
||||
/* PREPARE section invoked on a control CPU */
|
||||
CPUHP_OFFLINE = 0,
|
||||
CPUHP_CREATE_THREADS,
|
||||
CPUHP_PERF_PREPARE,
|
||||
@@ -38,6 +72,8 @@ enum cpuhp_state {
|
||||
CPUHP_SLUB_DEAD,
|
||||
CPUHP_DEBUG_OBJ_DEAD,
|
||||
CPUHP_MM_WRITEBACK_DEAD,
|
||||
/* Must be after CPUHP_MM_VMSTAT_DEAD */
|
||||
CPUHP_MM_DEMOTION_DEAD,
|
||||
CPUHP_MM_VMSTAT_DEAD,
|
||||
CPUHP_SOFTIRQ_DEAD,
|
||||
CPUHP_NET_MVNETA_DEAD,
|
||||
@@ -46,12 +82,14 @@ enum cpuhp_state {
|
||||
CPUHP_ARM_OMAP_WAKE_DEAD,
|
||||
CPUHP_IRQ_POLL_DEAD,
|
||||
CPUHP_BLOCK_SOFTIRQ_DEAD,
|
||||
CPUHP_BIO_DEAD,
|
||||
CPUHP_ACPI_CPUDRV_DEAD,
|
||||
CPUHP_S390_PFAULT_DEAD,
|
||||
CPUHP_BLK_MQ_DEAD,
|
||||
CPUHP_FS_BUFF_DEAD,
|
||||
CPUHP_PRINTK_DEAD,
|
||||
CPUHP_MM_MEMCQ_DEAD,
|
||||
CPUHP_XFS_DEAD,
|
||||
CPUHP_PERCPU_CNT_DEAD,
|
||||
CPUHP_RADIX_DEAD,
|
||||
CPUHP_PAGE_ALLOC,
|
||||
@@ -93,6 +131,11 @@ enum cpuhp_state {
|
||||
CPUHP_BP_PREPARE_DYN,
|
||||
CPUHP_BP_PREPARE_DYN_END = CPUHP_BP_PREPARE_DYN + 20,
|
||||
CPUHP_BRINGUP_CPU,
|
||||
|
||||
/*
|
||||
* STARTING section invoked on the hotplugged CPU in low level
|
||||
* bringup and teardown code.
|
||||
*/
|
||||
CPUHP_AP_IDLE_DEAD,
|
||||
CPUHP_AP_OFFLINE,
|
||||
CPUHP_AP_SCHED_STARTING,
|
||||
@@ -153,6 +196,8 @@ enum cpuhp_state {
|
||||
CPUHP_AP_ARM_CACHE_B15_RAC_DYING,
|
||||
CPUHP_AP_ONLINE,
|
||||
CPUHP_TEARDOWN_CPU,
|
||||
|
||||
/* Online section invoked on the hotplugged CPU from the hotplug thread */
|
||||
CPUHP_AP_ONLINE_IDLE,
|
||||
CPUHP_AP_SCHED_WAIT_EMPTY,
|
||||
CPUHP_AP_SMPBOOT_THREADS,
|
||||
@@ -197,6 +242,8 @@ enum cpuhp_state {
|
||||
CPUHP_AP_BASE_CACHEINFO_ONLINE,
|
||||
CPUHP_AP_ONLINE_DYN,
|
||||
CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 30,
|
||||
/* Must be after CPUHP_AP_ONLINE_DYN for node_states[N_CPU] update */
|
||||
CPUHP_AP_MM_DEMOTION_ONLINE,
|
||||
CPUHP_AP_X86_HPET_ONLINE,
|
||||
CPUHP_AP_X86_KVM_CLK_ONLINE,
|
||||
CPUHP_AP_DTPM_CPU_ONLINE,
|
||||
@@ -214,14 +261,15 @@ int __cpuhp_setup_state_cpuslocked(enum cpuhp_state state, const char *name,
|
||||
int (*teardown)(unsigned int cpu),
|
||||
bool multi_instance);
|
||||
/**
|
||||
* cpuhp_setup_state - Setup hotplug state callbacks with calling the callbacks
|
||||
* cpuhp_setup_state - Setup hotplug state callbacks with calling the @startup
|
||||
* callback
|
||||
* @state: The state for which the calls are installed
|
||||
* @name: Name of the callback (will be used in debug output)
|
||||
* @startup: startup callback function
|
||||
* @teardown: teardown callback function
|
||||
* @startup: startup callback function or NULL if not required
|
||||
* @teardown: teardown callback function or NULL if not required
|
||||
*
|
||||
* Installs the callback functions and invokes the startup callback on
|
||||
* the present cpus which have already reached the @state.
|
||||
* Installs the callback functions and invokes the @startup callback on
|
||||
* the online cpus which have already reached the @state.
|
||||
*/
|
||||
static inline int cpuhp_setup_state(enum cpuhp_state state,
|
||||
const char *name,
|
||||
@@ -231,6 +279,18 @@ static inline int cpuhp_setup_state(enum cpuhp_state state,
|
||||
return __cpuhp_setup_state(state, name, true, startup, teardown, false);
|
||||
}

/**
* cpuhp_setup_state_cpuslocked - Setup hotplug state callbacks with calling
* @startup callback from a cpus_read_lock()
* held region
* @state: The state for which the calls are installed
* @name: Name of the callback (will be used in debug output)
* @startup: startup callback function or NULL if not required
* @teardown: teardown callback function or NULL if not required
*
* Same as cpuhp_setup_state() except that it must be invoked from within a
* cpus_read_lock() held region.
*/
static inline int cpuhp_setup_state_cpuslocked(enum cpuhp_state state,
const char *name,
int (*startup)(unsigned int cpu),
@@ -242,14 +302,14 @@ static inline int cpuhp_setup_state_cpuslocked(enum cpuhp_state state,

/**
* cpuhp_setup_state_nocalls - Setup hotplug state callbacks without calling the
* callbacks
* @startup callback
* @state: The state for which the calls are installed
* @name: Name of the callback.
* @startup: startup callback function
* @teardown: teardown callback function
* @startup: startup callback function or NULL if not required
* @teardown: teardown callback function or NULL if not required
*
* Same as @cpuhp_setup_state except that no calls are executed are invoked
* during installation of this callback. NOP if SMP=n or HOTPLUG_CPU=n.
* Same as cpuhp_setup_state() except that the @startup callback is not
* invoked during installation. NOP if SMP=n or HOTPLUG_CPU=n.
*/
static inline int cpuhp_setup_state_nocalls(enum cpuhp_state state,
const char *name,
@@ -260,6 +320,19 @@ static inline int cpuhp_setup_state_nocalls(enum cpuhp_state state,
false);
}

/**
* cpuhp_setup_state_nocalls_cpuslocked - Setup hotplug state callbacks without
* invoking the @startup callback from
* a cpus_read_lock() held region
* callbacks
* @state: The state for which the calls are installed
* @name: Name of the callback.
* @startup: startup callback function or NULL if not required
* @teardown: teardown callback function or NULL if not required
*
* Same as cpuhp_setup_state_nocalls() except that it must be invoked from
* within a cpus_read_lock() held region.
*/
static inline int cpuhp_setup_state_nocalls_cpuslocked(enum cpuhp_state state,
const char *name,
int (*startup)(unsigned int cpu),
@@ -273,13 +346,13 @@ static inline int cpuhp_setup_state_nocalls_cpuslocked(enum cpuhp_state state,
* cpuhp_setup_state_multi - Add callbacks for multi state
* @state: The state for which the calls are installed
* @name: Name of the callback.
* @startup: startup callback function
* @teardown: teardown callback function
* @startup: startup callback function or NULL if not required
* @teardown: teardown callback function or NULL if not required
*
* Sets the internal multi_instance flag and prepares a state to work as a multi
* instance callback. No callbacks are invoked at this point. The callbacks are
* invoked once an instance for this state are registered via
* @cpuhp_state_add_instance or @cpuhp_state_add_instance_nocalls.
* cpuhp_state_add_instance() or cpuhp_state_add_instance_nocalls()
*/
static inline int cpuhp_setup_state_multi(enum cpuhp_state state,
const char *name,
@@ -304,9 +377,10 @@ int __cpuhp_state_add_instance_cpuslocked(enum cpuhp_state state,
* @state: The state for which the instance is installed
* @node: The node for this individual state.
*
* Installs the instance for the @state and invokes the startup callback on
* the present cpus which have already reached the @state. The @state must have
* been earlier marked as multi-instance by @cpuhp_setup_state_multi.
* Installs the instance for the @state and invokes the registered startup
* callback on the online cpus which have already reached the @state. The
* @state must have been earlier marked as multi-instance by
* cpuhp_setup_state_multi().
*/
static inline int cpuhp_state_add_instance(enum cpuhp_state state,
struct hlist_node *node)
@@ -320,8 +394,9 @@ static inline int cpuhp_state_add_instance(enum cpuhp_state state,
* @state: The state for which the instance is installed
* @node: The node for this individual state.
*
* Installs the instance for the @state The @state must have been earlier
* marked as multi-instance by @cpuhp_setup_state_multi.
* Installs the instance for the @state. The @state must have been earlier
* marked as multi-instance by cpuhp_setup_state_multi. NOP if SMP=n or
* HOTPLUG_CPU=n.
*/
static inline int cpuhp_state_add_instance_nocalls(enum cpuhp_state state,
struct hlist_node *node)
@@ -329,6 +404,17 @@ static inline int cpuhp_state_add_instance_nocalls(enum cpuhp_state state,
return __cpuhp_state_add_instance(state, node, false);
}
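
A multi-instance state ties per-object instances to one set of callbacks; a minimal sketch, assuming a hypothetical per-device structure that embeds the hlist_node:

	#include <linux/cpuhotplug.h>

	struct mydev {
		struct hlist_node cpuhp_node;	/* linkage for the cpuhp core */
		/* ... */
	};

	static enum cpuhp_state mydrv_online;

	static int mydrv_dev_online(unsigned int cpu, struct hlist_node *node)
	{
		struct mydev *dev = container_of(node, struct mydev, cpuhp_node);

		/* bind @dev's per-CPU resources for @cpu */
		return 0;
	}

	static int __init mydrv_init(void)
	{
		int ret;

		ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "mydrv:online",
					      mydrv_dev_online, NULL);
		if (ret < 0)
			return ret;
		mydrv_online = ret;
		return 0;
	}

	static int mydrv_probe_one(struct mydev *dev)
	{
		/* runs mydrv_dev_online() on each online CPU for this instance */
		return cpuhp_state_add_instance(mydrv_online, &dev->cpuhp_node);
	}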

/**
* cpuhp_state_add_instance_nocalls_cpuslocked - Add an instance for a state
* without invoking the startup
* callback from a cpus_read_lock()
* held region.
* @state: The state for which the instance is installed
* @node: The node for this individual state.
*
* Same as cpuhp_state_add_instance_nocalls() except that it must be
* invoked from within a cpus_read_lock() held region.
*/
static inline int
cpuhp_state_add_instance_nocalls_cpuslocked(enum cpuhp_state state,
struct hlist_node *node)
@@ -344,7 +430,7 @@ void __cpuhp_remove_state_cpuslocked(enum cpuhp_state state, bool invoke);
* @state: The state for which the calls are removed
*
* Removes the callback functions and invokes the teardown callback on
* the present cpus which have already reached the @state.
* the online cpus which have already reached the @state.
*/
static inline void cpuhp_remove_state(enum cpuhp_state state)
{
@@ -353,7 +439,7 @@ static inline void cpuhp_remove_state(enum cpuhp_state state)

/**
* cpuhp_remove_state_nocalls - Remove hotplug state callbacks without invoking
* teardown
* the teardown callback
* @state: The state for which the calls are removed
*/
static inline void cpuhp_remove_state_nocalls(enum cpuhp_state state)
@@ -361,6 +447,14 @@ static inline void cpuhp_remove_state_nocalls(enum cpuhp_state state)
__cpuhp_remove_state(state, false);
}

/**
* cpuhp_remove_state_nocalls_cpuslocked - Remove hotplug state callbacks without invoking
* teardown from a cpus_read_lock() held region.
* @state: The state for which the calls are removed
*
* Same as cpuhp_remove_state_nocalls() except that it must be invoked
* from within a cpus_read_lock() held region.
*/
static inline void cpuhp_remove_state_nocalls_cpuslocked(enum cpuhp_state state)
{
__cpuhp_remove_state_cpuslocked(state, false);
@@ -388,8 +482,8 @@ int __cpuhp_state_remove_instance(enum cpuhp_state state,
* @state: The state from which the instance is removed
* @node: The node for this individual state.
*
* Removes the instance and invokes the teardown callback on the present cpus
* which have already reached the @state.
* Removes the instance and invokes the teardown callback on the online cpus
* which have already reached @state.
*/
static inline int cpuhp_state_remove_instance(enum cpuhp_state state,
struct hlist_node *node)
@@ -399,7 +493,7 @@ static inline int cpuhp_state_remove_instance(enum cpuhp_state state,

/**
* cpuhp_state_remove_instance_nocalls - Remove hotplug instance from state
* without invoking the reatdown callback
* without invoking the teardown callback
* @state: The state from which the instance is removed
* @node: The node for this individual state.
*

@@ -983,6 +983,45 @@ cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask)
nr_cpu_ids);
}

/**
* cpumap_print_bitmask_to_buf - copies the cpumask into the buffer as
* hex values of cpumask
*
* @buf: the buffer to copy into
* @mask: the cpumask to copy
* @off: in the string from which we are copying, we copy to @buf
* @count: the maximum number of bytes to print
*
* The function prints the cpumask into the buffer as hex values of
* cpumask; Typically used by bin_attribute to export cpumask bitmask
* ABI.
*
* Returns the length of how many bytes have been copied, excluding
* terminating '\0'.
*/
static inline ssize_t
cpumap_print_bitmask_to_buf(char *buf, const struct cpumask *mask,
loff_t off, size_t count)
{
return bitmap_print_bitmask_to_buf(buf, cpumask_bits(mask),
nr_cpu_ids, off, count) - 1;
}

/**
* cpumap_print_list_to_buf - copies the cpumask into the buffer as
* comma-separated list of cpus
*
* Everything is same with the above cpumap_print_bitmask_to_buf()
* except the print format.
*/
static inline ssize_t
cpumap_print_list_to_buf(char *buf, const struct cpumask *mask,
loff_t off, size_t count)
{
return bitmap_print_list_to_buf(buf, cpumask_bits(mask),
nr_cpu_ids, off, count) - 1;
}
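
As the kerneldoc above notes, the typical consumer is a bin_attribute ->read() handler; a minimal sketch, where mydrv_cpus is a hypothetical driver-owned cpumask:

	#include <linux/cpumask.h>
	#include <linux/sysfs.h>

	static struct cpumask mydrv_cpus;

	/* ->read() handler of a bin_attribute exporting the mask as hex */
	static ssize_t mydrv_cpus_read(struct file *file, struct kobject *kobj,
				       struct bin_attribute *attr, char *buf,
				       loff_t off, size_t count)
	{
		/* copies at most @count bytes of the hex representation,
		 * starting at offset @off, into @buf
		 */
		return cpumap_print_bitmask_to_buf(buf, &mydrv_cpus, off, count);
	}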

#if NR_CPUS <= BITS_PER_LONG
#define CPU_MASK_ALL \
(cpumask_t) { { \

@@ -15,6 +15,7 @@
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/jump_label.h>

#ifdef CONFIG_CPUSETS
@@ -58,7 +59,7 @@ extern void cpuset_wait_for_hotplug(void);
extern void cpuset_read_lock(void);
extern void cpuset_read_unlock(void);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
extern bool cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
@@ -184,11 +185,12 @@ static inline void cpuset_read_unlock(void) { }
static inline void cpuset_cpus_allowed(struct task_struct *p,
struct cpumask *mask)
{
cpumask_copy(mask, cpu_possible_mask);
cpumask_copy(mask, task_cpu_possible_mask(p));
}

static inline void cpuset_cpus_allowed_fallback(struct task_struct *p)
static inline bool cpuset_cpus_allowed_fallback(struct task_struct *p)
{
return false;
}

static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)

@@ -10,13 +10,14 @@

#include <linux/pgtable.h> /* for pgprot_t */

#ifdef CONFIG_CRASH_DUMP
/* For IS_ENABLED(CONFIG_CRASH_DUMP) */
#define ELFCORE_ADDR_MAX (-1ULL)
#define ELFCORE_ADDR_ERR (-2ULL)

extern unsigned long long elfcorehdr_addr;
extern unsigned long long elfcorehdr_size;

#ifdef CONFIG_CRASH_DUMP
extern int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size);
extern void elfcorehdr_free(unsigned long long addr);
extern ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos);

268
include/linux/damon.h
Normal file
@@ -0,0 +1,268 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* DAMON api
*
* Author: SeongJae Park <sjpark@amazon.de>
*/

#ifndef _DAMON_H_
#define _DAMON_H_

#include <linux/mutex.h>
#include <linux/time64.h>
#include <linux/types.h>

/* Minimal region size. Every damon_region is aligned by this. */
#define DAMON_MIN_REGION PAGE_SIZE

/**
* struct damon_addr_range - Represents an address region of [@start, @end).
* @start: Start address of the region (inclusive).
* @end: End address of the region (exclusive).
*/
struct damon_addr_range {
unsigned long start;
unsigned long end;
};

/**
* struct damon_region - Represents a monitoring target region.
* @ar: The address range of the region.
* @sampling_addr: Address of the sample for the next access check.
* @nr_accesses: Access frequency of this region.
* @list: List head for siblings.
*/
struct damon_region {
struct damon_addr_range ar;
unsigned long sampling_addr;
unsigned int nr_accesses;
struct list_head list;
};

/**
* struct damon_target - Represents a monitoring target.
* @id: Unique identifier for this target.
* @nr_regions: Number of monitoring target regions of this target.
* @regions_list: Head of the monitoring target regions of this target.
* @list: List head for siblings.
*
* Each monitoring context could have multiple targets. For example, a context
* for virtual memory address spaces could have multiple target processes. The
* @id of each target should be unique among the targets of the context. For
* example, in the virtual address monitoring context, it could be a pidfd or
* an address of an mm_struct.
*/
struct damon_target {
unsigned long id;
unsigned int nr_regions;
struct list_head regions_list;
struct list_head list;
};

struct damon_ctx;

/**
* struct damon_primitive - Monitoring primitives for given use cases.
*
* @init: Initialize primitive-internal data structures.
* @update: Update primitive-internal data structures.
* @prepare_access_checks: Prepare next access check of target regions.
* @check_accesses: Check the accesses to target regions.
* @reset_aggregated: Reset aggregated accesses monitoring results.
* @target_valid: Determine if the target is valid.
* @cleanup: Clean up the context.
*
* DAMON can be extended for various address spaces and usages. For this,
* users should register the low level primitives for their target address
* space and usecase via the &damon_ctx.primitive. Then, the monitoring thread
* (&damon_ctx.kdamond) calls @init and @prepare_access_checks before starting
* the monitoring, @update after each &damon_ctx.primitive_update_interval, and
* @check_accesses, @target_valid and @prepare_access_checks after each
* &damon_ctx.sample_interval. Finally, @reset_aggregated is called after each
* &damon_ctx.aggr_interval.
*
* @init should initialize primitive-internal data structures. For example,
* this could be used to construct proper monitoring target regions and link
* those to @damon_ctx.adaptive_targets.
* @update should update the primitive-internal data structures. For example,
* this could be used to update monitoring target regions for current status.
* @prepare_access_checks should manipulate the monitoring regions to be
* prepared for the next access check.
* @check_accesses should check the accesses to each region made after the
* last preparation and update the number of observed accesses of each region.
* It should also return the max number of observed accesses made as a result
* of its update. The value will be used for regions adjustment threshold.
* @reset_aggregated should reset the access monitoring results aggregated
* by @check_accesses.
* @target_valid should check whether the target is still valid for the
* monitoring.
* @cleanup is called from @kdamond just before its termination.
*/
struct damon_primitive {
void (*init)(struct damon_ctx *context);
void (*update)(struct damon_ctx *context);
void (*prepare_access_checks)(struct damon_ctx *context);
unsigned int (*check_accesses)(struct damon_ctx *context);
void (*reset_aggregated)(struct damon_ctx *context);
bool (*target_valid)(void *target);
void (*cleanup)(struct damon_ctx *context);
};

/*
* struct damon_callback - Monitoring events notification callbacks.
*
* @before_start: Called before starting the monitoring.
* @after_sampling: Called after each sampling.
* @after_aggregation: Called after each aggregation.
* @before_terminate: Called before terminating the monitoring.
* @private: User private data.
*
* The monitoring thread (&damon_ctx.kdamond) calls @before_start and
* @before_terminate just before starting and finishing the monitoring,
* respectively. Therefore, those are good places for installing and cleaning
* @private.
*
* The monitoring thread calls @after_sampling and @after_aggregation for each
* of the sampling intervals and aggregation intervals, respectively.
* Therefore, users can safely access the monitoring results without additional
* protection. For this reason, users are recommended to use these callbacks
* for the accesses to the results.
*
* If any callback returns non-zero, monitoring stops.
*/
struct damon_callback {
void *private;

int (*before_start)(struct damon_ctx *context);
int (*after_sampling)(struct damon_ctx *context);
int (*after_aggregation)(struct damon_ctx *context);
int (*before_terminate)(struct damon_ctx *context);
};

/**
* struct damon_ctx - Represents a context for each monitoring. This is the
* main interface that allows users to set the attributes and get the results
* of the monitoring.
*
* @sample_interval: The time between access samplings.
* @aggr_interval: The time between monitor results aggregations.
* @primitive_update_interval: The time between monitoring primitive updates.
*
* For each @sample_interval, DAMON checks whether each region is accessed or
* not. It aggregates and keeps the access information (number of accesses to
* each region) for @aggr_interval time. DAMON also checks whether the target
* memory regions need update (e.g., by ``mmap()`` calls from the application,
* in case of virtual memory monitoring) and applies the changes for each
* @primitive_update_interval. All time intervals are in micro-seconds.
* Please refer to &struct damon_primitive and &struct damon_callback for more
* detail.
*
* @kdamond: Kernel thread who does the monitoring.
* @kdamond_stop: Notifies whether kdamond should stop.
* @kdamond_lock: Mutex for the synchronizations with @kdamond.
*
* For each monitoring context, one kernel thread for the monitoring is
* created. The pointer to the thread is stored in @kdamond.
*
* Once started, the monitoring thread runs until explicitly required to be
* terminated or every monitoring target is invalid. The validity of the
* targets is checked via the &damon_primitive.target_valid of @primitive. The
* termination can also be explicitly requested by writing non-zero to
* @kdamond_stop. The thread sets @kdamond to NULL when it terminates.
* Therefore, users can know whether the monitoring is ongoing or terminated by
* reading @kdamond. Reads and writes to @kdamond and @kdamond_stop from
* outside of the monitoring thread must be protected by @kdamond_lock.
*
* Note that the monitoring thread protects only @kdamond and @kdamond_stop via
* @kdamond_lock. Accesses to other fields must be protected by themselves.
*
* @primitive: Set of monitoring primitives for given use cases.
* @callback: Set of callbacks for monitoring events notifications.
*
* @min_nr_regions: The minimum number of adaptive monitoring regions.
* @max_nr_regions: The maximum number of adaptive monitoring regions.
* @adaptive_targets: Head of monitoring targets (&damon_target) list.
*/
struct damon_ctx {
unsigned long sample_interval;
unsigned long aggr_interval;
unsigned long primitive_update_interval;

/* private: internal use only */
struct timespec64 last_aggregation;
struct timespec64 last_primitive_update;

/* public: */
struct task_struct *kdamond;
bool kdamond_stop;
struct mutex kdamond_lock;

struct damon_primitive primitive;
struct damon_callback callback;

unsigned long min_nr_regions;
unsigned long max_nr_regions;
struct list_head adaptive_targets;
};

#define damon_next_region(r) \
(container_of(r->list.next, struct damon_region, list))

#define damon_prev_region(r) \
(container_of(r->list.prev, struct damon_region, list))

#define damon_for_each_region(r, t) \
list_for_each_entry(r, &t->regions_list, list)

#define damon_for_each_region_safe(r, next, t) \
list_for_each_entry_safe(r, next, &t->regions_list, list)

#define damon_for_each_target(t, ctx) \
list_for_each_entry(t, &(ctx)->adaptive_targets, list)

#define damon_for_each_target_safe(t, next, ctx) \
list_for_each_entry_safe(t, next, &(ctx)->adaptive_targets, list)

#ifdef CONFIG_DAMON

struct damon_region *damon_new_region(unsigned long start, unsigned long end);
inline void damon_insert_region(struct damon_region *r,
struct damon_region *prev, struct damon_region *next,
struct damon_target *t);
void damon_add_region(struct damon_region *r, struct damon_target *t);
void damon_destroy_region(struct damon_region *r, struct damon_target *t);

struct damon_target *damon_new_target(unsigned long id);
void damon_add_target(struct damon_ctx *ctx, struct damon_target *t);
void damon_free_target(struct damon_target *t);
void damon_destroy_target(struct damon_target *t);
unsigned int damon_nr_regions(struct damon_target *t);

struct damon_ctx *damon_new_ctx(void);
void damon_destroy_ctx(struct damon_ctx *ctx);
int damon_set_targets(struct damon_ctx *ctx,
unsigned long *ids, ssize_t nr_ids);
int damon_set_attrs(struct damon_ctx *ctx, unsigned long sample_int,
unsigned long aggr_int, unsigned long primitive_upd_int,
unsigned long min_nr_reg, unsigned long max_nr_reg);
int damon_nr_running_ctxs(void);

int damon_start(struct damon_ctx **ctxs, int nr_ctxs);
int damon_stop(struct damon_ctx **ctxs, int nr_ctxs);

#endif /* CONFIG_DAMON */

#ifdef CONFIG_DAMON_VADDR

/* Monitoring primitives for virtual memory address spaces */
void damon_va_init(struct damon_ctx *ctx);
void damon_va_update(struct damon_ctx *ctx);
void damon_va_prepare_access_checks(struct damon_ctx *ctx);
unsigned int damon_va_check_accesses(struct damon_ctx *ctx);
bool damon_va_target_valid(void *t);
void damon_va_cleanup(struct damon_ctx *ctx);
void damon_va_set_primitives(struct damon_ctx *ctx);

#endif /* CONFIG_DAMON_VADDR */

#endif /* _DAMON_H */
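
Putting the API above together, a minimal sketch of starting virtual address space monitoring for one target; error handling is shortened and the target id semantics follow whichever primitives are in use:

	#include <linux/damon.h>

	static int mydrv_damon_start(unsigned long target_id)
	{
		struct damon_ctx *ctx;
		int err;

		ctx = damon_new_ctx();
		if (!ctx)
			return -ENOMEM;

		damon_va_set_primitives(ctx);	/* virtual address space monitoring */

		/* sample 5ms, aggregate 100ms, update primitives 1s; 10-1000 regions */
		err = damon_set_attrs(ctx, 5000, 100000, 1000000, 10, 1000);
		if (err)
			goto out;

		err = damon_set_targets(ctx, &target_id, 1);
		if (err)
			goto out;

		return damon_start(&ctx, 1);
	out:
		damon_destroy_ctx(ctx);
		return err;
	}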

@@ -41,7 +41,6 @@ struct dax_operations {
extern struct attribute_group dax_attribute_group;

#if IS_ENABLED(CONFIG_DAX)
struct dax_device *dax_get_by_host(const char *host);
struct dax_device *alloc_dax(void *private, const char *host,
const struct dax_operations *ops, unsigned long flags);
void put_dax(struct dax_device *dax_dev);
@@ -58,8 +57,6 @@ static inline void set_dax_synchronous(struct dax_device *dax_dev)
{
__set_dax_synchronous(dax_dev);
}
bool dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
int blocksize, sector_t start, sector_t len);
/*
* Check if given mapping is supported by the file / underlying device.
*/
@@ -73,10 +70,6 @@ static inline bool daxdev_mapping_supported(struct vm_area_struct *vma,
return dax_synchronous(dax_dev);
}
#else
static inline struct dax_device *dax_get_by_host(const char *host)
{
return NULL;
}
static inline struct dax_device *alloc_dax(void *private, const char *host,
const struct dax_operations *ops, unsigned long flags)
{
@@ -106,12 +99,6 @@ static inline bool dax_synchronous(struct dax_device *dax_dev)
static inline void set_dax_synchronous(struct dax_device *dax_dev)
{
}
static inline bool dax_supported(struct dax_device *dax_dev,
struct block_device *bdev, int blocksize, sector_t start,
sector_t len)
{
return false;
}
static inline bool daxdev_mapping_supported(struct vm_area_struct *vma,
struct dax_device *dax_dev)
{
@@ -122,22 +109,12 @@ static inline bool daxdev_mapping_supported(struct vm_area_struct *vma,
struct writeback_control;
int bdev_dax_pgoff(struct block_device *, sector_t, size_t, pgoff_t *pgoff);
#if IS_ENABLED(CONFIG_FS_DAX)
bool __bdev_dax_supported(struct block_device *bdev, int blocksize);
static inline bool bdev_dax_supported(struct block_device *bdev, int blocksize)
{
return __bdev_dax_supported(bdev, blocksize);
}

bool __generic_fsdax_supported(struct dax_device *dax_dev,
bool generic_fsdax_supported(struct dax_device *dax_dev,
struct block_device *bdev, int blocksize, sector_t start,
sector_t sectors);
static inline bool generic_fsdax_supported(struct dax_device *dax_dev,
struct block_device *bdev, int blocksize, sector_t start,
sector_t sectors)
{
return __generic_fsdax_supported(dax_dev, bdev, blocksize, start,
sectors);
}

bool dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
int blocksize, sector_t start, sector_t len);

static inline void fs_put_dax(struct dax_device *dax_dev)
{
@@ -153,15 +130,11 @@ struct page *dax_layout_busy_page_range(struct address_space *mapping, loff_t st
dax_entry_t dax_lock_page(struct page *page);
void dax_unlock_page(struct page *page, dax_entry_t cookie);
#else
static inline bool bdev_dax_supported(struct block_device *bdev,
int blocksize)
{
return false;
}
#define generic_fsdax_supported NULL

static inline bool generic_fsdax_supported(struct dax_device *dax_dev,
static inline bool dax_supported(struct dax_device *dax_dev,
struct block_device *bdev, int blocksize, sector_t start,
sector_t sectors)
sector_t len)
{
return false;
}
@@ -3,8 +3,7 @@
#define __LINUX_DEBUG_LOCKING_H

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/printk.h>
#include <linux/cache.h>

struct task_struct;

@@ -38,8 +38,8 @@ __printf(3, 4) __cold
int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...);

__printf(3, 4) __cold
void dev_printk(const char *level, const struct device *dev,
const char *fmt, ...);
void _dev_printk(const char *level, const struct device *dev,
const char *fmt, ...);
__printf(2, 3) __cold
void _dev_emerg(const struct device *dev, const char *fmt, ...);
__printf(2, 3) __cold
@@ -69,7 +69,7 @@ static inline void __dev_printk(const char *level, const struct device *dev,
struct va_format *vaf)
{}
static inline __printf(3, 4)
void dev_printk(const char *level, const struct device *dev,
void _dev_printk(const char *level, const struct device *dev,
const char *fmt, ...)
{}

@@ -97,25 +97,57 @@ void _dev_info(const struct device *dev, const char *fmt, ...)

#endif

/*
* Need to take variadic arguments even though we don't use them, as dev_fmt()
* may only just have been expanded and may result in multiple arguments.
*/
#define dev_printk_index_emit(level, fmt, ...) \
printk_index_subsys_emit("%s %s: ", level, fmt)

#define dev_printk_index_wrap(_p_func, level, dev, fmt, ...) \
({ \
dev_printk_index_emit(level, fmt); \
_p_func(dev, fmt, ##__VA_ARGS__); \
})

/*
* Some callsites directly call dev_printk rather than going through the
* dev_<level> infrastructure, so we need to emit here as well as inside those
* level-specific macros. Only one index entry will be produced, either way,
* since dev_printk's `fmt` isn't known at compile time if going through the
* dev_<level> macros.
*
* dev_fmt() isn't called for dev_printk when used directly, as it's used by
* the dev_<level> macros internally which already have dev_fmt() processed.
*
* We also can't use dev_printk_index_wrap directly, because we have a separate
* level to process.
*/
#define dev_printk(level, dev, fmt, ...) \
({ \
dev_printk_index_emit(level, fmt); \
_dev_printk(level, dev, fmt, ##__VA_ARGS__); \
})

/*
* #defines for all the dev_<level> macros to prefix with whatever
* possible use of #define dev_fmt(fmt) ...
*/

#define dev_emerg(dev, fmt, ...) \
_dev_emerg(dev, dev_fmt(fmt), ##__VA_ARGS__)
#define dev_crit(dev, fmt, ...) \
_dev_crit(dev, dev_fmt(fmt), ##__VA_ARGS__)
#define dev_alert(dev, fmt, ...) \
_dev_alert(dev, dev_fmt(fmt), ##__VA_ARGS__)
#define dev_err(dev, fmt, ...) \
_dev_err(dev, dev_fmt(fmt), ##__VA_ARGS__)
#define dev_warn(dev, fmt, ...) \
_dev_warn(dev, dev_fmt(fmt), ##__VA_ARGS__)
#define dev_notice(dev, fmt, ...) \
_dev_notice(dev, dev_fmt(fmt), ##__VA_ARGS__)
#define dev_info(dev, fmt, ...) \
_dev_info(dev, dev_fmt(fmt), ##__VA_ARGS__)
#define dev_emerg(dev, fmt, ...) \
dev_printk_index_wrap(_dev_emerg, KERN_EMERG, dev, dev_fmt(fmt), ##__VA_ARGS__)
#define dev_crit(dev, fmt, ...) \
dev_printk_index_wrap(_dev_crit, KERN_CRIT, dev, dev_fmt(fmt), ##__VA_ARGS__)
#define dev_alert(dev, fmt, ...) \
dev_printk_index_wrap(_dev_alert, KERN_ALERT, dev, dev_fmt(fmt), ##__VA_ARGS__)
#define dev_err(dev, fmt, ...) \
dev_printk_index_wrap(_dev_err, KERN_ERR, dev, dev_fmt(fmt), ##__VA_ARGS__)
#define dev_warn(dev, fmt, ...) \
dev_printk_index_wrap(_dev_warn, KERN_WARNING, dev, dev_fmt(fmt), ##__VA_ARGS__)
#define dev_notice(dev, fmt, ...) \
dev_printk_index_wrap(_dev_notice, KERN_NOTICE, dev, dev_fmt(fmt), ##__VA_ARGS__)
#define dev_info(dev, fmt, ...) \
dev_printk_index_wrap(_dev_info, KERN_INFO, dev, dev_fmt(fmt), ##__VA_ARGS__)
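
The indirection above keeps dev_fmt() prefixing working unchanged; a minimal usage sketch, with hypothetical mydrv names:

	/* conventionally defined near the top of the .c file, before the
	 * dev_<level> calls below expand dev_fmt()
	 */
	#define dev_fmt(fmt) "mydrv: " fmt

	static int mydrv_probe(struct device *dev)
	{
		dev_info(dev, "probing\n");	/* logs "... mydrv: probing" */
		dev_err(dev, "probe failed: %d\n", -EIO);
		return -EIO;
	}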

#if defined(CONFIG_DYNAMIC_DEBUG) || \
(defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))

@@ -31,7 +31,7 @@ enum dm_queue_mode {
DM_TYPE_DAX_BIO_BASED = 3,
};

typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t;
typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE, STATUSTYPE_IMA } status_type_t;

union map_info {
void *ptr;
@@ -151,7 +151,6 @@ typedef size_t (*dm_dax_copy_iter_fn)(struct dm_target *ti, pgoff_t pgoff,
void *addr, size_t bytes, struct iov_iter *i);
typedef int (*dm_dax_zero_page_range_fn)(struct dm_target *ti, pgoff_t pgoff,
size_t nr_pages);
#define PAGE_SECTORS (PAGE_SIZE / 512)

void dm_error(const char *message);

@@ -603,6 +602,10 @@ void dm_destroy_keyslot_manager(struct blk_keyslot_manager *ksm);
#define DMEMIT(x...) sz += ((sz >= maxlen) ? \
0 : scnprintf(result + sz, maxlen - sz, x))

#define DMEMIT_TARGET_NAME_VERSION(y) \
DMEMIT("target_name=%s,target_version=%u.%u.%u", \
(y)->name, (y)->version[0], (y)->version[1], (y)->version[2])
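
A sketch of how DMEMIT_TARGET_NAME_VERSION() would be used from a target's ->status() method; mydm_status is hypothetical, and DMEMIT() expects sz, result and maxlen to be in scope:

	#include <linux/device-mapper.h>

	static void mydm_status(struct dm_target *ti, status_type_t type,
				unsigned status_flags, char *result,
				unsigned maxlen)
	{
		unsigned sz = 0;	/* consumed by DMEMIT() */

		switch (type) {
		case STATUSTYPE_IMA:
			/* IMA reporting starts with the target name/version */
			DMEMIT_TARGET_NAME_VERSION(ti->type);
			DMEMIT(",dev_state=%d;", 1);
			break;
		default:
			break;
		}
	}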

/*
* Definitions of return values from target end_io function.
*/

@@ -424,6 +424,7 @@ struct dev_links_info {
* @dma_pools: Dma pools (if dma'ble device).
* @dma_mem: Internal for coherent mem override.
* @cma_area: Contiguous memory area for dma allocations
* @dma_io_tlb_mem: Pointer to the swiotlb pool used. Not for driver use.
* @archdata: For arch-specific additions.
* @of_node: Associated device tree node.
* @fwnode: Associated device node supplied by platform firmware.
@@ -533,6 +534,9 @@ struct device {
#ifdef CONFIG_DMA_CMA
struct cma *cma_area; /* contiguous memory area for dma
allocations */
#endif
#ifdef CONFIG_SWIOTLB
struct io_tlb_mem *dma_io_tlb_mem;
#endif
/* arch specific additions */
struct dev_archdata archdata;

@@ -91,7 +91,7 @@ struct bus_type {
int (*uevent)(struct device *dev, struct kobj_uevent_env *env);
int (*probe)(struct device *dev);
void (*sync_state)(struct device *dev);
int (*remove)(struct device *dev);
void (*remove)(struct device *dev);
void (*shutdown)(struct device *dev);

int (*online)(struct device *dev);

@@ -38,6 +38,7 @@ struct dfl_device {
int id;
u16 type;
u16 feature_id;
u8 revision;
struct resource mmio_res;
int *irqs;
unsigned int num_irqs;

@@ -54,7 +54,7 @@ struct dma_buf_ops {
* device), and otherwise need to fail the attach operation.
*
* The exporter should also in general check whether the current
* allocation fullfills the DMA constraints of the new device. If this
* allocation fulfills the DMA constraints of the new device. If this
* is not the case, and the allocation cannot be moved, it should also
* fail the attach operation.
*
@@ -96,6 +96,12 @@ struct dma_buf_ops {
* This is called automatically for non-dynamic importers from
* dma_buf_attach().
*
* Note that similar to non-dynamic exporters in their @map_dma_buf
* callback the driver must guarantee that the memory is available for
* use and cleared of any old data by the time this function returns.
* Drivers which pipeline their buffer moves internally must wait for
* all moves and clears to complete.
*
* Returns:
*
* 0 on success, negative error code on failure.
@@ -144,9 +150,18 @@ struct dma_buf_ops {
* This is always called with the dmabuf->resv object locked when
* the dynamic_mapping flag is true.
*
* Note that for non-dynamic exporters the driver must guarantee
* that the memory is available for use and cleared of any old data by
* the time this function returns. Drivers which pipeline their buffer
* moves internally must wait for all moves and clears to complete.
* Dynamic exporters do not need to follow this rule: For non-dynamic
* importers the buffer is already pinned through @pin, which has the
* same requirements. Dynamic importers otoh are required to obey the
* dma_resv fences.
*
* Returns:
*
* A &sg_table scatter list of or the backing storage of the DMA buffer,
* A &sg_table scatter list of the backing storage of the DMA buffer,
* already mapped into the device address space of the &device attached
* with the provided &dma_buf_attachment. The addresses and lengths in
* the scatter list are PAGE_SIZE aligned.
@@ -168,7 +183,7 @@ struct dma_buf_ops {
*
* This is called by dma_buf_unmap_attachment() and should unmap and
* release the &sg_table allocated in @map_dma_buf, and it is mandatory.
* For static dma_buf handling this might also unpins the backing
* For static dma_buf handling this might also unpin the backing
* storage if this is the last mapping of the DMA buffer.
*/
void (*unmap_dma_buf)(struct dma_buf_attachment *,
@@ -237,7 +252,7 @@ struct dma_buf_ops {
* This callback is used by the dma_buf_mmap() function
*
* Note that the mapping needs to be incoherent, userspace is expected
* to braket CPU access using the DMA_BUF_IOCTL_SYNC interface.
* to bracket CPU access using the DMA_BUF_IOCTL_SYNC interface.
*
* Because dma-buf buffers have invariant size over their lifetime, the
* dma-buf core checks whether a vma is too large and rejects such
@@ -274,27 +289,6 @@ struct dma_buf_ops {

/**
* struct dma_buf - shared buffer object
* @size: size of the buffer; invariant over the lifetime of the buffer.
* @file: file pointer used for sharing buffers across, and for refcounting.
* @attachments: list of dma_buf_attachment that denotes all devices attached,
* protected by dma_resv lock.
* @ops: dma_buf_ops associated with this buffer object.
* @lock: used internally to serialize list manipulation, attach/detach and
* vmap/unmap
* @vmapping_counter: used internally to refcnt the vmaps
* @vmap_ptr: the current vmap ptr if vmapping_counter > 0
* @exp_name: name of the exporter; useful for debugging.
* @name: userspace-provided name; useful for accounting and debugging,
* protected by @resv.
* @name_lock: spinlock to protect name access
* @owner: pointer to exporter module; used for refcounting when exporter is a
* kernel module.
* @list_node: node for dma_buf accounting and debugging.
* @priv: exporter specific private data for this buffer object.
* @resv: reservation object linked to this dma-buf
* @poll: for userspace poll support
* @cb_excl: for userspace poll support
* @cb_shared: for userspace poll support
*
* This represents a shared buffer, created by calling dma_buf_export(). The
* userspace representation is a normal file descriptor, which can be created by
@@ -306,30 +300,152 @@ struct dma_buf_ops {
* Device DMA access is handled by the separate &struct dma_buf_attachment.
*/
struct dma_buf {
/**
* @size:
*
* Size of the buffer; invariant over the lifetime of the buffer.
*/
size_t size;

/**
* @file:
*
* File pointer used for sharing buffers across, and for refcounting.
* See dma_buf_get() and dma_buf_put().
*/
struct file *file;

/**
* @attachments:
*
* List of dma_buf_attachment that denotes all devices attached,
* protected by &dma_resv lock @resv.
*/
struct list_head attachments;

/** @ops: dma_buf_ops associated with this buffer object. */
const struct dma_buf_ops *ops;

/**
* @lock:
*
* Used internally to serialize list manipulation, attach/detach and
* vmap/unmap. Note that in many cases this is superseded by
* dma_resv_lock() on @resv.
*/
struct mutex lock;

/**
* @vmapping_counter:
*
* Used internally to refcnt the vmaps returned by dma_buf_vmap().
* Protected by @lock.
*/
unsigned vmapping_counter;

/**
* @vmap_ptr:
* The current vmap ptr if @vmapping_counter > 0. Protected by @lock.
*/
struct dma_buf_map vmap_ptr;

/**
* @exp_name:
*
* Name of the exporter; useful for debugging. See the
* DMA_BUF_SET_NAME IOCTL.
*/
const char *exp_name;

/**
* @name:
*
* Userspace-provided name; useful for accounting and debugging,
* protected by dma_resv_lock() on @resv and @name_lock for read access.
*/
const char *name;

/** @name_lock: Spinlock to protect name access for read access. */
spinlock_t name_lock;

/**
* @owner:
*
* Pointer to exporter module; used for refcounting when exporter is a
* kernel module.
*/
struct module *owner;

/** @list_node: node for dma_buf accounting and debugging. */
struct list_head list_node;

/** @priv: exporter specific private data for this buffer object. */
void *priv;

/**
* @resv:
*
* Reservation object linked to this dma-buf.
*
* IMPLICIT SYNCHRONIZATION RULES:
*
* Drivers which support implicit synchronization of buffer access as
* e.g. exposed in `Implicit Fence Poll Support`_ must follow the
* below rules.
*
* - Drivers must add a shared fence through dma_resv_add_shared_fence()
* for anything the userspace API considers a read access. This highly
* depends upon the API and window system.
*
* - Similarly drivers must set the exclusive fence through
* dma_resv_add_excl_fence() for anything the userspace API considers
* write access.
*
* - Drivers may just always set the exclusive fence, since that only
* causes unnecessary synchronization, but no correctness issues.
*
* - Some drivers only expose a synchronous userspace API with no
* pipelining across drivers. These do not set any fences for their
* access. An example here is v4l.
*
* DYNAMIC IMPORTER RULES:
*
* Dynamic importers, see dma_buf_attachment_is_dynamic(), have
* additional constraints on how they set up fences:
*
* - Dynamic importers must obey the exclusive fence and wait for it to
* signal before allowing access to the buffer's underlying storage
* through the device.
*
* - Dynamic importers should set fences for any access that they can't
* disable immediately from their &dma_buf_attach_ops.move_notify
* callback.
*/
struct dma_resv *resv;

/* poll support */
/** @poll: for userspace poll support */
wait_queue_head_t poll;

/** @cb_excl: for userspace poll support */
/** @cb_shared: for userspace poll support */
struct dma_buf_poll_cb_t {
struct dma_fence_cb cb;
wait_queue_head_t *poll;

__poll_t active;
} cb_excl, cb_shared;
#ifdef CONFIG_DMABUF_SYSFS_STATS
/**
* @sysfs_entry:
*
* For exposing information about this buffer in sysfs. See also
* `DMA-BUF statistics`_ for the uapi this enables.
*/
struct dma_buf_sysfs_entry {
struct kobject kobj;
struct dma_buf *dmabuf;
} *sysfs_entry;
#endif
};

/**
@@ -464,7 +580,7 @@ static inline bool dma_buf_is_dynamic(struct dma_buf *dmabuf)

/**
* dma_buf_attachment_is_dynamic - check if a DMA-buf attachment uses dynamic
* mappinsg
* mappings
* @attach: the DMA-buf attachment to check
*
* Returns true if a DMA-buf importer wants to call the map/unmap functions with

@@ -12,25 +12,41 @@

#include <linux/dma-fence.h>
#include <linux/irq_work.h>
#include <linux/slab.h>

/**
* struct dma_fence_chain - fence to represent a node of a fence chain
* @base: fence base class
* @lock: spinlock for fence handling
* @prev: previous fence of the chain
* @prev_seqno: original previous seqno before garbage collection
* @fence: encapsulated fence
* @cb: callback structure for signaling
* @work: irq work item for signaling
* @lock: spinlock for fence handling
*/
struct dma_fence_chain {
struct dma_fence base;
spinlock_t lock;
struct dma_fence __rcu *prev;
u64 prev_seqno;
struct dma_fence *fence;
struct dma_fence_cb cb;
struct irq_work work;
union {
/**
* @cb: callback for signaling
*
* This is used to add the callback for signaling the
* completion of the fence chain. Never used at the same time
* as the irq work.
*/
struct dma_fence_cb cb;

/**
* @work: irq work item for signaling
*
* Irq work structure to allow us to add the callback without
* running into lock inversion. Never used at the same time as
* the callback.
*/
struct irq_work work;
};
spinlock_t lock;
};

extern const struct dma_fence_ops dma_fence_chain_ops;
@@ -51,6 +67,30 @@ to_dma_fence_chain(struct dma_fence *fence)
return container_of(fence, struct dma_fence_chain, base);
}

/**
* dma_fence_chain_alloc
*
* Returns a new struct dma_fence_chain object or NULL on failure.
*/
static inline struct dma_fence_chain *dma_fence_chain_alloc(void)
{
return kmalloc(sizeof(struct dma_fence_chain), GFP_KERNEL);
};

/**
* dma_fence_chain_free
* @chain: chain node to free
*
* Frees up an allocated but not used struct dma_fence_chain object. This
* doesn't need an RCU grace period since the fence was never initialized nor
* published. After dma_fence_chain_init() has been called the fence must be
* released by calling dma_fence_put(), and not through this function.
*/
static inline void dma_fence_chain_free(struct dma_fence_chain *chain)
{
kfree(chain);
};
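
A minimal sketch of the alloc/init/free discipline described above; the mydrv name and the surrounding logic are hypothetical:

	static int mydrv_queue_fence(struct dma_fence *prev,
				     struct dma_fence *fence,
				     u64 seqno, bool publish)
	{
		struct dma_fence_chain *chain;

		chain = dma_fence_chain_alloc();
		if (!chain)
			return -ENOMEM;

		if (!publish) {
			/* never initialized: no RCU grace period required */
			dma_fence_chain_free(chain);
			return 0;
		}

		dma_fence_chain_init(chain, prev, fence, seqno);
		/* from here on, drop references via dma_fence_put(&chain->base) */
		return 0;
	}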

/**
* dma_fence_chain_for_each - iterate over all fences in chain
* @iter: current fence

@@ -20,6 +20,7 @@ void iommu_put_dma_cookie(struct iommu_domain *domain);

/* Setup call for arch DMA mapping code */
void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 dma_limit);
int iommu_dma_init_fq(struct iommu_domain *domain);

/* The DMA API isn't _quite_ the whole story, though... */
/*
@@ -54,6 +55,11 @@ static inline void iommu_setup_dma_ops(struct device *dev, u64 dma_base,
{
}

static inline int iommu_dma_init_fq(struct iommu_domain *domain)
{
return -EINVAL;
}

static inline int iommu_get_dma_cookie(struct iommu_domain *domain)
{
return -ENODEV;

@@ -41,8 +41,9 @@ struct dma_map_ops {
size_t size, enum dma_data_direction dir,
unsigned long attrs);
/*
* map_sg returns 0 on error and a value > 0 on success.
* It should never return a value < 0.
* map_sg should return a negative error code on error. See
* dma_map_sgtable() for a list of appropriate error codes
* and their meanings.
*/
int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction dir, unsigned long attrs);
@@ -170,13 +171,6 @@ int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);
int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, size_t size, int *ret);

void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size,
dma_addr_t *dma_handle);
int dma_release_from_global_coherent(int order, void *vaddr);
int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
size_t size, int *ret);

#else
static inline int dma_declare_coherent_memory(struct device *dev,
phys_addr_t phys_addr, dma_addr_t device_addr, size_t size)
@@ -186,7 +180,16 @@ static inline int dma_declare_coherent_memory(struct device *dev,
#define dma_alloc_from_dev_coherent(dev, size, handle, ret) (0)
#define dma_release_from_dev_coherent(dev, order, vaddr) (0)
#define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0)
#endif /* CONFIG_DMA_DECLARE_COHERENT */

#ifdef CONFIG_DMA_GLOBAL_POOL
void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size,
dma_addr_t *dma_handle);
int dma_release_from_global_coherent(int order, void *vaddr);
int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
size_t size, int *ret);
int dma_init_global_coherent(phys_addr_t phys_addr, size_t size);
#else
static inline void *dma_alloc_from_global_coherent(struct device *dev,
ssize_t size, dma_addr_t *dma_handle)
{
@@ -201,7 +204,7 @@ static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma,
{
return 0;
}
#endif /* CONFIG_DMA_DECLARE_COHERENT */
#endif /* CONFIG_DMA_GLOBAL_POOL */

/*
* This is the actual return value from the ->alloc_noncontiguous method.

@@ -105,11 +105,13 @@ dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
unsigned long attrs);
void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
enum dma_data_direction dir, unsigned long attrs);
int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction dir, unsigned long attrs);
unsigned int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir, unsigned long attrs);
void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir,
unsigned long attrs);
int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
enum dma_data_direction dir, unsigned long attrs);
dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
size_t size, enum dma_data_direction dir, unsigned long attrs);
void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
@@ -164,8 +166,9 @@ static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir, unsigned long attrs)
static inline unsigned int dma_map_sg_attrs(struct device *dev,
struct scatterlist *sg, int nents, enum dma_data_direction dir,
unsigned long attrs)
{
return 0;
}
@@ -174,6 +177,11 @@ static inline void dma_unmap_sg_attrs(struct device *dev,
unsigned long attrs)
{
}
static inline int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
enum dma_data_direction dir, unsigned long attrs)
{
return -EOPNOTSUPP;
}
static inline dma_addr_t dma_map_resource(struct device *dev,
phys_addr_t phys_addr, size_t size, enum dma_data_direction dir,
unsigned long attrs)
@@ -343,34 +351,6 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
return dma_sync_single_for_device(dev, addr + offset, size, dir);
}

/**
* dma_map_sgtable - Map the given buffer for DMA
* @dev: The device for which to perform the DMA operation
* @sgt: The sg_table object describing the buffer
* @dir: DMA direction
* @attrs: Optional DMA attributes for the map operation
*
* Maps a buffer described by a scatterlist stored in the given sg_table
* object for the @dir DMA operation by the @dev device. After success the
* ownership for the buffer is transferred to the DMA domain. One has to
* call dma_sync_sgtable_for_cpu() or dma_unmap_sgtable() to move the
* ownership of the buffer back to the CPU domain before touching the
* buffer by the CPU.
*
* Returns 0 on success or -EINVAL on error during mapping the buffer.
*/
static inline int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
enum dma_data_direction dir, unsigned long attrs)
{
int nents;

nents = dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
if (nents <= 0)
return -EINVAL;
sgt->nents = nents;
return 0;
}
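
Typical driver-side usage of the helper above, as a minimal sketch (mydrv_map_buffer is hypothetical):

	static int mydrv_map_buffer(struct device *dev, struct sg_table *sgt)
	{
		int ret;

		ret = dma_map_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
		if (ret)
			return ret;	/* negative errno */

		/* ... program the device using sgt->sgl / sgt->nents ... */

		dma_unmap_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
		return 0;
	}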

/**
* dma_unmap_sgtable - Unmap the given buffer for DMA
* @dev: The device for which to perform the DMA operation

@@ -380,6 +380,7 @@ enum dma_slave_buswidth {
DMA_SLAVE_BUSWIDTH_16_BYTES = 16,
DMA_SLAVE_BUSWIDTH_32_BYTES = 32,
DMA_SLAVE_BUSWIDTH_64_BYTES = 64,
DMA_SLAVE_BUSWIDTH_128_BYTES = 128,
};

/**
@@ -398,7 +399,7 @@ enum dma_slave_buswidth {
* @src_addr_width: this is the width in bytes of the source (RX)
* register where DMA data shall be read. If the source
* is memory this may be ignored depending on architecture.
* Legal values: 1, 2, 3, 4, 8, 16, 32, 64.
* Legal values: 1, 2, 3, 4, 8, 16, 32, 64, 128.
* @dst_addr_width: same as src_addr_width but for destination
* target (TX) mutatis mutandis.
* @src_maxburst: the maximum number of words (note: words, as in

@@ -11,60 +11,48 @@
struct dsa_switch;
struct sk_buff;
struct net_device;
struct packet_type;
struct dsa_8021q_context;

struct dsa_8021q_crosschip_link {
struct dsa_tag_8021q_vlan {
struct list_head list;
int port;
struct dsa_8021q_context *other_ctx;
int other_port;
u16 vid;
refcount_t refcount;
};

struct dsa_8021q_ops {
int (*vlan_add)(struct dsa_switch *ds, int port, u16 vid, u16 flags);
int (*vlan_del)(struct dsa_switch *ds, int port, u16 vid);
};

struct dsa_8021q_context {
const struct dsa_8021q_ops *ops;
struct dsa_switch *ds;
struct list_head crosschip_links;
struct list_head vlans;
/* EtherType of RX VID, used for filtering on master interface */
__be16 proto;
};

#define DSA_8021Q_N_SUBVLAN 8
int dsa_tag_8021q_register(struct dsa_switch *ds, __be16 proto);

int dsa_8021q_setup(struct dsa_8021q_context *ctx, bool enabled);

int dsa_8021q_crosschip_bridge_join(struct dsa_8021q_context *ctx, int port,
struct dsa_8021q_context *other_ctx,
int other_port);

int dsa_8021q_crosschip_bridge_leave(struct dsa_8021q_context *ctx, int port,
struct dsa_8021q_context *other_ctx,
int other_port);
void dsa_tag_8021q_unregister(struct dsa_switch *ds);

struct sk_buff *dsa_8021q_xmit(struct sk_buff *skb, struct net_device *netdev,
u16 tpid, u16 tci);

void dsa_8021q_rcv(struct sk_buff *skb, int *source_port, int *switch_id,
int *subvlan);
void dsa_8021q_rcv(struct sk_buff *skb, int *source_port, int *switch_id);

int dsa_tag_8021q_bridge_tx_fwd_offload(struct dsa_switch *ds, int port,
struct net_device *br,
int bridge_num);

void dsa_tag_8021q_bridge_tx_fwd_unoffload(struct dsa_switch *ds, int port,
struct net_device *br,
int bridge_num);

u16 dsa_8021q_bridge_tx_fwd_offload_vid(int bridge_num);

u16 dsa_8021q_tx_vid(struct dsa_switch *ds, int port);

u16 dsa_8021q_rx_vid(struct dsa_switch *ds, int port);

u16 dsa_8021q_rx_vid_subvlan(struct dsa_switch *ds, int port, u16 subvlan);

int dsa_8021q_rx_switch_id(u16 vid);

int dsa_8021q_rx_source_port(u16 vid);

u16 dsa_8021q_rx_subvlan(u16 vid);

bool vid_is_dsa_8021q_rxvlan(u16 vid);

bool vid_is_dsa_8021q_txvlan(u16 vid);

13
include/linux/dsa/mv88e6xxx.h
Normal file
@@ -0,0 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0
* Copyright 2021 NXP
*/

#ifndef _NET_DSA_TAG_MV88E6XXX_H
#define _NET_DSA_TAG_MV88E6XXX_H

#include <linux/if_vlan.h>

#define MV88E6XXX_VID_STANDALONE 0
#define MV88E6XXX_VID_BRIDGED (VLAN_N_VID - 1)

#endif

@@ -1,11 +1,32 @@
/* SPDX-License-Identifier: GPL-2.0
* Copyright 2019-2021 NXP Semiconductors
* Copyright 2019-2021 NXP
*/

#ifndef _NET_DSA_TAG_OCELOT_H
#define _NET_DSA_TAG_OCELOT_H

#include <linux/kthread.h>
#include <linux/packing.h>
#include <linux/skbuff.h>

struct ocelot_skb_cb {
struct sk_buff *clone;
unsigned int ptp_class; /* valid only for clones */
u8 ptp_cmd;
u8 ts_id;
};

#define OCELOT_SKB_CB(skb) \
((struct ocelot_skb_cb *)((skb)->cb))

#define IFH_TAG_TYPE_C 0
#define IFH_TAG_TYPE_S 1

#define IFH_REW_OP_NOOP 0x0
#define IFH_REW_OP_DSCP 0x1
#define IFH_REW_OP_ONE_STEP_PTP 0x2
#define IFH_REW_OP_TWO_STEP_PTP 0x3
#define IFH_REW_OP_ORIGIN_PTP 0x5

#define OCELOT_TAG_LEN 16
#define OCELOT_SHORT_PREFIX_LEN 4
@@ -140,6 +161,17 @@
* +------+------+------+------+------+------+------+------+
*/

struct felix_deferred_xmit_work {
struct dsa_port *dp;
struct sk_buff *skb;
struct kthread_work work;
};

struct felix_port {
void (*xmit_work_fn)(struct kthread_work *work);
struct kthread_worker *xmit_worker;
};

static inline void ocelot_xfh_get_rew_val(void *extraction, u64 *rew_val)
{
packing(extraction, rew_val, 116, 85, OCELOT_TAG_LEN, UNPACK, 0);
@@ -215,4 +247,21 @@ static inline void ocelot_ifh_set_vid(void *injection, u64 vid)
packing(injection, &vid, 11, 0, OCELOT_TAG_LEN, PACK, 0);
}

/* Determine the PTP REW_OP to use for injecting the given skb */
static inline u32 ocelot_ptp_rew_op(struct sk_buff *skb)
{
struct sk_buff *clone = OCELOT_SKB_CB(skb)->clone;
u8 ptp_cmd = OCELOT_SKB_CB(skb)->ptp_cmd;
u32 rew_op = 0;

if (ptp_cmd == IFH_REW_OP_TWO_STEP_PTP && clone) {
rew_op = ptp_cmd;
rew_op |= OCELOT_SKB_CB(clone)->ts_id << 3;
} else if (ptp_cmd == IFH_REW_OP_ORIGIN_PTP) {
rew_op = ptp_cmd;
}

return rew_op;
}

#endif
|
||||
|
||||
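Editor's note: for orientation only, a minimal sketch (not part of the diff) of how a tagger might consume ocelot_ptp_rew_op() when injecting a frame. The surrounding function and the ocelot_ifh_set_rew_op() setter are assumptions for illustration; only ocelot_ptp_rew_op() comes from the hunk above.

/* Hypothetical injection path: pick the PTP rewrite op for this skb
 * and, if one is needed, pack it into the injection frame header.
 */
static void example_inject_with_ptp(struct sk_buff *skb, void *injection)
{
	u32 rew_op = ocelot_ptp_rew_op(skb);	/* 0 means no PTP rewrite */

	if (rew_op)
		ocelot_ifh_set_rew_op(injection, rew_op);	/* assumed helper */
}
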
@@ -16,6 +16,8 @@
#define ETH_P_SJA1105_META 0x0008
#define ETH_P_SJA1110 0xdadc

#define SJA1105_DEFAULT_VLAN (VLAN_N_VID - 1)

/* IEEE 802.3 Annex 57A: Slow Protocols PDUs (01:80:C2:xx:xx:xx) */
#define SJA1105_LINKLOCAL_FILTER_A 0x0180C2000000ull
#define SJA1105_LINKLOCAL_FILTER_A_MASK 0xFFFFFF000000ull
@@ -46,6 +48,10 @@ struct sja1105_tagger_data {
spinlock_t meta_lock;
unsigned long state;
u8 ts_id;
/* Used on SJA1110 where meta frames are generated only for
 * 2-step TX timestamps
 */
struct sk_buff_head skb_txtstamp_queue;
};

struct sja1105_skb_cb {
@@ -59,34 +65,32 @@ struct sja1105_skb_cb {
((struct sja1105_skb_cb *)((skb)->cb))

struct sja1105_port {
u16 subvlan_map[DSA_8021Q_N_SUBVLAN];
struct kthread_worker *xmit_worker;
struct kthread_work xmit_work;
struct sk_buff_head xmit_queue;
struct sja1105_tagger_data *data;
struct dsa_port *dp;
bool hwts_tx_en;
u16 xmit_tpid;
};

enum sja1110_meta_tstamp {
SJA1110_META_TSTAMP_TX = 0,
SJA1110_META_TSTAMP_RX = 1,
};
/* Timestamps are in units of 8 ns clock ticks (equivalent to
 * a fixed 125 MHz clock).
 */
#define SJA1105_TICK_NS 8

#if IS_ENABLED(CONFIG_NET_DSA_SJA1105_PTP)

void sja1110_process_meta_tstamp(struct dsa_switch *ds, int port, u8 ts_id,
enum sja1110_meta_tstamp dir, u64 tstamp);

#else

static inline void sja1110_process_meta_tstamp(struct dsa_switch *ds, int port,
u8 ts_id, enum sja1110_meta_tstamp dir,
u64 tstamp)
static inline s64 ns_to_sja1105_ticks(s64 ns)
{
return ns / SJA1105_TICK_NS;
}

#endif /* IS_ENABLED(CONFIG_NET_DSA_SJA1105_PTP) */
static inline s64 sja1105_ticks_to_ns(s64 ticks)
{
return ticks * SJA1105_TICK_NS;
}

static inline bool dsa_port_is_sja1105(struct dsa_port *dp)
{
return true;
}

#endif /* _NET_DSA_SJA1105_H */

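Editor's note: a small worked example, not part of the diff, as a sanity check on the tick helpers above. With SJA1105_TICK_NS == 8 (a 125 MHz clock), one millisecond converts to 125000 ticks and back without loss: 1000000 / 8 = 125000 and 125000 * 8 = 1000000.

/* Round-trip through the two conversion helpers from the hunk above. */
static void example_tick_roundtrip(void)
{
	s64 ticks = ns_to_sja1105_ticks(1000000);	/* 125000 ticks */
	s64 ns = sja1105_ticks_to_ns(ticks);		/* back to 1000000 ns */

	(void)ns;
}
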
@@ -184,6 +184,7 @@ static inline char *mc_event_error_type(const unsigned int err_type)
 * @MEM_DDR5: Unbuffered DDR5 RAM
 * @MEM_NVDIMM: Non-volatile RAM
 * @MEM_WIO2: Wide I/O 2.
 * @MEM_HBM2: High bandwidth Memory Gen 2.
 */
enum mem_type {
MEM_EMPTY = 0,
@@ -212,6 +213,7 @@ enum mem_type {
MEM_DDR5,
MEM_NVDIMM,
MEM_WIO2,
MEM_HBM2,
};

#define MEM_FLAG_EMPTY BIT(MEM_EMPTY)
@@ -239,6 +241,7 @@ enum mem_type {
#define MEM_FLAG_DDR5 BIT(MEM_DDR5)
#define MEM_FLAG_NVDIMM BIT(MEM_NVDIMM)
#define MEM_FLAG_WIO2 BIT(MEM_WIO2)
#define MEM_FLAG_HBM2 BIT(MEM_HBM2)

/**
 * enum edac_type - Error Detection and Correction capabilities and mode

@@ -109,7 +109,7 @@ static inline int elf_core_copy_task_fpregs(struct task_struct *t, struct pt_reg
#endif
}

#if defined(CONFIG_UM) || defined(CONFIG_IA64)
#if (defined(CONFIG_UML) && defined(CONFIG_X86_32)) || defined(CONFIG_IA64)
/*
 * These functions parameterize elf_core_dump in fs/binfmt_elf.c to write out
 * extra segments containing the gate DSO contents. Dumping its

@@ -11,7 +11,7 @@
#include <linux/types.h>

/**
 * em_perf_state - Performance state of a performance domain
 * struct em_perf_state - Performance state of a performance domain
 * @frequency: The frequency in KHz, for consistency with CPUFreq
 * @power: The power consumed at this level (by 1 CPU or by a registered
 * device). It can be a total power: static and dynamic.
@@ -25,7 +25,7 @@ struct em_perf_state {
};

/**
 * em_perf_domain - Performance domain
 * struct em_perf_domain - Performance domain
 * @table: List of performance states, in ascending order
 * @nr_perf_states: Number of performance states
 * @milliwatts: Flag indicating the power values are in milli-Watts
@@ -103,12 +103,12 @@ void em_dev_unregister_perf_domain(struct device *dev);

/**
 * em_cpu_energy() - Estimates the energy consumed by the CPUs of a
performance domain
 * performance domain
 * @pd : performance domain for which energy has to be estimated
 * @max_util : highest utilization among CPUs of the domain
 * @sum_util : sum of the utilization of all CPUs in the domain
 * @allowed_cpu_cap : maximum allowed CPU capacity for the @pd, which
might reflect reduced frequency (due to thermal)
 * might reflect reduced frequency (due to thermal)
 *
 * This function must be used only for CPU devices. There is no validation,
 * i.e. if the EM is a CPU type and has cpumask allocated. It is called from

@@ -2,7 +2,11 @@
#ifndef __LINUX_ENTRYKVM_H
#define __LINUX_ENTRYKVM_H

#include <linux/entry-common.h>
#include <linux/static_call_types.h>
#include <linux/tracehook.h>
#include <linux/syscalls.h>
#include <linux/seccomp.h>
#include <linux/sched.h>
#include <linux/tick.h>

/* Transfer to guest mode work */

@@ -31,5 +31,6 @@
#define EJUKEBOX 528 /* Request initiated, but will not complete before timeout */
#define EIOCBQUEUED 529 /* iocb queued, will get completion event */
#define ERECALLCONFLICT 530 /* conflict with recalled state */
#define ENOGRACE 531 /* NFS file lock reclaim refused */

#endif

@@ -299,6 +299,18 @@ static inline void ether_addr_copy(u8 *dst, const u8 *src)
#endif
}

/**
 * eth_hw_addr_set - Assign Ethernet address to a net_device
 * @dev: pointer to net_device structure
 * @addr: address to assign
 *
 * Assign given address to the net_device, addr_assign_type is not changed.
 */
static inline void eth_hw_addr_set(struct net_device *dev, const u8 *addr)
{
__dev_addr_set(dev, addr, ETH_ALEN);
}

/**
 * eth_hw_addr_inherit - Copy dev_addr from another net_device
 * @dst: pointer to net_device to copy dev_addr to

@@ -15,10 +15,9 @@

#include <linux/bitmap.h>
#include <linux/compat.h>
#include <linux/netlink.h>
#include <uapi/linux/ethtool.h>

#ifdef CONFIG_COMPAT

struct compat_ethtool_rx_flow_spec {
u32 flow_type;
union ethtool_flow_union h_u;
@@ -38,8 +37,6 @@ struct compat_ethtool_rxnfc {
u32 rule_locs[];
};

#endif /* CONFIG_COMPAT */

#include <linux/rculist.h>

/**
@@ -176,6 +173,11 @@ extern int
__ethtool_get_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings *link_ksettings);

struct kernel_ethtool_coalesce {
u8 use_cqe_mode_tx;
u8 use_cqe_mode_rx;
};

/**
 * ethtool_intersect_link_masks - Given two link masks, AND them together
 * @dst: first mask and where result is stored
@@ -215,7 +217,9 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
#define ETHTOOL_COALESCE_TX_USECS_HIGH BIT(19)
#define ETHTOOL_COALESCE_TX_MAX_FRAMES_HIGH BIT(20)
#define ETHTOOL_COALESCE_RATE_SAMPLE_INTERVAL BIT(21)
#define ETHTOOL_COALESCE_ALL_PARAMS GENMASK(21, 0)
#define ETHTOOL_COALESCE_USE_CQE_RX BIT(22)
#define ETHTOOL_COALESCE_USE_CQE_TX BIT(23)
#define ETHTOOL_COALESCE_ALL_PARAMS GENMASK(23, 0)

#define ETHTOOL_COALESCE_USECS \
(ETHTOOL_COALESCE_RX_USECS | ETHTOOL_COALESCE_TX_USECS)
@@ -241,6 +245,8 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
ETHTOOL_COALESCE_RX_USECS_LOW | ETHTOOL_COALESCE_RX_USECS_HIGH | \
ETHTOOL_COALESCE_PKT_RATE_LOW | ETHTOOL_COALESCE_PKT_RATE_HIGH | \
ETHTOOL_COALESCE_RATE_SAMPLE_INTERVAL)
#define ETHTOOL_COALESCE_USE_CQE \
(ETHTOOL_COALESCE_USE_CQE_RX | ETHTOOL_COALESCE_USE_CQE_TX)

#define ETHTOOL_STAT_NOT_SET (~0ULL)

@@ -606,8 +612,14 @@ struct ethtool_ops {
struct ethtool_eeprom *, u8 *);
int (*set_eeprom)(struct net_device *,
struct ethtool_eeprom *, u8 *);
int (*get_coalesce)(struct net_device *, struct ethtool_coalesce *);
int (*set_coalesce)(struct net_device *, struct ethtool_coalesce *);
int (*get_coalesce)(struct net_device *,
struct ethtool_coalesce *,
struct kernel_ethtool_coalesce *,
struct netlink_ext_ack *);
int (*set_coalesce)(struct net_device *,
struct ethtool_coalesce *,
struct kernel_ethtool_coalesce *,
struct netlink_ext_ack *);
void (*get_ringparam)(struct net_device *,
struct ethtool_ringparam *);
int (*set_ringparam)(struct net_device *,

@@ -14,6 +14,7 @@
#include <linux/err.h>
#include <linux/percpu-defs.h>
#include <linux/percpu.h>
#include <linux/sched.h>

/*
 * CAREFUL: Check include/uapi/asm-generic/fcntl.h when defining
@@ -43,11 +44,9 @@ int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *w
__u64 *cnt);
void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt);

DECLARE_PER_CPU(int, eventfd_wake_count);

static inline bool eventfd_signal_count(void)
static inline bool eventfd_signal_allowed(void)
{
return this_cpu_read(eventfd_wake_count);
return !current->in_eventfd_signal;
}

#else /* CONFIG_EVENTFD */
@@ -78,9 +77,9 @@ static inline int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx,
return -ENOSYS;
}

static inline bool eventfd_signal_count(void)
static inline bool eventfd_signal_allowed(void)
{
return false;
return true;
}

static inline void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt)

@@ -68,4 +68,22 @@ static inline void eventpoll_release(struct file *file) {}

#endif

#if defined(CONFIG_ARM) && defined(CONFIG_OABI_COMPAT)
/* ARM OABI has an incompatible struct layout and needs a special handler */
extern struct epoll_event __user *
epoll_put_uevent(__poll_t revents, __u64 data,
struct epoll_event __user *uevent);
#else
static inline struct epoll_event __user *
epoll_put_uevent(__poll_t revents, __u64 data,
struct epoll_event __user *uevent)
{
if (__put_user(revents, &uevent->events) ||
__put_user(data, &uevent->data))
return NULL;

return uevent+1;
}
#endif

#endif /* #ifndef _LINUX_EVENTPOLL_H */

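Editor's note: a minimal sketch, not part of the diff, of the calling convention introduced above: epoll_put_uevent() returns the next user-space slot on success and NULL on fault. The wrapper function is hypothetical.

/* Copy one event to user space and report a fault as -EFAULT. */
static int example_send_event(struct epoll_event __user *uevent,
			      __poll_t revents, __u64 data)
{
	uevent = epoll_put_uevent(revents, data, uevent);
	if (!uevent)
		return -EFAULT;

	return 0;
}
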
@@ -221,6 +221,8 @@ struct export_operations {
#define EXPORT_OP_NOATOMIC_ATTR (0x10) /* Filesystem cannot supply
atomic attribute updates
*/
#define EXPORT_OP_SYNC_LOCKS (0x20) /* Filesystem can't do
asynchronous blocking locks */
unsigned long flags;
};

@@ -27,6 +27,8 @@ extern struct ctl_table fanotify_table[]; /* for sysctl */

#define FANOTIFY_FID_BITS (FAN_REPORT_FID | FAN_REPORT_DFID_NAME)

#define FANOTIFY_INFO_MODES (FANOTIFY_FID_BITS | FAN_REPORT_PIDFD)

/*
 * fanotify_init() flags that require CAP_SYS_ADMIN.
 * We do not allow unprivileged groups to request permission events.
@@ -35,6 +37,7 @@ extern struct ctl_table fanotify_table[]; /* for sysctl */
 */
#define FANOTIFY_ADMIN_INIT_FLAGS (FANOTIFY_PERM_CLASSES | \
FAN_REPORT_TID | \
FAN_REPORT_PIDFD | \
FAN_UNLIMITED_QUEUE | \
FAN_UNLIMITED_MARKS)

@@ -2,6 +2,7 @@
#ifndef _LINUX_FB_H
#define _LINUX_FB_H

#include <linux/refcount.h>
#include <linux/kgdb.h>
#include <uapi/linux/fb.h>

@@ -435,7 +436,7 @@ struct fb_tile_ops {

struct fb_info {
atomic_t count;
refcount_t count;
int node;
int flags;
/*

@@ -18,8 +18,4 @@ int fiemap_prep(struct inode *inode, struct fiemap_extent_info *fieinfo,
int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical,
u64 phys, u64 len, u32 flags);

int generic_block_fiemap(struct inode *inode,
struct fiemap_extent_info *fieinfo, u64 start, u64 len,
get_block_t *get_block);

#endif /* _LINUX_FIEMAP_H 1 */

@@ -94,6 +94,9 @@ extern void fd_install(unsigned int fd, struct file *file);

extern int __receive_fd(struct file *file, int __user *ufd,
unsigned int o_flags);

extern int receive_fd(struct file *file, unsigned int o_flags);

static inline int receive_fd_user(struct file *file, int __user *ufd,
unsigned int o_flags)
{
@@ -101,10 +104,6 @@ static inline int receive_fd_user(struct file *file, int __user *ufd,
return -EFAULT;
return __receive_fd(file, ufd, o_flags);
}
static inline int receive_fd(struct file *file, unsigned int o_flags)
{
return __receive_fd(file, NULL, o_flags);
}
int receive_fd_replace(int new_fd, struct file *file, unsigned int o_flags);

extern void flush_delayed_fput(void);

@@ -5,8 +5,6 @@
#ifndef __LINUX_FILTER_H__
#define __LINUX_FILTER_H__

#include <stdarg.h>

#include <linux/atomic.h>
#include <linux/refcount.h>
#include <linux/compat.h>
@@ -574,7 +572,8 @@ struct bpf_prog {
kprobe_override:1, /* Do we override a kprobe? */
has_callchain_buf:1, /* callchain buffer allocated? */
enforce_expected_attach_type:1, /* Enforce expected_attach_type checking at attach time */
call_get_stack:1; /* Do we call bpf_get_stack() or bpf_get_stackid() */
call_get_stack:1, /* Do we call bpf_get_stack() or bpf_get_stackid() */
call_get_func_ip:1; /* Do we call get_func_ip() */
enum bpf_prog_type type; /* Type of BPF program */
enum bpf_attach_type expected_attach_type; /* For some prog types */
u32 len; /* Number of filter blocks */
@@ -599,25 +598,38 @@ struct sk_filter {

DECLARE_STATIC_KEY_FALSE(bpf_stats_enabled_key);

#define __BPF_PROG_RUN(prog, ctx, dfunc) ({ \
u32 __ret; \
cant_migrate(); \
if (static_branch_unlikely(&bpf_stats_enabled_key)) { \
struct bpf_prog_stats *__stats; \
u64 __start = sched_clock(); \
__ret = dfunc(ctx, (prog)->insnsi, (prog)->bpf_func); \
__stats = this_cpu_ptr(prog->stats); \
u64_stats_update_begin(&__stats->syncp); \
__stats->cnt++; \
__stats->nsecs += sched_clock() - __start; \
u64_stats_update_end(&__stats->syncp); \
} else { \
__ret = dfunc(ctx, (prog)->insnsi, (prog)->bpf_func); \
} \
__ret; })
typedef unsigned int (*bpf_dispatcher_fn)(const void *ctx,
const struct bpf_insn *insnsi,
unsigned int (*bpf_func)(const void *,
const struct bpf_insn *));

#define BPF_PROG_RUN(prog, ctx) \
__BPF_PROG_RUN(prog, ctx, bpf_dispatcher_nop_func)
static __always_inline u32 __bpf_prog_run(const struct bpf_prog *prog,
const void *ctx,
bpf_dispatcher_fn dfunc)
{
u32 ret;

cant_migrate();
if (static_branch_unlikely(&bpf_stats_enabled_key)) {
struct bpf_prog_stats *stats;
u64 start = sched_clock();

ret = dfunc(ctx, prog->insnsi, prog->bpf_func);
stats = this_cpu_ptr(prog->stats);
u64_stats_update_begin(&stats->syncp);
stats->cnt++;
stats->nsecs += sched_clock() - start;
u64_stats_update_end(&stats->syncp);
} else {
ret = dfunc(ctx, prog->insnsi, prog->bpf_func);
}
return ret;
}

static __always_inline u32 bpf_prog_run(const struct bpf_prog *prog, const void *ctx)
{
return __bpf_prog_run(prog, ctx, bpf_dispatcher_nop_func);
}

/*
 * Use in preemptible and therefore migratable context to make sure that
@@ -636,7 +648,7 @@ static inline u32 bpf_prog_run_pin_on_cpu(const struct bpf_prog *prog,
u32 ret;

migrate_disable();
ret = __BPF_PROG_RUN(prog, ctx, bpf_dispatcher_nop_func);
ret = bpf_prog_run(prog, ctx);
migrate_enable();
return ret;
}
@@ -709,7 +721,7 @@ static inline void bpf_restore_data_end(
cb->data_end = saved_data_end;
}

static inline u8 *bpf_skb_cb(struct sk_buff *skb)
static inline u8 *bpf_skb_cb(const struct sk_buff *skb)
{
/* eBPF programs may read/write skb->cb[] area to transfer meta
 * data between tail calls. Since this also needs to work with
@@ -730,8 +742,9 @@ static inline u8 *bpf_skb_cb(struct sk_buff *skb)

/* Must be invoked with migration disabled */
static inline u32 __bpf_prog_run_save_cb(const struct bpf_prog *prog,
struct sk_buff *skb)
const void *ctx)
{
const struct sk_buff *skb = ctx;
u8 *cb_data = bpf_skb_cb(skb);
u8 cb_saved[BPF_SKB_CB_LEN];
u32 res;
@@ -741,7 +754,7 @@ static inline u32 __bpf_prog_run_save_cb(const struct bpf_prog *prog,
memset(cb_data, 0, sizeof(cb_saved));
}

res = BPF_PROG_RUN(prog, skb);
res = bpf_prog_run(prog, skb);

if (unlikely(prog->cb_access))
memcpy(cb_data, cb_saved, sizeof(cb_saved));
@@ -775,6 +788,10 @@ static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog,

DECLARE_BPF_DISPATCHER(xdp)

DECLARE_STATIC_KEY_FALSE(bpf_master_redirect_enabled_key);

u32 xdp_master_redirect(struct xdp_buff *xdp);

static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
struct xdp_buff *xdp)
{
@@ -782,7 +799,14 @@ static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
 * under local_bh_disable(), which provides the needed RCU protection
 * for accessing map entries.
 */
return __BPF_PROG_RUN(prog, xdp, BPF_DISPATCHER_FUNC(xdp));
u32 act = __bpf_prog_run(prog, xdp, BPF_DISPATCHER_FUNC(xdp));

if (static_branch_unlikely(&bpf_master_redirect_enabled_key)) {
if (act == XDP_TX && netif_is_bond_slave(xdp->rxq->dev))
act = xdp_master_redirect(xdp);
}

return act;
}

void bpf_prog_change_xdp(struct bpf_prog *prev_prog, struct bpf_prog *prog);
@@ -1027,6 +1051,7 @@ extern int bpf_jit_enable;
extern int bpf_jit_harden;
extern int bpf_jit_kallsyms;
extern long bpf_jit_limit;
extern long bpf_jit_limit_max;

typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);

@@ -1428,7 +1453,7 @@ static inline bool bpf_sk_lookup_run_v4(struct net *net, int protocol,
};
u32 act;

act = BPF_PROG_SK_LOOKUP_RUN_ARRAY(run_array, ctx, BPF_PROG_RUN);
act = BPF_PROG_SK_LOOKUP_RUN_ARRAY(run_array, ctx, bpf_prog_run);
if (act == SK_PASS) {
selected_sk = ctx.selected_sk;
no_reuseport = ctx.no_reuseport;
@@ -1466,7 +1491,7 @@ static inline bool bpf_sk_lookup_run_v6(struct net *net, int protocol,
};
u32 act;

act = BPF_PROG_SK_LOOKUP_RUN_ARRAY(run_array, ctx, BPF_PROG_RUN);
act = BPF_PROG_SK_LOOKUP_RUN_ARRAY(run_array, ctx, bpf_prog_run);
if (act == SK_PASS) {
selected_sk = ctx.selected_sk;
no_reuseport = ctx.no_reuseport;

@@ -52,6 +52,10 @@
#define ZYNQMP_PM_CAPABILITY_WAKEUP 0x4U
#define ZYNQMP_PM_CAPABILITY_UNUSABLE 0x8U

/* Loader commands */
#define PM_LOAD_PDI 0x701
#define PDI_SRC_DDR 0xF

/*
 * Firmware FPGA Manager flags
 * XILINX_ZYNQMP_PM_FPGA_FULL: FPGA full reconfiguration
@@ -411,6 +415,7 @@ int zynqmp_pm_pinctrl_get_config(const u32 pin, const u32 param,
u32 *value);
int zynqmp_pm_pinctrl_set_config(const u32 pin, const u32 param,
u32 value);
int zynqmp_pm_load_pdi(const u32 src, const u64 address);
#else
static inline int zynqmp_pm_get_api_version(u32 *version)
{
@@ -622,6 +627,11 @@ static inline int zynqmp_pm_pinctrl_set_config(const u32 pin, const u32 param,
{
return -ENODEV;
}

static inline int zynqmp_pm_load_pdi(const u32 src, const u64 address)
{
return -ENODEV;
}
#endif

#endif /* __FIRMWARE_ZYNQMP_H__ */

@@ -110,7 +110,7 @@ struct fpga_image_info {
 * @initial_header_size: Maximum number of bytes that should be passed into write_init
 * @state: returns an enum value of the FPGA's state
 * @status: returns status of the FPGA, including reconfiguration error code
 * @write_init: prepare the FPGA to receive confuration data
 * @write_init: prepare the FPGA to receive configuration data
 * @write: write count bytes of configuration data to the FPGA
 * @write_sg: write the scatter list of configuration data to the FPGA
 * @write_complete: set FPGA to operating state after writing is done

@@ -319,6 +319,8 @@ enum rw_hint {
/* iocb->ki_waitq is valid */
#define IOCB_WAITQ (1 << 19)
#define IOCB_NOIO (1 << 20)
/* can use bio alloc cache */
#define IOCB_ALLOC_CACHE (1 << 21)

struct kiocb {
struct file *ki_filp;
@@ -436,6 +438,10 @@ int pagecache_write_end(struct file *, struct address_space *mapping,
 * struct address_space - Contents of a cacheable, mappable object.
 * @host: Owner, either the inode or the block_device.
 * @i_pages: Cached pages.
 * @invalidate_lock: Guards coherency between page cache contents and
 * file offset->disk block mappings in the filesystem during invalidates.
 * It is also used to block modification of page cache contents through
 * memory mappings.
 * @gfp_mask: Memory allocation flags to use for allocating pages.
 * @i_mmap_writable: Number of VM_SHARED mappings.
 * @nr_thps: Number of THPs in the pagecache (non-shmem only).
@@ -453,6 +459,7 @@ int pagecache_write_end(struct file *, struct address_space *mapping,
struct address_space {
struct inode *host;
struct xarray i_pages;
struct rw_semaphore invalidate_lock;
gfp_t gfp_mask;
atomic_t i_mmap_writable;
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
@@ -581,6 +588,11 @@ static inline void mapping_allow_writable(struct address_space *mapping)

struct posix_acl;
#define ACL_NOT_CACHED ((void *)(-1))
/*
 * ACL_DONT_CACHE is for stacked filesystems, that rely on underlying fs to
 * cache the ACL. This also means that ->get_acl() can be called in RCU mode
 * with the LOOKUP_RCU flag.
 */
#define ACL_DONT_CACHE ((void *)(-3))

static inline struct posix_acl *
@@ -814,9 +826,42 @@ static inline void inode_lock_shared_nested(struct inode *inode, unsigned subcla
down_read_nested(&inode->i_rwsem, subclass);
}

static inline void filemap_invalidate_lock(struct address_space *mapping)
{
down_write(&mapping->invalidate_lock);
}

static inline void filemap_invalidate_unlock(struct address_space *mapping)
{
up_write(&mapping->invalidate_lock);
}

static inline void filemap_invalidate_lock_shared(struct address_space *mapping)
{
down_read(&mapping->invalidate_lock);
}

static inline int filemap_invalidate_trylock_shared(
struct address_space *mapping)
{
return down_read_trylock(&mapping->invalidate_lock);
}

static inline void filemap_invalidate_unlock_shared(
struct address_space *mapping)
{
up_read(&mapping->invalidate_lock);
}

void lock_two_nondirectories(struct inode *, struct inode*);
void unlock_two_nondirectories(struct inode *, struct inode*);

void filemap_invalidate_lock_two(struct address_space *mapping1,
struct address_space *mapping2);
void filemap_invalidate_unlock_two(struct address_space *mapping1,
struct address_space *mapping2);

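Editor's note: a minimal sketch, not part of the diff, of the locking pattern the new invalidate_lock helpers support. The invalidation body is a placeholder; only the lock/unlock helpers come from the hunk above.

/* Take invalidate_lock for write around a page cache invalidation so
 * that readers holding it shared see a coherent view of the mapping.
 */
static void example_invalidate(struct address_space *mapping)
{
	filemap_invalidate_lock(mapping);
	/* ... invalidate page cache / update block mappings here ... */
	filemap_invalidate_unlock(mapping);
}
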
/*
 * NOTE: in a 32bit arch with a preemptable kernel and
 * an UP compile the i_size_read/write must be atomic
@@ -997,6 +1042,7 @@ static inline struct file *get_file(struct file *f)
#define FL_UNLOCK_PENDING 512 /* Lease is being broken */
#define FL_OFDLCK 1024 /* lock is "owned" by struct file */
#define FL_LAYOUT 2048 /* outstanding pNFS layout */
#define FL_RECLAIM 4096 /* reclaiming from a reboot server */

#define FL_CLOSE_POSIX (FL_POSIX | FL_CLOSE)

@@ -1507,8 +1553,11 @@ struct super_block {
/* Number of inodes with nlink == 0 but still referenced */
atomic_long_t s_remove_count;

/* Pending fsnotify inode refs */
atomic_long_t s_fsnotify_inode_refs;
/*
 * Number of inode/mount/sb objects that are being watched, note that
 * inodes objects are currently double-accounted.
 */
atomic_long_t s_fsnotify_connectors;

/* Being remounted read-only */
int s_readonly_remount;
@@ -2065,7 +2114,7 @@ struct inode_operations {
struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
const char * (*get_link) (struct dentry *, struct inode *, struct delayed_call *);
int (*permission) (struct user_namespace *, struct inode *, int);
struct posix_acl * (*get_acl)(struct inode *, int);
struct posix_acl * (*get_acl)(struct inode *, int, bool);

int (*readlink) (struct dentry *, char __user *,int);

@@ -2457,7 +2506,6 @@ static inline void file_accessed(struct file *file)

extern int file_modified(struct file *file);

int sync_inode(struct inode *inode, struct writeback_control *wbc);
int sync_inode_metadata(struct inode *inode, int wait);

struct file_system_type {
@@ -2487,6 +2535,7 @@ struct file_system_type {

struct lock_class_key i_lock_key;
struct lock_class_key i_mutex_key;
struct lock_class_key invalidate_lock_key;
struct lock_class_key i_mutex_dir_key;
};

@@ -2570,90 +2619,6 @@ extern struct kobject *fs_kobj;

#define MAX_RW_COUNT (INT_MAX & PAGE_MASK)

#ifdef CONFIG_MANDATORY_FILE_LOCKING
extern int locks_mandatory_locked(struct file *);
extern int locks_mandatory_area(struct inode *, struct file *, loff_t, loff_t, unsigned char);

/*
 * Candidates for mandatory locking have the setgid bit set
 * but no group execute bit - an otherwise meaningless combination.
 */

static inline int __mandatory_lock(struct inode *ino)
{
return (ino->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID;
}

/*
 * ... and these candidates should be on SB_MANDLOCK mounted fs,
 * otherwise these will be advisory locks
 */

static inline int mandatory_lock(struct inode *ino)
{
return IS_MANDLOCK(ino) && __mandatory_lock(ino);
}

static inline int locks_verify_locked(struct file *file)
{
if (mandatory_lock(locks_inode(file)))
return locks_mandatory_locked(file);
return 0;
}

static inline int locks_verify_truncate(struct inode *inode,
struct file *f,
loff_t size)
{
if (!inode->i_flctx || !mandatory_lock(inode))
return 0;

if (size < inode->i_size) {
return locks_mandatory_area(inode, f, size, inode->i_size - 1,
F_WRLCK);
} else {
return locks_mandatory_area(inode, f, inode->i_size, size - 1,
F_WRLCK);
}
}

#else /* !CONFIG_MANDATORY_FILE_LOCKING */

static inline int locks_mandatory_locked(struct file *file)
{
return 0;
}

static inline int locks_mandatory_area(struct inode *inode, struct file *filp,
loff_t start, loff_t end, unsigned char type)
{
return 0;
}

static inline int __mandatory_lock(struct inode *inode)
{
return 0;
}

static inline int mandatory_lock(struct inode *inode)
{
return 0;
}

static inline int locks_verify_locked(struct file *file)
{
return 0;
}

static inline int locks_verify_truncate(struct inode *inode, struct file *filp,
size_t size)
{
return 0;
}

#endif /* CONFIG_MANDATORY_FILE_LOCKING */


#ifdef CONFIG_FILE_LOCKING
static inline int break_lease(struct inode *inode, unsigned int mode)
{
@@ -2786,6 +2751,7 @@ static inline struct file *file_clone_open(struct file *file)
extern int filp_close(struct file *, fl_owner_t id);

extern struct filename *getname_flags(const char __user *, int, int *);
extern struct filename *getname_uflags(const char __user *, int);
extern struct filename *getname(const char __user *);
extern struct filename *getname_kernel(const char *);
extern void putname(struct filename *name);
@@ -2891,6 +2857,8 @@ extern int filemap_fdatawrite_range(struct address_space *mapping,
loff_t start, loff_t end);
extern int filemap_check_errors(struct address_space *mapping);
extern void __filemap_set_wb_err(struct address_space *mapping, int err);
int filemap_fdatawrite_wbc(struct address_space *mapping,
struct writeback_control *wbc);

static inline int filemap_write_and_wait(struct address_space *mapping)
{
@@ -3055,15 +3023,20 @@ static inline void file_end_write(struct file *file)
}

/*
 * This is used for regular files where some users -- especially the
 * currently executed binary in a process, previously handled via
 * VM_DENYWRITE -- cannot handle concurrent write (and maybe mmap
 * read-write shared) accesses.
 *
 * get_write_access() gets write permission for a file.
 * put_write_access() releases this write permission.
 * This is used for regular files.
 * We cannot support write (and maybe mmap read-write shared) accesses and
 * MAP_DENYWRITE mmappings simultaneously. The i_writecount field of an inode
 * can have the following values:
 * 0: no writers, no VM_DENYWRITE mappings
 * < 0: (-i_writecount) vm_area_structs with VM_DENYWRITE set exist
 * > 0: (i_writecount) users are writing to the file.
 * deny_write_access() denies write access to a file.
 * allow_write_access() re-enables write access to a file.
 *
 * The i_writecount field of an inode can have the following values:
 * 0: no write access, no denied write access
 * < 0: (-i_writecount) users that denied write access to the file.
 * > 0: (i_writecount) users that have write access to the file.
 *
 * Normally we operate on that counter with atomic_{inc,dec} and it's safe
 * except for the cases where we don't hold i_writecount yet. Then we need to
@@ -3246,10 +3219,6 @@ ssize_t vfs_iocb_iter_read(struct file *file, struct kiocb *iocb,
ssize_t vfs_iocb_iter_write(struct file *file, struct kiocb *iocb,
struct iov_iter *iter);

/* fs/block_dev.c */
extern int blkdev_fsync(struct file *filp, loff_t start, loff_t end,
int datasync);

/* fs/splice.c */
extern ssize_t generic_file_splice_read(struct file *, loff_t *,
struct pipe_inode_info *, size_t, unsigned int);
@@ -3355,6 +3324,7 @@ extern int page_symlink(struct inode *inode, const char *symname, int len);
extern const struct inode_operations page_symlink_inode_operations;
extern void kfree_link(void *);
void generic_fillattr(struct user_namespace *, struct inode *, struct kstat *);
void generic_fill_statx_attr(struct inode *inode, struct kstat *stat);
extern int vfs_getattr_nosec(const struct path *, struct kstat *, u32, unsigned int);
extern int vfs_getattr(const struct path *, struct kstat *, u32, unsigned int);
void __inode_add_bytes(struct inode *inode, loff_t bytes);
@@ -3469,6 +3439,8 @@ extern int buffer_migrate_page_norefs(struct address_space *,
#define buffer_migrate_page_norefs NULL
#endif

int may_setattr(struct user_namespace *mnt_userns, struct inode *inode,
unsigned int ia_valid);
int setattr_prepare(struct user_namespace *, struct dentry *, struct iattr *);
extern int inode_newsize_ok(const struct inode *, loff_t offset);
void setattr_copy(struct user_namespace *, struct inode *inode,
@@ -3622,7 +3594,7 @@ int proc_nr_dentry(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos);
int proc_nr_inodes(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos);
int __init get_filesystem_list(char *buf);
int __init list_bdev_fs_names(char *buf, size_t size);

#define __FMODE_EXEC ((__force int) FMODE_EXEC)
#define __FMODE_NONOTIFY ((__force int) FMODE_NONOTIFY)

@@ -147,7 +147,6 @@ struct fscache_retrieval {
fscache_rw_complete_t end_io_func; /* function to call on I/O completion */
void *context; /* netfs read context (pinned) */
struct list_head to_do; /* list of things to be done by the backend */
unsigned long start_time; /* time at which retrieval started */
atomic_t n_pages; /* number of pages to be retrieved */
};

@@ -385,9 +384,6 @@ struct fscache_object {
struct list_head dependents; /* FIFO of dependent objects */
struct list_head dep_link; /* link in parent's dependents list */
struct list_head pending_ops; /* unstarted operations on this object */
#ifdef CONFIG_FSCACHE_OBJECT_LIST
struct rb_node objlist_link; /* link in global object list */
#endif
pgoff_t store_limit; /* current storage limit */
loff_t store_limit_l; /* current storage limit */
};

@@ -123,15 +123,17 @@ struct fscache_netfs {
 * - indices are created on disk just-in-time
 */
struct fscache_cookie {
atomic_t usage; /* number of users of this cookie */
refcount_t ref; /* number of users of this cookie */
atomic_t n_children; /* number of children of this cookie */
atomic_t n_active; /* number of active users of netfs ptrs */
unsigned int debug_id;
spinlock_t lock;
spinlock_t stores_lock; /* lock on page store tree */
struct hlist_head backing_objects; /* object(s) backing this file/index */
const struct fscache_cookie_def *def; /* definition */
struct fscache_cookie *parent; /* parent of this entry */
struct hlist_bl_node hash_link; /* Link in hash table */
struct list_head proc_link; /* Link in proc list */
void *netfs_data; /* back pointer to netfs */
struct radix_tree_root stores; /* pages to be stored on this cookie */
#define FSCACHE_COOKIE_PENDING_TAG 0 /* pages tag: pending write to cache */

@@ -47,27 +47,128 @@ struct fscrypt_name {
#define FSCRYPT_SET_CONTEXT_MAX_SIZE 40

#ifdef CONFIG_FS_ENCRYPTION

/*
 * fscrypt superblock flags
 * If set, the fscrypt bounce page pool won't be allocated (unless another
 * filesystem needs it). Set this if the filesystem always uses its own bounce
 * pages for writes and therefore won't need the fscrypt bounce page pool.
 */
#define FS_CFLG_OWN_PAGES (1U << 1)

/*
 * crypto operations for filesystems
 */
/* Crypto operations for filesystems */
struct fscrypt_operations {

/* Set of optional flags; see above for allowed flags */
unsigned int flags;

/*
 * If set, this is a filesystem-specific key description prefix that
 * will be accepted for "logon" keys for v1 fscrypt policies, in
 * addition to the generic prefix "fscrypt:". This functionality is
 * deprecated, so new filesystems shouldn't set this field.
 */
const char *key_prefix;

/*
 * Get the fscrypt context of the given inode.
 *
 * @inode: the inode whose context to get
 * @ctx: the buffer into which to get the context
 * @len: length of the @ctx buffer in bytes
 *
 * Return: On success, returns the length of the context in bytes; this
 * may be less than @len. On failure, returns -ENODATA if the
 * inode doesn't have a context, -ERANGE if the context is
 * longer than @len, or another -errno code.
 */
int (*get_context)(struct inode *inode, void *ctx, size_t len);

/*
 * Set an fscrypt context on the given inode.
 *
 * @inode: the inode whose context to set. The inode won't already have
 * an fscrypt context.
 * @ctx: the context to set
 * @len: length of @ctx in bytes (at most FSCRYPT_SET_CONTEXT_MAX_SIZE)
 * @fs_data: If called from fscrypt_set_context(), this will be the
 * value the filesystem passed to fscrypt_set_context().
 * Otherwise (i.e. when called from
 * FS_IOC_SET_ENCRYPTION_POLICY) this will be NULL.
 *
 * i_rwsem will be held for write.
 *
 * Return: 0 on success, -errno on failure.
 */
int (*set_context)(struct inode *inode, const void *ctx, size_t len,
void *fs_data);

/*
 * Get the dummy fscrypt policy in use on the filesystem (if any).
 *
 * Filesystems only need to implement this function if they support the
 * test_dummy_encryption mount option.
 *
 * Return: A pointer to the dummy fscrypt policy, if the filesystem is
 * mounted with test_dummy_encryption; otherwise NULL.
 */
const union fscrypt_policy *(*get_dummy_policy)(struct super_block *sb);

/*
 * Check whether a directory is empty. i_rwsem will be held for write.
 */
bool (*empty_dir)(struct inode *inode);

/* The filesystem's maximum ciphertext filename length, in bytes */
unsigned int max_namelen;

/*
 * Check whether the filesystem's inode numbers and UUID are stable,
 * meaning that they will never be changed even by offline operations
 * such as filesystem shrinking and therefore can be used in the
 * encryption without the possibility of files becoming unreadable.
 *
 * Filesystems only need to implement this function if they want to
 * support the FSCRYPT_POLICY_FLAG_IV_INO_LBLK_{32,64} flags. These
 * flags are designed to work around the limitations of UFS and eMMC
 * inline crypto hardware, and they shouldn't be used in scenarios where
 * such hardware isn't being used.
 *
 * Leaving this NULL is equivalent to always returning false.
 */
bool (*has_stable_inodes)(struct super_block *sb);

/*
 * Get the number of bits that the filesystem uses to represent inode
 * numbers and file logical block numbers.
 *
 * By default, both of these are assumed to be 64-bit. This function
 * can be implemented to declare that either or both of these numbers is
 * shorter, which may allow the use of the
 * FSCRYPT_POLICY_FLAG_IV_INO_LBLK_{32,64} flags and/or the use of
 * inline crypto hardware whose maximum DUN length is less than 64 bits
 * (e.g., eMMC v5.2 spec compliant hardware). This function only needs
 * to be implemented if support for one of these features is needed.
 */
void (*get_ino_and_lblk_bits)(struct super_block *sb,
int *ino_bits_ret, int *lblk_bits_ret);

/*
 * Return the number of block devices to which the filesystem may write
 * encrypted file contents.
 *
 * If the filesystem can use multiple block devices (other than block
 * devices that aren't used for encrypted file contents, such as
 * external journal devices), and wants to support inline encryption,
 * then it must implement this function. Otherwise it's not needed.
 */
int (*get_num_devices)(struct super_block *sb);

/*
 * If ->get_num_devices() returns a value greater than 1, then this
 * function is called to get the array of request_queues that the
 * filesystem is using -- one per block device. (There may be duplicate
 * entries in this array, as block devices can share a request_queue.)
 */
void (*get_devices)(struct super_block *sb,
struct request_queue **devs);
};

@@ -423,7 +423,8 @@ int __must_check fsl_mc_allocate_irqs(struct fsl_mc_device *mc_dev);

void fsl_mc_free_irqs(struct fsl_mc_device *mc_dev);

struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev);
struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev,
u16 if_id);

extern struct bus_type fsl_mc_bus_type;

@@ -30,6 +30,9 @@ static inline void fsnotify_name(struct inode *dir, __u32 mask,
struct inode *child,
const struct qstr *name, u32 cookie)
{
if (atomic_long_read(&dir->i_sb->s_fsnotify_connectors) == 0)
return;

fsnotify(mask, child, FSNOTIFY_EVENT_INODE, dir, name, NULL, cookie);
}

@@ -41,6 +44,9 @@ static inline void fsnotify_dirent(struct inode *dir, struct dentry *dentry,

static inline void fsnotify_inode(struct inode *inode, __u32 mask)
{
if (atomic_long_read(&inode->i_sb->s_fsnotify_connectors) == 0)
return;

if (S_ISDIR(inode->i_mode))
mask |= FS_ISDIR;

@@ -53,6 +59,9 @@ static inline int fsnotify_parent(struct dentry *dentry, __u32 mask,
{
struct inode *inode = d_inode(dentry);

if (atomic_long_read(&inode->i_sb->s_fsnotify_connectors) == 0)
return 0;

if (S_ISDIR(inode->i_mode)) {
mask |= FS_ISDIR;

@@ -643,6 +643,22 @@ static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
extern int ftrace_make_nop(struct module *mod,
struct dyn_ftrace *rec, unsigned long addr);

/**
 * ftrace_need_init_nop - return whether nop call sites should be initialized
 *
 * Normally the compiler's -mnop-mcount generates suitable nops, so we don't
 * need to call ftrace_init_nop() if the code is built with that flag.
 * Architectures where this is not always the case may define their own
 * condition.
 *
 * Return must be:
 * 0 if ftrace_init_nop() should be called
 * Nonzero if ftrace_init_nop() should not be called
 */

#ifndef ftrace_need_init_nop
#define ftrace_need_init_nop() (!__is_defined(CC_USING_NOP_MCOUNT))
#endif

/**
 * ftrace_init_nop - initialize a nop call site

@@ -22,10 +22,15 @@ struct device;
 * LINKS_ADDED: The fwnode has already be parsed to add fwnode links.
 * NOT_DEVICE: The fwnode will never be populated as a struct device.
 * INITIALIZED: The hardware corresponding to fwnode has been initialized.
 * NEEDS_CHILD_BOUND_ON_ADD: For this fwnode/device to probe successfully, its
 * driver needs its child devices to be bound with
 * their respective drivers as soon as they are
 * added.
 */
#define FWNODE_FLAG_LINKS_ADDED BIT(0)
#define FWNODE_FLAG_NOT_DEVICE BIT(1)
#define FWNODE_FLAG_INITIALIZED BIT(2)
#define FWNODE_FLAG_LINKS_ADDED BIT(0)
#define FWNODE_FLAG_NOT_DEVICE BIT(1)
#define FWNODE_FLAG_INITIALIZED BIT(2)
#define FWNODE_FLAG_NEEDS_CHILD_BOUND_ON_ADD BIT(3)

struct fwnode_handle {
struct fwnode_handle *secondary;

@@ -8,34 +8,11 @@
/* All generic netlink requests are serialized by a global lock. */
extern void genl_lock(void);
extern void genl_unlock(void);
#ifdef CONFIG_LOCKDEP
extern bool lockdep_genl_is_held(void);
#endif

/* for synchronisation between af_netlink and genetlink */
extern atomic_t genl_sk_destructing_cnt;
extern wait_queue_head_t genl_sk_destructing_waitq;

/**
 * rcu_dereference_genl - rcu_dereference with debug checking
 * @p: The pointer to read, prior to dereferencing
 *
 * Do an rcu_dereference(p), but check caller either holds rcu_read_lock()
 * or genl mutex. Note : Please prefer genl_dereference() or rcu_dereference()
 */
#define rcu_dereference_genl(p) \
rcu_dereference_check(p, lockdep_genl_is_held())

/**
 * genl_dereference - fetch RCU pointer when updates are prevented by genl mutex
 * @p: The pointer to read, prior to dereferencing
 *
 * Return the value of the specified RCU-protected pointer, but omit
 * the READ_ONCE(), because caller holds genl mutex.
 */
#define genl_dereference(p) \
rcu_dereference_protected(p, lockdep_genl_is_held())

#define MODULE_ALIAS_GENL_FAMILY(family)\
MODULE_ALIAS_NET_PF_PROTO_NAME(PF_NETLINK, NETLINK_GENERIC, "-family-" family)

@@ -60,9 +60,6 @@ struct partition_meta_info {
 * device.
 * Affects responses to the ``CDROM_GET_CAPABILITY`` ioctl.
 *
 * ``GENHD_FL_UP`` (0x0010): indicates that the block device is "up",
 * with a similar meaning to network interfaces.
 *
 * ``GENHD_FL_SUPPRESS_PARTITION_INFO`` (0x0020): don't include
 * partition information in ``/proc/partitions`` or in the output of
 * printk_all_partitions().
@@ -97,7 +94,6 @@ struct partition_meta_info {
/* 2 is unused (used to be GENHD_FL_DRIVERFS) */
/* 4 is unused (used to be GENHD_FL_MEDIA_CHANGE_NOTIFY) */
#define GENHD_FL_CD 0x0008
#define GENHD_FL_UP 0x0010
#define GENHD_FL_SUPPRESS_PARTITION_INFO 0x0020
#define GENHD_FL_EXT_DEVT 0x0040
#define GENHD_FL_NATIVE_CAPACITY 0x0080
@@ -153,13 +149,16 @@ struct gendisk {
unsigned long state;
#define GD_NEED_PART_SCAN 0
#define GD_READ_ONLY 1
#define GD_QUEUE_REF 2
#define GD_DEAD 2

struct mutex open_mutex; /* open/close mutex */
unsigned open_partitions; /* number of open partitions */

struct backing_dev_info *bdi;
struct kobject *slave_dir;

#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED
struct list_head slave_bdevs;
#endif
struct timer_rand_state *random;
atomic_t sync_io; /* RAID */
struct disk_events *ev;
@@ -172,8 +171,14 @@ struct gendisk {
int node_id;
struct badblocks *bb;
struct lockdep_map lockdep_map;
u64 diskseq;
};

static inline bool disk_live(struct gendisk *disk)
{
return !inode_unhashed(disk->part0->bd_inode);
}

/*
 * The gendisk is refcounted by the part0 block_device, and the bd_device
 * therein is also used for device model presentation in sysfs.
@@ -210,18 +215,12 @@ static inline dev_t disk_devt(struct gendisk *disk)
void disk_uevent(struct gendisk *disk, enum kobject_action action);

/* block/genhd.c */
extern void device_add_disk(struct device *parent, struct gendisk *disk,
const struct attribute_group **groups);
static inline void add_disk(struct gendisk *disk)
int device_add_disk(struct device *parent, struct gendisk *disk,
const struct attribute_group **groups);
static inline int add_disk(struct gendisk *disk)
{
device_add_disk(NULL, disk, NULL);
return device_add_disk(NULL, disk, NULL);
}
extern void device_add_disk_no_queue_reg(struct device *parent, struct gendisk *disk);
static inline void add_disk_no_queue_reg(struct gendisk *disk)
{
device_add_disk_no_queue_reg(NULL, disk);
}

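Editor's note: a minimal sketch, not part of the diff, of the consequence of device_add_disk() now returning int: callers are expected to check and propagate the error. The probe wrapper is hypothetical.

/* Register the disk and propagate any registration failure. */
static int example_register_disk(struct gendisk *disk)
{
	int ret = add_disk(disk);	/* may now fail, e.g. with -ENOMEM */

	if (ret)
		return ret;

	return 0;
}
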
extern void del_gendisk(struct gendisk *gp);

void set_disk_ro(struct gendisk *disk, bool read_only);
@@ -236,6 +235,7 @@ extern void disk_block_events(struct gendisk *disk);
extern void disk_unblock_events(struct gendisk *disk);
extern void disk_flush_events(struct gendisk *disk, unsigned int mask);
bool set_capacity_and_notify(struct gendisk *disk, sector_t size);
bool disk_force_media_change(struct gendisk *disk, unsigned int events);

/* drivers/char/random.c */
extern void add_disk_randomness(struct gendisk *disk) __latent_entropy;
@@ -259,26 +259,10 @@ static inline sector_t get_capacity(struct gendisk *disk)
int bdev_disk_changed(struct gendisk *disk, bool invalidate);
void blk_drop_partitions(struct gendisk *disk);

extern struct gendisk *__alloc_disk_node(int minors, int node_id);
struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id,
struct lock_class_key *lkclass);
extern void put_disk(struct gendisk *disk);

#define alloc_disk_node(minors, node_id) \
({ \
static struct lock_class_key __key; \
const char *__name; \
struct gendisk *__disk; \
\
__name = "(gendisk_completion)"#minors"("#node_id")"; \
\
__disk = __alloc_disk_node(minors, node_id); \
\
if (__disk) \
lockdep_init_map(&__disk->lockdep_map, __name, &__key, 0); \
\
__disk; \
})

#define alloc_disk(minors) alloc_disk_node(minors, NUMA_NO_NODE)
struct gendisk *__blk_alloc_disk(int node, struct lock_class_key *lkclass);

/**
 * blk_alloc_disk - allocate a gendisk structure
@@ -291,15 +275,10 @@ extern void put_disk(struct gendisk *disk);
 */
#define blk_alloc_disk(node_id) \
({ \
struct gendisk *__disk = __blk_alloc_disk(node_id); \
static struct lock_class_key __key; \
\
if (__disk) \
lockdep_init_map(&__disk->lockdep_map, \
"(bio completion)", &__key, 0); \
__disk; \
__blk_alloc_disk(node_id, &__key); \
})
struct gendisk *__blk_alloc_disk(int node);
void blk_cleanup_disk(struct gendisk *disk);

int __register_blkdev(unsigned int major, const char *name,
@@ -316,9 +295,10 @@ void set_capacity(struct gendisk *disk, sector_t size);
int blkdev_ioctl(struct block_device *, fmode_t, unsigned, unsigned long);
long compat_blkdev_ioctl(struct file *, unsigned, unsigned long);

#ifdef CONFIG_SYSFS
#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED
int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk);
void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk);
int bd_register_pending_holders(struct gendisk *disk);
#else
static inline int bd_link_disk_holder(struct block_device *bdev,
struct gendisk *disk)
@@ -329,9 +309,14 @@ static inline void bd_unlink_disk_holder(struct block_device *bdev,
struct gendisk *disk)
{
}
#endif /* CONFIG_SYSFS */
static inline int bd_register_pending_holders(struct gendisk *disk)
{
return 0;
}
#endif /* CONFIG_BLOCK_HOLDER_DEPRECATED */

dev_t part_devt(struct gendisk *disk, u8 partno);
void inc_diskseq(struct gendisk *disk);
dev_t blk_lookup_devt(const char *name, int partno);
void blk_request_module(dev_t devt);
#ifdef CONFIG_BLOCK

@@ -609,7 +609,7 @@ struct gpio_desc *devm_fwnode_get_gpiod_from_child(struct device *dev,
#if IS_ENABLED(CONFIG_GPIOLIB) && IS_ENABLED(CONFIG_OF_GPIO)
struct device_node;

struct gpio_desc *gpiod_get_from_of_node(struct device_node *node,
struct gpio_desc *gpiod_get_from_of_node(const struct device_node *node,
const char *propname, int index,
enum gpiod_flags dflags,
const char *label);
@@ -619,7 +619,7 @@ struct gpio_desc *gpiod_get_from_of_node(struct device_node *node,
struct device_node;

static inline
struct gpio_desc *gpiod_get_from_of_node(struct device_node *node,
struct gpio_desc *gpiod_get_from_of_node(const struct device_node *node,
const char *propname, int index,
enum gpiod_flags dflags,
const char *label)
@@ -633,7 +633,7 @@ struct gpio_desc *gpiod_get_from_of_node(struct device_node *node,
struct device_node;

struct gpio_desc *devm_gpiod_get_from_of_node(struct device *dev,
struct device_node *node,
const struct device_node *node,
const char *propname, int index,
enum gpiod_flags dflags,
const char *label);
@@ -644,7 +644,7 @@ struct device_node;

static inline
struct gpio_desc *devm_gpiod_get_from_of_node(struct device *dev,
struct device_node *node,
const struct device_node *node,
const char *propname, int index,
enum gpiod_flags dflags,
const char *label)
@@ -680,10 +680,10 @@ struct acpi_gpio_mapping {
unsigned int quirks;
};

#if IS_ENABLED(CONFIG_GPIOLIB) && IS_ENABLED(CONFIG_ACPI)

struct acpi_device;

#if IS_ENABLED(CONFIG_GPIOLIB) && IS_ENABLED(CONFIG_ACPI)

int acpi_dev_add_driver_gpios(struct acpi_device *adev,
const struct acpi_gpio_mapping *gpios);
void acpi_dev_remove_driver_gpios(struct acpi_device *adev);
@@ -696,8 +696,6 @@ struct gpio_desc *acpi_get_and_request_gpiod(char *path, int pin, char *label);

#else /* CONFIG_GPIOLIB && CONFIG_ACPI */

struct acpi_device;

static inline int acpi_dev_add_driver_gpios(struct acpi_device *adev,
const struct acpi_gpio_mapping *gpios)
{

Some files were not shown because too many files have changed in this diff