https://source.android.com/security/bulletin/2022-07-01
CVE-2020-29374
CVE-2022-20227

* tag 'ASB-2022-07-05_12-5.10': (39 commits)
  ANDROID: GKI: Add symbols to abi_gki_aarch64_transsion
  BACKPORT: nfc: nfcmrvl: main: reorder destructive operations in nfcmrvl_nci_unregister_dev to avoid bugs
  ANDROID: vendor_hook: Add hook in __free_pages()
  ANDROID: create and export is_swap_slot_cache_enabled
  ANDROID: vendor_hook: Add hook in swap_slots
  ANDROID: mm: export swapcache_free_entries
  ANDROID: mm: export symbols used in vendor hook android_vh_get_swap_page()
  ANDROID: vendor_hooks: Add hooks to extend struct swap_slots_cache
  ANDROID: mm: export swap_type_to_swap_info
  ANDROID: vendor_hook: Add hook in si_swapinfo()
  ANDROID: vendor_hooks: Add hooks to extend the struct swap_info_struct
  ANDROID: vendor_hook: Add hooks in unuse_pte_range() and try_to_unuse()
  ANDROID: vendor_hook: Add hooks in free_swap_slot()
  ANDROID: vendor_hook: Add hook to update nr_swap_pages and total_swap_pages
  ANDROID: vendor_hook: Add hook in page_referenced_one()
  ANDROID: vendor_hooks: Add hooks to record the I/O statistics of swap
  ANDROID: vendor_hook: Add hook in migrate_page_states()
  ANDROID: vendor_hook: Add hook in __migration_entry_wait()
  ANDROID: vendor_hook: Add hook in handle_pte_fault()
  ANDROID: vendor_hook: Add hook in do_swap_page()
  ...

Change-Id: I36501f339e4d9d46aade317ea3ccd32bce7f1b8c

Conflicts:
	drivers/usb/gadget/function/uvc_queue.c
Author: Tao Huang
Date:   2022-07-15 18:11:10 +08:00

66 changed files with 3776 additions and 2133 deletions

(The diffs of two files were suppressed because they are too large to display.)

ABI symbol list (file name not shown in this view):

@@ -3103,6 +3103,7 @@
 trace_raw_output_prep
 trace_seq_printf
 trace_seq_putc
+tracing_is_on
 tracing_off
 truncate_inode_pages_range
 truncate_pagecache_range

android/abi_gki_aarch64_transsion (new file):

@@ -0,0 +1,8 @@
+[abi_symbol_list]
+  get_mem_cgroup_from_mm
+  is_swap_slot_cache_enabled
+  swapcache_free_entries
+  swap_type_to_swap_info
+  scan_swap_map_slots
+  swap_alloc_cluster
+  check_cache_active
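
The android/abi_gki_aarch64_* files are KMI symbol lists: each names the exported kernel symbols a given vendor's modules may depend on, and their union tells the GKI build which exports must stay stable. The new transsion list whitelists the swap-management helpers exported by the vendor-hook commits in this merge; the tracing_is_on addition above does the same for an existing partner list.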

build.config.gki.aarch64 (inferred):

@@ -29,6 +29,7 @@ android/abi_gki_aarch64_virtual_device
 android/abi_gki_aarch64_vivo
 android/abi_gki_aarch64_xiaomi
 android/abi_gki_aarch64_asus
+android/abi_gki_aarch64_transsion
 "
 FILES="${FILES}

drivers/android/vendor_hooks.c:

@@ -407,3 +407,26 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_handle_tlb_conf);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_shrink_node_memcgs);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ra_tuning_max_page);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_tune_memcg_scan_type);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_handle_pte_fault_end);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cow_user_page);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_swapin_add_anon_rmap);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_waiting_for_page_migration);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_migrate_page_states);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_page_referenced_one_end);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_count_pswpin);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_count_pswpout);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_count_swpout_vm_event);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_swap_slot_cache_active);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_drain_slots_cache_cpu);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_alloc_swap_slot_cache);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_free_swap_slot);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_get_swap_page);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_page_isolated_for_reclaim);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_inactive_is_low);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_snapshot_refaults);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_account_swap_pages);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_unuse_swap_page);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_init_swap_info_struct);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_si_swapinfo);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_alloc_si);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_free_pages);
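
For orientation, a sketch of how a vendor module attaches to one of the hooks exported above. The header path and the handler prototype are illustrative assumptions; the authoritative DECLARE_HOOK/DECLARE_RESTRICTED_HOOK declarations live under include/trace/hooks/ in the matching Android common tree, and the register_/unregister_ helpers below are generated by those macros.

/*
 * Hypothetical vendor module attaching to android_vh_free_pages.
 * Assumed prototype: void handler(void *data, struct page *page,
 * unsigned int order); verify against include/trace/hooks/mm.h.
 */
#include <linux/module.h>
#include <trace/hooks/mm.h>

static void vendor_free_pages(void *unused, struct page *page,
			      unsigned int order)
{
	/* vendor-side accounting of freed pages would go here */
}

static int __init vendor_hooks_init(void)
{
	/* tracepoint-style registration; fails unless the hook is exported */
	return register_trace_android_vh_free_pages(vendor_free_pages, NULL);
}

static void __exit vendor_hooks_exit(void)
{
	unregister_trace_android_vh_free_pages(vendor_free_pages, NULL);
}

module_init(vendor_hooks_init);
module_exit(vendor_hooks_exit);
MODULE_LICENSE("GPL");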

drivers/base/devcoredump.c:

@@ -29,6 +29,47 @@ struct devcd_entry {
 	struct device devcd_dev;
 	void *data;
 	size_t datalen;
+	/*
+	 * Here, mutex is required to serialize the calls to del_wk work between
+	 * user/kernel space which happens when devcd is added with device_add()
+	 * and that sends a uevent to user space. User space reads the uevents
+	 * and calls devcd_data_write(), which tries to modify the work, which
+	 * is not even initialized/queued by devcoredump yet.
+	 *
+	 *
+	 *
+	 *        cpu0(X)                              cpu1(Y)
+	 *
+	 *        dev_coredump() uevent sent to user space
+	 *        device_add()  ======================> user space process Y reads the
+	 *                                              uevents and writes to the devcd
+	 *                                              fd, which results in a call to
+	 *
+	 *                                              devcd_data_write()
+	 *                                                mod_delayed_work()
+	 *                                                  try_to_grab_pending()
+	 *                                                    del_timer()
+	 *                                                      debug_assert_init()
+	 *        INIT_DELAYED_WORK()
+	 *        schedule_delayed_work()
+	 *
+	 *
+	 * Also, the mutex alone would not be enough to avoid scheduling of
+	 * del_wk work after it gets flushed from a call to devcd_free(),
+	 * as below.
+	 *
+	 *        disabled_store()
+	 *          devcd_free()
+	 *            mutex_lock()           devcd_data_write()
+	 *            flush_delayed_work()
+	 *            mutex_unlock()
+	 *                                   mutex_lock()
+	 *                                   mod_delayed_work()
+	 *                                   mutex_unlock()
+	 *
+	 * So the delete_work flag is required as well.
+	 */
+	struct mutex mutex;
+	bool delete_work;
 	struct module *owner;
 	ssize_t (*read)(char *buffer, loff_t offset, size_t count,
 			void *data, size_t datalen);
@@ -88,7 +129,12 @@ static ssize_t devcd_data_write(struct file *filp, struct kobject *kobj,
 	struct device *dev = kobj_to_dev(kobj);
 	struct devcd_entry *devcd = dev_to_devcd(dev);
 
-	mod_delayed_work(system_wq, &devcd->del_wk, 0);
+	mutex_lock(&devcd->mutex);
+	if (!devcd->delete_work) {
+		devcd->delete_work = true;
+		mod_delayed_work(system_wq, &devcd->del_wk, 0);
+	}
+	mutex_unlock(&devcd->mutex);
 
 	return count;
 }
@@ -116,7 +162,12 @@ static int devcd_free(struct device *dev, void *data)
 {
 	struct devcd_entry *devcd = dev_to_devcd(dev);
 
+	mutex_lock(&devcd->mutex);
+	if (!devcd->delete_work)
+		devcd->delete_work = true;
+
 	flush_delayed_work(&devcd->del_wk);
+	mutex_unlock(&devcd->mutex);
 	return 0;
 }
@@ -126,6 +177,30 @@ static ssize_t disabled_show(struct class *class, struct class_attribute *attr,
 	return sysfs_emit(buf, "%d\n", devcd_disabled);
 }
 
+/*
+ *
+ *    disabled_store()                                worker()
+ *      class_for_each_device(&devcd_class,
+ *                            NULL, NULL, devcd_free)
+ *        ...
+ *        ...
+ *        while ((dev = class_dev_iter_next(&iter))
+ *                                                    devcd_del()
+ *                                                      device_del()
+ *                                                        put_device() <- last reference
+ *          error = fn(dev, data)                     devcd_dev_release()
+ *            devcd_free(dev, data)                     kfree(devcd)
+ *              mutex_lock(&devcd->mutex);
+ *
+ *
+ * In the diagram above it looks as if disabled_store() races with a
+ * parallel devcd_del() and takes a memory abort while acquiring
+ * devcd->mutex, since the mutex is taken after the last reference was
+ * dropped with put_device() and devcd was freed. However, this cannot
+ * happen: fn(dev, data) runs with its own reference to the device via
+ * klist_node, so put_device() in devcd_del() is never the last reference.
+ */
 static ssize_t disabled_store(struct class *class, struct class_attribute *attr,
 			      const char *buf, size_t count)
 {
@@ -282,13 +357,16 @@ void dev_coredumpm(struct device *dev, struct module *owner,
 	devcd->read = read;
 	devcd->free = free;
 	devcd->failing_dev = get_device(dev);
+	devcd->delete_work = false;
 
+	mutex_init(&devcd->mutex);
 	device_initialize(&devcd->devcd_dev);
 
 	dev_set_name(&devcd->devcd_dev, "devcd%d",
 		     atomic_inc_return(&devcd_count));
 	devcd->devcd_dev.class = &devcd_class;
 
+	mutex_lock(&devcd->mutex);
 	if (device_add(&devcd->devcd_dev))
 		goto put_device;
@@ -302,10 +380,11 @@ void dev_coredumpm(struct device *dev, struct module *owner,
 	INIT_DELAYED_WORK(&devcd->del_wk, devcd_del);
 	schedule_delayed_work(&devcd->del_wk, DEVCD_TIMEOUT);
+	mutex_unlock(&devcd->mutex);
 	return;
  put_device:
 	put_device(&devcd->devcd_dev);
+	mutex_unlock(&devcd->mutex);
  put_module:
 	module_put(owner);
  free:
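
The two comment diagrams can be checked with an ordinary user-space model. A minimal pthreads sketch (assumptions: one mutex stands in for devcd->mutex, two bools stand in for "INIT_DELAYED_WORK has run" and delete_work; compile with -lpthread):

/*
 * Model of the devcoredump fix: a writer may see the "device" as soon
 * as it is published, but must not touch the delayed work before
 * INIT_DELAYED_WORK has run. Publishing and initializing under one
 * mutex, plus a one-shot delete_work flag, closes both windows.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct devcd_model {
	pthread_mutex_t mutex;
	bool work_ready;	/* models INIT_DELAYED_WORK having run */
	bool delete_work;	/* one-shot: delete scheduled at most once */
};

static struct devcd_model devcd = { PTHREAD_MUTEX_INITIALIZER, false, false };

/* models devcd_data_write(): may run any time after the "uevent" */
static void *writer(void *arg)
{
	pthread_mutex_lock(&devcd.mutex);
	if (devcd.work_ready && !devcd.delete_work) {
		devcd.delete_work = true;
		puts("writer: mod_delayed_work(del_wk, 0)");
	}
	pthread_mutex_unlock(&devcd.mutex);
	return NULL;
}

int main(void)
{
	pthread_t t;

	/*
	 * models dev_coredumpm(): device_add() through INIT_DELAYED_WORK()
	 * happen with the mutex held, so the writer started in between
	 * blocks on the mutex until work_ready is true and can never
	 * operate on uninitialized work.
	 */
	pthread_mutex_lock(&devcd.mutex);
	pthread_create(&t, NULL, writer, NULL);	/* "uevent" goes out here */
	devcd.work_ready = true;		/* INIT_DELAYED_WORK() */
	puts("init: schedule_delayed_work(del_wk, DEVCD_TIMEOUT)");
	pthread_mutex_unlock(&devcd.mutex);

	pthread_join(t, NULL);
	return 0;
}

Because delete_work is tested and set under the mutex, a concurrent devcd_free() (another writer in this model) can flush the work and still be sure no later caller re-schedules it, which is exactly the second diagram's point.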

drivers/nfc/nfcmrvl/main.c:

@@ -194,6 +194,7 @@ void nfcmrvl_nci_unregister_dev(struct nfcmrvl_private *priv)
 {
 	struct nci_dev *ndev = priv->ndev;
 
+	nci_unregister_device(ndev);
 	if (priv->ndev->nfc_dev->fw_download_in_progress)
 		nfcmrvl_fw_dnld_abort(priv);
 
@@ -202,7 +203,6 @@ void nfcmrvl_nci_unregister_dev(struct nfcmrvl_private *priv)
 	if (gpio_is_valid(priv->config.reset_n_io))
 		gpio_free(priv->config.reset_n_io);
 
-	nci_unregister_device(ndev);
 	nci_free_device(ndev);
 	kfree(priv);
 }
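
The fix here is pure ordering: nci_unregister_device() now runs before the firmware-download abort and the GPIO release rather than after them, so the NCI core stops delivering callbacks into the driver before its resources are torn down. The old order left a window in which a callback could run against a half-destructed device, which is the bug class the commit title ("reorder destructive operations ... to avoid bugs") refers to.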

drivers/usb/common/usb-conn-gpio.c:

@@ -275,6 +275,7 @@ static int usb_conn_probe(struct platform_device *pdev)
 	}
 
 	platform_set_drvdata(pdev, info);
+	device_set_wakeup_capable(&pdev->dev, true);
 
 	/* Perform initial detection */
 	usb_conn_queue_dwork(info, 0);
@@ -304,6 +305,14 @@ static int __maybe_unused usb_conn_suspend(struct device *dev)
 {
 	struct usb_conn_info *info = dev_get_drvdata(dev);
 
+	if (device_may_wakeup(dev)) {
+		if (info->id_gpiod)
+			enable_irq_wake(info->id_irq);
+		if (info->vbus_gpiod)
+			enable_irq_wake(info->vbus_irq);
+		return 0;
+	}
+
 	if (info->id_gpiod)
 		disable_irq(info->id_irq);
 	if (info->vbus_gpiod)
@@ -318,6 +327,14 @@ static int __maybe_unused usb_conn_resume(struct device *dev)
 {
 	struct usb_conn_info *info = dev_get_drvdata(dev);
 
+	if (device_may_wakeup(dev)) {
+		if (info->id_gpiod)
+			disable_irq_wake(info->id_irq);
+		if (info->vbus_gpiod)
+			disable_irq_wake(info->vbus_irq);
+		return 0;
+	}
+
 	pinctrl_pm_select_default_state(dev);
 	if (info->id_gpiod)
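
The probe-time device_set_wakeup_capable() call gives the suspend path two modes. If userspace left wakeup enabled (device_may_wakeup()), the ID and VBUS interrupts stay live and are promoted to wakeup sources with enable_irq_wake(), so plugging or unplugging a cable can wake the system; otherwise the driver falls through to its old behavior of disabling the IRQs across suspend. Resume undoes exactly the branch suspend took, calling disable_irq_wake() in the wakeup case.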

drivers/usb/gadget/function/f_fs.c:

@@ -122,8 +122,6 @@ struct ffs_ep {
 	struct usb_endpoint_descriptor	*descs[3];
 
 	u8				num;
-
-	int				status;	/* P: epfile->mutex */
 };
 
 struct ffs_epfile {
@@ -227,6 +225,9 @@ struct ffs_io_data {
 	bool use_sg;
 
 	struct ffs_data *ffs;
+
+	int status;
+	struct completion done;
 };
 
 struct ffs_desc_helper {
@@ -705,12 +706,15 @@ static const struct file_operations ffs_ep0_operations = {
 static void ffs_epfile_io_complete(struct usb_ep *_ep, struct usb_request *req)
 {
+	struct ffs_io_data *io_data = req->context;
+
 	ENTER();
 
-	if (likely(req->context)) {
-		struct ffs_ep *ep = _ep->driver_data;
-		ep->status = req->status ? req->status : req->actual;
-		complete(req->context);
-	}
+	if (req->status)
+		io_data->status = req->status;
+	else
+		io_data->status = req->actual;
+
+	complete(&io_data->done);
 }
 
 static ssize_t ffs_copy_to_iter(void *data, int data_len, struct iov_iter *iter)
@@ -1048,7 +1052,6 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
 		WARN(1, "%s: data_len == -EINVAL\n", __func__);
 		ret = -EINVAL;
 	} else if (!io_data->aio) {
-		DECLARE_COMPLETION_ONSTACK(done);
 		bool interrupted = false;
 
 		req = ep->req;
@@ -1064,7 +1067,8 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
 		io_data->buf = data;
 
-		req->context  = &done;
+		init_completion(&io_data->done);
+		req->context  = io_data;
 		req->complete = ffs_epfile_io_complete;
 
 		ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC);
@@ -1073,7 +1077,12 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
 		spin_unlock_irq(&epfile->ffs->eps_lock);
 
-		if (unlikely(wait_for_completion_interruptible(&done))) {
+		if (unlikely(wait_for_completion_interruptible(&io_data->done))) {
+			spin_lock_irq(&epfile->ffs->eps_lock);
+			if (epfile->ep != ep) {
+				ret = -ESHUTDOWN;
+				goto error_lock;
+			}
+
 			/*
 			 * To avoid race condition with ffs_epfile_io_complete,
 			 * dequeue the request first then check
@@ -1081,17 +1090,18 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
 			 * condition with req->complete callback.
 			 */
 			usb_ep_dequeue(ep->ep, req);
-			wait_for_completion(&done);
-			interrupted = ep->status < 0;
+			spin_unlock_irq(&epfile->ffs->eps_lock);
+			wait_for_completion(&io_data->done);
+			interrupted = io_data->status < 0;
 		}
 
 		if (interrupted)
 			ret = -EINTR;
-		else if (io_data->read && ep->status > 0)
-			ret = __ffs_epfile_read_data(epfile, data, ep->status,
+		else if (io_data->read && io_data->status > 0)
+			ret = __ffs_epfile_read_data(epfile, data, io_data->status,
						     &io_data->data);
 		else
-			ret = ep->status;
+			ret = io_data->status;
 		goto error_mutex;
 	} else if (!(req = usb_ep_alloc_request(ep->ep, GFP_ATOMIC))) {
 		ret = -ENOMEM;
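
The substance of this change is ownership of the I/O status. Previously the completion handler wrote into ep->status, which is shared by every request on that endpoint, so a second I/O completing on the same endpoint could overwrite the status the first waiter was about to read; the on-stack completion had the same per-call-frame fragility. Moving both fields into the per-request struct ffs_io_data makes them private to one I/O, and the added epfile->ep != ep check bails out with -ESHUTDOWN if the endpoint was swapped out while the wait was interrupted. A tiny user-space model of the aliasing half of the bug:

/*
 * Model of the f_fs status fix: a status stored per endpoint is
 * clobbered by the next request on that endpoint; a status stored
 * per request is immune. Values are illustrative.
 */
#include <stdio.h>

struct ep { int status; };		/* old: shared per endpoint */
struct io { int status; };		/* new: private per request */

static void complete_old(struct ep *ep, int result) { ep->status = result; }
static void complete_new(struct io *io, int result) { io->status = result; }

int main(void)
{
	struct ep ep = { 0 };
	struct io a = { 0 }, b = { 0 };

	complete_old(&ep, 512);		/* request A completes: 512 bytes */
	complete_old(&ep, -108);	/* request B completes: -ESHUTDOWN */
	printf("old scheme, A's waiter reads: %d (clobbered)\n", ep.status);

	complete_new(&a, 512);
	complete_new(&b, -108);
	printf("new scheme, A's waiter reads: %d\n", a.status);
	return 0;
}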

drivers/usb/gadget/function/f_mass_storage.c:

@@ -1188,6 +1188,8 @@ static int do_read_toc(struct fsg_common *common, struct fsg_buffhd *bh)
 	int		msf = common->cmnd[1] & 0x02;
 	int		start_track = common->cmnd[6];
 	u8		*buf = (u8 *)bh->buf;
+	u8		format;
+	int		i, len;
 
 	if ((common->cmnd[1] & ~0x02) != 0 ||	/* Mask away MSF */
 			start_track > 1) {
@@ -1195,18 +1197,62 @@ static int do_read_toc(struct fsg_common *common, struct fsg_buffhd *bh)
 		return -EINVAL;
 	}
 
-	memset(buf, 0, 20);
-	buf[1] = (20-2);		/* TOC data length */
-	buf[2] = 1;			/* First track number */
-	buf[3] = 1;			/* Last track number */
-	buf[5] = 0x16;			/* Data track, copying allowed */
-	buf[6] = 0x01;			/* Only track is number 1 */
-	store_cdrom_address(&buf[8], msf, 0);
+	format = common->cmnd[2] & 0xf;
+	/*
+	 * Check if CDB is old style SFF-8020i
+	 * i.e. format is in 2 MSBs of byte 9
+	 * Mac OS-X host sends us this.
+	 */
+	if (format == 0)
+		format = (common->cmnd[9] >> 6) & 0x3;
 
-	buf[13] = 0x16;			/* Lead-out track is data */
-	buf[14] = 0xAA;			/* Lead-out track number */
-	store_cdrom_address(&buf[16], msf, curlun->num_sectors);
-	return 20;
+	switch (format) {
+	case 0:
+		/* Formatted TOC */
+		len = 4 + 2*8;		/* 4 byte header + 2 descriptors */
+		memset(buf, 0, len);
+		buf[1] = len - 2;	/* TOC Length excludes length field */
+		buf[2] = 1;		/* First track number */
+		buf[3] = 1;		/* Last track number */
+		buf[5] = 0x16;		/* Data track, copying allowed */
+		buf[6] = 0x01;		/* Only track is number 1 */
+		store_cdrom_address(&buf[8], msf, 0);
+
+		buf[13] = 0x16;		/* Lead-out track is data */
+		buf[14] = 0xAA;		/* Lead-out track number */
+		store_cdrom_address(&buf[16], msf, curlun->num_sectors);
+		return len;
+
+	case 2:
+		/* Raw TOC */
+		len = 4 + 3*11;		/* 4 byte header + 3 descriptors */
+		memset(buf, 0, len);	/* Header + A0, A1 & A2 descriptors */
+		buf[1] = len - 2;	/* TOC Length excludes length field */
+		buf[2] = 1;		/* First complete session */
+		buf[3] = 1;		/* Last complete session */
+
+		buf += 4;
+		/* fill in A0, A1 and A2 points */
+		for (i = 0; i < 3; i++) {
+			buf[0] = 1;	/* Session number */
+			buf[1] = 0x16;	/* Data track, copying allowed */
+			/* 2 - Track number 0 -> TOC */
+			buf[3] = 0xA0 + i; /* A0, A1, A2 point */
+			/* 4, 5, 6 - Min, sec, frame is zero */
+			buf[8] = 1;	/* Pmin: last track number */
+			buf += 11;	/* go to next track descriptor */
+		}
+		buf -= 11;		/* go back to A2 descriptor */
+
+		/* For A2, 7, 8, 9, 10 - zero, Pmin, Psec, Pframe of Lead out */
+		store_cdrom_address(&buf[7], msf, curlun->num_sectors);
+		return len;
+
+	default:
+		/* Multi-session, PMA, ATIP, CD-TEXT not supported/required */
+		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+		return -EINVAL;
+	}
 }
 
 static int do_mode_sense(struct fsg_common *common, struct fsg_buffhd *bh)
@@ -1933,7 +1979,7 @@ static int do_scsi_command(struct fsg_common *common)
 		common->data_size_from_cmnd =
			get_unaligned_be16(&common->cmnd[7]);
 		reply = check_command(common, 10, DATA_DIR_TO_HOST,
-				      (7<<6) | (1<<1), 1,
+				      (0xf<<6) | (3<<1), 1,
 				      "READ TOC");
 		if (reply == 0)
 			reply = do_read_toc(common, bh);
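
The widened check_command() mask is what lets the new code run at all: hosts may now set the format field in byte 2 (and the SFF-8020i variant in byte 9) without being rejected with INVALID FIELD IN CDB. The layout arithmetic of the format-2 branch is easy to verify in user space; in this sketch store_cdrom_address() is simplified from the driver (no 512-to-2048-byte sector conversion) and num_sectors is an arbitrary stand-in:

/*
 * Builds the format-2 (raw) READ TOC response the way the new driver
 * code does: a 4-byte header plus three 11-byte descriptors for the
 * A0 (first track), A1 (last track) and A2 (lead-out) points.
 */
#include <stdio.h>
#include <stdint.h>

static void store_cdrom_address(uint8_t *dest, int msf, uint32_t addr)
{
	if (msf) {			/* minutes/seconds/frames */
		addr += 2 * 75;		/* 2-second lead-in, 75 frames/s */
		dest[3] = addr % 75;
		dest[2] = (addr / 75) % 60;
		dest[1] = addr / (75 * 60);
		dest[0] = 0;
	} else {			/* absolute LBA, big-endian */
		dest[0] = addr >> 24;
		dest[1] = addr >> 16;
		dest[2] = addr >> 8;
		dest[3] = addr;
	}
}

int main(void)
{
	uint8_t buf[4 + 3 * 11] = { 0 };	/* len = 37 */
	uint32_t num_sectors = 65536;		/* arbitrary medium size */
	uint8_t *p = buf + 4;
	int i, len = sizeof(buf);

	buf[1] = len - 2;	/* TOC length excludes the length field */
	buf[2] = 1;		/* first complete session */
	buf[3] = 1;		/* last complete session */

	for (i = 0; i < 3; i++, p += 11) {	/* A0, A1, A2 points */
		p[0] = 1;	/* session number */
		p[1] = 0x16;	/* data track, copying allowed */
		p[3] = 0xA0 + i;
		p[8] = 1;	/* Pmin: first/last track number */
	}
	/* A2: bytes 7..10 carry the lead-out address, overwriting Pmin */
	store_cdrom_address(&buf[4 + 2 * 11 + 7], 0, num_sectors);

	for (i = 0; i < len; i++)
		printf("%02x%c", buf[i], (i % 11 == 3) ? '\n' : ' ');
	putchar('\n');
	return 0;
}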

drivers/usb/gadget/function/uvc_queue.c (the file listed under Conflicts above):

@@ -44,11 +44,12 @@ static int uvc_queue_setup(struct vb2_queue *vq,
 	struct uvc_video_queue *queue = vb2_get_drv_priv(vq);
 	struct uvc_video *video = container_of(queue, struct uvc_video, queue);
-	struct usb_composite_dev *cdev = video->uvc->func.config->cdev;
 #if defined(CONFIG_ARCH_ROCKCHIP) && defined(CONFIG_NO_GKI)
 	struct uvc_device *uvc = container_of(video, struct uvc_device, video);
 	struct f_uvc_opts *opts = fi_to_f_uvc_opts(uvc->func.fi);
 #endif
+	unsigned int req_size;
+	unsigned int nreq;
 
 	if (*nbuffers > UVC_MAX_VIDEO_BUFFERS)
 		*nbuffers = UVC_MAX_VIDEO_BUFFERS;
@@ -64,10 +65,16 @@ static int uvc_queue_setup(struct vb2_queue *vq,
 	}
 #endif
 
-	if (cdev->gadget->speed < USB_SPEED_SUPER)
-		video->uvc_num_requests = 4;
-	else
-		video->uvc_num_requests = 64;
+	req_size = video->ep->maxpacket
+		 * max_t(unsigned int, video->ep->maxburst, 1)
+		 * (video->ep->mult);
+
+	/* We divide by two, to increase the chance to run
+	 * into fewer requests for smaller framesizes.
+	 */
+	nreq = DIV_ROUND_UP(DIV_ROUND_UP(sizes[0], 2), req_size);
+	nreq = clamp(nreq, 4U, 64U);
+	video->uvc_num_requests = nreq;
 
 	return 0;
 }
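
The replacement sizes requests from the frame instead of from link speed alone. A quick user-space check of the formula (the endpoint numbers below are made-up examples, not values from any real descriptor):

/*
 * Evaluates nreq = clamp(ceil(ceil(framesize / 2) / req_size), 4, 64),
 * where req_size = maxpacket * max(maxburst, 1) * mult.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static unsigned int clampu(unsigned int v, unsigned int lo, unsigned int hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

static void show(const char *name, unsigned int maxpacket,
		 unsigned int maxburst, unsigned int mult,
		 unsigned int framesize)
{
	unsigned int burst = maxburst ? maxburst : 1;
	unsigned int req_size = maxpacket * burst * mult;
	unsigned int nreq = DIV_ROUND_UP(DIV_ROUND_UP(framesize, 2), req_size);

	printf("%-28s req_size=%-6u nreq=%u\n", name, req_size,
	       clampu(nreq, 4U, 64U));
}

int main(void)
{
	show("HS, 640x480 YUYV (614400 B)", 1024, 1, 3, 614400);	/* -> 64 */
	show("HS, 160x120 YUYV (38400 B)", 1024, 1, 3, 38400);	/* -> 7  */
	show("SS, burst 15 (614400 B)", 1024, 15, 1, 614400);	/* -> 20 */
	return 0;
}

Small frames now get far fewer than the former fixed super-speed count of 64 requests, while the clamp keeps at least 4 in flight.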

drivers/usb/gadget/function/uvc_video.c:

@@ -310,6 +310,9 @@ static void uvcg_video_pump(struct work_struct *work)
 			uvcg_queue_cancel(queue, 0);
 			break;
 		}
+
+		/* Endpoint now owns the request */
+		req = NULL;
 	}
 
 	if (!req)
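
Clearing req after a successful usb_ep_queue() records the ownership hand-off: from that point the completion handler is responsible for giving the request back, and the pump loop must not requeue or free the stale pointer on a later iteration or error path.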

fs/exfat/balloc.c:

@@ -141,7 +141,7 @@ void exfat_free_bitmap(struct exfat_sb_info *sbi)
 	kfree(sbi->vol_amap);
 }
 
-int exfat_set_bitmap(struct inode *inode, unsigned int clu)
+int exfat_set_bitmap(struct inode *inode, unsigned int clu, bool sync)
 {
 	int i, b;
 	unsigned int ent_idx;
@@ -154,7 +154,7 @@ int exfat_set_bitmap(struct inode *inode, unsigned int clu)
 	b = BITMAP_OFFSET_BIT_IN_SECTOR(sb, ent_idx);
 
 	set_bit_le(b, sbi->vol_amap[i]->b_data);
-	exfat_update_bh(sbi->vol_amap[i], IS_DIRSYNC(inode));
+	exfat_update_bh(sbi->vol_amap[i], sync);
 	return 0;
 }

fs/exfat/dir.c:

@@ -317,7 +317,7 @@ int exfat_alloc_new_dir(struct inode *inode, struct exfat_chain *clu)
 	exfat_chain_set(clu, EXFAT_EOF_CLUSTER, 0, ALLOC_NO_FAT_CHAIN);
 
-	ret = exfat_alloc_cluster(inode, 1, clu);
+	ret = exfat_alloc_cluster(inode, 1, clu, IS_DIRSYNC(inode));
 	if (ret)
 		return ret;

fs/exfat/exfat_fs.h:

@@ -388,7 +388,7 @@ int exfat_clear_volume_dirty(struct super_block *sb);
 #define exfat_get_next_cluster(sb, pclu) exfat_ent_get(sb, *(pclu), pclu)
 
 int exfat_alloc_cluster(struct inode *inode, unsigned int num_alloc,
-		struct exfat_chain *p_chain);
+		struct exfat_chain *p_chain, bool sync_bmap);
 int exfat_free_cluster(struct inode *inode, struct exfat_chain *p_chain);
 int exfat_ent_get(struct super_block *sb, unsigned int loc,
 		unsigned int *content);
@@ -407,7 +407,7 @@ int exfat_count_num_clusters(struct super_block *sb,
 /* balloc.c */
 int exfat_load_bitmap(struct super_block *sb);
 void exfat_free_bitmap(struct exfat_sb_info *sbi);
-int exfat_set_bitmap(struct inode *inode, unsigned int clu);
+int exfat_set_bitmap(struct inode *inode, unsigned int clu, bool sync);
 void exfat_clear_bitmap(struct inode *inode, unsigned int clu);
 unsigned int exfat_find_free_bitmap(struct super_block *sb, unsigned int clu);
 int exfat_count_used_clusters(struct super_block *sb, unsigned int *ret_count);

fs/exfat/fatent.c:

@@ -277,7 +277,7 @@ release_bhs:
 }
 
 int exfat_alloc_cluster(struct inode *inode, unsigned int num_alloc,
-		struct exfat_chain *p_chain)
+		struct exfat_chain *p_chain, bool sync_bmap)
 {
 	int ret = -ENOSPC;
 	unsigned int num_clusters = 0, total_cnt;
@@ -339,7 +339,7 @@ int exfat_alloc_cluster(struct inode *inode, unsigned int num_alloc,
 		}
 
 		/* update allocation bitmap */
-		if (exfat_set_bitmap(inode, new_clu)) {
+		if (exfat_set_bitmap(inode, new_clu, sync_bmap)) {
 			ret = -EIO;
 			goto free_cluster;
 		}

fs/exfat/inode.c:

@@ -178,7 +178,8 @@ static int exfat_map_cluster(struct inode *inode, unsigned int clu_offset,
 			return -EIO;
 		}
 
-		ret = exfat_alloc_cluster(inode, num_to_be_allocated, &new_clu);
+		ret = exfat_alloc_cluster(inode, num_to_be_allocated, &new_clu,
+				inode_needs_sync(inode));
 		if (ret)
 			return ret;

fs/exfat/namei.c:

@@ -340,7 +340,7 @@ static int exfat_find_empty_entry(struct inode *inode,
 		exfat_chain_set(&clu, last_clu + 1, 0, p_dir->flags);
 
 		/* allocate a cluster */
-		ret = exfat_alloc_cluster(inode, 1, &clu);
+		ret = exfat_alloc_cluster(inode, 1, &clu, IS_DIRSYNC(inode));
 		if (ret)
 			return ret;
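
Taken together, the exfat hunks thread one bool from each call site down to the bitmap write: directory-entry and new-directory allocations pass IS_DIRSYNC(inode), data-cluster mapping passes inode_needs_sync(inode), and exfat_set_bitmap() now honors that flag instead of rechecking IS_DIRSYNC() itself. The practical effect is that only callers that actually need synchronous semantics pay for a synchronous bitmap-buffer write on every allocated cluster.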

fs/io_uring.c:

@@ -1556,6 +1556,7 @@ static void __io_queue_deferred(struct io_ring_ctx *ctx)
 static void io_flush_timeouts(struct io_ring_ctx *ctx)
 {
+	struct io_kiocb *req, *tmp;
 	u32 seq;
 
 	if (list_empty(&ctx->timeout_list))
@@ -1563,10 +1564,8 @@ static void io_flush_timeouts(struct io_ring_ctx *ctx)
 	seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
 
-	do {
+	list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
 		u32 events_needed, events_got;
-		struct io_kiocb *req = list_first_entry(&ctx->timeout_list,
-						struct io_kiocb, timeout.list);
 
 		if (io_is_timeout_noseq(req))
 			break;
@@ -1583,9 +1582,8 @@ static void io_flush_timeouts(struct io_ring_ctx *ctx)
 		if (events_got < events_needed)
 			break;
 
-		list_del_init(&req->timeout.list);
 		io_kill_timeout(req, 0);
-	} while (!list_empty(&ctx->timeout_list));
+	}
 
 	ctx->cq_last_tm_flush = seq;
 }
@@ -5639,6 +5637,7 @@ static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 	else
 		data->mode = HRTIMER_MODE_REL;
 
+	INIT_LIST_HEAD(&req->timeout.list);
 	hrtimer_init(&data->timer, CLOCK_MONOTONIC, data->mode);
 	return 0;
 }
@@ -6282,12 +6281,12 @@ static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
 	if (!list_empty(&req->link_list)) {
 		prev = list_entry(req->link_list.prev, struct io_kiocb,
 				  link_list);
-		if (refcount_inc_not_zero(&prev->refs))
-			list_del_init(&req->link_list);
-		else
+		list_del_init(&req->link_list);
+		if (!refcount_inc_not_zero(&prev->refs))
 			prev = NULL;
 	}
 
+	list_del(&req->timeout.list);
 	spin_unlock_irqrestore(&ctx->completion_lock, flags);
 	if (prev) {
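
All three io_uring hunks are timeout-list discipline. The flush loop becomes list_for_each_entry_safe() because io_kill_timeout() unlinks the node being visited; req->timeout.list is initialized at prep time so that the list_del() added to io_link_timeout_fn() is safe even for a timeout that was never queued; and the link-timeout path now unlinks unconditionally before deciding whether prev can still take a reference. A user-space model of the first point, with a minimal kernel-style intrusive list:

/*
 * Why the _safe iterator: the loop body unlinks the current node, so a
 * plain iterator would walk through an already-unlinked element.
 */
#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void list_del_init(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
	e->next = e->prev = e;
}

struct req { int seq; struct list_head list; };

int main(void)
{
	struct req reqs[3] = { { 10 }, { 20 }, { 30 } };
	struct list_head head = { &head, &head };
	struct list_head *pos, *n;
	int i;

	for (i = 2; i >= 0; i--) {	/* build head -> 10 -> 20 -> 30 */
		reqs[i].list.next = head.next;
		reqs[i].list.prev = &head;
		head.next->prev = &reqs[i].list;
		head.next = &reqs[i].list;
	}

	/* list_for_each_entry_safe: cache n before the body unlinks pos */
	for (pos = head.next, n = pos->next; pos != &head;
	     pos = n, n = pos->next) {
		struct req *r = container_of(pos, struct req, list);

		printf("flushing timeout seq=%d\n", r->seq);
		list_del_init(pos);	/* what io_kill_timeout() does */
	}
	return 0;
}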

(Some changed files are not shown in this view.)