Commit fa7f5780 authored by Linus Torvalds

Merge branch 'akpm' (patches from Andrew)

Merge more updates from Andrew Morton:

 - a bit more MM

 - procfs updates

 - dynamic-debug fixes

 - lib/ updates

 - checkpatch

 - epoll

 - nilfs2

 - signals

 - rapidio

 - PID management cleanup and optimization

 - kcov updates

 - sysvipc updates

 - quite a few misc things all over the place

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (94 commits)
  EXPERT Kconfig menu: fix broken EXPERT menu
  include/asm-generic/topology.h: remove unused parent_node() macro
  arch/tile/include/asm/topology.h: remove unused parent_node() macro
  arch/sparc/include/asm/topology_64.h: remove unused parent_node() macro
  arch/sh/include/asm/topology.h: remove unused parent_node() macro
  arch/ia64/include/asm/topology.h: remove unused parent_node() macro
  drivers/pcmcia/sa1111_badge4.c: avoid unused function warning
  mm: add infrastructure for get_user_pages_fast() benchmarking
  sysvipc: make get_maxid O(1) again
  sysvipc: properly name ipc_addid() limit parameter
  sysvipc: duplicate lock comments wrt ipc_addid()
  sysvipc: unteach ids->next_id for !CHECKPOINT_RESTORE
  initramfs: use time64_t timestamps
  drivers/watchdog: make use of devm_register_reboot_notifier()
  kernel/reboot.c: add devm_register_reboot_notifier()
  kcov: update documentation
  Makefile: support flag -fsanitizer-coverage=trace-cmp
  kcov: support comparison operands collection
  kcov: remove pointless current != NULL check
  kernel/panic.c: add TAINT_AUX
  ...
parents 2dcd9c71 d1b069f5
......@@ -18,7 +18,7 @@ shortcut for ``print_hex_dump(KERN_DEBUG)``.
For ``print_hex_dump_debug()``/``print_hex_dump_bytes()``, format string is
its ``prefix_str`` argument, if it is constant string; or ``hexdump``
in case ``prefix_str`` is build dynamically.
in case ``prefix_str`` is built dynamically.
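
For example (a sketch; the call site, ``buf``/``len`` and the ``"rx_buf: "``
prefix are all illustrative), a constant prefix string is what the match
keys on::

	print_hex_dump_debug("rx_buf: ", DUMP_PREFIX_OFFSET,
			     16, 1, buf, len, true);

and that callsite can then be enabled with::

	echo 'format "rx_buf: " +p' > <debugfs>/dynamic_debug/control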
Dynamic debug has even more useful features:
......@@ -197,8 +197,8 @@ line
line number matches the callsite line number exactly. A
range of line numbers matches any callsite between the first
and last line number inclusive. An empty first number means
the first line in the file, an empty line number means the
last number in the file. Examples::
the first line in the file, an empty last line number means the
last line number in the file. Examples::
line 1603 // exactly line 1603
line 1600-1605 // the six lines from line 1600 to line 1605
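
Any of these matches is applied by writing to the control file, for
example (the source file name here is illustrative)::

	echo 'file svcsock.c line 1600-1605 +p' > <debugfs>/dynamic_debug/control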
......
WARN_ONCE / WARN_ON_ONCE only print a warning once.
echo 1 > /sys/kernel/debug/clear_warn_once
clears the state and allows the warnings to print once again.
This can be useful after test suite runs to reproduce problems.
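
For reference, such a warning looks like this in kernel code (an
illustrative call; the condition and message are made up):

	WARN_ONCE(err, "chip reset failed: %d\n", err);

Once the state is cleared, the next failing call prints the full warning
and backtrace once more.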
......@@ -12,19 +12,30 @@ To achieve this goal it does not collect coverage in soft/hard interrupts
and instrumentation of some inherently non-deterministic parts of kernel is
disabled (e.g. scheduler, locking).
Usage
-----
kcov is also able to collect comparison operands from the instrumented code
(this feature currently requires that the kernel is compiled with clang).
Prerequisites
-------------
Configure the kernel with::
CONFIG_KCOV=y
CONFIG_KCOV requires gcc built on revision 231296 or later.
If the comparison operands need to be collected, set::
CONFIG_KCOV_ENABLE_COMPARISONS=y
Profiling data will only become accessible once debugfs has been mounted::
mount -t debugfs none /sys/kernel/debug
The following program demonstrates kcov usage from within a test program:
Coverage collection
-------------------
The following program demonstrates coverage collection from within a test
program using kcov:
.. code-block:: c
......@@ -44,6 +55,9 @@ The following program demonstrates kcov usage from within a test program:
#define KCOV_DISABLE _IO('c', 101)
#define COVER_SIZE (64<<10)
#define KCOV_TRACE_PC 0
#define KCOV_TRACE_CMP 1
int main(int argc, char **argv)
{
int fd;
......@@ -64,7 +78,7 @@ The following program demonstrates kcov usage from within a test program:
if ((void*)cover == MAP_FAILED)
perror("mmap"), exit(1);
/* Enable coverage collection on the current thread. */
if (ioctl(fd, KCOV_ENABLE, 0))
if (ioctl(fd, KCOV_ENABLE, KCOV_TRACE_PC))
perror("ioctl"), exit(1);
/* Reset coverage from the tail of the ioctl() call. */
__atomic_store_n(&cover[0], 0, __ATOMIC_RELAXED);
......@@ -111,3 +125,80 @@ The interface is fine-grained to allow efficient forking of test processes.
That is, a parent process opens /sys/kernel/debug/kcov, enables trace mode,
mmaps coverage buffer and then forks child processes in a loop. Child processes
only need to enable coverage (disable happens automatically on thread end).
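
Expressed as code, that pattern might look as follows (a sketch only: it
reuses the includes and defines of the example above, additionally needs
<sys/wait.h>, omits error handling, and run_test() stands in for a
hypothetical test body):

.. code-block:: c

    int main(void)
    {
	int fd = open("/sys/kernel/debug/kcov", O_RDWR);
	unsigned long *cover;

	ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE);
	cover = (unsigned long *)mmap(NULL, COVER_SIZE * sizeof(unsigned long),
				      PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	/* Loop forever over test cases. */
	for (;;) {
		if (fork() == 0) {
			/* Child: enabling is all that is needed; the
			 * disable happens automatically on exit. */
			ioctl(fd, KCOV_ENABLE, KCOV_TRACE_PC);
			__atomic_store_n(&cover[0], 0, __ATOMIC_RELAXED);
			run_test();
			_exit(0);
		}
		wait(NULL);
		/* Parent: consume the cover[] entries written by the child. */
	}
    }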
Comparison operands collection
------------------------------
Comparison operands collection is similar to coverage collection:
.. code-block:: c
/* Same includes and defines as above. */
/* Number of 64-bit words per record. */
#define KCOV_WORDS_PER_CMP 4
/*
* The format for the types of collected comparisons.
*
* Bit 0 shows whether one of the arguments is a compile-time constant.
* Bits 1 & 2 contain log2 of the argument size, up to 8 bytes.
*/
#define KCOV_CMP_CONST (1 << 0)
#define KCOV_CMP_SIZE(n) ((n) << 1)
#define KCOV_CMP_MASK KCOV_CMP_SIZE(3)
int main(int argc, char **argv)
{
int fd;
uint64_t *cover, ip, type, arg1, arg2, is_const, size;
unsigned long n, i;
fd = open("/sys/kernel/debug/kcov", O_RDWR);
if (fd == -1)
perror("open"), exit(1);
if (ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE))
perror("ioctl"), exit(1);
/*
* Note that the buffer pointer is of type uint64_t*, because all
* the comparison operands are promoted to uint64_t.
*/
cover = (uint64_t *)mmap(NULL, COVER_SIZE * sizeof(unsigned long),
PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
if ((void*)cover == MAP_FAILED)
perror("mmap"), exit(1);
/* Note KCOV_TRACE_CMP instead of KCOV_TRACE_PC. */
if (ioctl(fd, KCOV_ENABLE, KCOV_TRACE_CMP))
perror("ioctl"), exit(1);
__atomic_store_n(&cover[0], 0, __ATOMIC_RELAXED);
read(-1, NULL, 0);
/* Read number of comparisons collected. */
n = __atomic_load_n(&cover[0], __ATOMIC_RELAXED);
for (i = 0; i < n; i++) {
type = cover[i * KCOV_WORDS_PER_CMP + 1];
/* arg1 and arg2 - operands of the comparison. */
arg1 = cover[i * KCOV_WORDS_PER_CMP + 2];
arg2 = cover[i * KCOV_WORDS_PER_CMP + 3];
/* ip - caller address. */
ip = cover[i * KCOV_WORDS_PER_CMP + 4];
/* size of the operands. */
size = 1 << ((type & KCOV_CMP_MASK) >> 1);
/* is_const - true if either operand is a compile-time constant.*/
is_const = type & KCOV_CMP_CONST;
printf("ip: 0x%lx type: 0x%lx, arg1: 0x%lx, arg2: 0x%lx, "
"size: %lu, %s\n",
ip, type, arg1, arg2, size,
is_const ? "const" : "non-const");
}
if (ioctl(fd, KCOV_DISABLE, 0))
perror("ioctl"), exit(1);
/* Free resources. */
if (munmap(cover, COVER_SIZE * sizeof(unsigned long)))
perror("munmap"), exit(1);
if (close(fd))
perror("close"), exit(1);
return 0;
}
Note that the kcov modes (coverage collection or comparison operands) are
mutually exclusive.
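Switching a thread from one mode to the other therefore takes a
disable/enable cycle on the same fd; the document does not spell this
sequence out, so treat the sketch below as an assumption:

.. code-block:: c

	/* Assumed mode switch: stop PC tracing, then re-enable the
	 * same fd in comparison mode. */
	if (ioctl(fd, KCOV_DISABLE, 0))
		perror("ioctl"), exit(1);
	if (ioctl(fd, KCOV_ENABLE, KCOV_TRACE_CMP))
		perror("ioctl"), exit(1);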
......@@ -181,6 +181,7 @@ read the file /proc/PID/status:
VmPTE: 20 kb
VmSwap: 0 kB
HugetlbPages: 0 kB
CoreDumping: 0
Threads: 1
SigQ: 0/28578
SigPnd: 0000000000000000
......@@ -253,6 +254,8 @@ Table 1-2: Contents of the status files (as of 4.8)
VmSwap amount of swap used by anonymous private data
(shmem swap usage is not included)
HugetlbPages size of hugetlb memory portions
CoreDumping process's memory is currently being dumped
(killing the process may lead to a corrupted core)
Threads number of threads
SigQ number of signals queued/max. number for queue
SigPnd bitmap of pending signals for the thread
......
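Userspace can check the new CoreDumping field before deciding to kill a
task, e.g. (the PID is illustrative):

	grep CoreDumping /proc/1234/status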
......@@ -818,7 +818,7 @@ tooling to work, you can do:
swappiness
This control is used to define how aggressive the kernel will swap
memory pages. Higher values will increase agressiveness, lower values
memory pages. Higher values will increase aggressiveness, lower values
decrease the amount of swap. A value of 0 instructs the kernel not to
initiate swap until the amount of free and file-backed pages is less
than the high water mark in a zone.
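
For example, to make swapping much less aggressive at runtime (the path
follows from this file describing /proc/sys/vm):

	echo 10 > /proc/sys/vm/swappiness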
......
......@@ -375,8 +375,6 @@ CFLAGS_KERNEL =
AFLAGS_KERNEL =
LDFLAGS_vmlinux =
CFLAGS_GCOV := -fprofile-arcs -ftest-coverage -fno-tree-loop-im $(call cc-disable-warning,maybe-uninitialized,)
CFLAGS_KCOV := $(call cc-option,-fsanitize-coverage=trace-pc,)
# Use USERINCLUDE when you must reference the UAPI directories only.
USERINCLUDE := \
......@@ -659,6 +657,7 @@ ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLA
KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO
endif
include scripts/Makefile.kcov
include scripts/Makefile.gcc-plugins
ifdef CONFIG_READABLE_ASM
......
......@@ -33,13 +33,6 @@
cpu_all_mask : \
&node_to_cpu_mask[node])
/*
* Returns the number of the node containing Node 'nid'.
* Not implemented here. Multi-level hierarchies detected with
* the help of node_distance().
*/
#define parent_node(nid) (nid)
/*
* Determines the node for a given pci bus
*/
......
......@@ -31,8 +31,8 @@ void foo(void)
DEFINE(SIGFRAME_SIZE, sizeof (struct sigframe));
DEFINE(UNW_FRAME_INFO_SIZE, sizeof (struct unw_frame_info));
BUILD_BUG_ON(sizeof(struct upid) != 32);
DEFINE(IA64_UPID_SHIFT, 5);
BUILD_BUG_ON(sizeof(struct upid) != 16);
DEFINE(IA64_UPID_SHIFT, 4);
BLANK();
......
......@@ -60,7 +60,7 @@ void bust_spinlocks(int yes)
void do_BUG(const char *file, int line)
{
bust_spinlocks(1);
printk(KERN_EMERG "------------[ cut here ]------------\n");
printk(KERN_EMERG CUT_HERE);
printk(KERN_EMERG "kernel BUG at %s:%d!\n", file, line);
}
......
......@@ -1093,7 +1093,7 @@ static int show_spu_loadavg(struct seq_file *s, void *private)
LOAD_INT(c), LOAD_FRAC(c),
count_active_contexts(),
atomic_read(&nr_spu_contexts),
task_active_pid_ns(current)->last_pid);
idr_get_cursor(&task_active_pid_ns(current)->idr));
return 0;
}
......
......@@ -104,6 +104,18 @@ static void error(char *x)
while(1); /* Halt */
}
unsigned long __stack_chk_guard;
void __stack_chk_guard_setup(void)
{
__stack_chk_guard = 0x000a0dff;
}
void __stack_chk_fail(void)
{
error("stack-protector: Kernel stack is corrupted\n");
}
#ifdef CONFIG_SUPERH64
#define stackalign 8
#else
......@@ -118,6 +130,8 @@ void decompress_kernel(void)
{
unsigned long output_addr;
__stack_chk_guard_setup();
#ifdef CONFIG_SUPERH64
output_addr = (CONFIG_MEMORY_START + 0x2000);
#else
......
......@@ -5,7 +5,6 @@
#ifdef CONFIG_NUMA
#define cpu_to_node(cpu) ((void)(cpu),0)
#define parent_node(node) ((void)(node),0)
#define cpumask_of_node(node) ((void)node, cpu_online_mask)
......
......@@ -11,8 +11,6 @@ static inline int cpu_to_node(int cpu)
return numa_cpu_lookup_table[cpu];
}
#define parent_node(node) (node)
#define cpumask_of_node(node) ((node) == -1 ? \
cpu_all_mask : \
&numa_cpumask_lookup_table[node])
......
......@@ -29,12 +29,6 @@ static inline int cpu_to_node(int cpu)
return cpu_2_node[cpu];
}
/*
* Returns the number of the node containing Node 'node'.
* This architecture is flat, so it is a pretty simple function!
*/
#define parent_node(node) (node)
/* Returns a bitmask of CPUs on Node 'node'. */
static inline const struct cpumask *cpumask_of_node(int node)
{
......
......@@ -63,9 +63,11 @@ void lkdtm_BUG(void)
BUG();
}
static int warn_counter;
void lkdtm_WARNING(void)
{
WARN_ON(1);
WARN(1, "Warning message trigger count: %d\n", warn_counter++);
}
void lkdtm_EXCEPTION(void)
......
......@@ -144,6 +144,7 @@ int pcmcia_badge4_init(struct sa1111_dev *dev)
sa11xx_drv_pcmcia_add_one);
}
#ifndef MODULE
static int __init pcmv_setup(char *s)
{
int v[4];
......@@ -158,3 +159,4 @@ static int __init pcmv_setup(char *s)
}
__setup("pcmv=", pcmv_setup);
#endif
......@@ -959,9 +959,10 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode,
nents = dma_map_sg(chan->device->dev,
req->sgt.sgl, req->sgt.nents, dir);
if (nents == -EFAULT) {
if (nents == 0) {
rmcd_error("Failed to map SG list");
return -EFAULT;
ret = -EFAULT;
goto err_pg;
}
ret = do_dma_request(req, xfer, sync, nents);
......
......@@ -458,7 +458,7 @@ static void idtg2_remove(struct rio_dev *rdev)
idtg2_sysfs(rdev, false);
}
static struct rio_device_id idtg2_id_table[] = {
static const struct rio_device_id idtg2_id_table[] = {
{RIO_DEVICE(RIO_DID_IDTCPS1848, RIO_VID_IDT)},
{RIO_DEVICE(RIO_DID_IDTCPS1616, RIO_VID_IDT)},
{RIO_DEVICE(RIO_DID_IDTVPS1616, RIO_VID_IDT)},
......
......@@ -348,7 +348,7 @@ static void idtg3_shutdown(struct rio_dev *rdev)
}
}
static struct rio_device_id idtg3_id_table[] = {
static const struct rio_device_id idtg3_id_table[] = {
{RIO_DEVICE(RIO_DID_IDTRXS1632, RIO_VID_IDT)},
{RIO_DEVICE(RIO_DID_IDTRXS2448, RIO_VID_IDT)},
{ 0, } /* terminate list */
......
......@@ -168,7 +168,7 @@ static void idtcps_remove(struct rio_dev *rdev)
spin_unlock(&rdev->rswitch->lock);
}
static struct rio_device_id idtcps_id_table[] = {
static const struct rio_device_id idtcps_id_table[] = {
{RIO_DEVICE(RIO_DID_IDTCPS6Q, RIO_VID_IDT)},
{RIO_DEVICE(RIO_DID_IDTCPS8, RIO_VID_IDT)},
{RIO_DEVICE(RIO_DID_IDTCPS10Q, RIO_VID_IDT)},
......
......@@ -169,7 +169,7 @@ static void tsi568_remove(struct rio_dev *rdev)
spin_unlock(&rdev->rswitch->lock);
}
static struct rio_device_id tsi568_id_table[] = {
static const struct rio_device_id tsi568_id_table[] = {
{RIO_DEVICE(RIO_DID_TSI568, RIO_VID_TUNDRA)},
{ 0, } /* terminate list */
};
......
......@@ -336,7 +336,7 @@ static void tsi57x_remove(struct rio_dev *rdev)
spin_unlock(&rdev->rswitch->lock);
}
static struct rio_device_id tsi57x_id_table[] = {
static const struct rio_device_id tsi57x_id_table[] = {
{RIO_DEVICE(RIO_DID_TSI572, RIO_VID_TUNDRA)},
{RIO_DEVICE(RIO_DID_TSI574, RIO_VID_TUNDRA)},
{RIO_DEVICE(RIO_DID_TSI577, RIO_VID_TUNDRA)},
......
......@@ -137,25 +137,6 @@ int watchdog_init_timeout(struct watchdog_device *wdd,
}
EXPORT_SYMBOL_GPL(watchdog_init_timeout);
static int watchdog_reboot_notifier(struct notifier_block *nb,
unsigned long code, void *data)
{
struct watchdog_device *wdd = container_of(nb, struct watchdog_device,
reboot_nb);
if (code == SYS_DOWN || code == SYS_HALT) {
if (watchdog_active(wdd)) {
int ret;
ret = wdd->ops->stop(wdd);
if (ret)
return NOTIFY_BAD;
}
}
return NOTIFY_DONE;
}
static int watchdog_restart_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
......@@ -244,19 +225,6 @@ static int __watchdog_register_device(struct watchdog_device *wdd)
}
}
if (test_bit(WDOG_STOP_ON_REBOOT, &wdd->status)) {
wdd->reboot_nb.notifier_call = watchdog_reboot_notifier;
ret = register_reboot_notifier(&wdd->reboot_nb);
if (ret) {
pr_err("watchdog%d: Cannot register reboot notifier (%d)\n",
wdd->id, ret);
watchdog_dev_unregister(wdd);
ida_simple_remove(&watchdog_ida, wdd->id);
return ret;
}
}
if (wdd->ops->restart) {
wdd->restart_nb.notifier_call = watchdog_restart_notifier;
......@@ -302,9 +270,6 @@ static void __watchdog_unregister_device(struct watchdog_device *wdd)
if (wdd->ops->restart)
unregister_restart_handler(&wdd->restart_nb);
if (test_bit(WDOG_STOP_ON_REBOOT, &wdd->status))
unregister_reboot_notifier(&wdd->reboot_nb);
watchdog_dev_unregister(wdd);
ida_simple_remove(&watchdog_ida, wdd->id);
}
......
......@@ -42,6 +42,7 @@
#include <linux/miscdevice.h> /* For handling misc devices */
#include <linux/module.h> /* For module stuff/... */
#include <linux/mutex.h> /* For mutexes */
#include <linux/reboot.h> /* For reboot notifier */
#include <linux/slab.h> /* For memory functions */
#include <linux/types.h> /* For standard types (like size_t) */
#include <linux/watchdog.h> /* For watchdog specific items */
......@@ -1016,6 +1017,25 @@ static struct class watchdog_class = {
.dev_groups = wdt_groups,
};
static int watchdog_reboot_notifier(struct notifier_block *nb,
unsigned long code, void *data)
{
struct watchdog_device *wdd;
wdd = container_of(nb, struct watchdog_device, reboot_nb);
if (code == SYS_DOWN || code == SYS_HALT) {
if (watchdog_active(wdd)) {
int ret;
ret = wdd->ops->stop(wdd);
if (ret)
return NOTIFY_BAD;
}
}
return NOTIFY_DONE;
}
/*
* watchdog_dev_register: register a watchdog device
* @wdd: watchdog device
......@@ -1049,6 +1069,18 @@ int watchdog_dev_register(struct watchdog_device *wdd)
if (ret) {
device_destroy(&watchdog_class, devno);
watchdog_cdev_unregister(wdd);
return ret;
}
if (test_bit(WDOG_STOP_ON_REBOOT, &wdd->status)) {
wdd->reboot_nb.notifier_call = watchdog_reboot_notifier;
ret = devm_register_reboot_notifier(dev, &wdd->reboot_nb);
if (ret) {
pr_err("watchdog%d: Cannot register reboot notifier (%d)\n",
wdd->id, ret);
watchdog_dev_unregister(wdd);
}
}
return ret;
......
......@@ -81,7 +81,8 @@ static int autofs4_write(struct autofs_sb_info *sbi,
spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
return (bytes > 0);
/* if 'wr' returned 0 (impossible) we assume -EIO (safe) */
return bytes == 0 ? 0 : wr < 0 ? wr : -EIO;
}
static void autofs4_notify_daemon(struct autofs_sb_info *sbi,
......@@ -95,6 +96,7 @@ static void autofs4_notify_daemon(struct autofs_sb_info *sbi,
} pkt;
struct file *pipe = NULL;
size_t pktsz;
int ret;
pr_debug("wait id = 0x%08lx, name = %.*s, type=%d\n",
(unsigned long) wq->wait_queue_token,
......@@ -169,7 +171,18 @@ static void autofs4_notify_daemon(struct autofs_sb_info *sbi,
mutex_unlock(&sbi->wq_mutex);
if (autofs4_write(sbi, pipe, &pkt, pktsz))
switch (ret = autofs4_write(sbi, pipe, &pkt, pktsz)) {
case 0:
break;
case -ENOMEM:
case -ERESTARTSYS:
/* Just fail this one */
autofs4_wait_release(sbi, wq->wait_queue_token, ret);
break;
default:
autofs4_catatonic_mode(sbi);
break;
}
fput(pipe);
}
......
......@@ -276,12 +276,6 @@ static DEFINE_MUTEX(epmutex);
/* Used to check for epoll file descriptor inclusion loops */
static struct nested_calls poll_loop_ncalls;
/* Used for safe wake up implementation */
static struct nested_calls poll_safewake_ncalls;
/* Used to call file's f_op->poll() under the nested calls boundaries */
static struct nested_calls poll_readywalk_ncalls;
/* Slab cache used to allocate "struct epitem" */
static struct kmem_cache *epi_cache __read_mostly;
......@@ -551,40 +545,21 @@ static int ep_call_nested(struct nested_calls *ncalls, int max_nests,
* this special case of epoll.
*/
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static inline void ep_wake_up_nested(wait_queue_head_t *wqueue,
unsigned long events, int subclass)
static struct nested_calls poll_safewake_ncalls;
static int ep_poll_wakeup_proc(void *priv, void *cookie, int call_nests)
{
unsigned long flags;
wait_queue_head_t *wqueue = (wait_queue_head_t *)cookie;
spin_lock_irqsave_nested(&wqueue->lock, flags, subclass);
wake_up_locked_poll(wqueue, events);
spin_lock_irqsave_nested(&wqueue->lock, flags, call_nests + 1);
wake_up_locked_poll(wqueue, POLLIN);
spin_unlock_irqrestore(&wqueue->lock, flags);
}
#else
static inline void ep_wake_up_nested(wait_queue_head_t *wqueue,
unsigned long events, int subclass)
{
wake_up_poll(wqueue, events);
}
#endif
static int ep_poll_wakeup_proc(void *priv, void *cookie, int call_nests)
{
ep_wake_up_nested((wait_queue_head_t *) cookie, POLLIN,
1 + call_nests);
return 0;
}
/*
* Perform a safe wake up of the poll wait list. The problem is that
* with the new callback'd wake up system, it is possible that the
* poll callback is reentered from inside the call to wake_up() done
* on the poll wait queue head. The rule is that we cannot reenter the
* wake up code from the same task more than EP_MAX_NESTS times,
* and we cannot reenter the same wait queue head at all. This will
* enable to have a hierarchy of epoll file descriptor of no more than
* EP_MAX_NESTS deep.
*/
static void ep_poll_safewake(wait_queue_head_t *wq)
{
int this_cpu = get_cpu();
......@@ -595,6 +570,15 @@ static void ep_poll_safewake(wait_queue_head_t *wq)
put_cpu();
}
#else
static void ep_poll_safewake(wait_queue_head_t *wq)
{
wake_up_poll(wq, POLLIN);
}
#endif
static void ep_remove_wait_queue(struct eppoll_entry *pwq)
{
wait_queue_head_t *whead;
......@@ -880,11 +864,33 @@ static int ep_eventpoll_release(struct inode *inode, struct file *file)
return 0;
}
static inline unsigned int ep_item_poll(struct epitem *epi, poll_table *pt)
static int ep_read_events_proc(struct eventpoll *ep, struct list_head *head,
void *priv);
static void ep_ptable_queue_proc(struct file *file, wait_queue_head_t *whead,
poll_table *pt);
/*
* Differs from ep_eventpoll_poll() in that internal callers already have
* the ep->mtx so we need to start from depth=1, such that mutex_lock_nested()
* is correctly annotated.
*/
static unsigned int ep_item_poll(struct epitem *epi, poll_table *pt, int depth)
{
struct eventpoll *ep;
bool locked;
pt->_key = epi->event.events;
if (!is_file_epoll(epi->ffd.file))
return epi->ffd.file->f_op->poll(epi->ffd.file, pt) &
epi->event.events;
ep = epi->ffd.file->private_data;
poll_wait(epi->ffd.file, &ep->poll_wait, pt);
locked = pt && (pt->_qproc == ep_ptable_queue_proc);
return epi->ffd.file->f_op->poll(epi->ffd.file, pt) & epi->event.events;
return ep_scan_ready_list(epi->ffd.file->private_data,
ep_read_events_proc, &depth, depth,
locked) & epi->event.events;
}
static int ep_read_events_proc(struct eventpoll *ep, struct list_head *head,
......@@ -892,13 +898,15 @@ static int ep_read_events_proc(struct eventpoll *ep, struct list_head *head,
{