// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * ec.c - ACPI Embedded Controller Driver (v3)
 *
 * Copyright (C) 2001-2015 Intel Corporation
 *   Author: 2014, 2015 Lv Zheng <lv.zheng@intel.com>
 *           2006, 2007 Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>
 *           2006       Denis Sadykov <denis.m.sadykov@intel.com>
 *           2004       Luming Yu <luming.yu@intel.com>
 *           2001, 2002 Andy Grover <andrew.grover@intel.com>
 *           2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2008      Alexey Starikovskiy <astarikovskiy@suse.de>
 */

/* Uncomment next line to get verbose printout */
/* #define DEBUG */
#define pr_fmt(fmt) "ACPI: EC: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <asm/io.h>

#define ACPI_EC_CLASS			"embedded_controller"
#define ACPI_EC_DEVICE_NAME		"Embedded Controller"
#define ACPI_EC_FILE_INFO		"info"
/* EC status register */
#define ACPI_EC_FLAG_OBF	0x01	/* Output buffer full */
#define ACPI_EC_FLAG_IBF	0x02	/* Input buffer full */
#define ACPI_EC_FLAG_CMD	0x08	/* Input buffer contains a command */
#define ACPI_EC_FLAG_BURST	0x10	/* burst mode */
#define ACPI_EC_FLAG_SCI	0x20	/* EC-SCI occurred */
/*
 * The SCI_EVT clearing timing is not defined by the ACPI specification.
 * This leads to lots of practical timing issues for the host EC driver.
 * The following variations are defined (from the target EC firmware's
 * perspective):
 * STATUS: After indicating SCI_EVT edge triggered IRQ to the host, the
 *         target can clear SCI_EVT at any time so long as the host can see
 *         the indication by reading the status register (EC_SC). So the
 *         host should re-check SCI_EVT after the first time the SCI_EVT
 *         indication is seen, which is the same time the query request
 *         (QR_EC) is written to the command register (EC_CMD). SCI_EVT set
 *         at any later time could indicate another event. Normally such
 *         EC firmware implements an event queue and will return 0x00 to
 *         indicate "no outstanding event".
 * QUERY: After seeing the query request (QR_EC) written to the command
 *        register (EC_CMD) by the host and having prepared the responding
 *        event value in the data register (EC_DATA), the target can safely
 *        clear SCI_EVT because the target can confirm that the current
 *        event is being handled by the host. The host then should check
 *        SCI_EVT right after reading the event response from the data
 *        register (EC_DATA).
 * EVENT: After seeing the event response read from the data register
 *        (EC_DATA) by the host, the target can clear SCI_EVT. As the
 *        target requires time to notice the change in the data register
 *        (EC_DATA), the host may be required to wait additional guarding
 *        time before checking the SCI_EVT again. Such guarding may not be
 *        necessary if the host is notified via another IRQ.
 */
#define ACPI_EC_EVT_TIMING_STATUS	0x00
#define ACPI_EC_EVT_TIMING_QUERY	0x01
#define ACPI_EC_EVT_TIMING_EVENT	0x02
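/*
 * Illustrative sketch (not part of the driver): the bare QR_EC sequence,
 * annotated with the earliest point at which each timing model above
 * allows SCI_EVT to be re-checked. The helper below is hypothetical;
 * the real state machine lives in advance_transaction().
 */
#if 0
static u8 qr_ec_timing_sketch(struct acpi_ec *ec)
{
	u8 event;

	/* Write QR_EC (0x84) to the command register (EC_CMD). */
	outb(ACPI_EC_COMMAND_QUERY, ec->command_addr);
	/* STATUS: firmware may clear SCI_EVT from this point on. */
	event = inb(ec->data_addr);
	/* QUERY: firmware clears SCI_EVT once the event byte is ready. */
	/*
	 * EVENT: firmware clears SCI_EVT only after it notices the read
	 * above, so the host must wait a guarding period (see ec_guard())
	 * before trusting SCI_EVT again.
	 */
	return event;	/* 0x00 means "no outstanding event" */
}
#endif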
enum ec_command {
	ACPI_EC_COMMAND_READ = 0x80,
	ACPI_EC_COMMAND_WRITE = 0x81,
	ACPI_EC_BURST_ENABLE = 0x82,
	ACPI_EC_BURST_DISABLE = 0x83,
	ACPI_EC_COMMAND_QUERY = 0x84,
};
#define ACPI_EC_DELAY		500	/* Wait 500ms max. during EC ops */
#define ACPI_EC_UDELAY_GLK	1000	/* Wait 1ms max. to get global lock */
#define ACPI_EC_UDELAY_POLL	550	/* Wait 550us for EC transaction polling */
#define ACPI_EC_CLEAR_MAX	100	/* Maximum number of events to query
					 * when trying to clear the EC */
#define ACPI_EC_MAX_QUERIES	16	/* Maximum number of parallel queries */
enum {
	EC_FLAGS_QUERY_ENABLED,		/* Query is enabled */
	EC_FLAGS_QUERY_PENDING,		/* Query is pending */
	EC_FLAGS_QUERY_GUARDING,	/* Guard for SCI_EVT check */
	EC_FLAGS_EVENT_HANDLER_INSTALLED,	/* Event handler installed */
	EC_FLAGS_EC_HANDLER_INSTALLED,	/* OpReg handler installed */
	EC_FLAGS_QUERY_METHODS_INSTALLED,	/* _Qxx handlers installed */
	EC_FLAGS_STARTED,		/* Driver is started */
	EC_FLAGS_STOPPED,		/* Driver is stopped */
	EC_FLAGS_EVENTS_MASKED,		/* Events masked */
};
#define ACPI_EC_COMMAND_POLL		0x01	/* Available for command byte */
#define ACPI_EC_COMMAND_COMPLETE	0x02	/* Completed last byte */

/* ec.c is compiled in acpi namespace so this shows up as acpi.ec_delay param */
static unsigned int ec_delay __read_mostly = ACPI_EC_DELAY;
module_param(ec_delay, uint, 0644);
MODULE_PARM_DESC(ec_delay, "Timeout(ms) waited until an EC command completes");

static unsigned int ec_max_queries __read_mostly = ACPI_EC_MAX_QUERIES;
module_param(ec_max_queries, uint, 0644);
MODULE_PARM_DESC(ec_max_queries, "Maximum parallel _Qxx evaluations");

static bool ec_busy_polling __read_mostly;
module_param(ec_busy_polling, bool, 0644);
MODULE_PARM_DESC(ec_busy_polling, "Use busy polling to advance EC transaction");

static unsigned int ec_polling_guard __read_mostly = ACPI_EC_UDELAY_POLL;
module_param(ec_polling_guard, uint, 0644);
MODULE_PARM_DESC(ec_polling_guard, "Guard time(us) between EC accesses in polling modes");
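/*
 * Example (not from the source): since ec.c is compiled in the acpi
 * namespace, the polling knobs above are set on the kernel command line
 * as, e.g.:
 *   acpi.ec_busy_polling=1 acpi.ec_polling_guard=1000
 */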
static unsigned int ec_event_clearing __read_mostly = ACPI_EC_EVT_TIMING_QUERY;

/*
 * If the number of false interrupts per one transaction exceeds
 * this threshold, the driver assumes that a GPE storm has occurred
 * and disables the GPE for normal transactions.
 */
static unsigned int ec_storm_threshold __read_mostly = 8;
module_param(ec_storm_threshold, uint, 0644);
MODULE_PARM_DESC(ec_storm_threshold, "Maximum number of false GPEs not considered as a GPE storm");

static bool ec_freeze_events __read_mostly;
module_param(ec_freeze_events, bool, 0644);
MODULE_PARM_DESC(ec_freeze_events, "Disable event handling during suspend/resume");
static bool ec_no_wakeup __read_mostly;
module_param(ec_no_wakeup, bool, 0644);
MODULE_PARM_DESC(ec_no_wakeup, "Do not wake up from suspend-to-idle");
struct acpi_ec_query_handler {
	struct list_head node;
	acpi_ec_query_func func;
	acpi_handle handle;
	void *data;
	u8 query_bit;
	struct kref kref;
};

struct transaction {
	const u8 *wdata;
	u8 *rdata;
	unsigned short irq_count;
	u8 command;
	u8 wi;
	u8 ri;
	u8 wlen;
	u8 rlen;
	u8 flags;
};

struct acpi_ec_query {
	struct transaction transaction;
	struct work_struct work;
	struct acpi_ec_query_handler *handler;
};
static int acpi_ec_query(struct acpi_ec *ec, u8 *data);
static void advance_transaction(struct acpi_ec *ec);
static void acpi_ec_event_handler(struct work_struct *work);
static void acpi_ec_event_processor(struct work_struct *work);

struct acpi_ec *first_ec;
EXPORT_SYMBOL(first_ec);

static struct acpi_ec *boot_ec;
static bool boot_ec_is_ecdt;
static struct workqueue_struct *ec_query_wq;

static int EC_FLAGS_QUERY_HANDSHAKE; /* Needs QR_EC issued when SCI_EVT set */
static int EC_FLAGS_CORRECT_ECDT; /* Needs ECDT port address correction */
static int EC_FLAGS_IGNORE_DSDT_GPE; /* Needs ECDT GPE as correction setting */
static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */
/* --------------------------------------------------------------------------
 *                           Logging/Debugging
 * -------------------------------------------------------------------------- */

/*
 * Splitters used by the developers to track the boundary of the EC
 * handling processes.
 */
#ifdef DEBUG
#define EC_DBG_SEP	" "
#define EC_DBG_DRV	"+++++"
#define EC_DBG_STM	"====="
#define EC_DBG_REQ	"*****"
#define EC_DBG_EVT	"#####"
#else
#define EC_DBG_SEP	""
#define EC_DBG_DRV
#define EC_DBG_STM
#define EC_DBG_REQ
#define EC_DBG_EVT
#endif
#define ec_log_raw(fmt, ...) \
	pr_info(fmt "\n", ##__VA_ARGS__)
#define ec_dbg_raw(fmt, ...) \
	pr_debug(fmt "\n", ##__VA_ARGS__)
#define ec_log(filter, fmt, ...) \
	ec_log_raw(filter EC_DBG_SEP fmt EC_DBG_SEP filter, ##__VA_ARGS__)
#define ec_dbg(filter, fmt, ...) \
	ec_dbg_raw(filter EC_DBG_SEP fmt EC_DBG_SEP filter, ##__VA_ARGS__)

#define ec_log_drv(fmt, ...) \
	ec_log(EC_DBG_DRV, fmt, ##__VA_ARGS__)
#define ec_dbg_drv(fmt, ...) \
	ec_dbg(EC_DBG_DRV, fmt, ##__VA_ARGS__)
#define ec_dbg_stm(fmt, ...) \
	ec_dbg(EC_DBG_STM, fmt, ##__VA_ARGS__)
#define ec_dbg_req(fmt, ...) \
	ec_dbg(EC_DBG_REQ, fmt, ##__VA_ARGS__)
#define ec_dbg_evt(fmt, ...) \
	ec_dbg(EC_DBG_EVT, fmt, ##__VA_ARGS__)
#define ec_dbg_ref(ec, fmt, ...) \
	ec_dbg_raw("%lu: " fmt, ec->reference_count, ## __VA_ARGS__)
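/*
 * Example (assuming DEBUG is defined, so EC_DBG_SEP/EC_DBG_DRV are
 * non-empty): ec_log_drv("EC started") expands through pr_info() and
 * pr_fmt() to a line like:
 *   ACPI: EC: +++++ EC started +++++
 */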
/* --------------------------------------------------------------------------
 *                           Device Flags
 * -------------------------------------------------------------------------- */

static bool acpi_ec_started(struct acpi_ec *ec)
{
	return test_bit(EC_FLAGS_STARTED, &ec->flags) &&
	       !test_bit(EC_FLAGS_STOPPED, &ec->flags);
}
static bool acpi_ec_event_enabled(struct acpi_ec *ec)
{
	/*
	 * There is an OSPM early stage logic. During the early stages
	 * (boot/resume), OSPMs shouldn't enable the event handling; only
	 * EC transactions are allowed to be performed.
	 */
	if (!test_bit(EC_FLAGS_QUERY_ENABLED, &ec->flags))
		return false;
	/*
	 * However, disabling the event handling is experimental for the
	 * late stage (suspend), and is controlled by the boot parameter
	 * "ec_freeze_events":
	 * 1. true:  The EC event handling is disabled before entering
	 *           the noirq stage.
	 * 2. false: The EC event handling is automatically disabled as
	 *           soon as the EC driver is stopped.
	 */
	if (ec_freeze_events)
		return acpi_ec_started(ec);
	else
		return test_bit(EC_FLAGS_STARTED, &ec->flags);
}
static bool acpi_ec_flushed(struct acpi_ec *ec)
{
	return ec->reference_count == 1;
}

/* --------------------------------------------------------------------------
 *                           EC Registers
 * -------------------------------------------------------------------------- */
static inline u8 acpi_ec_read_status(struct acpi_ec *ec)
{
	u8 x = inb(ec->command_addr);

	ec_dbg_raw("EC_SC(R) = 0x%2.2x "
		   "SCI_EVT=%d BURST=%d CMD=%d IBF=%d OBF=%d",
		   x,
		   !!(x & ACPI_EC_FLAG_SCI),
		   !!(x & ACPI_EC_FLAG_BURST),
		   !!(x & ACPI_EC_FLAG_CMD),
		   !!(x & ACPI_EC_FLAG_IBF),
		   !!(x & ACPI_EC_FLAG_OBF));
	return x;
}

static inline u8 acpi_ec_read_data(struct acpi_ec *ec)
{
	u8 x = inb(ec->data_addr);

	ec->timestamp = jiffies;
	ec_dbg_raw("EC_DATA(R) = 0x%2.2x", x);
	return x;
}

static inline void acpi_ec_write_cmd(struct acpi_ec *ec, u8 command)
{
	ec_dbg_raw("EC_SC(W) = 0x%2.2x", command);
	outb(command, ec->command_addr);
	ec->timestamp = jiffies;
}

static inline void acpi_ec_write_data(struct acpi_ec *ec, u8 data)
{
	ec_dbg_raw("EC_DATA(W) = 0x%2.2x", data);
	outb(data, ec->data_addr);
	ec->timestamp = jiffies;
}
#if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG)
static const char *acpi_ec_cmd_string(u8 cmd)
{
	switch (cmd) {
	case 0x80:
		return "RD_EC";
	case 0x81:
		return "WR_EC";
	case 0x82:
		return "BE_EC";
	case 0x83:
		return "BD_EC";
	case 0x84:
		return "QR_EC";
	}
	return "UNKNOWN";
}
#else
#define acpi_ec_cmd_string(cmd)		"UNDEF"
#endif

/* --------------------------------------------------------------------------
 *                           GPE Registers
 * -------------------------------------------------------------------------- */
static inline bool acpi_ec_is_gpe_raised(struct acpi_ec *ec)
{
	acpi_event_status gpe_status = 0;

	(void)acpi_get_gpe_status(NULL, ec->gpe, &gpe_status);
	return (gpe_status & ACPI_EVENT_FLAG_STATUS_SET) ? true : false;
}

static inline void acpi_ec_enable_gpe(struct acpi_ec *ec, bool open)
{
	if (open)
		acpi_enable_gpe(NULL, ec->gpe);
	else {
		BUG_ON(ec->reference_count < 1);
		acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE);
	}
	if (acpi_ec_is_gpe_raised(ec)) {
		/*
		 * On some platforms, EN=1 writes cannot trigger GPE. So
		 * software needs to manually trigger a pseudo GPE event on
		 * EN=1 writes.
		 */
		ec_dbg_raw("Polling quirk");
		advance_transaction(ec);
	}
}

static inline void acpi_ec_disable_gpe(struct acpi_ec *ec, bool close)
{
	if (close)
		acpi_disable_gpe(NULL, ec->gpe);
	else {
		BUG_ON(ec->reference_count < 1);
		acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_DISABLE);
	}
}
static inline void acpi_ec_clear_gpe(struct acpi_ec *ec)
{
	/*
	 * GPE STS is a W1C register, which means:
	 * 1. Software can clear it without worrying about clearing other
	 *    GPEs' STS bits when the hardware sets them in parallel.
	 * 2. As long as software can ensure only clearing it when it is
	 *    set, hardware won't set it in parallel.
	 * So software can clear the GPE in any context.
	 * Warning: do not move the check into advance_transaction() as the
	 * EC commands will be sent without GPE raised.
	 */
	if (!acpi_ec_is_gpe_raised(ec))
		return;
	acpi_clear_gpe(NULL, ec->gpe);
}
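/*
 * Worked example (illustrative, not driver code): with STS = 0b0110
 * (GPE1 and GPE2 both pending), a W1C write of 0b0010 clears only GPE1
 * and leaves GPE2 pending, whereas a read-modify-write of the whole
 * register could race with hardware setting GPE2 and lose it.
 */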
/* --------------------------------------------------------------------------
 *                           Transaction Management
 * -------------------------------------------------------------------------- */
static void acpi_ec_submit_request(struct acpi_ec *ec)
{
	ec->reference_count++;
	if (test_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags) &&
	    ec->gpe >= 0 && ec->reference_count == 1)
		acpi_ec_enable_gpe(ec, true);
}

static void acpi_ec_complete_request(struct acpi_ec *ec)
{
	bool flushed = false;

	ec->reference_count--;
	if (test_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags) &&
	    ec->gpe >= 0 && ec->reference_count == 0)
		acpi_ec_disable_gpe(ec, true);
	flushed = acpi_ec_flushed(ec);
	if (flushed)
		wake_up(&ec->wait);
}
static void acpi_ec_mask_events(struct acpi_ec *ec)
{
	if (!test_bit(EC_FLAGS_EVENTS_MASKED, &ec->flags)) {
		if (ec->gpe >= 0)
			acpi_ec_disable_gpe(ec, false);
		else
			disable_irq_nosync(ec->irq);

		ec_dbg_drv("Polling enabled");
		set_bit(EC_FLAGS_EVENTS_MASKED, &ec->flags);
	}
}

static void acpi_ec_unmask_events(struct acpi_ec *ec)
{
	if (test_bit(EC_FLAGS_EVENTS_MASKED, &ec->flags)) {
		clear_bit(EC_FLAGS_EVENTS_MASKED, &ec->flags);
		if (ec->gpe >= 0)
			acpi_ec_enable_gpe(ec, false);
		else
			enable_irq(ec->irq);

		ec_dbg_drv("Polling disabled");
	}
}
/*
 * acpi_ec_submit_flushable_request() - Increase the reference count unless
 *                                      the flush operation is not in
 *                                      progress
 * @ec: the EC device
 *
 * This function must be used before taking a new action that should hold
 * the reference count.  If this function returns false, then the action
 * must be discarded or it will prevent the flush operation from being
 * completed.
 */
static bool acpi_ec_submit_flushable_request(struct acpi_ec *ec)
{
	if (!acpi_ec_started(ec))
		return false;
	acpi_ec_submit_request(ec);
	return true;
}
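/*
 * Typical usage pattern for the reference counting above (a sketch of
 * the logic in acpi_ec_transaction_unlocked(), not additional driver
 * code):
 */
#if 0
	if (!acpi_ec_submit_flushable_request(ec))
		return -EINVAL;	/* driver stopped, action must be discarded */
	/* ... perform the action that relies on the reference ... */
	acpi_ec_complete_request(ec);	/* may complete a pending flush */
#endif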
static void acpi_ec_submit_query(struct acpi_ec *ec)
{
	acpi_ec_mask_events(ec);
	if (!acpi_ec_event_enabled(ec))
		return;
	if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) {
		ec_dbg_evt("Command(%s) submitted/blocked",
			   acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
		ec->nr_pending_queries++;
		schedule_work(&ec->work);
	}
}

static void acpi_ec_complete_query(struct acpi_ec *ec)
{
	if (test_and_clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags))
		ec_dbg_evt("Command(%s) unblocked",
			   acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
	acpi_ec_unmask_events(ec);
}
static inline void __acpi_ec_enable_event(struct acpi_ec *ec)
{
	if (!test_and_set_bit(EC_FLAGS_QUERY_ENABLED, &ec->flags))
		ec_log_drv("event unblocked");
	/*
	 * Unconditionally invoke this once after enabling the event
	 * handling mechanism to detect the pending events.
	 */
	advance_transaction(ec);
}

static inline void __acpi_ec_disable_event(struct acpi_ec *ec)
{
	if (test_and_clear_bit(EC_FLAGS_QUERY_ENABLED, &ec->flags))
		ec_log_drv("event blocked");
}
/*
 * Process _Q events that might have accumulated in the EC.
 * Run with locked ec mutex.
 */
static void acpi_ec_clear(struct acpi_ec *ec)
{
	int i, status;
	u8 value = 0;

	for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) {
		status = acpi_ec_query(ec, &value);
		if (status || !value)
			break;
	}
	if (unlikely(i == ACPI_EC_CLEAR_MAX))
		pr_warn("Warning: Maximum of %d stale EC events cleared\n", i);
	else
		pr_info("%d stale EC events cleared\n", i);
}
static void acpi_ec_enable_event(struct acpi_ec *ec)
{
	unsigned long flags;

	spin_lock_irqsave(&ec->lock, flags);
	if (acpi_ec_started(ec))
		__acpi_ec_enable_event(ec);
	spin_unlock_irqrestore(&ec->lock, flags);

	/* Drain additional events if hardware requires that */
	if (EC_FLAGS_CLEAR_ON_RESUME)
		acpi_ec_clear(ec);
}
#ifdef CONFIG_PM_SLEEP
static bool acpi_ec_query_flushed(struct acpi_ec *ec)
{
	bool flushed;
	unsigned long flags;

	spin_lock_irqsave(&ec->lock, flags);
	flushed = !ec->nr_pending_queries;
	spin_unlock_irqrestore(&ec->lock, flags);
	return flushed;
}

static void __acpi_ec_flush_event(struct acpi_ec *ec)
{
	/*
	 * When ec_freeze_events is true, we need to flush events in
	 * the proper position before entering the noirq stage.
	 */
	wait_event(ec->wait, acpi_ec_query_flushed(ec));
	if (ec_query_wq)
		flush_workqueue(ec_query_wq);
}
static void acpi_ec_disable_event(struct acpi_ec *ec)
{
	unsigned long flags;

	spin_lock_irqsave(&ec->lock, flags);
	__acpi_ec_disable_event(ec);
	spin_unlock_irqrestore(&ec->lock, flags);
	__acpi_ec_flush_event(ec);
}

void acpi_ec_flush_work(void)
{
	if (first_ec)
		__acpi_ec_flush_event(first_ec);

	flush_scheduled_work();
}
#endif /* CONFIG_PM_SLEEP */
static bool acpi_ec_guard_event(struct acpi_ec *ec)
{
	bool guarded = true;
	unsigned long flags;

	spin_lock_irqsave(&ec->lock, flags);
	/*
	 * If firmware SCI_EVT clearing timing is "event", we actually
	 * don't know when the SCI_EVT will be cleared by firmware after
	 * evaluating _Qxx, so we need to re-check SCI_EVT after waiting an
	 * acceptable period.
	 *
	 * The guarding period begins when EC_FLAGS_QUERY_PENDING is
	 * flagged, which means SCI_EVT check has just been performed.
	 * But if the current transaction is ACPI_EC_COMMAND_QUERY, the
	 * guarding should have already been performed (via
	 * EC_FLAGS_QUERY_GUARDING) and should not be applied so that the
	 * ACPI_EC_COMMAND_QUERY transaction can be transitioned into
	 * ACPI_EC_COMMAND_POLL state immediately.
	 */
	if (ec_event_clearing == ACPI_EC_EVT_TIMING_STATUS ||
	    ec_event_clearing == ACPI_EC_EVT_TIMING_QUERY ||
	    !test_bit(EC_FLAGS_QUERY_PENDING, &ec->flags) ||
	    (ec->curr && ec->curr->command == ACPI_EC_COMMAND_QUERY))
		guarded = false;
	spin_unlock_irqrestore(&ec->lock, flags);
	return guarded;
}
static int ec_transaction_polled(struct acpi_ec *ec)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&ec->lock, flags);
	if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_POLL))
		ret = 1;
	spin_unlock_irqrestore(&ec->lock, flags);
	return ret;
}

static int ec_transaction_completed(struct acpi_ec *ec)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&ec->lock, flags);
	if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_COMPLETE))
		ret = 1;
	spin_unlock_irqrestore(&ec->lock, flags);
	return ret;
}
static inline void ec_transaction_transition(struct acpi_ec *ec, unsigned long flag)
{
	ec->curr->flags |= flag;
	if (ec->curr->command == ACPI_EC_COMMAND_QUERY) {
		if (ec_event_clearing == ACPI_EC_EVT_TIMING_STATUS &&
		    flag == ACPI_EC_COMMAND_POLL)
			acpi_ec_complete_query(ec);
		if (ec_event_clearing == ACPI_EC_EVT_TIMING_QUERY &&
		    flag == ACPI_EC_COMMAND_COMPLETE)
			acpi_ec_complete_query(ec);
		if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT &&
		    flag == ACPI_EC_COMMAND_COMPLETE)
			set_bit(EC_FLAGS_QUERY_GUARDING, &ec->flags);
	}
}
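/*
 * Summary of the transitions above, per assumed SCI_EVT clearing timing:
 *   STATUS: unblock QR_EC submission when the query is accepted (POLL).
 *   QUERY:  unblock when the query transaction completes (COMPLETE).
 *   EVENT:  set EC_FLAGS_QUERY_GUARDING on completion; the unblocking is
 *           deferred to advance_transaction() after the guarding period.
 */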
static void advance_transaction(struct acpi_ec *ec)
{
	struct transaction *t;
	u8 status;
	bool wakeup = false;

	ec_dbg_stm("%s (%d)", in_interrupt() ? "IRQ" : "TASK",
		   smp_processor_id());
	/*
	 * By always clearing STS before handling all indications, we can
	 * ensure a hardware STS 0->1 change after this clearing can always
	 * trigger a GPE interrupt.
	 */
	if (ec->gpe >= 0)
		acpi_ec_clear_gpe(ec);

	status = acpi_ec_read_status(ec);
	t = ec->curr;
	/*
	 * If another IRQ or a guarded polling mode advancement is detected,
	 * the next QR_EC submission is then allowed.
	 */
	if (!t || !(t->flags & ACPI_EC_COMMAND_POLL)) {
		if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT &&
		    (!ec->nr_pending_queries ||
		     test_bit(EC_FLAGS_QUERY_GUARDING, &ec->flags))) {
			clear_bit(EC_FLAGS_QUERY_GUARDING, &ec->flags);
			acpi_ec_complete_query(ec);
		}
	}
	if (!t)
		goto err;
	if (t->flags & ACPI_EC_COMMAND_POLL) {
		if (t->wlen > t->wi) {
			if ((status & ACPI_EC_FLAG_IBF) == 0)
				acpi_ec_write_data(ec, t->wdata[t->wi++]);
			else
				goto err;
		} else if (t->rlen > t->ri) {
			if ((status & ACPI_EC_FLAG_OBF) == 1) {
				t->rdata[t->ri++] = acpi_ec_read_data(ec);
				if (t->rlen == t->ri) {
					ec_transaction_transition(ec, ACPI_EC_COMMAND_COMPLETE);
					if (t->command == ACPI_EC_COMMAND_QUERY)
						ec_dbg_evt("Command(%s) completed by hardware",
							   acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
					wakeup = true;
				}
			} else
				goto err;
		} else if (t->wlen == t->wi &&
			   (status & ACPI_EC_FLAG_IBF) == 0) {
			ec_transaction_transition(ec, ACPI_EC_COMMAND_COMPLETE);
			wakeup = true;
		}
		goto out;
	} else {
		if (EC_FLAGS_QUERY_HANDSHAKE &&
		    !(status & ACPI_EC_FLAG_SCI) &&
		    (t->command == ACPI_EC_COMMAND_QUERY)) {
			ec_transaction_transition(ec, ACPI_EC_COMMAND_POLL);
			t->rdata[t->ri++] = 0x00;
			ec_transaction_transition(ec, ACPI_EC_COMMAND_COMPLETE);
			ec_dbg_evt("Command(%s) completed by software",
				   acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
			wakeup = true;
		} else if ((status & ACPI_EC_FLAG_IBF) == 0) {
			acpi_ec_write_cmd(ec, t->command);
			ec_transaction_transition(ec, ACPI_EC_COMMAND_POLL);
		} else
			goto err;
		goto out;
	}
err:
	/*
	 * If the SCI bit is set, don't treat this as a false IRQ;
	 * otherwise an unhandled IRQ would be counted as a false one.
	 */
	if (!(status & ACPI_EC_FLAG_SCI)) {
		if (in_interrupt() && t) {
			if (t->irq_count < ec_storm_threshold)
				++t->irq_count;
			/* Allow triggering on 0 threshold */
			if (t->irq_count == ec_storm_threshold)
				acpi_ec_mask_events(ec);
		}
	}
out:
	if (status & ACPI_EC_FLAG_SCI)
		acpi_ec_submit_query(ec);
	if (wakeup && in_interrupt())
		wake_up(&ec->wait);
}
static void start_transaction(struct acpi_ec *ec)
{
	ec->curr->irq_count = ec->curr->wi = ec->curr->ri = 0;
	ec->curr->flags = 0;
}
static int ec_guard(struct acpi_ec *ec)
{
	unsigned long guard = usecs_to_jiffies(ec->polling_guard);
	unsigned long timeout = ec->timestamp + guard;

	/* Ensure guarding period before polling EC status */
	do {
		if (ec->busy_polling) {
			/* Perform busy polling */
			if (ec_transaction_completed(ec))
				return 0;
			udelay(jiffies_to_usecs(guard));
		} else {
			/*
			 * Perform wait polling
			 * 1. Wait for the transaction to be completed by
			 *    the GPE handler after the transaction enters
			 *    ACPI_EC_COMMAND_POLL state.
			 * 2. A special guarding logic is also required
			 *    for event clearing mode "event" before the
			 *    transaction enters ACPI_EC_COMMAND_POLL
			 *    state.
			 */
			if (!ec_transaction_polled(ec) &&
			    !acpi_ec_guard_event(ec))
				break;
			if (wait_event_timeout(ec->wait,
					       ec_transaction_completed(ec),
					       guard))
				return 0;
		}
	} while (time_before(jiffies, timeout));
	return -ETIME;
}
static int ec_poll(struct acpi_ec *ec)
{
	unsigned long flags;
	int repeat = 5; /* number of command restarts */

	while (repeat--) {
		unsigned long delay = jiffies +
			msecs_to_jiffies(ec_delay);
		do {
			if (!ec_guard(ec))
				return 0;
			spin_lock_irqsave(&ec->lock, flags);
			advance_transaction(ec);
			spin_unlock_irqrestore(&ec->lock, flags);
		} while (time_before(jiffies, delay));
		pr_debug("controller reset, restart transaction\n");
		spin_lock_irqsave(&ec->lock, flags);
		start_transaction(ec);
		spin_unlock_irqrestore(&ec->lock, flags);
	}
	return -ETIME;
}
static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
					struct transaction *t)
{
	unsigned long tmp;
	int ret = 0;

	/* start transaction */
	spin_lock_irqsave(&ec->lock, tmp);
	/* Enable GPE for command processing (IBF=0/OBF=1) */
	if (!acpi_ec_submit_flushable_request(ec)) {
		ret = -EINVAL;
		goto unlock;
	}
	ec_dbg_ref(ec, "Increase command");
	/* following two actions should be kept atomic */
	ec->curr = t;
	ec_dbg_req("Command(%s) started", acpi_ec_cmd_string(t->command));
	start_transaction(ec);
	spin_unlock_irqrestore(&ec->lock, tmp);

	ret = ec_poll(ec);

	spin_lock_irqsave(&ec->lock, tmp);
	if (t->irq_count == ec_storm_threshold)
		acpi_ec_unmask_events(ec);
	ec_dbg_req("Command(%s) stopped", acpi_ec_cmd_string(t->command));
	ec->curr = NULL;
	/* Disable GPE for command processing (IBF=0/OBF=1) */
	acpi_ec_complete_request(ec);
	ec_dbg_ref(ec, "Decrease command");
unlock:
	spin_unlock_irqrestore(&ec->lock, tmp);
	return ret;
}
static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
{
	int status;
	u32 glk;

	if (!ec || (!t) || (t->wlen && !t->wdata) || (t->rlen && !t->rdata))
		return -EINVAL;
	if (t->rdata)
		memset(t->rdata, 0, t->rlen);

	mutex_lock(&ec->mutex);
	if (ec->global_lock) {
		status = acpi_acquire_global_lock(ACPI_EC_UDELAY_GLK, &glk);
		if (ACPI_FAILURE(status)) {
			status = -ENODEV;
			goto unlock;
		}
	}

	status = acpi_ec_transaction_unlocked(ec, t);

	if (ec->global_lock)
		acpi_release_global_lock(glk);
unlock:
	mutex_unlock(&ec->mutex);
	return status;
}
static int acpi_ec_burst_enable(struct acpi_ec *ec)
{
	u8 d;
	struct transaction t = {.command = ACPI_EC_BURST_ENABLE,
				.wdata = NULL, .rdata = &d,
				.wlen = 0, .rlen = 1};

	return acpi_ec_transaction(ec, &t);
}

static int acpi_ec_burst_disable(struct acpi_ec *ec)
{
	struct transaction t = {.command = ACPI_EC_BURST_DISABLE,
				.wdata = NULL, .rdata = NULL,
				.wlen = 0, .rlen = 0};

	return (acpi_ec_read_status(ec) & ACPI_EC_FLAG_BURST) ?
				acpi_ec_transaction(ec, &t) : 0;
}
static int acpi_ec_read(struct acpi_ec *ec, u8 address, u8 *data)
{
	int result;
	u8 d;
	struct transaction t = {.command = ACPI_EC_COMMAND_READ,
				.wdata = &address, .rdata = &d,
				.wlen = 1, .rlen = 1};

	result = acpi_ec_transaction(ec, &t);
	*data = d;
	return result;
}

static int acpi_ec_write(struct acpi_ec *ec, u8 address, u8 data)
{
	u8 wdata[2] = { address, data };
	struct transaction t = {.command = ACPI_EC_COMMAND_WRITE,
				.wdata = wdata, .rdata = NULL,
				.wlen = 2, .rlen = 0};

	return acpi_ec_transaction(ec, &t);
}
int ec_read(u8 addr, u8 *val)
{
	int err;
	u8 temp_data;

	if (!first_ec)
		return -ENODEV;

	err = acpi_ec_read(first_ec, addr, &temp_data);

	if (!err) {
		*val = temp_data;
		return 0;
	}
	return err;
}
EXPORT_SYMBOL(ec_read);

int ec_write(u8 addr, u8 val)
{
	int err;

	if (!first_ec)
		return -ENODEV;

	err = acpi_ec_write(first_ec, addr, val);

	return err;
}
EXPORT_SYMBOL(ec_write);
int ec_transaction(u8 command,
		   const u8 *wdata, unsigned wdata_len,
		   u8 *rdata, unsigned rdata_len)
{
	struct transaction t = {.command = command,
				.wdata = wdata, .rdata = rdata,
				.wlen = wdata_len, .rlen = rdata_len};

	if (!first_ec)
		return -ENODEV;

	return acpi_ec_transaction(first_ec, &t);
}
EXPORT_SYMBOL(ec_transaction);
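/*
 * Example (sketch): reading one byte at EC address 0x50 through the
 * exported helper; this is equivalent to acpi_ec_read() on first_ec.
 * The address and variable names are made up for illustration.
 */
#if 0
	u8 addr = 0x50, data;
	int err = ec_transaction(ACPI_EC_COMMAND_READ, &addr, 1, &data, 1);
#endif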
/* Get the handle to the EC device */
acpi_handle ec_get_handle(void)
{
	if (!first_ec)
		return NULL;
	return first_ec->handle;
}
EXPORT_SYMBOL(ec_get_handle);
static void acpi_ec_start(struct acpi_ec *ec, bool resuming)
{
	unsigned long flags;

	spin_lock_irqsave(&ec->lock, flags);
	if (!test_and_set_bit(EC_FLAGS_STARTED, &ec->flags)) {
		ec_dbg_drv("Starting EC");
		/* Enable GPE for event processing (SCI_EVT=1) */
		if (!resuming) {
			acpi_ec_submit_request(ec);
			ec_dbg_ref(ec, "Increase driver");
		}
		ec_log_drv("EC started");
	}
	spin_unlock_irqrestore(&ec->lock, flags);
}

static bool acpi_ec_stopped(struct acpi_ec *ec)
{
	unsigned long flags;
	bool flushed;

	spin_lock_irqsave(&ec->lock, flags);
	flushed = acpi_ec_flushed(ec);
	spin_unlock_irqrestore(&ec->lock, flags);
	return flushed;
}
static void acpi_ec_stop(struct acpi_ec *ec, bool suspending)
{
	unsigned long flags;

	spin_lock_irqsave(&ec->lock, flags);
	if (acpi_ec_started(ec)) {
		ec_dbg_drv("Stopping EC");
		set_bit(EC_FLAGS_STOPPED, &ec->flags);
		spin_unlock_irqrestore(&ec->lock, flags);
		wait_event(ec->wait, acpi_ec_stopped(ec));
		spin_lock_irqsave(&ec->lock, flags);
		/* Disable GPE for event processing (SCI_EVT=1) */
		if (!suspending) {
			acpi_ec_complete_request(ec);
			ec_dbg_ref(ec, "Decrease driver");
		} else if (!ec_freeze_events)
			__acpi_ec_disable_event(ec);
		clear_bit(EC_FLAGS_STARTED, &ec->flags);
		clear_bit(EC_FLAGS_STOPPED, &ec->flags);
		ec_log_drv("EC stopped");
	}
	spin_unlock_irqrestore(&ec->lock, flags);
}
static void acpi_ec_enter_noirq(struct acpi_ec *ec)
{
	unsigned long flags;

	spin_lock_irqsave(&ec->lock, flags);
	ec->busy_polling = true;
	ec->polling_guard = 0;
	ec_log_drv("interrupt blocked");
	spin_unlock_irqrestore(&ec->lock, flags);
}

static void acpi_ec_leave_noirq(struct acpi_ec *ec)
{
	unsigned long flags;

	spin_lock_irqsave(&ec->lock, flags);
	ec->busy_polling = ec_busy_polling;
	ec->polling_guard = ec_polling_guard;
	ec_log_drv("interrupt unblocked");
	spin_unlock_irqrestore(&ec->lock, flags);
}
void acpi_ec_block_transactions(void)
{
	struct acpi_ec *ec = first_ec;

	if (!ec)
		return;

	mutex_lock(&ec->mutex);
	/* Prevent transactions from being carried out */
	acpi_ec_stop(ec, true);
	mutex_unlock(&ec->mutex);
}

void acpi_ec_unblock_transactions(void)
{
	/*
	 * Allow transactions to happen again (this function is called from
	 * atomic context during wakeup, so we don't need to acquire the mutex).
	 */
	if (first_ec)
		acpi_ec_start(first_ec, true);
}
/* --------------------------------------------------------------------------
 *                           Event Management
 * -------------------------------------------------------------------------- */
static struct acpi_ec_query_handler *
acpi_ec_get_query_handler(struct acpi_ec_query_handler *handler)
{
	if (handler)
		kref_get(&handler->kref);
	return handler;
}

static struct acpi_ec_query_handler *
acpi_ec_get_query_handler_by_value(struct acpi_ec *ec, u8 value)
{
	struct acpi_ec_query_handler *handler;
	bool found = false;

	mutex_lock(&ec->mutex);
	list_for_each_entry(handler, &ec->list, node) {
		if (value == handler->query_bit) {
			found = true;
			break;
		}
	}
	mutex_unlock(&ec->mutex);
	return found ? acpi_ec_get_query_handler(handler) : NULL;
}
static void acpi_ec_query_handler_release(struct kref *kref)
{
	struct acpi_ec_query_handler *handler =
		container_of(kref, struct acpi_ec_query_handler, kref);

	kfree(handler);
}

static void acpi_ec_put_query_handler(struct acpi_ec_query_handler *handler)
{
	kref_put(&handler->kref, acpi_ec_query_handler_release);
}
int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
			      acpi_handle handle, acpi_ec_query_func func,
			      void *data)
{
	struct acpi_ec_query_handler *handler =
	    kzalloc(sizeof(struct acpi_ec_query_handler), GFP_KERNEL);

	if (!handler)
		return -ENOMEM;

	handler->query_bit = query_bit;
	handler->handle = handle;
	handler->func = func;
	handler->data = data;
	mutex_lock(&ec->mutex);
	kref_init(&handler->kref);
	list_add(&handler->node, &ec->list);
	mutex_unlock(&ec->mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(acpi_ec_add_query_handler);
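/*
 * Example (sketch): a consumer registering a func-style handler for
 * query 0x42; my_q42_handler is hypothetical. Passing a handle instead
 * of a func makes the driver evaluate the corresponding _Qxx method
 * instead (see acpi_ec_event_processor()).
 */
#if 0
static int my_q42_handler(void *data)
{
	return 0;	/* react to the 0x42 event */
}

/* ... then, from init code: */
/* acpi_ec_add_query_handler(first_ec, 0x42, NULL, my_q42_handler, NULL); */
#endif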
static void acpi_ec_remove_query_handlers(struct acpi_ec *ec,
					  bool remove_all, u8 query_bit)
{
	struct acpi_ec_query_handler *handler, *tmp;
	LIST_HEAD(free_list);

	mutex_lock(&ec->mutex);
	list_for_each_entry_safe(handler, tmp, &ec->list, node) {
		if (remove_all || query_bit == handler->query_bit) {
			list_del_init(&handler->node);
			list_add(&handler->node, &free_list);
		}
	}
	mutex_unlock(&ec->mutex);
	list_for_each_entry_safe(handler, tmp, &free_list, node)
		acpi_ec_put_query_handler(handler);
}

void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit)
{
	acpi_ec_remove_query_handlers(ec, false, query_bit);
}
EXPORT_SYMBOL_GPL(acpi_ec_remove_query_handler);
static struct acpi_ec_query *acpi_ec_create_query(u8 *pval)
{
	struct acpi_ec_query *q;
	struct transaction *t;

	q = kzalloc(sizeof (struct acpi_ec_query), GFP_KERNEL);
	if (!q)
		return NULL;
	INIT_WORK(&q->work, acpi_ec_event_processor);
	t = &q->transaction;
	t->command = ACPI_EC_COMMAND_QUERY;
	t->rdata = pval;
	t->rlen = 1;
	return q;
}

static void acpi_ec_delete_query(struct acpi_ec_query *q)
{
	if (q) {
		if (q->handler)
			acpi_ec_put_query_handler(q->handler);
		kfree(q);
	}
}
static void acpi_ec_event_processor(struct work_struct *work)
{
	struct acpi_ec_query *q = container_of(work, struct acpi_ec_query, work);
	struct acpi_ec_query_handler *handler = q->handler;

	ec_dbg_evt("Query(0x%02x) started", handler->query_bit);
	if (handler->func)
		handler->func(handler->data);
	else if (handler->handle)
		acpi_evaluate_object(handler->handle, NULL, NULL, NULL);
	ec_dbg_evt("Query(0x%02x) stopped", handler->query_bit);
	acpi_ec_delete_query(q);
}
static int acpi_ec_query(struct acpi_ec *ec, u8 *data)
{
	u8 value = 0;
	int result;
	struct acpi_ec_query *q;

	q = acpi_ec_create_query(&value);
	if (!q)
		return -ENOMEM;

	/*
	 * Query the EC to find out which _Qxx method we need to evaluate.
	 * Note that successful completion of the query causes the ACPI_EC_SCI
	 * bit to be cleared (and thus clearing the interrupt source).
	 */
	result = acpi_ec_transaction(ec, &q->transaction);
	if (!value)
		result = -ENODATA;
	if (result)
		goto err_exit;

	q->handler = acpi_ec_get_query_handler_by_value(ec, value);
	if (!q->handler) {
		result = -ENODATA;
		goto err_exit;
	}

	/*
	 * It is reported that _Qxx are evaluated in a parallel way on
	 * some platforms:
	 * https://bugzilla.kernel.org/show_bug.cgi?id=94411
	 *
	 * Put this log entry before queuing the work in order to make
	 * it appear before any other log entries emitted during the
	 * work queue execution.
	 */
	ec_dbg_evt("Query(0x%02x) scheduled", value);
	if (!queue_work(ec_query_wq, &q->work)) {
		ec_dbg_evt("Query(0x%02x) overlapped", value);
		result = -EBUSY;
	}

err_exit:
	if (result)
		acpi_ec_delete_query(q);
	if (data)
		*data = value;
	return result;
}
static void acpi_ec_check_event(struct acpi_ec *ec)
{
	unsigned long flags;

	if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT) {
		if (ec_guard(ec)) {
			spin_lock_irqsave(&ec->lock, flags);
			/*
			 * Take care of the SCI_EVT unless no one else is
			 * taking care of it.
			 */
			if (!ec->curr)
				advance_transaction(ec);
			spin_unlock_irqrestore(&ec->lock, flags);
		}
	}
}
static void acpi_ec_event_handler(struct work_struct *work)
{
	unsigned long flags;
	struct acpi_ec *ec = container_of(work, struct acpi_ec, work);

	ec_dbg_evt("Event started");

	spin_lock_irqsave(&ec->lock, flags);
	while (ec->nr_pending_queries) {
		spin_unlock_irqrestore(&ec->lock, flags);
		(void)acpi_ec_query(ec, NULL);
		spin_lock_irqsave(&ec->lock, flags);
		ec->nr_pending_queries--;
		/*
		 * Before exit, make sure that this work item can be
		 * scheduled again. There might be QR_EC failures, leaving
		 * EC_FLAGS_QUERY_PENDING uncleared and preventing this work
		 * item from being scheduled again.
		 */
		if (!ec->nr_pending_queries) {
			if (ec_event_clearing == ACPI_EC_EVT_TIMING_STATUS ||
			    ec_event_clearing == ACPI_EC_EVT_TIMING_QUERY)
				acpi_ec_complete_query(ec);
		}
	}
	spin_unlock_irqrestore(&ec->lock, flags);

	ec_dbg_evt("Event stopped");

	acpi_ec_check_event(ec);
}
static void acpi_ec_handle_interrupt(struct acpi_ec *ec)
{
	unsigned long flags;

	spin_lock_irqsave(&ec->lock, flags);
	advance_transaction(ec);
	spin_unlock_irqrestore(&ec->lock, flags);
}

static u32 acpi_ec_gpe_handler(acpi_handle gpe_device,
			       u32 gpe_number, void *data)
{
	acpi_ec_handle_interrupt(data);
	return ACPI_INTERRUPT_HANDLED;
}

static irqreturn_t acpi_ec_irq_handler(int irq, void *data)
{
	acpi_ec_handle_interrupt(data);
	return IRQ_HANDLED;
}
/* --------------------------------------------------------------------------
 *                           Address Space Management
 * -------------------------------------------------------------------------- */
static acpi_status
acpi_ec_space_handler(u32 function, acpi_physical_address address,
		      u32 bits, u64 *value64,
		      void *handler_context, void *region_context)
{
	struct acpi_ec *ec = handler_context;
	int result = 0, i, bytes = bits / 8;
	u8 *value = (u8 *)value64;

	if ((address > 0xFF) || !value || !handler_context)
		return AE_BAD_PARAMETER;

	if (function != ACPI_READ && function != ACPI_WRITE)
		return AE_BAD_PARAMETER;

	if (ec->busy_polling || bits > 8)
		acpi_ec_burst_enable(ec);

	for (i = 0; i < bytes; ++i, ++address, ++value)
		result = (function == ACPI_READ) ?
			acpi_ec_read(ec, address, value) :
			acpi_ec_write(ec, address, *value);

	if (ec->busy_polling || bits > 8)
		acpi_ec_burst_disable(ec);

	switch (result) {
	case -EINVAL:
		return AE_BAD_PARAMETER;
	case -ENODEV:
		return AE_NOT_FOUND;
	case -ETIME:
		return AE_TIME;
	default:
		return AE_OK;
	}
}
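/*
 * Worked example (illustrative): an AML load of a 16-bit EmbeddedControl
 * field at offset 0x10 arrives here as address=0x10, bits=16, and is
 * split by the loop above into two byte-wide transactions (for 0x10 and
 * 0x11), with burst mode requested around them because bits > 8.
 */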
/* --------------------------------------------------------------------------
 *                           Driver Interface
 * -------------------------------------------------------------------------- */

static acpi_status
ec_parse_io_ports(struct acpi_resource *resource, void *context);

static void acpi_ec_free(struct acpi_ec *ec)
{
	if (first_ec == ec)
		first_ec = NULL;
	if (boot_ec == ec)
		boot_ec = NULL;
	kfree(ec);
}

static struct acpi_ec *acpi_ec_alloc(void)
{
	struct acpi_ec *ec = kzalloc(sizeof(struct acpi_ec), GFP_KERNEL);

	if (!ec)
		return NULL;
	mutex_init(&ec->mutex);
	init_waitqueue_head(&ec->wait);
	INIT_LIST_HEAD(&ec->list);
	spin_lock_init(&ec->lock);
	INIT_WORK(&ec->work, acpi_ec_event_handler);
	ec->timestamp = jiffies;
	ec->busy_polling = true;
	ec->polling_guard = 0;
	ec->gpe = -1;
	ec->irq = -1;
	return ec;
}
static acpi_status
acpi_ec_register_query_methods(acpi_handle handle, u32 level,
			       void *context, void **return_value)
{
	char node_name[5];
	struct acpi_buffer buffer = { sizeof(node_name), node_name };
	struct acpi_ec *ec = context;
	int value = 0;
	acpi_status status;

	status = acpi_get_name(handle, ACPI_SINGLE_NAME, &buffer);

	if (ACPI_SUCCESS(status) && sscanf(node_name, "_Q%x", &value) == 1)
		acpi_ec_add_query_handler(ec, value, handle, NULL, NULL);
	return AE_OK;
}
static acpi_status
ec_parse_device(acpi_handle handle, u32 Level, void *context, void **retval)
{
	acpi_status status;
	unsigned long long tmp = 0;
	struct acpi_ec *ec = context;

	/* clear addr values, ec_parse_io_ports depends on it */
	ec->command_addr = ec->data_addr = 0;

	status = acpi_walk_resources(handle, METHOD_NAME__CRS,
				     ec_parse_io_ports, ec);
	if (ACPI_FAILURE(status))
		return status;
	if (ec->data_addr == 0 || ec->command_addr == 0)
		return AE_OK;

	if (boot_ec && boot_ec_is_ecdt && EC_FLAGS_IGNORE_DSDT_GPE) {
		/*
		 * Always inherit the GPE number setting from the ECDT
		 * EC.
		 */
		ec->gpe = boot_ec->gpe;
	} else {
		/* Get GPE bit assignment (EC events). */
		/* TODO: Add support for _GPE returning a package */
		status = acpi_evaluate_integer(handle, "_GPE", NULL, &tmp);
		if (ACPI_SUCCESS(status))
			ec->gpe = tmp;
		/*
		 * Errors are non-fatal, allowing for ACPI Reduced Hardware
		 * platforms which use GpioInt instead of GPE.
		 */
	}
	/* Use the global lock for all EC transactions? */
	tmp = 0;
	acpi_evaluate_integer(handle, "_GLK", NULL, &tmp);
	ec->global_lock = tmp;
	ec->handle = handle;
	return AE_CTRL_TERMINATE;
}
static void install_gpe_event_handler(struct acpi_ec *ec)
{
	acpi_status status =
		acpi_install_gpe_raw_handler(NULL, ec->gpe,
					     ACPI_GPE_EDGE_TRIGGERED,
					     &acpi_ec_gpe_handler,
					     ec);
	if (ACPI_SUCCESS(status)) {
		/* This is not fatal as we can poll EC events */
		set_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags);
		acpi_ec_leave_noirq(ec);
		if (test_bit(EC_FLAGS_STARTED, &ec->flags) &&
		    ec->reference_count >= 1)
			acpi_ec_enable_gpe(ec, true);
	}
}
/* ACPI reduced hardware platforms use a GpioInt specified in _CRS. */
static int install_gpio_irq_event_handler(struct acpi_ec *ec,
					  struct acpi_device *device)
{
	int irq = acpi_dev_gpio_irq_get(device, 0);
	int ret;

	if (irq < 0)
		return irq;

	ret = request_irq(irq, acpi_ec_irq_handler, IRQF_SHARED,
			  "ACPI EC", ec);

	/*
	 * Unlike the GPE case, we treat errors here as fatal, we'll only
	 * implement GPIO polling if we find a case that needs it.
	 */
	if (ret < 0)
		return ret;

	ec->irq = irq;
	set_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags);
	acpi_ec_leave_noirq(ec);

	return 0;
}
/*
 * Note: This function returns an error code only when the address space
 *       handler is not installed, which means "not able to handle
 *       transactions".
 */
static int ec_install_handlers(struct acpi_ec *ec, struct acpi_device *device,
			       bool handle_events)
{
	acpi_status status;

	acpi_ec_start(ec, false);

	if (!test_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags)) {
		acpi_ec_enter_noirq(ec);
		status = acpi_install_address_space_handler(ec->handle,
							    ACPI_ADR_SPACE_EC,
							    &acpi_ec_space_handler,
							    NULL, ec);
		if (ACPI_FAILURE(status)) {
			if (status == AE_NOT_FOUND) {
				/*
				 * Maybe the OS fails in evaluating the _REG
				 * object. The AE_NOT_FOUND error will be
				 * ignored and the OS will continue to
				 * initialize the EC.
				 */
				pr_err("Failed to evaluate the _REG object"
				       " of the EC device, broken BIOS is suspected\n");
			} else {
				acpi_ec_stop(ec, false);
				return -ENODEV;
			}
		}
		set_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags);
	}

	if (!handle_events)
		return 0;

	if (!test_bit(EC_FLAGS_QUERY_METHODS_INSTALLED, &ec->flags)) {
		/* Find and register all query methods */
		acpi_walk_namespace(ACPI_TYPE_METHOD, ec->handle, 1,
				    acpi_ec_register_query_methods,
				    NULL, ec, NULL);
		set_bit(EC_FLAGS_QUERY_METHODS_INSTALLED, &ec->flags);
	}
	if (!test_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags)) {
		if (ec->gpe >= 0) {
			install_gpe_event_handler(ec);
		} else if (device) {
			int ret = install_gpio_irq_event_handler(ec, device);

			if (ret)
				return ret;
		} else { /* No GPE and no GpioInt? */
			return -ENODEV;
		}
	}
	/* EC is fully operational, allow queries */
	acpi_ec_enable_event(ec);

	return 0;
}
static void ec_remove_handlers(struct acpi_ec *ec)
{
	if (test_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags)) {
		if (ACPI_FAILURE(acpi_remove_address_space_handler(ec->handle,
					ACPI_ADR_SPACE_EC, &acpi_ec_space_handler)))
			pr_err("failed to remove space handler\n");
		clear_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags);
	}

	/*
	 * Stops handling the EC transactions after removing the operation
	 * region handler. This is required because _REG(DISCONNECT)
	 * invoked during the removal can result in new EC transactions.
	 *
	 * Flushes the EC requests and thus disables the GPE before
	 * removing the GPE handler. This is required by the current ACPICA
	 * GPE core. ACPICA GPE core will automatically disable a GPE when
	 * it is indicated but there is no way to handle it. So the drivers
	 * must disable the GPEs prior to removing the GPE handlers.
	 */
	acpi_ec_stop(ec, false);

	if (test_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags)) {
		if (ec->gpe >= 0 &&
		    ACPI_FAILURE(acpi_remove_gpe_handler(NULL, ec->gpe,
				 &acpi_ec_gpe_handler)))
			pr_err("failed to remove gpe handler\n");

		if (ec->irq >= 0)
			free_irq(ec->irq, ec);

		clear_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags);
	}
	if (test_bit(EC_FLAGS_QUERY_METHODS_INSTALLED, &ec->flags)) {
		acpi_ec_remove_query_handlers(ec, true, 0);
		clear_bit(EC_FLAGS_QUERY_METHODS_INSTALLED, &ec->flags);
	}
}
static int acpi_ec_setup(struct acpi_ec *ec, struct acpi_device *device,
			 bool handle_events)
{
	int ret;

	ret = ec_install_handlers(ec, device, handle_events);
	if (ret)
		return ret;

	/* First EC capable of handling transactions */
	if (!first_ec) {
		first_ec = ec;
		acpi_handle_info(first_ec->handle, "Used as first EC\n");
	}

	acpi_handle_info(ec->handle,
			 "GPE=0x%x, IRQ=%d, EC_CMD/EC_SC=0x%lx, EC_DATA=0x%lx\n",
			 ec->gpe, ec->irq, ec->command_addr, ec->data_addr);
	return ret;
}
static bool acpi_ec_ecdt_get_handle(acpi_handle *phandle)
{
	struct acpi_table_ecdt *ecdt_ptr;
	acpi_status status;
	acpi_handle handle;

	status = acpi_get_table(ACPI_SIG_ECDT, 1,
				(struct acpi_table_header **)&ecdt_ptr);
	if (ACPI_FAILURE(status))
		return false;

	status = acpi_get_handle(NULL, ecdt_ptr->id, &handle);
	if (ACPI_FAILURE(status))
		return false;

	*phandle = handle;
	return true;
}
static int acpi_ec_add(struct acpi_device *device)
{
	struct acpi_ec *ec = NULL;
	bool dep_update = true;
	acpi_status status;
	int ret;

	strcpy(acpi_device_name(device), ACPI_EC_DEVICE_NAME);
	strcpy(acpi_device_class(device), ACPI_EC_CLASS);

	if (!strcmp(acpi_device_hid(device), ACPI_ECDT_HID)) {
		boot_ec_is_ecdt = true;
		ec = boot_ec;
		dep_update = false;
	} else {
		ec = acpi_ec_alloc();
		if (!ec)
			return -ENOMEM;

		status = ec_parse_device(device->handle, 0, ec, NULL);
		if (status != AE_CTRL_TERMINATE) {
			ret = -EINVAL;
			goto err;
		}

		if (boot_ec && ec->command_addr == boot_ec->command_addr &&
		    ec->data_addr == boot_ec->data_addr) {
			boot_ec_is_ecdt = false;
			/*
			 * Trust PNP0C09 namespace location rather than
			 * ECDT ID. But trust ECDT GPE rather than _GPE
			 * because of ASUS quirks, so do not change
			 * boot_ec->gpe to ec->gpe.
			 */
			boot_ec->handle = ec->handle;
			acpi_handle_debug(ec->handle, "duplicated.\n");
			acpi_ec_free(ec);
			ec = boot_ec;
		}
	}

	ret = acpi_ec_setup(ec, device, true);
	if (ret)
		goto err;

	if (ec == boot_ec)
		acpi_handle_info(boot_ec->handle,
				 "Boot %s EC used to handle transactions and events\n",
				 boot_ec_is_ecdt ? "ECDT" : "DSDT");

	device->driver_data = ec;

	ret = !!request_region(ec->data_addr, 1, "EC data");
	WARN(!ret, "Could not request EC data io port 0x%lx", ec->data_addr);
	ret = !!request_region(ec->command_addr, 1, "EC cmd");
	WARN(!ret, "Could not request EC cmd io port 0x%lx", ec->command_addr);

	if (dep_update) {
		/* Reprobe devices depending on the EC */
		acpi_walk_dep_device_list(ec->handle);
	}
	acpi_handle_debug(ec->handle, "enumerated.\n");
	return 0;

err:
	if (ec != boot_ec)
		acpi_ec_remove_query_handlers(ec, true, 0);
	acpi_ec_free(ec);
	return ret;
}
static int acpi_ec_remove(struct acpi_device *device)
{
	struct acpi_ec *ec;

	if (!device)
		return -EINVAL;

	ec = acpi_driver_data(device);
	release_region(ec->data_addr, 1);
	release_region(ec->command_addr, 1);
	device->driver_data = NULL;
	if (ec != boot_ec) {
		ec_remove_handlers(ec);
		acpi_ec_free(ec);
	}
	return 0;
}
static acpi_status
ec_parse_io_ports(struct acpi_resource *resource, void *context)
{
	struct acpi_ec *ec = context;

	if (resource->type != ACPI_RESOURCE_TYPE_IO)
		return AE_OK;

	/*
	 * The first address region returned is the data port, and
	 * the second address region returned is the status/command
	 * port.
	 */
	if (ec->data_addr == 0)
		ec->data_addr = resource->data.io.minimum;
	else if (ec->command_addr == 0)
		ec->command_addr = resource->data.io.minimum;
	else
		return AE_CTRL_TERMINATE;

	return AE_OK;
}
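/*
 * Example: on a typical x86 laptop the EC _CRS yields 0x62 for the first
 * I/O region (EC_DATA) and 0x66 for the second (EC_SC/EC_CMD).
 */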
static const struct acpi_device_id ec_device_ids[] = {
	{"PNP0C09", 0},
	{ACPI_ECDT_HID, 0},
	{"", 0},
};
/*
 * This function is not Windows-compatible as Windows never enumerates the
 * namespace EC before the main ACPI device enumeration process. It is
 * retained for historical reasons and will be deprecated in the future.
 */
void __init acpi_ec_dsdt_probe(void)
{
	struct acpi_ec *ec;
	acpi_status status;
	int ret;

	/*
	 * If a platform has ECDT, there is no need to proceed as the
	 * following probe is not a part of the ACPI device enumeration,
	 * executing _STA is not safe, and thus this probe may risk
	 * picking up an invalid EC device.
	 */
	if (boot_ec)
		return;

	ec = acpi_ec_alloc();
	if (!ec)
		return;

	/*
	 * At this point, the namespace is initialized, so start to find
	 * the namespace objects.
	 */
	status = acpi_get_devices(ec_device_ids[0].id, ec_parse_device, ec, NULL);
	if (ACPI_FAILURE(status) || !ec->handle) {
		acpi_ec_free(ec);
		return;
	}

	/*
	 * When the DSDT EC is available, always re-configure boot EC to
	 * have _REG evaluated. _REG can only be evaluated after the
	 * namespace initialization.
	 * At this point, the GPE is not fully initialized, so do not
	 * handle the events.
	 */
	ret = acpi_ec_setup(ec, NULL, false);
	if (ret) {
		acpi_ec_free(ec);
		return;
	}

	boot_ec = ec;

	acpi_handle_info(ec->handle,
			 "Boot DSDT EC used to handle transactions\n");
}
/*
 * If the DSDT EC is not functioning, we still need to prepare a fully
 * functioning ECDT EC first in order to handle the events.
 * https://bugzilla.kernel.org/show_bug.cgi?id=115021
 */
static int __init acpi_ec_ecdt_start(void)
{
	acpi_handle handle;

	if (!boot_ec)
		return -ENODEV;
	/* In case acpi_ec_ecdt_start() is called after acpi_ec_add() */
	if (!boot_ec_is_ecdt)
		return -ENODEV;

	/*
	 * At this point, the namespace and the GPE are initialized, so
	 * start to find the namespace objects and handle the events.
	 *
	 * Note: ec->handle can be valid if this function is called after
	 * acpi_ec_add(), hence the fast path.
	 */
	if (boot_ec->handle == ACPI_ROOT_OBJECT) {
		if (!acpi_ec_ecdt_get_handle(&handle))
			return -ENODEV;
		boot_ec->handle = handle;
	}

	/* Register to ACPI bus with PM ops attached */
	return acpi_bus_register_early_device(ACPI_BUS_TYPE_ECDT_EC);
}
#if 0
/*
 * Some EC firmware variations refuse to respond to QR_EC when SCI_EVT is
 * not set; in that case, we complete the QR_EC without issuing it to the
 * firmware.
 * https://bugzilla.kernel.org/show_bug.cgi?id=82611
 * https://bugzilla.kernel.org/show_bug.cgi?id=97381
 */
static int ec_flag_query_handshake(const struct dmi_system_id *id)
{
	pr_debug("Detected the EC firmware requiring QR_EC issued when SCI_EVT set\n");
	EC_FLAGS_QUERY_HANDSHAKE = 1;
	return 0;
}
#endif
/*
 * On some hardware it is necessary to clear events accumulated by the EC
 * during sleep. If too many events are accumulated, these ECs stop
 * reporting GPEs until they are manually polled. (e.g. Samsung Series
 * 5/9 notebooks)
 *
 * https://bugzilla.kernel.org/show_bug.cgi?id=44161
 *
 * Ideally, the EC should also be instructed NOT to accumulate events during
 * sleep (which Windows seems to do somehow), but the interface to control
 * this behaviour is not known at this time.
 *
 * Models known to be affected are Samsung 530Uxx/535Uxx/540Uxx/550Pxx/900Xxx,
 * however it is very likely that other Samsung models are affected.
 *
 * On systems which don't accumulate _Q events during sleep, this extra check
 * should be harmless.
 */
static int ec_clear_on_resume(const struct dmi_system_id *id)
{
	pr_debug("Detected system needing EC poll on resume.\n");
	EC_FLAGS_CLEAR_ON_RESUME = 1;
	ec_event_clearing = ACPI_EC_EVT_TIMING_STATUS;
	return 0;
}
/*
 * Some ECDTs contain wrong register addresses.
 * MSI MS-171F
 * https://bugzilla.kernel.org/show_bug.cgi?id=12461
 */
static int ec_correct_ecdt(const struct dmi_system_id *id)
{
	pr_debug("Detected system needing ECDT address correction.\n");
	EC_FLAGS_CORRECT_ECDT = 1;
	return 0;
}

/*
 * Some DSDTs contain wrong GPE setting.
 * Asus FX502VD/VE, GL702VMK, X550VXK, X580VD
 * https://bugzilla.kernel.org/show_bug.cgi?id=195651
 */
static int ec_honor_ecdt_gpe(const struct dmi_system_id *id)
{
	pr_debug("Detected system needing ignore DSDT GPE setting.\n");
	EC_FLAGS_IGNORE_DSDT_GPE = 1;
	return 0;
}
static const struct dmi_system_id ec_dmi_table[] __initconst = {
	{
	ec_correct_ecdt, "MSI MS-171F", {
	DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star"),
	DMI_MATCH(DMI_PRODUCT_NAME, "MS-171F"),}, NULL},
	{
	ec_honor_ecdt_gpe, "ASUS FX502VD", {
	DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
	DMI_MATCH(DMI_PRODUCT_NAME, "FX502VD"),}, NULL},
	{
	ec_honor_ecdt_gpe, "ASUS FX502VE", {
	DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
	DMI_MATCH(DMI_PRODUCT_NAME, "FX502VE"),}, NULL},
	{
	ec_honor_ecdt_gpe, "ASUS GL702VMK", {
	DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
	DMI_MATCH(DMI_PRODUCT_NAME, "GL702VMK"),}, NULL},
	{
	ec_honor_ecdt_gpe, "ASUS X550VXK", {
	DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
	DMI_MATCH(DMI_PRODUCT_NAME, "X550VXK"),}, NULL},
	{
	ec_honor_ecdt_gpe, "ASUS X580VD", {
	DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
	DMI_MATCH(DMI_PRODUCT_NAME, "X580VD"),}, NULL},
	{
	ec_clear_on_resume, "Samsung hardware", {
	DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD.")}, NULL},
	{},
};
void __init acpi_ec_ecdt_probe(void)
{
	struct acpi_table_ecdt *ecdt_ptr;
	struct acpi_ec *ec;
	acpi_status status;
	int ret;

	/* Generate a boot ec context. */
	dmi_check_system(ec_dmi_table);
	status = acpi_get_table(ACPI_SIG_ECDT, 1,
				(struct acpi_table_header **)&ecdt_ptr);
	if (ACPI_FAILURE(status))
		return;

	if (!ecdt_ptr->control.address || !ecdt_ptr->data.address) {
		/*
		 * Asus X50GL:
		 * https://bugzilla.kernel.org/show_bug.cgi?id=11880
		 */
		return;
	}

	ec = acpi_ec_alloc();
	if (!ec)
		return;

	if (EC_FLAGS_CORRECT_ECDT) {
		ec->command_addr = ecdt_ptr->data.address;
		ec->data_addr = ecdt_ptr->control.address;
	} else {
		ec->command_addr = ecdt_ptr->control.address;
		ec->data_addr = ecdt_ptr->data.address;
	}

	/*
	 * Ignore the GPE value on Reduced Hardware platforms.
	 * Some products have this set to an erroneous value.
	 */
	if (!acpi_gbl_reduced_hardware)
		ec->gpe = ecdt_ptr->gpe;

	ec->handle = ACPI_ROOT_OBJECT;

	/*
	 * At this point, the namespace is not initialized, so do not find
	 * the namespace objects, or handle the events.
	 */
	ret = acpi_ec_setup(ec, NULL, false);
	if (ret) {
		acpi_ec_free(ec);
		return;
	}

	boot_ec = ec;
	boot_ec_is_ecdt = true;

	pr_info("Boot ECDT EC used to handle transactions\n");
}
#ifdef CONFIG_PM_SLEEP
static int acpi_ec_suspend(struct device *dev)
{
	struct acpi_ec *ec =
		acpi_driver_data(to_acpi_device(dev));

	if (!pm_suspend_no_platform() && ec_freeze_events)
		acpi_ec_disable_event(ec);
	return 0;
}

static int acpi_ec_suspend_noirq(struct device *dev)
{
	struct acpi_ec *ec = acpi_driver_data(to_acpi_device(dev));

	/*
	 * The SCI handler doesn't run at this point, so the GPE can be
	 * masked at the low level without side effects.
	 */
	if (ec_no_wakeup && test_bit(EC_FLAGS_STARTED, &ec->flags) &&
	    ec->gpe >= 0 && ec->reference_count >= 1)
		acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_DISABLE);

	acpi_ec_enter_noirq(ec);

	return 0;
}

static int acpi_ec_resume_noirq(struct device *dev)
{
	struct acpi_ec *ec = acpi_driver_data(to_acpi_device(dev));

	acpi_ec_leave_noirq(ec);

	if (ec_no_wakeup && test_bit(EC_FLAGS_STARTED, &ec->flags) &&
	    ec->gpe >= 0 && ec->reference_count >= 1)
		acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE);

	return 0;
}

static int acpi_ec_resume(struct device *dev)
{
	struct acpi_ec *ec =
		acpi_driver_data(to_acpi_device(dev));

	acpi_ec_enable_event(ec);
	return 0;
}
void acpi_ec_mark_gpe_for_wake(void)
{
	if (first_ec && !ec_no_wakeup)
		acpi_mark_gpe_for_wake(NULL, first_ec->gpe);
}
EXPORT_SYMBOL_GPL(acpi_ec_mark_gpe_for_wake);

void acpi_ec_set_gpe_wake_mask(u8 action)
{
	if (pm_suspend_no_platform() && first_ec && !ec_no_wakeup)
		acpi_set_gpe_wake_mask(NULL, first_ec->gpe, action);
}

bool acpi_ec_dispatch_gpe(void)
{
	u32 ret;

	if (!first_ec)
		return false;

	ret = acpi_dispatch_gpe(NULL, first_ec->gpe);
	if (ret == ACPI_INTERRUPT_HANDLED) {
		pm_pr_dbg("EC GPE dispatched\n");
		return true;
	}
	return false;
}
#endif /* CONFIG_PM_SLEEP */
static const struct dev_pm_ops acpi_ec_pm = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(acpi_ec_suspend_noirq, acpi_ec_resume_noirq)
	SET_SYSTEM_SLEEP_PM_OPS(acpi_ec_suspend, acpi_ec_resume)
};
static int param_set_event_clearing(const char *val,
				    const struct kernel_param *kp)
{
	int result = 0;

	if (!strncmp(val, "status", sizeof("status") - 1)) {
		ec_event_clearing = ACPI_EC_EVT_TIMING_STATUS;
		pr_info("Assuming SCI_EVT clearing on EC_SC accesses\n");
	} else if (!strncmp(val, "query", sizeof("query") - 1)) {
		ec_event_clearing = ACPI_EC_EVT_TIMING_QUERY;
		pr_info("Assuming SCI_EVT clearing on QR_EC writes\n");
	} else if (!strncmp(val, "event", sizeof("event") - 1)) {
		ec_event_clearing = ACPI_EC_EVT_TIMING_EVENT;
		pr_info("Assuming SCI_EVT clearing on event reads\n");
	} else
		result = -EINVAL;
	return result;
}
static int param_get_event_clearing(char *buffer,
				    const struct kernel_param *kp)
{
	switch (ec_event_clearing) {
	case ACPI_EC_EVT_TIMING_STATUS:
		return sprintf(buffer, "status");
	case ACPI_EC_EVT_TIMING_QUERY:
		return sprintf(buffer, "query");
	case ACPI_EC_EVT_TIMING_EVENT:
		return sprintf(buffer, "event");
	default:
		return sprintf(buffer, "invalid");
	}
	return 0;
}
module_param_call(ec_event_clearing, param_set_event_clearing, param_get_event_clearing,
		  NULL, 0644);
MODULE_PARM_DESC(ec_event_clearing, "Assumed SCI_EVT clearing timing");
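/*
 * Example (not from the source): selecting the "event" timing model on
 * the kernel command line:
 *   acpi.ec_event_clearing=event
 */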
static struct acpi_driver acpi_ec_driver = {
	.name = "ec",
	.class = ACPI_EC_CLASS,
	.ids = ec_device_ids,
	.ops = {
		.add = acpi_ec_add,
		.remove = acpi_ec_remove,
		},
	.drv.pm = &acpi_ec_pm,
};
static inline int acpi_ec_query_init(void)
{
	if (!ec_query_wq) {
		ec_query_wq = alloc_workqueue("kec_query", 0,
					      ec_max_queries);
		if (!ec_query_wq)
			return -ENODEV;
	}
	return 0;
}

static inline void acpi_ec_query_exit(void)
{
	if (ec_query_wq) {
		destroy_workqueue(ec_query_wq);
		ec_query_wq = NULL;
	}
}
static const struct dmi_system_id acpi_ec_no_wakeup[] = {
	{
		.ident = "Thinkpad X1 Carbon 6th",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
			DMI_MATCH(DMI_PRODUCT_FAMILY, "Thinkpad X1 Carbon 6th"),
		},
	},
	{
		.ident = "ThinkPad X1 Carbon 6th",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
			DMI_MATCH(DMI_PRODUCT_FAMILY, "ThinkPad X1 Carbon 6th"),
		},
	},
	{
		.ident = "ThinkPad X1 Yoga 3rd",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
			DMI_MATCH(DMI_PRODUCT_FAMILY, "ThinkPad X1 Yoga 3rd"),
		},
	},
	{ },
};
int __init acpi_ec_init(void)
{
	int result;
	int ecdt_fail, dsdt_fail;

	/* register workqueue for _Qxx evaluations */
	result = acpi_ec_query_init();
	if (result)
		return result;

	/*
	 * Disable EC wakeup on the following systems to prevent periodic
	 * wakeup from EC GPE.
	 */
	if (dmi_check_system(acpi_ec_no_wakeup)) {
		ec_no_wakeup = true;
		pr_debug("Disabling EC wakeup on suspend-to-idle\n");
	}

	/* Drivers must be started after acpi_ec_query_init() */
	dsdt_fail = acpi_bus_register_driver(&acpi_ec_driver);
	/*
	 * Register ECDT to ACPI bus only when PNP0C09 probe fails. This is
	 * useful for platforms (confirmed on ASUS X550ZE) with valid ECDT
	 * settings but invalid DSDT settings.
	 * https://bugzilla.kernel.org/show_bug.cgi?id=196847
	 */
	ecdt_fail = acpi_ec_ecdt_start();
	return ecdt_fail && dsdt_fail ? -ENODEV : 0;
}
/* EC driver currently not unloadable */
#if 0
static void __exit acpi_ec_exit(void)
{
	acpi_bus_unregister_driver(&acpi_ec_driver);
	acpi_ec_query_exit();
}
#endif	/* 0 */