// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * ec.c - ACPI Embedded Controller Driver (v3)
 *
 * Copyright (C) 2001-2015 Intel Corporation
 *   Author: 2014, 2015 Lv Zheng <lv.zheng@intel.com>
 *           2006, 2007 Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>
 *           2006       Denis Sadykov <denis.m.sadykov@intel.com>
 *           2004       Luming Yu <luming.yu@intel.com>
 *           2001, 2002 Andy Grover <andrew.grover@intel.com>
 *           2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2008      Alexey Starikovskiy <astarikovskiy@suse.de>
 */

/* Uncomment next line to get verbose printout */
/* #define DEBUG */
#define pr_fmt(fmt) "ACPI: EC: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <asm/io.h>

#include "internal.h"

#define ACPI_EC_CLASS			"embedded_controller"
#define ACPI_EC_DEVICE_NAME		"Embedded Controller"
#define ACPI_EC_FILE_INFO		"info"

/* EC status register */
#define ACPI_EC_FLAG_OBF	0x01	/* Output buffer full */
#define ACPI_EC_FLAG_IBF	0x02	/* Input buffer full */
#define ACPI_EC_FLAG_CMD	0x08	/* Input buffer contains a command */
#define ACPI_EC_FLAG_BURST	0x10	/* burst mode */
#define ACPI_EC_FLAG_SCI	0x20	/* EC-SCI occurred */

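/*
 * Worked example (editorial illustration, not from the original source):
 * a status byte of 0x28 read from EC_SC decodes as SCI_EVT=1 (0x20) and
 * CMD=1 (0x08), i.e. an event query is pending and the last byte written
 * to the input buffer was a command. acpi_ec_read_status() below logs
 * exactly this bit-by-bit decomposition.
 */
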
/*
 * The SCI_EVT clearing timing is not defined by the ACPI specification.
 * This leads to lots of practical timing issues for the host EC driver.
 * The following variations are defined (from the target EC firmware's
 * perspective):
 * STATUS: After indicating SCI_EVT edge-triggered IRQ to the host, the
 *         target can clear SCI_EVT at any time so long as the host can see
 *         the indication by reading the status register (EC_SC). So the
 *         host should re-check SCI_EVT after the first time the SCI_EVT
 *         indication is seen, which is the same time the query request
 *         (QR_EC) is written to the command register (EC_CMD). SCI_EVT set
 *         at any later time could indicate another event. Normally such
 *         EC firmware has implemented an event queue and will
 *         return 0x00 to indicate "no outstanding event".
 * QUERY: After seeing the query request (QR_EC) written to the command
 *        register (EC_CMD) by the host and having prepared the responding
 *        event value in the data register (EC_DATA), the target can safely
 *        clear SCI_EVT because the target can confirm that the current
 *        event is being handled by the host. The host then should check
 *        SCI_EVT right after reading the event response from the data
 *        register (EC_DATA).
 * EVENT: After seeing the event response read from the data register
 *        (EC_DATA) by the host, the target can clear SCI_EVT. As the
 *        target requires time to notice the change in the data register
 *        (EC_DATA), the host may be required to wait additional guarding
 *        time before checking SCI_EVT again. Such guarding may not be
 *        necessary if the host is notified via another IRQ.
 */

#define ACPI_EC_EVT_TIMING_STATUS	0x00
#define ACPI_EC_EVT_TIMING_QUERY	0x01
#define ACPI_EC_EVT_TIMING_EVENT	0x02

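/*
 * Illustrative note (editorial, not from the original source): the
 * assumed clearing timing defaults to ACPI_EC_EVT_TIMING_QUERY and can
 * be overridden via the "ec_event_clearing" module parameter defined
 * near the end of this file, e.g. booting with:
 *
 *	acpi.ec_event_clearing=event
 *
 * selects ACPI_EC_EVT_TIMING_EVENT and enables the extra guarding
 * described above.
 */
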
/* EC commands */
enum ec_command {
	ACPI_EC_COMMAND_READ = 0x80,
	ACPI_EC_COMMAND_WRITE = 0x81,
	ACPI_EC_BURST_ENABLE = 0x82,
	ACPI_EC_BURST_DISABLE = 0x83,
	ACPI_EC_COMMAND_QUERY = 0x84,
};

#define ACPI_EC_DELAY		500	/* Wait 500ms max. during EC ops */
#define ACPI_EC_UDELAY_GLK	1000	/* Wait 1ms max. to get global lock */
#define ACPI_EC_UDELAY_POLL	550	/* Wait 550us for EC transaction polling */
#define ACPI_EC_CLEAR_MAX	100	/* Maximum number of events to query
					 * when trying to clear the EC */
#define ACPI_EC_MAX_QUERIES	16	/* Maximum number of parallel queries */

enum {
	EC_FLAGS_QUERY_ENABLED,			/* Query is enabled */
	EC_FLAGS_QUERY_PENDING,			/* Query is pending */
	EC_FLAGS_QUERY_GUARDING,		/* Guard for SCI_EVT check */
	EC_FLAGS_EVENT_HANDLER_INSTALLED,	/* Event handler installed */
	EC_FLAGS_EC_HANDLER_INSTALLED,		/* OpReg handler installed */
	EC_FLAGS_QUERY_METHODS_INSTALLED,	/* _Qxx handlers installed */
	EC_FLAGS_STARTED,			/* Driver is started */
	EC_FLAGS_STOPPED,			/* Driver is stopped */
	EC_FLAGS_EVENTS_MASKED,			/* Events masked */
};

#define ACPI_EC_COMMAND_POLL		0x01 /* Available for command byte */
#define ACPI_EC_COMMAND_COMPLETE	0x02 /* Completed last byte */

/* ec.c is compiled in acpi namespace so this shows up as acpi.ec_delay param */
static unsigned int ec_delay __read_mostly = ACPI_EC_DELAY;
module_param(ec_delay, uint, 0644);
MODULE_PARM_DESC(ec_delay, "Timeout(ms) waited until an EC command completes");

static unsigned int ec_max_queries __read_mostly = ACPI_EC_MAX_QUERIES;
module_param(ec_max_queries, uint, 0644);
MODULE_PARM_DESC(ec_max_queries, "Maximum parallel _Qxx evaluations");

static bool ec_busy_polling __read_mostly;
module_param(ec_busy_polling, bool, 0644);
MODULE_PARM_DESC(ec_busy_polling, "Use busy polling to advance EC transaction");

static unsigned int ec_polling_guard __read_mostly = ACPI_EC_UDELAY_POLL;
module_param(ec_polling_guard, uint, 0644);
MODULE_PARM_DESC(ec_polling_guard, "Guard time(us) between EC accesses in polling modes");

static unsigned int ec_event_clearing __read_mostly = ACPI_EC_EVT_TIMING_QUERY;

/*
 * If the number of false interrupts per transaction exceeds this
 * threshold, the driver assumes that a GPE storm has occurred and
 * disables the GPE for the normal transaction.
 */
static unsigned int ec_storm_threshold  __read_mostly = 8;
module_param(ec_storm_threshold, uint, 0644);
MODULE_PARM_DESC(ec_storm_threshold, "Maximum number of false GPEs not considered as a GPE storm");

static bool ec_freeze_events __read_mostly = false;
module_param(ec_freeze_events, bool, 0644);
MODULE_PARM_DESC(ec_freeze_events, "Disable event handling during suspend/resume");

static bool ec_no_wakeup __read_mostly;
module_param(ec_no_wakeup, bool, 0644);
MODULE_PARM_DESC(ec_no_wakeup, "Do not wake up from suspend-to-idle");

struct acpi_ec_query_handler {
	struct list_head node;
	acpi_ec_query_func func;
	acpi_handle handle;
	void *data;
	u8 query_bit;
	struct kref kref;
};

struct transaction {
	const u8 *wdata;
	u8 *rdata;
	unsigned short irq_count;
	u8 command;
	u8 wi;
	u8 ri;
	u8 wlen;
	u8 rlen;
	u8 flags;
};

struct acpi_ec_query {
	struct transaction transaction;
	struct work_struct work;
	struct acpi_ec_query_handler *handler;
};

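/*
 * Illustrative sketch (editorial, not from the original source): a
 * register read is described by a transaction with one write byte (the
 * EC address) and one read byte (the result):
 *
 *	u8 d;
 *	struct transaction t = {.command = ACPI_EC_COMMAND_READ,
 *				.wdata = &address, .rdata = &d,
 *				.wlen = 1, .rlen = 1};
 *
 * advance_transaction() then moves wi/ri forward as IBF/OBF permit;
 * acpi_ec_read() below builds exactly this descriptor.
 */
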
static int acpi_ec_query(struct acpi_ec *ec, u8 *data);
static void advance_transaction(struct acpi_ec *ec);
static void acpi_ec_event_handler(struct work_struct *work);
static void acpi_ec_event_processor(struct work_struct *work);

struct acpi_ec *first_ec;
EXPORT_SYMBOL(first_ec);

static struct acpi_ec *boot_ec;
static bool boot_ec_is_ecdt = false;
static struct workqueue_struct *ec_wq;
static struct workqueue_struct *ec_query_wq;

static int EC_FLAGS_QUERY_HANDSHAKE; /* Needs QR_EC issued when SCI_EVT set */
static int EC_FLAGS_CORRECT_ECDT; /* Needs ECDT port address correction */
static int EC_FLAGS_IGNORE_DSDT_GPE; /* Use the ECDT GPE instead of the DSDT GPE */
static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */

/* --------------------------------------------------------------------------
 *                           Logging/Debugging
 * -------------------------------------------------------------------------- */

/*
 * Splitters used by the developers to track the boundary of the EC
 * handling processes.
 */
#ifdef DEBUG
#define EC_DBG_SEP	" "
#define EC_DBG_DRV	"+++++"
#define EC_DBG_STM	"====="
#define EC_DBG_REQ	"*****"
#define EC_DBG_EVT	"#####"
#else
#define EC_DBG_SEP	""
#define EC_DBG_DRV
#define EC_DBG_STM
#define EC_DBG_REQ
#define EC_DBG_EVT
#endif

#define ec_log_raw(fmt, ...) \
	pr_info(fmt "\n", ##__VA_ARGS__)
#define ec_dbg_raw(fmt, ...) \
	pr_debug(fmt "\n", ##__VA_ARGS__)
#define ec_log(filter, fmt, ...) \
	ec_log_raw(filter EC_DBG_SEP fmt EC_DBG_SEP filter, ##__VA_ARGS__)
#define ec_dbg(filter, fmt, ...) \
	ec_dbg_raw(filter EC_DBG_SEP fmt EC_DBG_SEP filter, ##__VA_ARGS__)

#define ec_log_drv(fmt, ...) \
	ec_log(EC_DBG_DRV, fmt, ##__VA_ARGS__)
#define ec_dbg_drv(fmt, ...) \
	ec_dbg(EC_DBG_DRV, fmt, ##__VA_ARGS__)
#define ec_dbg_stm(fmt, ...) \
	ec_dbg(EC_DBG_STM, fmt, ##__VA_ARGS__)
#define ec_dbg_req(fmt, ...) \
	ec_dbg(EC_DBG_REQ, fmt, ##__VA_ARGS__)
#define ec_dbg_evt(fmt, ...) \
	ec_dbg(EC_DBG_EVT, fmt, ##__VA_ARGS__)
#define ec_dbg_ref(ec, fmt, ...) \
	ec_dbg_raw("%lu: " fmt, ec->reference_count, ## __VA_ARGS__)

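/*
 * Example (editorial illustration): with DEBUG defined, a call such as
 *
 *	ec_dbg_req("Command(%s) started", "RD_EC");
 *
 * is framed by the request splitter and prints:
 *
 *	ACPI: EC: ***** Command(RD_EC) started *****
 *
 * With DEBUG undefined the splitters collapse to empty strings.
 */
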
/* --------------------------------------------------------------------------
 *                           Device Flags
 * -------------------------------------------------------------------------- */

static bool acpi_ec_started(struct acpi_ec *ec)
{
	return test_bit(EC_FLAGS_STARTED, &ec->flags) &&
	       !test_bit(EC_FLAGS_STOPPED, &ec->flags);
}

static bool acpi_ec_event_enabled(struct acpi_ec *ec)
{
	/*
	 * There is OSPM early-stage logic: during the early stages
	 * (boot/resume), OSPM shouldn't enable the event handling; only
	 * the EC transactions are allowed to be performed.
	 */
	if (!test_bit(EC_FLAGS_QUERY_ENABLED, &ec->flags))
		return false;
	/*
	 * However, disabling the event handling is experimental for the
	 * late stage (suspend), and is controlled by the boot parameter
	 * "ec_freeze_events":
	 * 1. true:  The EC event handling is disabled before entering
	 *           the noirq stage.
	 * 2. false: The EC event handling is automatically disabled as
	 *           soon as the EC driver is stopped.
	 */
	if (ec_freeze_events)
		return acpi_ec_started(ec);
	else
		return test_bit(EC_FLAGS_STARTED, &ec->flags);
}

static bool acpi_ec_flushed(struct acpi_ec *ec)
{
	return ec->reference_count == 1;
}

/* --------------------------------------------------------------------------
 *                           EC Registers
 * -------------------------------------------------------------------------- */

static inline u8 acpi_ec_read_status(struct acpi_ec *ec)
{
	u8 x = inb(ec->command_addr);

	ec_dbg_raw("EC_SC(R) = 0x%2.2x "
		   "SCI_EVT=%d BURST=%d CMD=%d IBF=%d OBF=%d",
		   x,
		   !!(x & ACPI_EC_FLAG_SCI),
		   !!(x & ACPI_EC_FLAG_BURST),
		   !!(x & ACPI_EC_FLAG_CMD),
		   !!(x & ACPI_EC_FLAG_IBF),
		   !!(x & ACPI_EC_FLAG_OBF));
	return x;
}

static inline u8 acpi_ec_read_data(struct acpi_ec *ec)
{
	u8 x = inb(ec->data_addr);

	ec->timestamp = jiffies;
	ec_dbg_raw("EC_DATA(R) = 0x%2.2x", x);
	return x;
}

static inline void acpi_ec_write_cmd(struct acpi_ec *ec, u8 command)
{
	ec_dbg_raw("EC_SC(W) = 0x%2.2x", command);
	outb(command, ec->command_addr);
	ec->timestamp = jiffies;
}

static inline void acpi_ec_write_data(struct acpi_ec *ec, u8 data)
{
	ec_dbg_raw("EC_DATA(W) = 0x%2.2x", data);
	outb(data, ec->data_addr);
	ec->timestamp = jiffies;
}

#if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG)
static const char *acpi_ec_cmd_string(u8 cmd)
{
	switch (cmd) {
	case 0x80: return "RD_EC";
	case 0x81: return "WR_EC";
	case 0x82: return "BE_EC";
	case 0x83: return "BD_EC";
	case 0x84: return "QR_EC";
	}
	return "UNKNOWN";
}
#else
#define acpi_ec_cmd_string(cmd)		"UNDEF"
#endif

/* --------------------------------------------------------------------------
 *                           GPE Registers
 * -------------------------------------------------------------------------- */

static inline bool acpi_ec_is_gpe_raised(struct acpi_ec *ec)
{
	acpi_event_status gpe_status = 0;

	(void)acpi_get_gpe_status(NULL, ec->gpe, &gpe_status);
	return (gpe_status & ACPI_EVENT_FLAG_STATUS_SET) ? true : false;
}

static inline void acpi_ec_enable_gpe(struct acpi_ec *ec, bool open)
{
	if (open)
		acpi_enable_gpe(NULL, ec->gpe);
	else {
		BUG_ON(ec->reference_count < 1);
		acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE);
	}
	if (acpi_ec_is_gpe_raised(ec)) {
		/*
		 * On some platforms, EN=1 writes cannot trigger GPE. So
		 * software needs to manually trigger a pseudo GPE event on
		 * EN=1 writes.
		 */
		ec_dbg_raw("Polling quirk");
		advance_transaction(ec);
	}
}

static inline void acpi_ec_disable_gpe(struct acpi_ec *ec, bool close)
{
	if (close)
		acpi_disable_gpe(NULL, ec->gpe);
	else {
		BUG_ON(ec->reference_count < 1);
		acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_DISABLE);
	}
}

static inline void acpi_ec_clear_gpe(struct acpi_ec *ec)
{
	/*
	 * GPE STS is a W1C register, which means:
	 * 1. Software can clear it without worrying about clearing other
	 *    GPEs' STS bits when the hardware sets them in parallel.
	 * 2. As long as software can ensure only clearing it when it is
	 *    set, hardware won't set it in parallel.
	 * So software can clear GPE in any context.
	 * Warning: do not move the check into advance_transaction() as the
	 * EC commands will be sent without GPE raised.
	 */
	if (!acpi_ec_is_gpe_raised(ec))
		return;
	acpi_clear_gpe(NULL, ec->gpe);
}

/* --------------------------------------------------------------------------
 *                           Transaction Management
 * -------------------------------------------------------------------------- */

static void acpi_ec_submit_request(struct acpi_ec *ec)
{
	ec->reference_count++;
	if (test_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags) &&
	    ec->gpe >= 0 && ec->reference_count == 1)
		acpi_ec_enable_gpe(ec, true);
}

static void acpi_ec_complete_request(struct acpi_ec *ec)
{
	bool flushed = false;

	ec->reference_count--;
	if (test_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags) &&
	    ec->gpe >= 0 && ec->reference_count == 0)
		acpi_ec_disable_gpe(ec, true);
	flushed = acpi_ec_flushed(ec);
	if (flushed)
		wake_up(&ec->wait);
}

static void acpi_ec_mask_events(struct acpi_ec *ec)
{
	if (!test_bit(EC_FLAGS_EVENTS_MASKED, &ec->flags)) {
		if (ec->gpe >= 0)
			acpi_ec_disable_gpe(ec, false);
		else
			disable_irq_nosync(ec->irq);

		ec_dbg_drv("Polling enabled");
		set_bit(EC_FLAGS_EVENTS_MASKED, &ec->flags);
	}
}

static void acpi_ec_unmask_events(struct acpi_ec *ec)
{
	if (test_bit(EC_FLAGS_EVENTS_MASKED, &ec->flags)) {
		clear_bit(EC_FLAGS_EVENTS_MASKED, &ec->flags);
		if (ec->gpe >= 0)
			acpi_ec_enable_gpe(ec, false);
		else
			enable_irq(ec->irq);

		ec_dbg_drv("Polling disabled");
	}
}

/*
 * acpi_ec_submit_flushable_request() - Increase the reference count unless
 *                                      the flush operation is not in
 *                                      progress
 * @ec: the EC device
 *
 * This function must be used before taking a new action that should hold
 * the reference count.  If this function returns false, then the action
 * must be discarded or it will prevent the flush operation from being
 * completed.
 */
static bool acpi_ec_submit_flushable_request(struct acpi_ec *ec)
{
	if (!acpi_ec_started(ec))
		return false;
	acpi_ec_submit_request(ec);
	return true;
}

static void acpi_ec_submit_query(struct acpi_ec *ec)
{
	acpi_ec_mask_events(ec);
	if (!acpi_ec_event_enabled(ec))
		return;
	if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) {
		ec_dbg_evt("Command(%s) submitted/blocked",
			   acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
		ec->nr_pending_queries++;
		queue_work(ec_wq, &ec->work);
	}
}

static void acpi_ec_complete_query(struct acpi_ec *ec)
{
	if (test_and_clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags))
		ec_dbg_evt("Command(%s) unblocked",
			   acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
	acpi_ec_unmask_events(ec);
}

static inline void __acpi_ec_enable_event(struct acpi_ec *ec)
{
	if (!test_and_set_bit(EC_FLAGS_QUERY_ENABLED, &ec->flags))
		ec_log_drv("event unblocked");
	/*
	 * Unconditionally invoke this once after enabling the event
	 * handling mechanism to detect the pending events.
	 */
	advance_transaction(ec);
}

static inline void __acpi_ec_disable_event(struct acpi_ec *ec)
{
	if (test_and_clear_bit(EC_FLAGS_QUERY_ENABLED, &ec->flags))
		ec_log_drv("event blocked");
}

/*
 * Process _Q events that might have accumulated in the EC.
 * Run with locked ec mutex.
 */
static void acpi_ec_clear(struct acpi_ec *ec)
{
	int i, status;
	u8 value = 0;

	for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) {
		status = acpi_ec_query(ec, &value);
		if (status || !value)
			break;
	}
	if (unlikely(i == ACPI_EC_CLEAR_MAX))
		pr_warn("Warning: Maximum of %d stale EC events cleared\n", i);
	else
		pr_info("%d stale EC events cleared\n", i);
}

static void acpi_ec_enable_event(struct acpi_ec *ec)
{
	unsigned long flags;

	spin_lock_irqsave(&ec->lock, flags);
	if (acpi_ec_started(ec))
		__acpi_ec_enable_event(ec);
	spin_unlock_irqrestore(&ec->lock, flags);

	/* Drain additional events if hardware requires that */
	if (EC_FLAGS_CLEAR_ON_RESUME)
		acpi_ec_clear(ec);
}

#ifdef CONFIG_PM_SLEEP
static void __acpi_ec_flush_work(void)
{
	drain_workqueue(ec_wq); /* flush ec->work */
	flush_workqueue(ec_query_wq); /* flush queries */
}

static void acpi_ec_disable_event(struct acpi_ec *ec)
{
	unsigned long flags;

	spin_lock_irqsave(&ec->lock, flags);
	__acpi_ec_disable_event(ec);
	spin_unlock_irqrestore(&ec->lock, flags);

	/*
	 * When ec_freeze_events is true, we need to flush events in
	 * the proper position before entering the noirq stage.
	 */
	__acpi_ec_flush_work();
}

void acpi_ec_flush_work(void)
{
	/* Without ec_wq there is nothing to flush. */
	if (!ec_wq)
		return;

	__acpi_ec_flush_work();
}
#endif /* CONFIG_PM_SLEEP */

static bool acpi_ec_guard_event(struct acpi_ec *ec)
{
	bool guarded = true;
	unsigned long flags;

	spin_lock_irqsave(&ec->lock, flags);
	/*
	 * If firmware SCI_EVT clearing timing is "event", we actually
	 * don't know when the SCI_EVT will be cleared by firmware after
	 * evaluating _Qxx, so we need to re-check SCI_EVT after waiting an
	 * acceptable period.
	 *
	 * The guarding period begins when EC_FLAGS_QUERY_PENDING is
	 * flagged, which means the SCI_EVT check has just been performed.
	 * But if the current transaction is ACPI_EC_COMMAND_QUERY, the
	 * guarding should have already been performed (via
	 * EC_FLAGS_QUERY_GUARDING) and should not be applied so that the
	 * ACPI_EC_COMMAND_QUERY transaction can be transitioned into
	 * ACPI_EC_COMMAND_POLL state immediately.
	 */
	if (ec_event_clearing == ACPI_EC_EVT_TIMING_STATUS ||
	    ec_event_clearing == ACPI_EC_EVT_TIMING_QUERY ||
	    !test_bit(EC_FLAGS_QUERY_PENDING, &ec->flags) ||
	    (ec->curr && ec->curr->command == ACPI_EC_COMMAND_QUERY))
		guarded = false;
	spin_unlock_irqrestore(&ec->lock, flags);
	return guarded;
}

static int ec_transaction_polled(struct acpi_ec *ec)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&ec->lock, flags);
	if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_POLL))
		ret = 1;
	spin_unlock_irqrestore(&ec->lock, flags);
	return ret;
}

static int ec_transaction_completed(struct acpi_ec *ec)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&ec->lock, flags);
	if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_COMPLETE))
		ret = 1;
	spin_unlock_irqrestore(&ec->lock, flags);
	return ret;
}

static inline void ec_transaction_transition(struct acpi_ec *ec, unsigned long flag)
{
	ec->curr->flags |= flag;
	if (ec->curr->command == ACPI_EC_COMMAND_QUERY) {
		if (ec_event_clearing == ACPI_EC_EVT_TIMING_STATUS &&
		    flag == ACPI_EC_COMMAND_POLL)
			acpi_ec_complete_query(ec);
		if (ec_event_clearing == ACPI_EC_EVT_TIMING_QUERY &&
		    flag == ACPI_EC_COMMAND_COMPLETE)
			acpi_ec_complete_query(ec);
		if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT &&
		    flag == ACPI_EC_COMMAND_COMPLETE)
			set_bit(EC_FLAGS_QUERY_GUARDING, &ec->flags);
	}
}

static void advance_transaction(struct acpi_ec *ec)
{
	struct transaction *t;
	u8 status;
	bool wakeup = false;

	ec_dbg_stm("%s (%d)", in_interrupt() ? "IRQ" : "TASK",
		   smp_processor_id());
	/*
	 * By always clearing STS before handling all indications, we can
	 * ensure a hardware STS 0->1 change after this clearing can always
	 * trigger a GPE interrupt.
	 */
	if (ec->gpe >= 0)
		acpi_ec_clear_gpe(ec);

	status = acpi_ec_read_status(ec);
	t = ec->curr;
	/*
	 * Another IRQ or a guarded polling mode advancement is detected,
	 * the next QR_EC submission is then allowed.
	 */
	if (!t || !(t->flags & ACPI_EC_COMMAND_POLL)) {
		if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT &&
		    (!ec->nr_pending_queries ||
		     test_bit(EC_FLAGS_QUERY_GUARDING, &ec->flags))) {
			clear_bit(EC_FLAGS_QUERY_GUARDING, &ec->flags);
			acpi_ec_complete_query(ec);
		}
	}
	if (!t)
		goto err;
	if (t->flags & ACPI_EC_COMMAND_POLL) {
		if (t->wlen > t->wi) {
			if ((status & ACPI_EC_FLAG_IBF) == 0)
				acpi_ec_write_data(ec, t->wdata[t->wi++]);
			else
				goto err;
		} else if (t->rlen > t->ri) {
			if ((status & ACPI_EC_FLAG_OBF) == 1) {
				t->rdata[t->ri++] = acpi_ec_read_data(ec);
				if (t->rlen == t->ri) {
					ec_transaction_transition(ec, ACPI_EC_COMMAND_COMPLETE);
					if (t->command == ACPI_EC_COMMAND_QUERY)
						ec_dbg_evt("Command(%s) completed by hardware",
							   acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
					wakeup = true;
				}
			} else
				goto err;
		} else if (t->wlen == t->wi &&
			   (status & ACPI_EC_FLAG_IBF) == 0) {
			ec_transaction_transition(ec, ACPI_EC_COMMAND_COMPLETE);
			wakeup = true;
		}
		goto out;
	} else {
		if (EC_FLAGS_QUERY_HANDSHAKE &&
		    !(status & ACPI_EC_FLAG_SCI) &&
		    (t->command == ACPI_EC_COMMAND_QUERY)) {
			ec_transaction_transition(ec, ACPI_EC_COMMAND_POLL);
			t->rdata[t->ri++] = 0x00;
			ec_transaction_transition(ec, ACPI_EC_COMMAND_COMPLETE);
			ec_dbg_evt("Command(%s) completed by software",
				   acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
			wakeup = true;
		} else if ((status & ACPI_EC_FLAG_IBF) == 0) {
			acpi_ec_write_cmd(ec, t->command);
			ec_transaction_transition(ec, ACPI_EC_COMMAND_POLL);
		} else
			goto err;
		goto out;
	}
err:
	/*
	 * If the SCI bit is set, do not treat it as a false IRQ;
	 * otherwise an unhandled IRQ would be taken as a false one.
	 */
	if (!(status & ACPI_EC_FLAG_SCI)) {
		if (in_interrupt() && t) {
			if (t->irq_count < ec_storm_threshold)
				++t->irq_count;
			/* Allow triggering on 0 threshold */
			if (t->irq_count == ec_storm_threshold)
				acpi_ec_mask_events(ec);
		}
	}
out:
	if (status & ACPI_EC_FLAG_SCI)
		acpi_ec_submit_query(ec);
	if (wakeup && in_interrupt())
		wake_up(&ec->wait);
}

static void start_transaction(struct acpi_ec *ec)
{
	ec->curr->irq_count = ec->curr->wi = ec->curr->ri = 0;
	ec->curr->flags = 0;
}

static int ec_guard(struct acpi_ec *ec)
{
	unsigned long guard = usecs_to_jiffies(ec->polling_guard);
	unsigned long timeout = ec->timestamp + guard;

	/* Ensure guarding period before polling EC status */
	do {
		if (ec->busy_polling) {
			/* Perform busy polling */
			if (ec_transaction_completed(ec))
				return 0;
			udelay(jiffies_to_usecs(guard));
		} else {
			/*
			 * Perform wait polling
			 * 1. Wait for the transaction to be completed by
			 *    the GPE handler after the transaction enters
			 *    ACPI_EC_COMMAND_POLL state.
			 * 2. A special guarding logic is also required
			 *    for event clearing mode "event" before the
			 *    transaction enters ACPI_EC_COMMAND_POLL
			 *    state.
			 */
			if (!ec_transaction_polled(ec) &&
			    !acpi_ec_guard_event(ec))
				break;
			if (wait_event_timeout(ec->wait,
					       ec_transaction_completed(ec),
					       guard))
				return 0;
		}
	} while (time_before(jiffies, timeout));
	return -ETIME;
}

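/*
 * Worked timing example (editorial, assuming HZ=250): with the default
 * ec_polling_guard of 550 us, usecs_to_jiffies(550) rounds up to one
 * jiffy (4 ms at HZ=250), so in wait mode each wait_event_timeout()
 * above sleeps for at most one tick before the transaction state is
 * re-checked, and in busy-polling mode udelay() spins for the same
 * rounded-up period.
 */
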
static int ec_poll(struct acpi_ec *ec)
{
	unsigned long flags;
	int repeat = 5; /* number of command restarts */

	while (repeat--) {
		unsigned long delay = jiffies +
			msecs_to_jiffies(ec_delay);
		do {
			if (!ec_guard(ec))
				return 0;
			spin_lock_irqsave(&ec->lock, flags);
			advance_transaction(ec);
			spin_unlock_irqrestore(&ec->lock, flags);
		} while (time_before(jiffies, delay));
		pr_debug("controller reset, restart transaction\n");
		spin_lock_irqsave(&ec->lock, flags);
		start_transaction(ec);
		spin_unlock_irqrestore(&ec->lock, flags);
	}
	return -ETIME;
}

static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
					struct transaction *t)
{
	unsigned long tmp;
	int ret = 0;

	/* start transaction */
	spin_lock_irqsave(&ec->lock, tmp);
	/* Enable GPE for command processing (IBF=0/OBF=1) */
	if (!acpi_ec_submit_flushable_request(ec)) {
		ret = -EINVAL;
		goto unlock;
	}
	ec_dbg_ref(ec, "Increase command");
	/* following two actions should be kept atomic */
	ec->curr = t;
	ec_dbg_req("Command(%s) started", acpi_ec_cmd_string(t->command));
	start_transaction(ec);
	spin_unlock_irqrestore(&ec->lock, tmp);

	ret = ec_poll(ec);

	spin_lock_irqsave(&ec->lock, tmp);
	if (t->irq_count == ec_storm_threshold)
		acpi_ec_unmask_events(ec);
	ec_dbg_req("Command(%s) stopped", acpi_ec_cmd_string(t->command));
	ec->curr = NULL;
	/* Disable GPE for command processing (IBF=0/OBF=1) */
	acpi_ec_complete_request(ec);
	ec_dbg_ref(ec, "Decrease command");
unlock:
	spin_unlock_irqrestore(&ec->lock, tmp);
	return ret;
}

static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
{
	int status;
	u32 glk;

	if (!ec || (!t) || (t->wlen && !t->wdata) || (t->rlen && !t->rdata))
		return -EINVAL;
	if (t->rdata)
		memset(t->rdata, 0, t->rlen);

	mutex_lock(&ec->mutex);
	if (ec->global_lock) {
		status = acpi_acquire_global_lock(ACPI_EC_UDELAY_GLK, &glk);
		if (ACPI_FAILURE(status)) {
			status = -ENODEV;
			goto unlock;
		}
	}

	status = acpi_ec_transaction_unlocked(ec, t);

	if (ec->global_lock)
		acpi_release_global_lock(glk);
unlock:
	mutex_unlock(&ec->mutex);
	return status;
}

static int acpi_ec_burst_enable(struct acpi_ec *ec)
{
	u8 d;
	struct transaction t = {.command = ACPI_EC_BURST_ENABLE,
				.wdata = NULL, .rdata = &d,
				.wlen = 0, .rlen = 1};

	return acpi_ec_transaction(ec, &t);
}

static int acpi_ec_burst_disable(struct acpi_ec *ec)
{
	struct transaction t = {.command = ACPI_EC_BURST_DISABLE,
				.wdata = NULL, .rdata = NULL,
				.wlen = 0, .rlen = 0};

	return (acpi_ec_read_status(ec) & ACPI_EC_FLAG_BURST) ?
				acpi_ec_transaction(ec, &t) : 0;
}

static int acpi_ec_read(struct acpi_ec *ec, u8 address, u8 *data)
{
	int result;
	u8 d;
	struct transaction t = {.command = ACPI_EC_COMMAND_READ,
				.wdata = &address, .rdata = &d,
				.wlen = 1, .rlen = 1};

	result = acpi_ec_transaction(ec, &t);
	*data = d;
	return result;
}

static int acpi_ec_write(struct acpi_ec *ec, u8 address, u8 data)
{
	u8 wdata[2] = { address, data };
	struct transaction t = {.command = ACPI_EC_COMMAND_WRITE,
				.wdata = wdata, .rdata = NULL,
				.wlen = 2, .rlen = 0};

	return acpi_ec_transaction(ec, &t);
}

int ec_read(u8 addr, u8 *val)
{
	int err;
	u8 temp_data;

	if (!first_ec)
		return -ENODEV;

	err = acpi_ec_read(first_ec, addr, &temp_data);

	if (!err) {
		*val = temp_data;
		return 0;
	}
	return err;
}
EXPORT_SYMBOL(ec_read);

int ec_write(u8 addr, u8 val)
{
	int err;

	if (!first_ec)
		return -ENODEV;

	err = acpi_ec_write(first_ec, addr, val);

	return err;
}
EXPORT_SYMBOL(ec_write);

int ec_transaction(u8 command,
		   const u8 *wdata, unsigned wdata_len,
		   u8 *rdata, unsigned rdata_len)
{
	struct transaction t = {.command = command,
				.wdata = wdata, .rdata = rdata,
				.wlen = wdata_len, .rlen = rdata_len};

	if (!first_ec)
		return -ENODEV;

	return acpi_ec_transaction(first_ec, &t);
}
EXPORT_SYMBOL(ec_transaction);

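/*
 * Usage sketch for the exported helpers (editorial illustration; the
 * register offset 0x95 is hypothetical and platform-specific):
 *
 *	u8 val;
 *
 *	if (!ec_read(0x95, &val))
 *		pr_info("EC reg 0x95 = 0x%02x\n", val);
 *
 * Callers must tolerate -ENODEV, which is returned when no EC has been
 * enumerated yet.
 */
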
/* Get the handle to the EC device */
acpi_handle ec_get_handle(void)
{
	if (!first_ec)
		return NULL;
	return first_ec->handle;
}
EXPORT_SYMBOL(ec_get_handle);

static void acpi_ec_start(struct acpi_ec *ec, bool resuming)
{
	unsigned long flags;

	spin_lock_irqsave(&ec->lock, flags);
	if (!test_and_set_bit(EC_FLAGS_STARTED, &ec->flags)) {
		ec_dbg_drv("Starting EC");
		/* Enable GPE for event processing (SCI_EVT=1) */
		if (!resuming) {
			acpi_ec_submit_request(ec);
			ec_dbg_ref(ec, "Increase driver");
		}
		ec_log_drv("EC started");
	}
	spin_unlock_irqrestore(&ec->lock, flags);
}

static bool acpi_ec_stopped(struct acpi_ec *ec)
{
	unsigned long flags;
	bool flushed;

	spin_lock_irqsave(&ec->lock, flags);
	flushed = acpi_ec_flushed(ec);
	spin_unlock_irqrestore(&ec->lock, flags);
	return flushed;
}

static void acpi_ec_stop(struct acpi_ec *ec, bool suspending)
{
	unsigned long flags;

	spin_lock_irqsave(&ec->lock, flags);
	if (acpi_ec_started(ec)) {
		ec_dbg_drv("Stopping EC");
		set_bit(EC_FLAGS_STOPPED, &ec->flags);
		spin_unlock_irqrestore(&ec->lock, flags);
		wait_event(ec->wait, acpi_ec_stopped(ec));
		spin_lock_irqsave(&ec->lock, flags);
		/* Disable GPE for event processing (SCI_EVT=1) */
		if (!suspending) {
			acpi_ec_complete_request(ec);
			ec_dbg_ref(ec, "Decrease driver");
		} else if (!ec_freeze_events)
			__acpi_ec_disable_event(ec);
		clear_bit(EC_FLAGS_STARTED, &ec->flags);
		clear_bit(EC_FLAGS_STOPPED, &ec->flags);
		ec_log_drv("EC stopped");
	}
	spin_unlock_irqrestore(&ec->lock, flags);
}

static void acpi_ec_enter_noirq(struct acpi_ec *ec)
{
	unsigned long flags;

	spin_lock_irqsave(&ec->lock, flags);
	ec->busy_polling = true;
	ec->polling_guard = 0;
	ec_log_drv("interrupt blocked");
	spin_unlock_irqrestore(&ec->lock, flags);
}

static void acpi_ec_leave_noirq(struct acpi_ec *ec)
{
	unsigned long flags;

	spin_lock_irqsave(&ec->lock, flags);
	ec->busy_polling = ec_busy_polling;
	ec->polling_guard = ec_polling_guard;
	ec_log_drv("interrupt unblocked");
	spin_unlock_irqrestore(&ec->lock, flags);
}

void acpi_ec_block_transactions(void)
{
	struct acpi_ec *ec = first_ec;

	if (!ec)
		return;

	mutex_lock(&ec->mutex);
	/* Prevent transactions from being carried out */
	acpi_ec_stop(ec, true);
	mutex_unlock(&ec->mutex);
}

void acpi_ec_unblock_transactions(void)
{
	/*
	 * Allow transactions to happen again (this function is called from
	 * atomic context during wakeup, so we don't need to acquire the mutex).
	 */
	if (first_ec)
		acpi_ec_start(first_ec, true);
}

/* --------------------------------------------------------------------------
 *                                Event Management
 * -------------------------------------------------------------------------- */

static struct acpi_ec_query_handler *
acpi_ec_get_query_handler_by_value(struct acpi_ec *ec, u8 value)
{
	struct acpi_ec_query_handler *handler;

	mutex_lock(&ec->mutex);
	list_for_each_entry(handler, &ec->list, node) {
		if (value == handler->query_bit) {
			kref_get(&handler->kref);
			mutex_unlock(&ec->mutex);
			return handler;
		}
	}
	mutex_unlock(&ec->mutex);
	return NULL;
}

static void acpi_ec_query_handler_release(struct kref *kref)
{
	struct acpi_ec_query_handler *handler =
		container_of(kref, struct acpi_ec_query_handler, kref);

	kfree(handler);
}

static void acpi_ec_put_query_handler(struct acpi_ec_query_handler *handler)
{
	kref_put(&handler->kref, acpi_ec_query_handler_release);
}

int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
			      acpi_handle handle, acpi_ec_query_func func,
			      void *data)
{
	struct acpi_ec_query_handler *handler =
		kzalloc(sizeof(struct acpi_ec_query_handler), GFP_KERNEL);

	if (!handler)
		return -ENOMEM;

	handler->query_bit = query_bit;
	handler->handle = handle;
	handler->func = func;
	handler->data = data;
	mutex_lock(&ec->mutex);
	kref_init(&handler->kref);
	list_add(&handler->node, &ec->list);
	mutex_unlock(&ec->mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(acpi_ec_add_query_handler);

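/*
 * Usage sketch (editorial illustration; the query value 0x09 and the
 * callback name are hypothetical): a consumer can attach a kernel
 * callback instead of a _Qxx method:
 *
 *	static int my_q09_handler(void *data) { ...; return 0; }
 *
 *	acpi_ec_add_query_handler(ec, 0x09, NULL, my_q09_handler, NULL);
 *
 * acpi_ec_event_processor() then invokes the callback from the query
 * workqueue whenever QR_EC returns 0x09.
 */
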
static void acpi_ec_remove_query_handlers(struct acpi_ec *ec,
					  bool remove_all, u8 query_bit)
{
	struct acpi_ec_query_handler *handler, *tmp;
	LIST_HEAD(free_list);

	mutex_lock(&ec->mutex);
	list_for_each_entry_safe(handler, tmp, &ec->list, node) {
		if (remove_all || query_bit == handler->query_bit) {
			list_del_init(&handler->node);
			list_add(&handler->node, &free_list);
		}
	}
	mutex_unlock(&ec->mutex);
	list_for_each_entry_safe(handler, tmp, &free_list, node)
		acpi_ec_put_query_handler(handler);
}

void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit)
{
	acpi_ec_remove_query_handlers(ec, false, query_bit);
}
EXPORT_SYMBOL_GPL(acpi_ec_remove_query_handler);

static struct acpi_ec_query *acpi_ec_create_query(u8 *pval)
{
	struct acpi_ec_query *q;
	struct transaction *t;

	q = kzalloc(sizeof (struct acpi_ec_query), GFP_KERNEL);
	if (!q)
		return NULL;
	INIT_WORK(&q->work, acpi_ec_event_processor);
	t = &q->transaction;
	t->command = ACPI_EC_COMMAND_QUERY;
	t->rdata = pval;
	t->rlen = 1;
	return q;
}

static void acpi_ec_delete_query(struct acpi_ec_query *q)
{
	if (q) {
		if (q->handler)
			acpi_ec_put_query_handler(q->handler);
		kfree(q);
	}
}

static void acpi_ec_event_processor(struct work_struct *work)
{
	struct acpi_ec_query *q = container_of(work, struct acpi_ec_query, work);
	struct acpi_ec_query_handler *handler = q->handler;

	ec_dbg_evt("Query(0x%02x) started", handler->query_bit);
	if (handler->func)
		handler->func(handler->data);
	else if (handler->handle)
		acpi_evaluate_object(handler->handle, NULL, NULL, NULL);
	ec_dbg_evt("Query(0x%02x) stopped", handler->query_bit);
	acpi_ec_delete_query(q);
}

static int acpi_ec_query(struct acpi_ec *ec, u8 *data)
{
	u8 value = 0;
	int result;
	struct acpi_ec_query *q;

	q = acpi_ec_create_query(&value);
	if (!q)
		return -ENOMEM;

	/*
	 * Query the EC to find out which _Qxx method we need to evaluate.
	 * Note that successful completion of the query causes the ACPI_EC_SCI
	 * bit to be cleared (and thus clearing the interrupt source).
	 */
	result = acpi_ec_transaction(ec, &q->transaction);
	if (!value)
		result = -ENODATA;
	if (result)
		goto err_exit;

	q->handler = acpi_ec_get_query_handler_by_value(ec, value);
	if (!q->handler) {
		result = -ENODATA;
		goto err_exit;
	}

	/*
	 * It is reported that _Qxx are evaluated in a parallel way on
	 * Windows:
	 * https://bugzilla.kernel.org/show_bug.cgi?id=94411
	 *
	 * Put this log entry before schedule_work() in order to make
	 * it appear before any other log entries produced during the
	 * work queue execution.
	 */
	ec_dbg_evt("Query(0x%02x) scheduled", value);
	if (!queue_work(ec_query_wq, &q->work)) {
		ec_dbg_evt("Query(0x%02x) overlapped", value);
		result = -EBUSY;
	}

err_exit:
	if (result)
		acpi_ec_delete_query(q);
	if (data)
		*data = value;
	return result;
}

static void acpi_ec_check_event(struct acpi_ec *ec)
{
	unsigned long flags;

	if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT) {
		if (ec_guard(ec)) {
			spin_lock_irqsave(&ec->lock, flags);
			/*
			 * Take care of SCI_EVT unless no one else is
			 * taking care of it.
			 */
			if (!ec->curr)
				advance_transaction(ec);
			spin_unlock_irqrestore(&ec->lock, flags);
		}
	}
}

static void acpi_ec_event_handler(struct work_struct *work)
{
	unsigned long flags;
	struct acpi_ec *ec = container_of(work, struct acpi_ec, work);

	ec_dbg_evt("Event started");

	spin_lock_irqsave(&ec->lock, flags);
	while (ec->nr_pending_queries) {
		spin_unlock_irqrestore(&ec->lock, flags);
		(void)acpi_ec_query(ec, NULL);
		spin_lock_irqsave(&ec->lock, flags);
		ec->nr_pending_queries--;
		/*
		 * Before exit, make sure that this work item can be
		 * scheduled again. There might be QR_EC failures, leaving
		 * EC_FLAGS_QUERY_PENDING uncleared and preventing this work
		 * item from being scheduled again.
		 */
		if (!ec->nr_pending_queries) {
			if (ec_event_clearing == ACPI_EC_EVT_TIMING_STATUS ||
			    ec_event_clearing == ACPI_EC_EVT_TIMING_QUERY)
				acpi_ec_complete_query(ec);
		}
	}
	spin_unlock_irqrestore(&ec->lock, flags);

	ec_dbg_evt("Event stopped");

	acpi_ec_check_event(ec);
}

static void acpi_ec_handle_interrupt(struct acpi_ec *ec)
{
	unsigned long flags;

	spin_lock_irqsave(&ec->lock, flags);
	advance_transaction(ec);
	spin_unlock_irqrestore(&ec->lock, flags);
}

static u32 acpi_ec_gpe_handler(acpi_handle gpe_device,
			       u32 gpe_number, void *data)
{
	acpi_ec_handle_interrupt(data);
	return ACPI_INTERRUPT_HANDLED;
}

static irqreturn_t acpi_ec_irq_handler(int irq, void *data)
{
	acpi_ec_handle_interrupt(data);
	return IRQ_HANDLED;
}

/* --------------------------------------------------------------------------
 *                           Address Space Management
 * -------------------------------------------------------------------------- */

static acpi_status
acpi_ec_space_handler(u32 function, acpi_physical_address address,
		      u32 bits, u64 *value64,
		      void *handler_context, void *region_context)
{
	struct acpi_ec *ec = handler_context;
	int result = 0, i, bytes = bits / 8;
	u8 *value = (u8 *)value64;

	if ((address > 0xFF) || !value || !handler_context)
		return AE_BAD_PARAMETER;

	if (function != ACPI_READ && function != ACPI_WRITE)
		return AE_BAD_PARAMETER;

	if (ec->busy_polling || bits > 8)
		acpi_ec_burst_enable(ec);

	for (i = 0; i < bytes; ++i, ++address, ++value)
		result = (function == ACPI_READ) ?
			acpi_ec_read(ec, address, value) :
			acpi_ec_write(ec, address, *value);

	if (ec->busy_polling || bits > 8)
		acpi_ec_burst_disable(ec);

	switch (result) {
	case -EINVAL:
		return AE_BAD_PARAMETER;
	case -ENODEV:
		return AE_NOT_FOUND;
	case -ETIME:
		return AE_TIME;
	default:
		return AE_OK;
	}
}

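/*
 * Firmware-side sketch (editorial illustration in ASL, not from this
 * file): AML typically declares the region this handler serves as:
 *
 *	OperationRegion (ECOR, EmbeddedControl, Zero, 0xFF)
 *	Field (ECOR, ByteAcc, Lock, Preserve) { BTST, 8 }
 *
 * A read of BTST then arrives here with function == ACPI_READ,
 * address == 0 and bits == 8, and is served byte-by-byte through
 * acpi_ec_read().
 */
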
/* --------------------------------------------------------------------------
 *                             Driver Interface
 * -------------------------------------------------------------------------- */

static acpi_status
ec_parse_io_ports(struct acpi_resource *resource, void *context);

static void acpi_ec_free(struct acpi_ec *ec)
{
	if (first_ec == ec)
		first_ec = NULL;
	if (boot_ec == ec)
		boot_ec = NULL;
	kfree(ec);
}

static struct acpi_ec *acpi_ec_alloc(void)
{
	struct acpi_ec *ec = kzalloc(sizeof(struct acpi_ec), GFP_KERNEL);

	if (!ec)
		return NULL;
	mutex_init(&ec->mutex);
	init_waitqueue_head(&ec->wait);
	INIT_LIST_HEAD(&ec->list);
	spin_lock_init(&ec->lock);
	INIT_WORK(&ec->work, acpi_ec_event_handler);
	ec->timestamp = jiffies;
	ec->busy_polling = true;
	ec->polling_guard = 0;
	ec->gpe = -1;
	ec->irq = -1;
	return ec;
}

static acpi_status
acpi_ec_register_query_methods(acpi_handle handle, u32 level,
			       void *context, void **return_value)
{
	char node_name[5];
	struct acpi_buffer buffer = { sizeof(node_name), node_name };
	struct acpi_ec *ec = context;
	int value = 0;
	acpi_status status;

	status = acpi_get_name(handle, ACPI_SINGLE_NAME, &buffer);

	if (ACPI_SUCCESS(status) && sscanf(node_name, "_Q%x", &value) == 1)
		acpi_ec_add_query_handler(ec, value, handle, NULL, NULL);
	return AE_OK;
}

static acpi_status
ec_parse_device(acpi_handle handle, u32 Level, void *context, void **retval)
{
	acpi_status status;
	unsigned long long tmp = 0;
	struct acpi_ec *ec = context;

	/* clear addr values, ec_parse_io_ports depends on it */
	ec->command_addr = ec->data_addr = 0;

	status = acpi_walk_resources(handle, METHOD_NAME__CRS,
				     ec_parse_io_ports, ec);
	if (ACPI_FAILURE(status))
		return status;
	if (ec->data_addr == 0 || ec->command_addr == 0)
		return AE_OK;

	if (boot_ec && boot_ec_is_ecdt && EC_FLAGS_IGNORE_DSDT_GPE) {
		/*
		 * Always inherit the GPE number setting from the ECDT
		 * EC.
		 */
		ec->gpe = boot_ec->gpe;
	} else {
		/* Get GPE bit assignment (EC events). */
		/* TODO: Add support for _GPE returning a package */
		status = acpi_evaluate_integer(handle, "_GPE", NULL, &tmp);
		if (ACPI_SUCCESS(status))
			ec->gpe = tmp;
		/*
		 * Errors are non-fatal, allowing for ACPI Reduced Hardware
		 * platforms which use GpioInt instead of GPE.
		 */
	}
	/* Use the global lock for all EC transactions? */
	tmp = 0;
	acpi_evaluate_integer(handle, "_GLK", NULL, &tmp);
	ec->global_lock = tmp;
	ec->handle = handle;
	return AE_CTRL_TERMINATE;
}

static void install_gpe_event_handler(struct acpi_ec *ec)
{
	acpi_status status =
		acpi_install_gpe_raw_handler(NULL, ec->gpe,
					     ACPI_GPE_EDGE_TRIGGERED,
					     &acpi_ec_gpe_handler,
					     ec);

	if (ACPI_SUCCESS(status)) {
		/* This is not fatal as we can poll EC events */
		set_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags);
		acpi_ec_leave_noirq(ec);
		if (test_bit(EC_FLAGS_STARTED, &ec->flags) &&
		    ec->reference_count >= 1)
			acpi_ec_enable_gpe(ec, true);
	}
}

/* ACPI reduced hardware platforms use a GpioInt specified in _CRS. */
static int install_gpio_irq_event_handler(struct acpi_ec *ec,
					  struct acpi_device *device)
{
	int irq = acpi_dev_gpio_irq_get(device, 0);
	int ret;

	if (irq < 0)
		return irq;

	ret = request_irq(irq, acpi_ec_irq_handler, IRQF_SHARED,
			  "ACPI EC", ec);

	/*
	 * Unlike the GPE case, we treat errors here as fatal, we'll only
	 * implement GPIO polling if we find a case that needs it.
	 */
	if (ret < 0)
		return ret;

	ec->irq = irq;
	set_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags);
	acpi_ec_leave_noirq(ec);

	return 0;
}

/*
 * Note: This function returns an error code only when the address space
 *       handler is not installed, which means "not able to handle
 *       transactions".
 */
static int ec_install_handlers(struct acpi_ec *ec, struct acpi_device *device,
			       bool handle_events)
{
	acpi_status status;

	acpi_ec_start(ec, false);

	if (!test_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags)) {
		acpi_ec_enter_noirq(ec);
		status = acpi_install_address_space_handler(ec->handle,
							    ACPI_ADR_SPACE_EC,
							    &acpi_ec_space_handler,
							    NULL, ec);
		if (ACPI_FAILURE(status)) {
			if (status == AE_NOT_FOUND) {
				/*
				 * Maybe the OS failed to evaluate the _REG
				 * object. The AE_NOT_FOUND error will be
				 * ignored and the OS will continue to
				 * initialize the EC.
				 */
				pr_err("Failed to evaluate the _REG object"
				       " of the EC device. A broken BIOS is suspected.\n");
			} else {
				acpi_ec_stop(ec, false);
				return -ENODEV;
			}
		}
		set_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags);
	}

	if (!handle_events)
		return 0;

	if (!test_bit(EC_FLAGS_QUERY_METHODS_INSTALLED, &ec->flags)) {
		/* Find and register all query methods */
		acpi_walk_namespace(ACPI_TYPE_METHOD, ec->handle, 1,
				    acpi_ec_register_query_methods,
				    NULL, ec, NULL);
		set_bit(EC_FLAGS_QUERY_METHODS_INSTALLED, &ec->flags);
	}
	if (!test_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags)) {
		if (ec->gpe >= 0) {
			install_gpe_event_handler(ec);
		} else if (device) {
			int ret = install_gpio_irq_event_handler(ec, device);

			if (ret)
				return ret;
		} else { /* No GPE and no GpioInt? */
			return -ENODEV;
		}
	}
	/* EC is fully operational, allow queries */
	acpi_ec_enable_event(ec);

	return 0;
}

static void ec_remove_handlers(struct acpi_ec *ec)
{
	if (test_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags)) {
		if (ACPI_FAILURE(acpi_remove_address_space_handler(ec->handle,
					ACPI_ADR_SPACE_EC, &acpi_ec_space_handler)))
			pr_err("failed to remove space handler\n");
		clear_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags);
	}

	/*
	 * Stops handling the EC transactions after removing the operation
	 * region handler. This is required because _REG(DISCONNECT)
	 * invoked during the removal can result in new EC transactions.
	 *
	 * Flushes the EC requests and thus disables the GPE before
	 * removing the GPE handler. This is required by the current ACPICA
	 * GPE core. ACPICA GPE core will automatically disable a GPE when
	 * it is indicated but there is no way to handle it. So the drivers
	 * must disable the GPEs prior to removing the GPE handlers.
	 */
	acpi_ec_stop(ec, false);

	if (test_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags)) {
		if (ec->gpe >= 0 &&
		    ACPI_FAILURE(acpi_remove_gpe_handler(NULL, ec->gpe,
				 &acpi_ec_gpe_handler)))
			pr_err("failed to remove gpe handler\n");

		if (ec->irq >= 0)
			free_irq(ec->irq, ec);

		clear_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags);
	}
	if (test_bit(EC_FLAGS_QUERY_METHODS_INSTALLED, &ec->flags)) {
		acpi_ec_remove_query_handlers(ec, true, 0);
		clear_bit(EC_FLAGS_QUERY_METHODS_INSTALLED, &ec->flags);
	}
}

static int acpi_ec_setup(struct acpi_ec *ec, struct acpi_device *device,
			 bool handle_events)
{
	int ret;

	ret = ec_install_handlers(ec, device, handle_events);
	if (ret)
		return ret;

	/* First EC capable of handling transactions */
	if (!first_ec) {
		first_ec = ec;
		acpi_handle_info(first_ec->handle, "Used as first EC\n");
	}

	acpi_handle_info(ec->handle,
			 "GPE=0x%x, IRQ=%d, EC_CMD/EC_SC=0x%lx, EC_DATA=0x%lx\n",
			 ec->gpe, ec->irq, ec->command_addr, ec->data_addr);
	return ret;
}

static bool acpi_ec_ecdt_get_handle(acpi_handle *phandle)
{
	struct acpi_table_ecdt *ecdt_ptr;
	acpi_status status;
	acpi_handle handle;

	status = acpi_get_table(ACPI_SIG_ECDT, 1,
				(struct acpi_table_header **)&ecdt_ptr);
	if (ACPI_FAILURE(status))
		return false;

	status = acpi_get_handle(NULL, ecdt_ptr->id, &handle);
	if (ACPI_FAILURE(status))
		return false;

	*phandle = handle;
	return true;
}

static int acpi_ec_add(struct acpi_device *device)
{
	struct acpi_ec *ec = NULL;
	bool dep_update = true;
	acpi_status status;
	int ret;

	strcpy(acpi_device_name(device), ACPI_EC_DEVICE_NAME);
	strcpy(acpi_device_class(device), ACPI_EC_CLASS);

	if (!strcmp(acpi_device_hid(device), ACPI_ECDT_HID)) {
		boot_ec_is_ecdt = true;
		ec = boot_ec;
		dep_update = false;
	} else {
		ec = acpi_ec_alloc();
		if (!ec)
			return -ENOMEM;

		status = ec_parse_device(device->handle, 0, ec, NULL);
		if (status != AE_CTRL_TERMINATE) {
			ret = -EINVAL;
			goto err;
		}

		if (boot_ec && ec->command_addr == boot_ec->command_addr &&
		    ec->data_addr == boot_ec->data_addr) {
			boot_ec_is_ecdt = false;
			/*
			 * Trust PNP0C09 namespace location rather than
			 * ECDT ID. But trust ECDT GPE rather than _GPE
			 * because of ASUS quirks, so do not change
			 * boot_ec->gpe to ec->gpe.
			 */
			boot_ec->handle = ec->handle;
			acpi_handle_debug(ec->handle, "duplicated.\n");
			acpi_ec_free(ec);
			ec = boot_ec;
		}
	}

	ret = acpi_ec_setup(ec, device, true);
	if (ret)
		goto err;

	if (ec == boot_ec)
		acpi_handle_info(boot_ec->handle,
				 "Boot %s EC used to handle transactions and events\n",
				 boot_ec_is_ecdt ? "ECDT" : "DSDT");

	device->driver_data = ec;

	ret = !!request_region(ec->data_addr, 1, "EC data");
	WARN(!ret, "Could not request EC data io port 0x%lx", ec->data_addr);
	ret = !!request_region(ec->command_addr, 1, "EC cmd");
	WARN(!ret, "Could not request EC cmd io port 0x%lx", ec->command_addr);

	if (dep_update) {
		/* Reprobe devices depending on the EC */
		acpi_walk_dep_device_list(ec->handle);
	}
	acpi_handle_debug(ec->handle, "enumerated.\n");
	return 0;

err:
	if (ec != boot_ec)
		acpi_ec_remove_query_handlers(ec, true, 0);
	acpi_ec_free(ec);
	return ret;
}

static int acpi_ec_remove(struct acpi_device *device)
{
	struct acpi_ec *ec;

	if (!device)
		return -EINVAL;

	ec = acpi_driver_data(device);
	release_region(ec->data_addr, 1);
	release_region(ec->command_addr, 1);
	device->driver_data = NULL;
	if (ec != boot_ec) {
		ec_remove_handlers(ec);
		acpi_ec_free(ec);
	}
	return 0;
}

static acpi_status
ec_parse_io_ports(struct acpi_resource *resource, void *context)
{
	struct acpi_ec *ec = context;

	if (resource->type != ACPI_RESOURCE_TYPE_IO)
		return AE_OK;

	/*
	 * The first address region returned is the data port, and
	 * the second address region returned is the status/command
	 * port.
	 */
	if (ec->data_addr == 0)
		ec->data_addr = resource->data.io.minimum;
	else if (ec->command_addr == 0)
		ec->command_addr = resource->data.io.minimum;
	else
		return AE_CTRL_TERMINATE;

	return AE_OK;
}

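/*
 * Example _CRS (editorial illustration in ASL): many x86 platforms
 * declare the conventional 0x62/0x66 port pair:
 *
 *	IO (Decode16, 0x62, 0x62, 0x00, 0x01)	// data
 *	IO (Decode16, 0x66, 0x66, 0x00, 0x01)	// status/command
 *
 * which this parser maps to data_addr = 0x62 and command_addr = 0x66.
 */
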
static const struct acpi_device_id ec_device_ids[] = {
	{"PNP0C09", 0},
	{ACPI_ECDT_HID, 0},
	{"", 0},
};

/*
 * This function is not Windows-compatible as Windows never enumerates the
 * namespace EC before the main ACPI device enumeration process. It is
 * retained for historical reasons and will be deprecated in the future.
 */
void __init acpi_ec_dsdt_probe(void)
{
	struct acpi_ec *ec;
	acpi_status status;
	int ret;

	/*
	 * If a platform has an ECDT, there is no need to proceed as the
	 * following probe is not a part of the ACPI device enumeration,
	 * executing _STA is not safe, and thus this probe may risk
	 * picking up an invalid EC device.
	 */
	if (boot_ec)
		return;

	ec = acpi_ec_alloc();
	if (!ec)
		return;

	/*
	 * At this point, the namespace is initialized, so start to find
	 * the namespace objects.
	 */
	status = acpi_get_devices(ec_device_ids[0].id, ec_parse_device, ec, NULL);
	if (ACPI_FAILURE(status) || !ec->handle) {
		acpi_ec_free(ec);
		return;
	}

	/*
	 * When the DSDT EC is available, always re-configure the boot EC to
	 * have _REG evaluated. _REG can only be evaluated after the
	 * namespace initialization.
	 * At this point, the GPE is not fully initialized, so do not
	 * handle the events.
	 */
	ret = acpi_ec_setup(ec, NULL, false);
	if (ret) {
		acpi_ec_free(ec);
		return;
	}

	boot_ec = ec;

	acpi_handle_info(ec->handle,
			 "Boot DSDT EC used to handle transactions\n");
}

/*
 * If the DSDT EC is not functioning, we still need to prepare a fully
 * functioning ECDT EC first in order to handle the events.
 * https://bugzilla.kernel.org/show_bug.cgi?id=115021
 */
static int __init acpi_ec_ecdt_start(void)
{
	acpi_handle handle;

	if (!boot_ec)
		return -ENODEV;
	/* In case acpi_ec_ecdt_start() is called after acpi_ec_add() */
	if (!boot_ec_is_ecdt)
		return -ENODEV;

	/*
	 * At this point, the namespace and the GPE are initialized, so
	 * start to find the namespace objects and handle the events.
	 *
	 * Note: ec->handle can be valid if this function is called after
	 * acpi_ec_add(), hence the fast path.
	 */
	if (boot_ec->handle == ACPI_ROOT_OBJECT) {
		if (!acpi_ec_ecdt_get_handle(&handle))
			return -ENODEV;
		boot_ec->handle = handle;
	}

	/* Register to ACPI bus with PM ops attached */
	return acpi_bus_register_early_device(ACPI_BUS_TYPE_ECDT_EC);
}

#if 0
/*
 * Some EC firmware variations refuse to respond to QR_EC when SCI_EVT is
 * not set, in which case we complete the QR_EC without issuing it to the
 * firmware.
 * https://bugzilla.kernel.org/show_bug.cgi?id=82611
 * https://bugzilla.kernel.org/show_bug.cgi?id=97381
 */
static int ec_flag_query_handshake(const struct dmi_system_id *id)
{
	pr_debug("Detected the EC firmware requiring QR_EC issued when SCI_EVT set\n");
	EC_FLAGS_QUERY_HANDSHAKE = 1;
	return 0;
}
#endif

/*
 * On some hardware it is necessary to clear events accumulated by the EC
 * during sleep. These ECs stop reporting GPEs until they are manually
 * polled, if too many events are accumulated (e.g. Samsung Series 5/9
 * notebooks).
 *
 * https://bugzilla.kernel.org/show_bug.cgi?id=44161
 *
 * Ideally, the EC should also be instructed NOT to accumulate events during
 * sleep (which Windows seems to do somehow), but the interface to control this
 * behaviour is not known at this time.
 *
 * Models known to be affected are Samsung 530Uxx/535Uxx/540Uxx/550Pxx/900Xxx,
 * however it is very likely that other Samsung models are affected.
 *
 * On systems which don't accumulate _Q events during sleep, this extra check
 * should be harmless.
 */
static int ec_clear_on_resume(const struct dmi_system_id *id)
{
	pr_debug("Detected system needing EC poll on resume.\n");
	EC_FLAGS_CLEAR_ON_RESUME = 1;
	ec_event_clearing = ACPI_EC_EVT_TIMING_STATUS;
	return 0;
}

/*
 * Some ECDTs contain wrong register addresses.
 * MSI MS-171F
 * https://bugzilla.kernel.org/show_bug.cgi?id=12461
 */
static int ec_correct_ecdt(const struct dmi_system_id *id)
{
	pr_debug("Detected system needing ECDT address correction.\n");
	EC_FLAGS_CORRECT_ECDT = 1;
	return 0;
}

/*
 * Some DSDTs contain a wrong GPE setting.
 * Asus FX502VD/VE, GL702VMK, X550VXK, X580VD
 * https://bugzilla.kernel.org/show_bug.cgi?id=195651
 */
static int ec_honor_ecdt_gpe(const struct dmi_system_id *id)
{
	pr_debug("Detected system needing to ignore the DSDT GPE setting.\n");
	EC_FLAGS_IGNORE_DSDT_GPE = 1;
	return 0;
}

static const struct dmi_system_id ec_dmi_table[] __initconst = {
	{
	ec_correct_ecdt, "MSI MS-171F", {
	DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star"),
	DMI_MATCH(DMI_PRODUCT_NAME, "MS-171F"),}, NULL},
	{
	ec_honor_ecdt_gpe, "ASUS FX502VD", {
	DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
	DMI_MATCH(DMI_PRODUCT_NAME, "FX502VD"),}, NULL},
	{
	ec_honor_ecdt_gpe, "ASUS FX502VE", {
	DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
	DMI_MATCH(DMI_PRODUCT_NAME, "FX502VE"),}, NULL},
	{
	ec_honor_ecdt_gpe, "ASUS GL702VMK", {
	DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
	DMI_MATCH(DMI_PRODUCT_NAME, "GL702VMK"),}, NULL},
	{
	ec_honor_ecdt_gpe, "ASUS X550VXK", {
	DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
	DMI_MATCH(DMI_PRODUCT_NAME, "X550VXK"),}, NULL},
	{
	ec_honor_ecdt_gpe, "ASUS X580VD", {
	DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
	DMI_MATCH(DMI_PRODUCT_NAME, "X580VD"),}, NULL},
	{
	ec_clear_on_resume, "Samsung hardware", {
	DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD.")}, NULL},
	{},
};

void __init acpi_ec_ecdt_probe(void)
{
	struct acpi_table_ecdt *ecdt_ptr;
	struct acpi_ec *ec;
	acpi_status status;
	int ret;

	/* Generate a boot ec context. */
	dmi_check_system(ec_dmi_table);
	status = acpi_get_table(ACPI_SIG_ECDT, 1,
				(struct acpi_table_header **)&ecdt_ptr);
	if (ACPI_FAILURE(status))
		return;

	if (!ecdt_ptr->control.address || !ecdt_ptr->data.address) {
		/*
		 * Asus X50GL:
		 * https://bugzilla.kernel.org/show_bug.cgi?id=11880
		 */
		return;
	}

	ec = acpi_ec_alloc();
	if (!ec)
		return;

	if (EC_FLAGS_CORRECT_ECDT) {
		ec->command_addr = ecdt_ptr->data.address;
		ec->data_addr = ecdt_ptr->control.address;
	} else {
		ec->command_addr = ecdt_ptr->control.address;
		ec->data_addr = ecdt_ptr->data.address;
	}

	/*
	 * Ignore the GPE value on Reduced Hardware platforms.
	 * Some products have this set to an erroneous value.
	 */
	if (!acpi_gbl_reduced_hardware)
		ec->gpe = ecdt_ptr->gpe;

	ec->handle = ACPI_ROOT_OBJECT;

	/*
	 * At this point, the namespace is not initialized, so do not find
	 * the namespace objects, or handle the events.
	 */
	ret = acpi_ec_setup(ec, NULL, false);
	if (ret) {
		acpi_ec_free(ec);
		return;
	}

	boot_ec = ec;
	boot_ec_is_ecdt = true;

	pr_info("Boot ECDT EC used to handle transactions\n");
}

#ifdef CONFIG_PM_SLEEP
static int acpi_ec_suspend(struct device *dev)
{
	struct acpi_ec *ec =
		acpi_driver_data(to_acpi_device(dev));

	if (!pm_suspend_no_platform() && ec_freeze_events)
		acpi_ec_disable_event(ec);
	return 0;
}

static int acpi_ec_suspend_noirq(struct device *dev)
{
	struct acpi_ec *ec = acpi_driver_data(to_acpi_device(dev));

	/*
	 * The SCI handler doesn't run at this point, so the GPE can be
	 * masked at the low level without side effects.
	 */
	if (ec_no_wakeup && test_bit(EC_FLAGS_STARTED, &ec->flags) &&
	    ec->gpe >= 0 && ec->reference_count >= 1)
		acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_DISABLE);

	acpi_ec_enter_noirq(ec);

	return 0;
}

static int acpi_ec_resume_noirq(struct device *dev)
{
	struct acpi_ec *ec = acpi_driver_data(to_acpi_device(dev));

	acpi_ec_leave_noirq(ec);

	if (ec_no_wakeup && test_bit(EC_FLAGS_STARTED, &ec->flags) &&
	    ec->gpe >= 0 && ec->reference_count >= 1)
		acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE);

	return 0;
}

static int acpi_ec_resume(struct device *dev)
{
	struct acpi_ec *ec =
		acpi_driver_data(to_acpi_device(dev));

	acpi_ec_enable_event(ec);
	return 0;
}

void acpi_ec_mark_gpe_for_wake(void)
{
	if (first_ec && !ec_no_wakeup)
		acpi_mark_gpe_for_wake(NULL, first_ec->gpe);
}
EXPORT_SYMBOL_GPL(acpi_ec_mark_gpe_for_wake);

void acpi_ec_set_gpe_wake_mask(u8 action)
{
	if (pm_suspend_no_platform() && first_ec && !ec_no_wakeup)
		acpi_set_gpe_wake_mask(NULL, first_ec->gpe, action);
}

bool acpi_ec_dispatch_gpe(void)
{
	u32 ret;

	if (!first_ec)
		return false;

	ret = acpi_dispatch_gpe(NULL, first_ec->gpe);
	if (ret == ACPI_INTERRUPT_HANDLED) {
		pm_pr_dbg("EC GPE dispatched\n");
		return true;
	}
	return false;
}
#endif /* CONFIG_PM_SLEEP */

static const struct dev_pm_ops acpi_ec_pm = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(acpi_ec_suspend_noirq, acpi_ec_resume_noirq)
	SET_SYSTEM_SLEEP_PM_OPS(acpi_ec_suspend, acpi_ec_resume)
};

static int param_set_event_clearing(const char *val,
				    const struct kernel_param *kp)
{
	int result = 0;

	if (!strncmp(val, "status", sizeof("status") - 1)) {
		ec_event_clearing = ACPI_EC_EVT_TIMING_STATUS;
		pr_info("Assuming SCI_EVT clearing on EC_SC accesses\n");
	} else if (!strncmp(val, "query", sizeof("query") - 1)) {
		ec_event_clearing = ACPI_EC_EVT_TIMING_QUERY;
		pr_info("Assuming SCI_EVT clearing on QR_EC writes\n");
	} else if (!strncmp(val, "event", sizeof("event") - 1)) {
		ec_event_clearing = ACPI_EC_EVT_TIMING_EVENT;
		pr_info("Assuming SCI_EVT clearing on event reads\n");
	} else
		result = -EINVAL;
	return result;
}

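/*
 * Usage sketch (editorial illustration): since the parameter is created
 * with mode 0644 below, the assumed timing can also be changed at
 * runtime, e.g.:
 *
 *	echo event > /sys/module/acpi/parameters/ec_event_clearing
 *
 * in addition to the acpi.ec_event_clearing= boot parameter.
 */
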
static int param_get_event_clearing(char *buffer,
				    const struct kernel_param *kp)
{
	switch (ec_event_clearing) {
	case ACPI_EC_EVT_TIMING_STATUS:
		return sprintf(buffer, "status");
	case ACPI_EC_EVT_TIMING_QUERY:
		return sprintf(buffer, "query");
	case ACPI_EC_EVT_TIMING_EVENT:
		return sprintf(buffer, "event");
	default:
		return sprintf(buffer, "invalid");
	}
	return 0;
}

module_param_call(ec_event_clearing, param_set_event_clearing, param_get_event_clearing,
		  NULL, 0644);
MODULE_PARM_DESC(ec_event_clearing, "Assumed SCI_EVT clearing timing");

static struct acpi_driver acpi_ec_driver = {
	.name = "ec",
	.class = ACPI_EC_CLASS,
	.ids = ec_device_ids,
	.ops = {
		.add = acpi_ec_add,
		.remove = acpi_ec_remove,
		},
	.drv.pm = &acpi_ec_pm,
};

static void acpi_ec_destroy_workqueues(void)
{
	if (ec_wq) {
		destroy_workqueue(ec_wq);
		ec_wq = NULL;
	}
	if (ec_query_wq) {
		destroy_workqueue(ec_query_wq);
		ec_query_wq = NULL;
	}
}

static int acpi_ec_init_workqueues(void)
{
	if (!ec_wq)
		ec_wq = alloc_ordered_workqueue("kec", 0);

	if (!ec_query_wq)
		ec_query_wq = alloc_workqueue("kec_query", 0, ec_max_queries);

	if (!ec_wq || !ec_query_wq) {
		acpi_ec_destroy_workqueues();
		return -ENODEV;
	}

	return 0;
}

static const struct dmi_system_id acpi_ec_no_wakeup[] = {
	{
		.ident = "Thinkpad X1 Carbon 6th",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
			DMI_MATCH(DMI_PRODUCT_FAMILY, "Thinkpad X1 Carbon 6th"),
		},
	},
	{
		.ident = "ThinkPad X1 Carbon 6th",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
			DMI_MATCH(DMI_PRODUCT_FAMILY, "ThinkPad X1 Carbon 6th"),
		},
	},
	{
		.ident = "ThinkPad X1 Yoga 3rd",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
			DMI_MATCH(DMI_PRODUCT_FAMILY, "ThinkPad X1 Yoga 3rd"),
		},
	},
	{ },
};

int __init acpi_ec_init(void)
{
	int result;
	int ecdt_fail, dsdt_fail;

	result = acpi_ec_init_workqueues();
	if (result)
		return result;

	/*
	 * Disable EC wakeup on the following systems to prevent periodic
	 * wakeup from the EC GPE.
	 */
	if (dmi_check_system(acpi_ec_no_wakeup)) {
		ec_no_wakeup = true;
		pr_debug("Disabling EC wakeup on suspend-to-idle\n");
	}

	/* Drivers must be started after acpi_ec_query_init() */
	dsdt_fail = acpi_bus_register_driver(&acpi_ec_driver);
	/*
	 * Register ECDT to ACPI bus only when PNP0C09 probe fails. This is
	 * useful for platforms (confirmed on ASUS X550ZE) with valid ECDT
	 * settings but invalid DSDT settings.
	 * https://bugzilla.kernel.org/show_bug.cgi?id=196847
	 */
	ecdt_fail = acpi_ec_ecdt_start();
	return ecdt_fail && dsdt_fail ? -ENODEV : 0;
}

/* EC driver currently not unloadable */
#if 0
static void __exit acpi_ec_exit(void)
{
	acpi_bus_unregister_driver(&acpi_ec_driver);
	acpi_ec_destroy_workqueues();
}
#endif	/* 0 */