1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2018 Intel Corporation. All rights reserved. */
4 #include <linux/module.h>
5 #include <linux/device.h>
6 #include <linux/ndctl.h>
7 #include <linux/slab.h>
10 #include <linux/cred.h>
11 #include <linux/key.h>
12 #include <linux/key-type.h>
13 #include <keys/user-type.h>
14 #include <keys/encrypted-type.h>
/*
 * Lockdep subclass indices for down_read_nested() on key->sem: the "base"
 * (current) passphrase key vs. the "new" passphrase key, so holding both
 * keys' semaphores at once (see security_update()) is not a false positive.
 */
18 #define NVDIMM_BASE_KEY 0
19 #define NVDIMM_NEW_KEY 1
/*
 * Module parameter, read-only through sysfs (mode 0444): when true,
 * an already-unlocked DIMM has its key revalidated against the hardware
 * at init (see nvdimm_key_revalidate() / __nvdimm_security_unlock()).
 */
21 static bool key_revalidate = true;
22 module_param(key_revalidate, bool, 0444);
23 MODULE_PARM_DESC(key_revalidate, "Require key validation at init.");
/*
 * All-zeroes passphrase buffer (zero-initialized by static storage
 * duration) — presumably used as the default/null passphrase when no
 * user key is supplied; the uses are in lines elided from this listing.
 */
25 static const char zero_key[NVDIMM_PASSPHRASE_LEN];
/*
 * key_data() - return the decrypted passphrase bytes carried by @key.
 *
 * Caller must already hold key->sem for read (asserted below); the
 * payload pointer is only stable while that lock is held.
 */
27 static void *key_data(struct key *key)
29 struct encrypted_key_payload *epayload = dereference_key_locked(key);
31 lockdep_assert_held_read(&key->sem);
33 return epayload->decrypted_data;
/*
 * nvdimm_put_key() - release a key obtained from nvdimm_request_key() or
 * nvdimm_lookup_user_key(). Per the comment on nvdimm_request_key(),
 * keys are returned held for read, so this presumably drops key->sem and
 * the key reference — body elided in this listing, confirm against source.
 */
36 static void nvdimm_put_key(struct key *key)
46 * Retrieve kernel key for DIMM and request from user space if
47 * necessary. Returns a key held for read and must be put by
48 * nvdimm_put_key() before the usage goes out of scope.
50 static struct key *nvdimm_request_key(struct nvdimm *nvdimm)
52 struct key *key = NULL;
53 static const char NVDIMM_PREFIX[] = "nvdimm:";
/* Key description is "nvdimm:<dimm_id>"; buffer sized for prefix + id. */
54 char desc[NVDIMM_KEY_DESC_LEN + sizeof(NVDIMM_PREFIX)];
55 struct device *dev = &nvdimm->dev;
57 sprintf(desc, "%s%s", NVDIMM_PREFIX, nvdimm->dimm_id);
/* Search kernel keyrings, upcalling to userspace if the key is absent. */
58 key = request_key(&key_type_encrypted, desc, "");
/* request_key() returns ERR_PTR on failure: distinguish "no key" ... */
60 if (PTR_ERR(key) == -ENOKEY)
61 dev_dbg(dev, "request_key() found no key\n");
/* ... from an upcall error (else-branch line elided in this listing). */
63 dev_dbg(dev, "request_key() upcall failed\n");
66 struct encrypted_key_payload *epayload;
69 epayload = dereference_key_locked(key);
/*
 * Reject keys whose decrypted payload is not exactly one NVDIMM
 * passphrase long (rejection path elided in this listing).
 */
70 if (epayload->decrypted_datalen != NVDIMM_PASSPHRASE_LEN) {
/*
 * nvdimm_get_key_payload() - fetch the kernel key for @nvdimm and return
 * a pointer to its decrypted passphrase data. *key is set to the held
 * key so the caller can nvdimm_put_key() it when done with the payload.
 * (NULL-key handling between these lines is elided in this listing.)
 */
80 static const void *nvdimm_get_key_payload(struct nvdimm *nvdimm,
83 *key = nvdimm_request_key(nvdimm);
87 return key_data(*key);
/*
 * nvdimm_lookup_user_key() - resolve a userspace-supplied key serial @id
 * to a kernel key of type "encrypted", validate its payload length, and
 * return it held for read.
 *
 * @subclass: lockdep nesting level (NVDIMM_BASE_KEY / NVDIMM_NEW_KEY) so
 * that holding two keys' semaphores simultaneously is annotated correctly.
 */
90 static struct key *nvdimm_lookup_user_key(struct nvdimm *nvdimm,
91 key_serial_t id, int subclass)
95 struct encrypted_key_payload *epayload;
96 struct device *dev = &nvdimm->dev;
/* No lookup flags / no permission check requested here. */
98 keyref = lookup_user_key(id, 0, 0);
102 key = key_ref_to_ptr(keyref);
/* Only "encrypted" keys are acceptable (rejection path elided). */
103 if (key->type != &key_type_encrypted) {
108 dev_dbg(dev, "%s: key found: %#x\n", __func__, key_serial(key));
/* Take key->sem for read at the caller-specified lockdep subclass. */
110 down_read_nested(&key->sem, subclass);
111 epayload = dereference_key_locked(key);
/* Payload must be exactly one passphrase long (rejection path elided). */
112 if (epayload->decrypted_datalen != NVDIMM_PASSPHRASE_LEN) {
/*
 * nvdimm_get_user_key_payload() - payload pointer for a user-supplied key
 * id, with the key returned held in *key for later nvdimm_put_key().
 */
120 static const void *nvdimm_get_user_key_payload(struct nvdimm *nvdimm,
121 key_serial_t id, int subclass, struct key **key)
/*
 * Special-case for the base key (body elided in this listing —
 * presumably falls back to zero_key when no id is given; confirm).
 */
125 if (subclass == NVDIMM_BASE_KEY)
131 *key = nvdimm_lookup_user_key(nvdimm, id, subclass);
135 return key_data(*key);
/*
 * nvdimm_key_revalidate() - prove the kernel's key matches the hardware.
 * Requires the DIMM's change_key op; issues a change_key with the same
 * payload as both old and new passphrase, which succeeds only if the
 * key is actually valid, then refreshes the cached security flags.
 */
139 static int nvdimm_key_revalidate(struct nvdimm *nvdimm)
145 if (!nvdimm->sec.ops->change_key)
148 data = nvdimm_get_key_payload(nvdimm, &key);
151 * Send the same key to the hardware as new and old key to
152 * verify that the key is good.
154 rc = nvdimm->sec.ops->change_key(nvdimm, data, data, NVDIMM_USER);
/* Re-read security state from the DIMM after the verification attempt. */
161 nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
/*
 * __nvdimm_security_unlock() - unlock the DIMM using the kernel key.
 * Caller must hold the nvdimm bus reconfig_mutex (asserted below).
 * Bails out if the DIMM has no unlock op, no cached security flags, or
 * an overwrite operation is in flight.
 */
165 static int __nvdimm_security_unlock(struct nvdimm *nvdimm)
167 struct device *dev = &nvdimm->dev;
168 struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
173 /* The bus lock should be held at the top level of the call stack */
174 lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
176 if (!nvdimm->sec.ops || !nvdimm->sec.ops->unlock
177 || !nvdimm->sec.flags)
/* An in-progress overwrite excludes any other security operation. */
180 if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) {
181 dev_dbg(dev, "Security operation in progress.\n")
186 * If the pre-OS has unlocked the DIMM, attempt to send the key
187 * from request_key() to the hardware for verification. Failure
188 * to revalidate the key against the hardware results in a
189 * freeze of the security configuration. I.e. if the OS does not
190 * have the key, security is being managed pre-OS.
/* Already unlocked: optionally revalidate (gated on key_revalidate,
 * elided lines) instead of unlocking again. */
192 if (test_bit(NVDIMM_SECURITY_UNLOCKED, &nvdimm->sec.flags)) {
196 return nvdimm_key_revalidate(nvdimm);
198 data = nvdimm_get_key_payload(nvdimm, &key);
200 rc = nvdimm->sec.ops->unlock(nvdimm, data);
201 dev_dbg(dev, "key: %d unlock: %s\n", key_serial(key),
202 rc == 0 ? "success" : "fail");
/* Refresh cached flags so sysfs reflects the new hardware state. */
205 nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
/*
 * nvdimm_security_unlock() - public entry point: take the nvdimm bus
 * lock (satisfying __nvdimm_security_unlock()'s lockdep assertion),
 * perform the unlock, then release the lock.
 */
209 int nvdimm_security_unlock(struct device *dev)
211 struct nvdimm *nvdimm = to_nvdimm(dev);
214 nvdimm_bus_lock(dev);
215 rc = __nvdimm_security_unlock(nvdimm);
216 nvdimm_bus_unlock(dev);
/*
 * check_security_state() - common precondition check for disable /
 * update / erase / overwrite: the DIMM must not be frozen and no
 * overwrite may be in progress. (Error returns elided in this listing.)
 */
220 static int check_security_state(struct nvdimm *nvdimm)
222 struct device *dev = &nvdimm->dev;
224 if (test_bit(NVDIMM_SECURITY_FROZEN, &nvdimm->sec.flags)) {
225 dev_dbg(dev, "Incorrect security state: %#lx\n",
230 if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) {
231 dev_dbg(dev, "Security operation in progress.\n");
/*
 * security_disable() - turn off passphrase security on the DIMM using
 * the user-supplied key @keyid. Caller holds the bus reconfig_mutex.
 */
238 static int security_disable(struct nvdimm *nvdimm, unsigned int keyid)
240 struct device *dev = &nvdimm->dev;
241 struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
246 /* The bus lock should be held at the top level of the call stack */
247 lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
249 if (!nvdimm->sec.ops || !nvdimm->sec.ops->disable
250 || !nvdimm->sec.flags)
/* Refuse while frozen or an overwrite is running. */
253 rc = check_security_state(nvdimm);
/* Resolve the passphrase for @keyid; key is held until put (elided). */
257 data = nvdimm_get_user_key_payload(nvdimm, keyid,
258 NVDIMM_BASE_KEY, &key);
262 rc = nvdimm->sec.ops->disable(nvdimm, data);
263 dev_dbg(dev, "key: %d disable: %s\n", key_serial(key),
264 rc == 0 ? "success" : "fail");
/* Refresh cached flags after the hardware operation. */
267 nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
/*
 * security_update() - change the DIMM passphrase from @keyid to
 * @new_keyid, for either the user or master passphrase (@pass_type).
 * Both keys are looked up and held simultaneously, which is why the
 * two distinct lockdep subclasses (BASE vs NEW) exist.
 */
271 static int security_update(struct nvdimm *nvdimm, unsigned int keyid,
272 unsigned int new_keyid,
273 enum nvdimm_passphrase_type pass_type)
275 struct device *dev = &nvdimm->dev;
276 struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
277 struct key *key, *newkey;
279 const void *data, *newdata;
281 /* The bus lock should be held at the top level of the call stack */
282 lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
284 if (!nvdimm->sec.ops || !nvdimm->sec.ops->change_key
285 || !nvdimm->sec.flags)
288 rc = check_security_state(nvdimm);
/* Current passphrase, held at the BASE lockdep subclass. */
292 data = nvdimm_get_user_key_payload(nvdimm, keyid,
293 NVDIMM_BASE_KEY, &key);
/* New passphrase, held at the NEW subclass while the base key is held. */
297 newdata = nvdimm_get_user_key_payload(nvdimm, new_keyid,
298 NVDIMM_NEW_KEY, &newkey);
304 rc = nvdimm->sec.ops->change_key(nvdimm, data, newdata, pass_type);
305 dev_dbg(dev, "key: %d %d update%s: %s\n",
306 key_serial(key), key_serial(newkey),
307 pass_type == NVDIMM_MASTER ? "(master)" : "(user)",
308 rc == 0 ? "success" : "fail");
310 nvdimm_put_key(newkey);
/* Refresh the flag set that matches the passphrase type just changed. */
312 if (pass_type == NVDIMM_MASTER)
313 nvdimm->sec.ext_flags = nvdimm_security_flags(nvdimm,
316 nvdimm->sec.flags = nvdimm_security_flags(nvdimm,
/*
 * security_erase() - secure-erase the DIMM with the passphrase @keyid,
 * as either a user or master operation (@pass_type). A master erase
 * additionally requires the master passphrase to be in the unlocked
 * (enabled) state per ext_flags.
 */
321 static int security_erase(struct nvdimm *nvdimm, unsigned int keyid,
322 enum nvdimm_passphrase_type pass_type)
324 struct device *dev = &nvdimm->dev;
325 struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
326 struct key *key = NULL;
330 /* The bus lock should be held at the top level of the call stack */
331 lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
333 if (!nvdimm->sec.ops || !nvdimm->sec.ops->erase
334 || !nvdimm->sec.flags)
337 rc = check_security_state(nvdimm);
/* Master erase only allowed when the master passphrase is enabled. */
341 if (!test_bit(NVDIMM_SECURITY_UNLOCKED, &nvdimm->sec.ext_flags)
342 && pass_type == NVDIMM_MASTER) {
344 "Attempt to secure erase in wrong master state.\n");
348 data = nvdimm_get_user_key_payload(nvdimm, keyid,
349 NVDIMM_BASE_KEY, &key);
353 rc = nvdimm->sec.ops->erase(nvdimm, data, pass_type);
354 dev_dbg(dev, "key: %d erase%s: %s\n", key_serial(key),
355 pass_type == NVDIMM_MASTER ? "(master)" : "(user)",
356 rc == 0 ? "success" : "fail");
/* Refresh cached flags after the hardware operation. */
359 nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
/*
 * security_overwrite() - kick off an asynchronous media overwrite.
 * Refused while a driver is bound (DIMM active). On successful
 * submission, marks overwrite/work-pending state and schedules the
 * polling work (nvdimm->dwork) to track completion.
 */
363 static int security_overwrite(struct nvdimm *nvdimm, unsigned int keyid)
365 struct device *dev = &nvdimm->dev;
366 struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
367 struct key *key = NULL;
371 /* The bus lock should be held at the top level of the call stack */
372 lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
374 if (!nvdimm->sec.ops || !nvdimm->sec.ops->overwrite
375 || !nvdimm->sec.flags)
/* Overwrite destroys data; only permitted while the DIMM is unbound. */
378 if (dev->driver == NULL) {
379 dev_dbg(dev, "Unable to overwrite while DIMM active.\n");
383 rc = check_security_state(nvdimm);
387 data = nvdimm_get_user_key_payload(nvdimm, keyid,
388 NVDIMM_BASE_KEY, &key);
392 rc = nvdimm->sec.ops->overwrite(nvdimm, data);
393 dev_dbg(dev, "key: %d overwrite submission: %s\n", key_serial(key),
394 rc == 0 ? "success" : "fail");
/* Submission accepted: record in-progress state for other paths. */
398 set_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags);
399 set_bit(NDD_WORK_PENDING, &nvdimm->flags);
400 set_bit(NVDIMM_SECURITY_OVERWRITE, &nvdimm->sec.flags);
402 * Make sure we don't lose device while doing overwrite
/* Start polling for completion immediately (delay 0). */
406 queue_delayed_work(system_wq, &nvdimm->dwork, 0);
/*
 * __nvdimm_security_overwrite_query() - poll the DIMM for overwrite
 * completion. Re-arms the delayed work with a growing timeout while the
 * operation is still running; on completion (or cancellation) tears down
 * the overwrite state, notifies sysfs, and drops the device reference
 * taken when the overwrite was submitted.
 */
412 void __nvdimm_security_overwrite_query(struct nvdimm *nvdimm)
414 struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nvdimm->dev);
418 /* The bus lock should be held at the top level of the call stack */
419 lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
422 * Abort and release device if we no longer have the overwrite
423 * flag set. It means the work has been canceled.
425 if (!test_bit(NDD_WORK_PENDING, &nvdimm->flags))
428 tmo = nvdimm->sec.overwrite_tmo;
430 if (!nvdimm->sec.ops || !nvdimm->sec.ops->query_overwrite
431 || !nvdimm->sec.flags)
434 rc = nvdimm->sec.ops->query_overwrite(nvdimm);
437 /* setup delayed work again */
439 queue_delayed_work(system_wq, &nvdimm->dwork, tmo * HZ);
/* Cap the poll interval at 15 minutes. */
440 nvdimm->sec.overwrite_tmo = min(15U * 60U, tmo);
445 dev_dbg(&nvdimm->dev, "overwrite failed\n");
447 dev_dbg(&nvdimm->dev, "overwrite completed\n");
/* Wake any sysfs pollers watching the overwrite state attribute. */
449 if (nvdimm->sec.overwrite_state)
450 sysfs_notify_dirent(nvdimm->sec.overwrite_state);
451 nvdimm->sec.overwrite_tmo = 0;
452 clear_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags);
453 clear_bit(NDD_WORK_PENDING, &nvdimm->flags);
/* Pairs with the device reference held across the async overwrite. */
454 put_device(&nvdimm->dev);
/* Refresh both user and master flag sets now that overwrite is done. */
455 nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
456 nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_MASTER);
/*
 * nvdimm_security_overwrite_query() - delayed-work callback wrapper:
 * recover the nvdimm from the work item, then run the query under the
 * bus lock (required by __nvdimm_security_overwrite_query()).
 */
459 void nvdimm_security_overwrite_query(struct work_struct *work)
461 struct nvdimm *nvdimm =
462 container_of(work, typeof(*nvdimm), dwork.work);
464 nvdimm_bus_lock(&nvdimm->dev);
465 __nvdimm_security_overwrite_query(nvdimm);
466 nvdimm_bus_unlock(&nvdimm->dev);
470 C( OP_FREEZE, "freeze", 1), \
471 C( OP_DISABLE, "disable", 2), \
472 C( OP_UPDATE, "update", 3), \
473 C( OP_ERASE, "erase", 2), \
474 C( OP_OVERWRITE, "overwrite", 2), \
475 C( OP_MASTER_UPDATE, "master_update", 3), \
476 C( OP_MASTER_ERASE, "master_erase", 2)
/* Expand the OPS X-macro list into OP_* enum ids (C() definition for the
 * enum expansion is elided in this listing). */
479 enum nvdimmsec_op_ids { OPS };
/* Re-define C() to emit { name, arg-count } entries for the ops[] table. */
481 #define C(a, b, c) { b, c }
/* Field widths for sscanf parsing in nvdimm_security_store(): command
 * word and key-id strings (buffers are declared one byte larger for the
 * NUL terminator). */
488 #define SEC_CMD_SIZE 32
489 #define KEY_ID_SIZE 10
491 ssize_t nvdimm_security_store(struct device *dev, const char *buf, size_t len)
493 struct nvdimm *nvdimm = to_nvdimm(dev);
495 char cmd[SEC_CMD_SIZE+1], keystr[KEY_ID_SIZE+1],
496 nkeystr[KEY_ID_SIZE+1];
497 unsigned int key, newkey;
500 rc = sscanf(buf, "%"__stringify(SEC_CMD_SIZE)"s"
501 " %"__stringify(KEY_ID_SIZE)"s"
502 " %"__stringify(KEY_ID_SIZE)"s",
503 cmd, keystr, nkeystr);
506 for (i = 0; i < ARRAY_SIZE(ops); i++)
507 if (sysfs_streq(cmd, ops[i].name))
509 if (i >= ARRAY_SIZE(ops))
512 rc = kstrtouint(keystr, 0, &key);
513 if (rc >= 0 && ops[i].args > 2)
514 rc = kstrtouint(nkeystr, 0, &newkey);
518 if (i == OP_FREEZE) {
519 dev_dbg(dev, "freeze\n");
520 rc = nvdimm_security_freeze(nvdimm);
521 } else if (i == OP_DISABLE) {
522 dev_dbg(dev, "disable %u\n", key);
523 rc = security_disable(nvdimm, key);
524 } else if (i == OP_UPDATE || i == OP_MASTER_UPDATE) {
525 dev_dbg(dev, "%s %u %u\n", ops[i].name, key, newkey);
526 rc = security_update(nvdimm, key, newkey, i == OP_UPDATE
527 ? NVDIMM_USER : NVDIMM_MASTER);
528 } else if (i == OP_ERASE || i == OP_MASTER_ERASE) {
529 dev_dbg(dev, "%s %u\n", ops[i].name, key);
530 if (atomic_read(&nvdimm->busy)) {
531 dev_dbg(dev, "Unable to secure erase while DIMM active.\n");
534 rc = security_erase(nvdimm, key, i == OP_ERASE
535 ? NVDIMM_USER : NVDIMM_MASTER);
536 } else if (i == OP_OVERWRITE) {
537 dev_dbg(dev, "overwrite %u\n", key);
538 if (atomic_read(&nvdimm->busy)) {
539 dev_dbg(dev, "Unable to overwrite while DIMM active.\n");
542 rc = security_overwrite(nvdimm, key);