// SPDX-License-Identifier: GPL-2.0
/*
 * Sysfs interface for the NVMe core driver.
 *
 * Copyright (c) 2011-2014, Intel Corporation.
 */
8 #include <linux/nvme-auth.h>
13 static ssize_t nvme_sysfs_reset(struct device *dev,
14 struct device_attribute *attr, const char *buf,
17 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
20 ret = nvme_reset_ctrl_sync(ctrl);
25 static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset);
27 static ssize_t nvme_sysfs_rescan(struct device *dev,
28 struct device_attribute *attr, const char *buf,
31 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
33 nvme_queue_scan(ctrl);
36 static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan);
38 static ssize_t nvme_adm_passthru_err_log_enabled_show(struct device *dev,
39 struct device_attribute *attr, char *buf)
41 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
43 return sysfs_emit(buf,
44 ctrl->passthru_err_log_enabled ? "on\n" : "off\n");
47 static ssize_t nvme_adm_passthru_err_log_enabled_store(struct device *dev,
48 struct device_attribute *attr, const char *buf, size_t count)
50 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
51 bool passthru_err_log_enabled;
54 err = kstrtobool(buf, &passthru_err_log_enabled);
58 ctrl->passthru_err_log_enabled = passthru_err_log_enabled;
63 static inline struct nvme_ns_head *dev_to_ns_head(struct device *dev)
65 struct gendisk *disk = dev_to_disk(dev);
67 if (nvme_disk_is_ns_head(disk))
68 return disk->private_data;
69 return nvme_get_ns_from_dev(dev)->head;
72 static ssize_t nvme_io_passthru_err_log_enabled_show(struct device *dev,
73 struct device_attribute *attr, char *buf)
75 struct nvme_ns_head *head = dev_to_ns_head(dev);
77 return sysfs_emit(buf, head->passthru_err_log_enabled ? "on\n" : "off\n");
80 static ssize_t nvme_io_passthru_err_log_enabled_store(struct device *dev,
81 struct device_attribute *attr, const char *buf, size_t count)
83 struct nvme_ns_head *head = dev_to_ns_head(dev);
84 bool passthru_err_log_enabled;
87 err = kstrtobool(buf, &passthru_err_log_enabled);
90 head->passthru_err_log_enabled = passthru_err_log_enabled;
95 static struct device_attribute dev_attr_adm_passthru_err_log_enabled = \
96 __ATTR(passthru_err_log_enabled, S_IRUGO | S_IWUSR, \
97 nvme_adm_passthru_err_log_enabled_show, nvme_adm_passthru_err_log_enabled_store);
99 static struct device_attribute dev_attr_io_passthru_err_log_enabled = \
100 __ATTR(passthru_err_log_enabled, S_IRUGO | S_IWUSR, \
101 nvme_io_passthru_err_log_enabled_show, nvme_io_passthru_err_log_enabled_store);
103 static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
106 struct nvme_ns_head *head = dev_to_ns_head(dev);
107 struct nvme_ns_ids *ids = &head->ids;
108 struct nvme_subsystem *subsys = head->subsys;
109 int serial_len = sizeof(subsys->serial);
110 int model_len = sizeof(subsys->model);
112 if (!uuid_is_null(&ids->uuid))
113 return sysfs_emit(buf, "uuid.%pU\n", &ids->uuid);
115 if (memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
116 return sysfs_emit(buf, "eui.%16phN\n", ids->nguid);
118 if (memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
119 return sysfs_emit(buf, "eui.%8phN\n", ids->eui64);
121 while (serial_len > 0 && (subsys->serial[serial_len - 1] == ' ' ||
122 subsys->serial[serial_len - 1] == '\0'))
124 while (model_len > 0 && (subsys->model[model_len - 1] == ' ' ||
125 subsys->model[model_len - 1] == '\0'))
128 return sysfs_emit(buf, "nvme.%04x-%*phN-%*phN-%08x\n", subsys->vendor_id,
129 serial_len, subsys->serial, model_len, subsys->model,
132 static DEVICE_ATTR_RO(wwid);
134 static ssize_t nguid_show(struct device *dev, struct device_attribute *attr,
137 return sysfs_emit(buf, "%pU\n", dev_to_ns_head(dev)->ids.nguid);
139 static DEVICE_ATTR_RO(nguid);
141 static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
144 struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids;
146 /* For backward compatibility expose the NGUID to userspace if
147 * we have no UUID set
149 if (uuid_is_null(&ids->uuid)) {
151 "No UUID available providing old NGUID\n");
152 return sysfs_emit(buf, "%pU\n", ids->nguid);
154 return sysfs_emit(buf, "%pU\n", &ids->uuid);
156 static DEVICE_ATTR_RO(uuid);
158 static ssize_t eui_show(struct device *dev, struct device_attribute *attr,
161 return sysfs_emit(buf, "%8ph\n", dev_to_ns_head(dev)->ids.eui64);
163 static DEVICE_ATTR_RO(eui);
165 static ssize_t nsid_show(struct device *dev, struct device_attribute *attr,
168 return sysfs_emit(buf, "%d\n", dev_to_ns_head(dev)->ns_id);
170 static DEVICE_ATTR_RO(nsid);
172 static ssize_t csi_show(struct device *dev, struct device_attribute *attr,
175 return sysfs_emit(buf, "%u\n", dev_to_ns_head(dev)->ids.csi);
177 static DEVICE_ATTR_RO(csi);
179 static ssize_t metadata_bytes_show(struct device *dev,
180 struct device_attribute *attr, char *buf)
182 return sysfs_emit(buf, "%u\n", dev_to_ns_head(dev)->ms);
184 static DEVICE_ATTR_RO(metadata_bytes);
186 static int ns_head_update_nuse(struct nvme_ns_head *head)
188 struct nvme_id_ns *id;
190 int srcu_idx, ret = -EWOULDBLOCK;
192 /* Avoid issuing commands too often by rate limiting the update */
193 if (!__ratelimit(&head->rs_nuse))
196 srcu_idx = srcu_read_lock(&head->srcu);
197 ns = nvme_find_path(head);
201 ret = nvme_identify_ns(ns->ctrl, head->ns_id, &id);
205 head->nuse = le64_to_cpu(id->nuse);
209 srcu_read_unlock(&head->srcu, srcu_idx);
213 static int ns_update_nuse(struct nvme_ns *ns)
215 struct nvme_id_ns *id;
218 /* Avoid issuing commands too often by rate limiting the update. */
219 if (!__ratelimit(&ns->head->rs_nuse))
222 ret = nvme_identify_ns(ns->ctrl, ns->head->ns_id, &id);
226 ns->head->nuse = le64_to_cpu(id->nuse);
231 static ssize_t nuse_show(struct device *dev, struct device_attribute *attr,
234 struct nvme_ns_head *head = dev_to_ns_head(dev);
235 struct gendisk *disk = dev_to_disk(dev);
236 struct block_device *bdev = disk->part0;
239 if (IS_ENABLED(CONFIG_NVME_MULTIPATH) &&
240 bdev->bd_disk->fops == &nvme_ns_head_ops)
241 ret = ns_head_update_nuse(head);
243 ret = ns_update_nuse(bdev->bd_disk->private_data);
247 return sysfs_emit(buf, "%llu\n", head->nuse);
249 static DEVICE_ATTR_RO(nuse);
251 static struct attribute *nvme_ns_attrs[] = {
254 &dev_attr_nguid.attr,
258 &dev_attr_metadata_bytes.attr,
260 #ifdef CONFIG_NVME_MULTIPATH
261 &dev_attr_ana_grpid.attr,
262 &dev_attr_ana_state.attr,
264 &dev_attr_io_passthru_err_log_enabled.attr,
268 static umode_t nvme_ns_attrs_are_visible(struct kobject *kobj,
269 struct attribute *a, int n)
271 struct device *dev = container_of(kobj, struct device, kobj);
272 struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids;
274 if (a == &dev_attr_uuid.attr) {
275 if (uuid_is_null(&ids->uuid) &&
276 !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
279 if (a == &dev_attr_nguid.attr) {
280 if (!memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
283 if (a == &dev_attr_eui.attr) {
284 if (!memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
287 #ifdef CONFIG_NVME_MULTIPATH
288 if (a == &dev_attr_ana_grpid.attr || a == &dev_attr_ana_state.attr) {
290 if (nvme_disk_is_ns_head(dev_to_disk(dev)))
292 if (!nvme_ctrl_use_ana(nvme_get_ns_from_dev(dev)->ctrl))
299 static const struct attribute_group nvme_ns_attr_group = {
300 .attrs = nvme_ns_attrs,
301 .is_visible = nvme_ns_attrs_are_visible,
304 const struct attribute_group *nvme_ns_attr_groups[] = {
309 #define nvme_show_str_function(field) \
310 static ssize_t field##_show(struct device *dev, \
311 struct device_attribute *attr, char *buf) \
313 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); \
314 return sysfs_emit(buf, "%.*s\n", \
315 (int)sizeof(ctrl->subsys->field), ctrl->subsys->field); \
317 static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);
319 nvme_show_str_function(model);
320 nvme_show_str_function(serial);
321 nvme_show_str_function(firmware_rev);
323 #define nvme_show_int_function(field) \
324 static ssize_t field##_show(struct device *dev, \
325 struct device_attribute *attr, char *buf) \
327 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); \
328 return sysfs_emit(buf, "%d\n", ctrl->field); \
330 static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);
332 nvme_show_int_function(cntlid);
333 nvme_show_int_function(numa_node);
334 nvme_show_int_function(queue_count);
335 nvme_show_int_function(sqsize);
336 nvme_show_int_function(kato);
338 static ssize_t nvme_sysfs_delete(struct device *dev,
339 struct device_attribute *attr, const char *buf,
342 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
344 if (!test_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags))
347 if (device_remove_file_self(dev, attr))
348 nvme_delete_ctrl_sync(ctrl);
351 static DEVICE_ATTR(delete_controller, S_IWUSR, NULL, nvme_sysfs_delete);
353 static ssize_t nvme_sysfs_show_transport(struct device *dev,
354 struct device_attribute *attr,
357 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
359 return sysfs_emit(buf, "%s\n", ctrl->ops->name);
361 static DEVICE_ATTR(transport, S_IRUGO, nvme_sysfs_show_transport, NULL);
363 static ssize_t nvme_sysfs_show_state(struct device *dev,
364 struct device_attribute *attr,
367 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
368 unsigned state = (unsigned)nvme_ctrl_state(ctrl);
369 static const char *const state_name[] = {
370 [NVME_CTRL_NEW] = "new",
371 [NVME_CTRL_LIVE] = "live",
372 [NVME_CTRL_RESETTING] = "resetting",
373 [NVME_CTRL_CONNECTING] = "connecting",
374 [NVME_CTRL_DELETING] = "deleting",
375 [NVME_CTRL_DELETING_NOIO]= "deleting (no IO)",
376 [NVME_CTRL_DEAD] = "dead",
379 if (state < ARRAY_SIZE(state_name) && state_name[state])
380 return sysfs_emit(buf, "%s\n", state_name[state]);
382 return sysfs_emit(buf, "unknown state\n");
385 static DEVICE_ATTR(state, S_IRUGO, nvme_sysfs_show_state, NULL);
387 static ssize_t nvme_sysfs_show_subsysnqn(struct device *dev,
388 struct device_attribute *attr,
391 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
393 return sysfs_emit(buf, "%s\n", ctrl->subsys->subnqn);
395 static DEVICE_ATTR(subsysnqn, S_IRUGO, nvme_sysfs_show_subsysnqn, NULL);
397 static ssize_t nvme_sysfs_show_hostnqn(struct device *dev,
398 struct device_attribute *attr,
401 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
403 return sysfs_emit(buf, "%s\n", ctrl->opts->host->nqn);
405 static DEVICE_ATTR(hostnqn, S_IRUGO, nvme_sysfs_show_hostnqn, NULL);
407 static ssize_t nvme_sysfs_show_hostid(struct device *dev,
408 struct device_attribute *attr,
411 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
413 return sysfs_emit(buf, "%pU\n", &ctrl->opts->host->id);
415 static DEVICE_ATTR(hostid, S_IRUGO, nvme_sysfs_show_hostid, NULL);
417 static ssize_t nvme_sysfs_show_address(struct device *dev,
418 struct device_attribute *attr,
421 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
423 return ctrl->ops->get_address(ctrl, buf, PAGE_SIZE);
425 static DEVICE_ATTR(address, S_IRUGO, nvme_sysfs_show_address, NULL);
427 static ssize_t nvme_ctrl_loss_tmo_show(struct device *dev,
428 struct device_attribute *attr, char *buf)
430 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
431 struct nvmf_ctrl_options *opts = ctrl->opts;
433 if (ctrl->opts->max_reconnects == -1)
434 return sysfs_emit(buf, "off\n");
435 return sysfs_emit(buf, "%d\n",
436 opts->max_reconnects * opts->reconnect_delay);
439 static ssize_t nvme_ctrl_loss_tmo_store(struct device *dev,
440 struct device_attribute *attr, const char *buf, size_t count)
442 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
443 struct nvmf_ctrl_options *opts = ctrl->opts;
444 int ctrl_loss_tmo, err;
446 err = kstrtoint(buf, 10, &ctrl_loss_tmo);
450 if (ctrl_loss_tmo < 0)
451 opts->max_reconnects = -1;
453 opts->max_reconnects = DIV_ROUND_UP(ctrl_loss_tmo,
454 opts->reconnect_delay);
457 static DEVICE_ATTR(ctrl_loss_tmo, S_IRUGO | S_IWUSR,
458 nvme_ctrl_loss_tmo_show, nvme_ctrl_loss_tmo_store);
460 static ssize_t nvme_ctrl_reconnect_delay_show(struct device *dev,
461 struct device_attribute *attr, char *buf)
463 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
465 if (ctrl->opts->reconnect_delay == -1)
466 return sysfs_emit(buf, "off\n");
467 return sysfs_emit(buf, "%d\n", ctrl->opts->reconnect_delay);
470 static ssize_t nvme_ctrl_reconnect_delay_store(struct device *dev,
471 struct device_attribute *attr, const char *buf, size_t count)
473 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
477 err = kstrtou32(buf, 10, &v);
481 ctrl->opts->reconnect_delay = v;
484 static DEVICE_ATTR(reconnect_delay, S_IRUGO | S_IWUSR,
485 nvme_ctrl_reconnect_delay_show, nvme_ctrl_reconnect_delay_store);
487 static ssize_t nvme_ctrl_fast_io_fail_tmo_show(struct device *dev,
488 struct device_attribute *attr, char *buf)
490 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
492 if (ctrl->opts->fast_io_fail_tmo == -1)
493 return sysfs_emit(buf, "off\n");
494 return sysfs_emit(buf, "%d\n", ctrl->opts->fast_io_fail_tmo);
497 static ssize_t nvme_ctrl_fast_io_fail_tmo_store(struct device *dev,
498 struct device_attribute *attr, const char *buf, size_t count)
500 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
501 struct nvmf_ctrl_options *opts = ctrl->opts;
502 int fast_io_fail_tmo, err;
504 err = kstrtoint(buf, 10, &fast_io_fail_tmo);
508 if (fast_io_fail_tmo < 0)
509 opts->fast_io_fail_tmo = -1;
511 opts->fast_io_fail_tmo = fast_io_fail_tmo;
514 static DEVICE_ATTR(fast_io_fail_tmo, S_IRUGO | S_IWUSR,
515 nvme_ctrl_fast_io_fail_tmo_show, nvme_ctrl_fast_io_fail_tmo_store);
517 static ssize_t cntrltype_show(struct device *dev,
518 struct device_attribute *attr, char *buf)
520 static const char * const type[] = {
521 [NVME_CTRL_IO] = "io\n",
522 [NVME_CTRL_DISC] = "discovery\n",
523 [NVME_CTRL_ADMIN] = "admin\n",
525 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
527 if (ctrl->cntrltype > NVME_CTRL_ADMIN || !type[ctrl->cntrltype])
528 return sysfs_emit(buf, "reserved\n");
530 return sysfs_emit(buf, type[ctrl->cntrltype]);
532 static DEVICE_ATTR_RO(cntrltype);
534 static ssize_t dctype_show(struct device *dev,
535 struct device_attribute *attr, char *buf)
537 static const char * const type[] = {
538 [NVME_DCTYPE_NOT_REPORTED] = "none\n",
539 [NVME_DCTYPE_DDC] = "ddc\n",
540 [NVME_DCTYPE_CDC] = "cdc\n",
542 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
544 if (ctrl->dctype > NVME_DCTYPE_CDC || !type[ctrl->dctype])
545 return sysfs_emit(buf, "reserved\n");
547 return sysfs_emit(buf, type[ctrl->dctype]);
549 static DEVICE_ATTR_RO(dctype);
551 #ifdef CONFIG_NVME_HOST_AUTH
552 static ssize_t nvme_ctrl_dhchap_secret_show(struct device *dev,
553 struct device_attribute *attr, char *buf)
555 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
556 struct nvmf_ctrl_options *opts = ctrl->opts;
558 if (!opts->dhchap_secret)
559 return sysfs_emit(buf, "none\n");
560 return sysfs_emit(buf, "%s\n", opts->dhchap_secret);
563 static ssize_t nvme_ctrl_dhchap_secret_store(struct device *dev,
564 struct device_attribute *attr, const char *buf, size_t count)
566 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
567 struct nvmf_ctrl_options *opts = ctrl->opts;
570 if (!ctrl->opts->dhchap_secret)
574 if (memcmp(buf, "DHHC-1:", 7))
577 dhchap_secret = kzalloc(count + 1, GFP_KERNEL);
580 memcpy(dhchap_secret, buf, count);
581 nvme_auth_stop(ctrl);
582 if (strcmp(dhchap_secret, opts->dhchap_secret)) {
583 struct nvme_dhchap_key *key, *host_key;
586 ret = nvme_auth_generate_key(dhchap_secret, &key);
588 kfree(dhchap_secret);
591 kfree(opts->dhchap_secret);
592 opts->dhchap_secret = dhchap_secret;
593 host_key = ctrl->host_key;
594 mutex_lock(&ctrl->dhchap_auth_mutex);
595 ctrl->host_key = key;
596 mutex_unlock(&ctrl->dhchap_auth_mutex);
597 nvme_auth_free_key(host_key);
599 kfree(dhchap_secret);
600 /* Start re-authentication */
601 dev_info(ctrl->device, "re-authenticating controller\n");
602 queue_work(nvme_wq, &ctrl->dhchap_auth_work);
607 static DEVICE_ATTR(dhchap_secret, S_IRUGO | S_IWUSR,
608 nvme_ctrl_dhchap_secret_show, nvme_ctrl_dhchap_secret_store);
610 static ssize_t nvme_ctrl_dhchap_ctrl_secret_show(struct device *dev,
611 struct device_attribute *attr, char *buf)
613 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
614 struct nvmf_ctrl_options *opts = ctrl->opts;
616 if (!opts->dhchap_ctrl_secret)
617 return sysfs_emit(buf, "none\n");
618 return sysfs_emit(buf, "%s\n", opts->dhchap_ctrl_secret);
621 static ssize_t nvme_ctrl_dhchap_ctrl_secret_store(struct device *dev,
622 struct device_attribute *attr, const char *buf, size_t count)
624 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
625 struct nvmf_ctrl_options *opts = ctrl->opts;
628 if (!ctrl->opts->dhchap_ctrl_secret)
632 if (memcmp(buf, "DHHC-1:", 7))
635 dhchap_secret = kzalloc(count + 1, GFP_KERNEL);
638 memcpy(dhchap_secret, buf, count);
639 nvme_auth_stop(ctrl);
640 if (strcmp(dhchap_secret, opts->dhchap_ctrl_secret)) {
641 struct nvme_dhchap_key *key, *ctrl_key;
644 ret = nvme_auth_generate_key(dhchap_secret, &key);
646 kfree(dhchap_secret);
649 kfree(opts->dhchap_ctrl_secret);
650 opts->dhchap_ctrl_secret = dhchap_secret;
651 ctrl_key = ctrl->ctrl_key;
652 mutex_lock(&ctrl->dhchap_auth_mutex);
653 ctrl->ctrl_key = key;
654 mutex_unlock(&ctrl->dhchap_auth_mutex);
655 nvme_auth_free_key(ctrl_key);
657 kfree(dhchap_secret);
658 /* Start re-authentication */
659 dev_info(ctrl->device, "re-authenticating controller\n");
660 queue_work(nvme_wq, &ctrl->dhchap_auth_work);
665 static DEVICE_ATTR(dhchap_ctrl_secret, S_IRUGO | S_IWUSR,
666 nvme_ctrl_dhchap_ctrl_secret_show, nvme_ctrl_dhchap_ctrl_secret_store);
#ifdef CONFIG_NVME_TCP_TLS
/* "tls_key": serial of the TLS PSK in use; empty read when no key. */
static ssize_t tls_key_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (!ctrl->tls_key)
		return 0;
	return sysfs_emit(buf, "%08x", key_serial(ctrl->tls_key));
}
static DEVICE_ATTR_RO(tls_key);
#endif
682 static struct attribute *nvme_dev_attrs[] = {
683 &dev_attr_reset_controller.attr,
684 &dev_attr_rescan_controller.attr,
685 &dev_attr_model.attr,
686 &dev_attr_serial.attr,
687 &dev_attr_firmware_rev.attr,
688 &dev_attr_cntlid.attr,
689 &dev_attr_delete_controller.attr,
690 &dev_attr_transport.attr,
691 &dev_attr_subsysnqn.attr,
692 &dev_attr_address.attr,
693 &dev_attr_state.attr,
694 &dev_attr_numa_node.attr,
695 &dev_attr_queue_count.attr,
696 &dev_attr_sqsize.attr,
697 &dev_attr_hostnqn.attr,
698 &dev_attr_hostid.attr,
699 &dev_attr_ctrl_loss_tmo.attr,
700 &dev_attr_reconnect_delay.attr,
701 &dev_attr_fast_io_fail_tmo.attr,
703 &dev_attr_cntrltype.attr,
704 &dev_attr_dctype.attr,
705 #ifdef CONFIG_NVME_HOST_AUTH
706 &dev_attr_dhchap_secret.attr,
707 &dev_attr_dhchap_ctrl_secret.attr,
709 #ifdef CONFIG_NVME_TCP_TLS
710 &dev_attr_tls_key.attr,
712 &dev_attr_adm_passthru_err_log_enabled.attr,
716 static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj,
717 struct attribute *a, int n)
719 struct device *dev = container_of(kobj, struct device, kobj);
720 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
722 if (a == &dev_attr_delete_controller.attr && !ctrl->ops->delete_ctrl)
724 if (a == &dev_attr_address.attr && !ctrl->ops->get_address)
726 if (a == &dev_attr_hostnqn.attr && !ctrl->opts)
728 if (a == &dev_attr_hostid.attr && !ctrl->opts)
730 if (a == &dev_attr_ctrl_loss_tmo.attr && !ctrl->opts)
732 if (a == &dev_attr_reconnect_delay.attr && !ctrl->opts)
734 if (a == &dev_attr_fast_io_fail_tmo.attr && !ctrl->opts)
736 #ifdef CONFIG_NVME_HOST_AUTH
737 if (a == &dev_attr_dhchap_secret.attr && !ctrl->opts)
739 if (a == &dev_attr_dhchap_ctrl_secret.attr && !ctrl->opts)
742 #ifdef CONFIG_NVME_TCP_TLS
743 if (a == &dev_attr_tls_key.attr &&
744 (!ctrl->opts || strcmp(ctrl->opts->transport, "tcp")))
751 const struct attribute_group nvme_dev_attrs_group = {
752 .attrs = nvme_dev_attrs,
753 .is_visible = nvme_dev_attrs_are_visible,
755 EXPORT_SYMBOL_GPL(nvme_dev_attrs_group);
757 const struct attribute_group *nvme_dev_attr_groups[] = {
758 &nvme_dev_attrs_group,
762 #define SUBSYS_ATTR_RO(_name, _mode, _show) \
763 struct device_attribute subsys_attr_##_name = \
764 __ATTR(_name, _mode, _show, NULL)
766 static ssize_t nvme_subsys_show_nqn(struct device *dev,
767 struct device_attribute *attr,
770 struct nvme_subsystem *subsys =
771 container_of(dev, struct nvme_subsystem, dev);
773 return sysfs_emit(buf, "%s\n", subsys->subnqn);
775 static SUBSYS_ATTR_RO(subsysnqn, S_IRUGO, nvme_subsys_show_nqn);
777 static ssize_t nvme_subsys_show_type(struct device *dev,
778 struct device_attribute *attr,
781 struct nvme_subsystem *subsys =
782 container_of(dev, struct nvme_subsystem, dev);
784 switch (subsys->subtype) {
786 return sysfs_emit(buf, "discovery\n");
788 return sysfs_emit(buf, "nvm\n");
790 return sysfs_emit(buf, "reserved\n");
793 static SUBSYS_ATTR_RO(subsystype, S_IRUGO, nvme_subsys_show_type);
795 #define nvme_subsys_show_str_function(field) \
796 static ssize_t subsys_##field##_show(struct device *dev, \
797 struct device_attribute *attr, char *buf) \
799 struct nvme_subsystem *subsys = \
800 container_of(dev, struct nvme_subsystem, dev); \
801 return sysfs_emit(buf, "%.*s\n", \
802 (int)sizeof(subsys->field), subsys->field); \
804 static SUBSYS_ATTR_RO(field, S_IRUGO, subsys_##field##_show);
806 nvme_subsys_show_str_function(model);
807 nvme_subsys_show_str_function(serial);
808 nvme_subsys_show_str_function(firmware_rev);
810 static struct attribute *nvme_subsys_attrs[] = {
811 &subsys_attr_model.attr,
812 &subsys_attr_serial.attr,
813 &subsys_attr_firmware_rev.attr,
814 &subsys_attr_subsysnqn.attr,
815 &subsys_attr_subsystype.attr,
816 #ifdef CONFIG_NVME_MULTIPATH
817 &subsys_attr_iopolicy.attr,
822 static const struct attribute_group nvme_subsys_attrs_group = {
823 .attrs = nvme_subsys_attrs,
826 const struct attribute_group *nvme_subsys_attrs_groups[] = {
827 &nvme_subsys_attrs_group,