/*
 * NOTE(extraction): the following two lines are gitweb page residue kept as a
 * comment -- page title: "firewire: core: add memo about the caller of show
 * functions for device attributes"; actual file:
 * [sfrench/cifs-2.6.git] / drivers / s390 / block / dasd_eckd.c
 */
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
4  *                  Horst Hummel <Horst.Hummel@de.ibm.com>
5  *                  Carsten Otte <Cotte@de.ibm.com>
6  *                  Martin Schwidefsky <schwidefsky@de.ibm.com>
7  * Bugreports.to..: <Linux390@de.ibm.com>
8  * Copyright IBM Corp. 1999, 2009
9  * EMC Symmetrix ioctl Copyright EMC Corporation, 2008
10  * Author.........: Nigel Hislop <hislop_nigel@emc.com>
11  */
12
13 #include <linux/stddef.h>
14 #include <linux/kernel.h>
15 #include <linux/slab.h>
16 #include <linux/hdreg.h>        /* HDIO_GETGEO                      */
17 #include <linux/bio.h>
18 #include <linux/module.h>
19 #include <linux/compat.h>
20 #include <linux/init.h>
21 #include <linux/seq_file.h>
22 #include <linux/uaccess.h>
23 #include <linux/io.h>
24
25 #include <asm/css_chars.h>
26 #include <asm/debug.h>
27 #include <asm/idals.h>
28 #include <asm/ebcdic.h>
29 #include <asm/cio.h>
30 #include <asm/ccwdev.h>
31 #include <asm/itcw.h>
32 #include <asm/schid.h>
33 #include <asm/chpid.h>
34
35 #include "dasd_int.h"
36 #include "dasd_eckd.h"
37
38 /*
39  * raw track access always map to 64k in memory
40  * so it maps to 16 blocks of 4k per track
41  */
42 #define DASD_RAW_BLOCK_PER_TRACK 16
43 #define DASD_RAW_BLOCKSIZE 4096
44 /* 64k are 128 x 512 byte sectors  */
45 #define DASD_RAW_SECTORS_PER_TRACK 128
46
47 MODULE_LICENSE("GPL");
48
49 static struct dasd_discipline dasd_eckd_discipline;
50
/* The ccw bus type uses this table to find devices that it sends to
 * dasd_eckd_probe */
static struct ccw_device_id dasd_eckd_ids[] = {
	/* { control-unit type/model, device type/model }, private info */
	{ CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3390, 0), .driver_info = 0x1},
	{ CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3390, 0), .driver_info = 0x2},
	{ CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3380, 0), .driver_info = 0x3},
	{ CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3380, 0), .driver_info = 0x4},
	{ CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3380, 0), .driver_info = 0x5},
	{ CCW_DEVICE_DEVTYPE (0x9343, 0, 0x9345, 0), .driver_info = 0x6},
	{ CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3390, 0), .driver_info = 0x7},
	{ CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3380, 0), .driver_info = 0x8},
	{ CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3390, 0), .driver_info = 0x9},
	{ CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3380, 0), .driver_info = 0xa},
	{ /* end of list */ },
};
66
67 MODULE_DEVICE_TABLE(ccw, dasd_eckd_ids);
68
69 static struct ccw_driver dasd_eckd_driver; /* see below */
70
71 static void *rawpadpage;
72
73 #define INIT_CQR_OK 0
74 #define INIT_CQR_UNFORMATTED 1
75 #define INIT_CQR_ERROR 2
76
/* emergency request for reserve/release */
static struct {
	struct dasd_ccw_req cqr;
	struct ccw1 ccw;
	char data[32];
} *dasd_reserve_req;
/* NOTE(review): presumably serializes use of the single static
 * dasd_reserve_req buffer above -- confirm at the call sites */
static DEFINE_MUTEX(dasd_reserve_mutex);

/* preallocated request buffer (one CQR, two CCWs, payload) */
static struct {
	struct dasd_ccw_req cqr;
	struct ccw1 ccw[2];
	char data[40];
} *dasd_vol_info_req;
/* NOTE(review): presumably serializes use of dasd_vol_info_req -- confirm */
static DEFINE_MUTEX(dasd_vol_info_mutex);
91
/* work item payload: the scheduled work plus the devices it operates on */
struct ext_pool_exhaust_work_data {
	struct work_struct worker;
	struct dasd_device *device;
	struct dasd_device *base;
};
97
/* definitions for the path verification worker */
struct pe_handler_work_data {
	struct work_struct worker;
	struct dasd_device *device;
	struct dasd_ccw_req cqr;	/* embedded request used for path I/O */
	struct ccw1 ccw;
	__u8 rcd_buffer[DASD_ECKD_RCD_DATA_SIZE];
	int isglobal;	/* set when the static pe_handler_worker is in use */
	__u8 tbvpm;	/* path mask -- semantics defined at the call sites */
	__u8 fcsecpm;	/* path mask -- semantics defined at the call sites */
};
/* statically allocated fallback work item; NOTE(review): presumably
 * guarded by dasd_pe_handler_mutex -- confirm at the call sites */
static struct pe_handler_work_data *pe_handler_worker;
static DEFINE_MUTEX(dasd_pe_handler_mutex);
111
/* work item payload for attention interrupt handling */
struct check_attention_work_data {
	struct work_struct worker;
	struct dasd_device *device;
	__u8 lpum;	/* NOTE(review): looks like the last-path-used mask
			 * from the interrupt -- confirm against callers */
};
117
118 static int dasd_eckd_ext_pool_id(struct dasd_device *);
119 static int prepare_itcw(struct itcw *, unsigned int, unsigned int, int,
120                         struct dasd_device *, struct dasd_device *,
121                         unsigned int, int, unsigned int, unsigned int,
122                         unsigned int, unsigned int);
123 static int dasd_eckd_query_pprc_status(struct dasd_device *,
124                                        struct dasd_pprc_data_sc4 *);
125
126 /* initial attempt at a probe function. this can be simplified once
127  * the other detection code is gone */
128 static int
129 dasd_eckd_probe (struct ccw_device *cdev)
130 {
131         int ret;
132
133         /* set ECKD specific ccw-device options */
134         ret = ccw_device_set_options(cdev, CCWDEV_ALLOW_FORCE |
135                                      CCWDEV_DO_PATHGROUP | CCWDEV_DO_MULTIPATH);
136         if (ret) {
137                 DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s",
138                                 "dasd_eckd_probe: could not set "
139                                 "ccw-device options");
140                 return ret;
141         }
142         ret = dasd_generic_probe(cdev);
143         return ret;
144 }
145
/* set-online callback: bring the device online with the ECKD discipline */
static int
dasd_eckd_set_online(struct ccw_device *cdev)
{
	return dasd_generic_set_online(cdev, &dasd_eckd_discipline);
}
151
/* record sizes of the first three records (recid 0..2) of a CDL
 * formatted track 0, see dasd_eckd_cdl_reclen() */
static const int sizes_trk0[] = { 28, 148, 84 };
#define LABEL_SIZE 140

/* head and record addresses of count_area read in analysis ccw */
static const int count_area_head[] = { 0, 0, 0, 0, 1 };
static const int count_area_rec[] = { 1, 2, 3, 4, 1 };
158
/*
 * Integer division rounding up: ceil(d1 / d2).
 *
 * Implemented as quotient-plus-remainder-flag instead of the usual
 * (d1 + d2 - 1) / d2, which silently wraps around when d1 + d2 - 1
 * exceeds UINT_MAX.  d2 must be non-zero.
 */
static inline unsigned int
ceil_quot(unsigned int d1, unsigned int d2)
{
	return d1 / d2 + (d1 % d2 != 0);
}
164
/*
 * Return how many records of key length kl and data length dl fit on
 * one track of the device described by rdc.
 *
 * The formulas are the per-geometry track capacity calculations for
 * the 3380/3390/9345 device types; the constants come from the
 * hardware characteristics -- do not "clean them up".  Unknown device
 * types yield 0.
 */
static unsigned int
recs_per_track(struct dasd_eckd_characteristics * rdc,
	       unsigned int kl, unsigned int dl)
{
	int dn, kn;	/* number of 232-byte groups for data/key (3390/9345) */

	switch (rdc->dev_type) {
	case 0x3380:
		if (kl)
			return 1499 / (15 + 7 + ceil_quot(kl + 12, 32) +
				       ceil_quot(dl + 12, 32));
		else
			return 1499 / (15 + ceil_quot(dl + 12, 32));
	case 0x3390:
		dn = ceil_quot(dl + 6, 232) + 1;
		if (kl) {
			kn = ceil_quot(kl + 6, 232) + 1;
			return 1729 / (10 + 9 + ceil_quot(kl + 6 * kn, 34) +
				       9 + ceil_quot(dl + 6 * dn, 34));
		} else
			return 1729 / (10 + 9 + ceil_quot(dl + 6 * dn, 34));
	case 0x9345:
		dn = ceil_quot(dl + 6, 232) + 1;
		if (kl) {
			kn = ceil_quot(kl + 6, 232) + 1;
			return 1420 / (18 + 7 + ceil_quot(kl + 6 * kn, 34) +
				       ceil_quot(dl + 6 * dn, 34));
		} else
			return 1420 / (18 + 7 + ceil_quot(dl + 6 * dn, 34));
	}
	return 0;
}
197
/*
 * Encode a cylinder/head pair into a ch_t.  Cylinder bits above 16
 * (bits 16..27 of cyl) are folded into the upper 12 bits of the head
 * field; the low 4 bits of head hold the head number.
 */
static void set_ch_t(struct ch_t *geo, __u32 cyl, __u8 head)
{
	geo->cyl = (__u16) cyl;
	geo->head = cyl >> 16;
	geo->head <<= 4;
	geo->head |= head;
}
205
/*
 * calculate failing track from sense data depending if
 * it is an EAV device or not
 *
 * Returns 0 and stores the track in *track, or -EINVAL when the irb
 * carries no sense data or the track information is not valid.
 */
static int dasd_eckd_track_from_irb(struct irb *irb, struct dasd_device *device,
				    sector_t *track)
{
	struct dasd_eckd_private *private = device->private;
	u8 *sense = NULL;
	u32 cyl;
	u8 head;

	sense = dasd_get_sense(irb);
	if (!sense) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "ESE error no sense data\n");
		return -EINVAL;
	}
	/* sense byte 27 bit 2 flags the track information as valid */
	if (!(sense[27] & DASD_SENSE_BIT_2)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "ESE error no valid track data\n");
		return -EINVAL;
	}

	if (sense[27] & DASD_SENSE_BIT_3) {
		/* enhanced addressing: the cylinder number is spread
		 * over sense bytes 28..31 (high nibble of byte 31) */
		cyl = sense[30] << 20;
		cyl |= (sense[31] & 0xF0) << 12;
		cyl |= sense[28] << 8;
		cyl |= sense[29];
	} else {
		/* classic addressing: 16-bit cylinder in bytes 29/30 */
		cyl = sense[29] << 8;
		cyl |= sense[30];
	}
	/* low nibble of byte 31 is the head in either format */
	head = sense[31] & 0x0F;
	*track = cyl * private->rdc_data.trk_per_cyl + head;
	return 0;
}
244
245 static int set_timestamp(struct ccw1 *ccw, struct DE_eckd_data *data,
246                      struct dasd_device *device)
247 {
248         struct dasd_eckd_private *private = device->private;
249         int rc;
250
251         rc = get_phys_clock(&data->ep_sys_time);
252         /*
253          * Ignore return code if XRC is not supported or
254          * sync clock is switched off
255          */
256         if ((rc && !private->rdc_data.facilities.XRC_supported) ||
257             rc == -EOPNOTSUPP || rc == -EACCES)
258                 return 0;
259
260         /* switch on System Time Stamp - needed for XRC Support */
261         data->ga_extended |= 0x08; /* switch on 'Time Stamp Valid'   */
262         data->ga_extended |= 0x02; /* switch on 'Extended Parameter' */
263
264         if (ccw) {
265                 ccw->count = sizeof(struct DE_eckd_data);
266                 ccw->flags |= CCW_FLAG_SLI;
267         }
268
269         return rc;
270 }
271
/*
 * Initialize a Define Extent (DE) payload for command cmd over the
 * track range [trk, totrk] and, if ccw is non-NULL, set up the CCW
 * that transfers it.
 *
 * Returns 0, or the set_timestamp() result for the command types that
 * carry a time stamp.
 */
static int
define_extent(struct ccw1 *ccw, struct DE_eckd_data *data, unsigned int trk,
	      unsigned int totrk, int cmd, struct dasd_device *device,
	      int blksize)
{
	struct dasd_eckd_private *private = device->private;
	u16 heads, beghead, endhead;
	u32 begcyl, endcyl;
	int rc = 0;

	if (ccw) {
		ccw->cmd_code = DASD_ECKD_CCW_DEFINE_EXTENT;
		ccw->flags = 0;
		ccw->count = 16;
		ccw->cda = (__u32)virt_to_phys(data);
	}

	memset(data, 0, sizeof(struct DE_eckd_data));
	switch (cmd) {
	/* read-type commands: read permission, caching per attrib */
	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
	case DASD_ECKD_CCW_READ_RECORD_ZERO:
	case DASD_ECKD_CCW_READ:
	case DASD_ECKD_CCW_READ_MT:
	case DASD_ECKD_CCW_READ_CKD:
	case DASD_ECKD_CCW_READ_CKD_MT:
	case DASD_ECKD_CCW_READ_KD:
	case DASD_ECKD_CCW_READ_KD_MT:
		data->mask.perm = 0x1;
		data->attributes.operation = private->attrib.operation;
		break;
	case DASD_ECKD_CCW_READ_COUNT:
		data->mask.perm = 0x1;
		/* count reads always bypass the cache */
		data->attributes.operation = DASD_BYPASS_CACHE;
		break;
	case DASD_ECKD_CCW_READ_TRACK:
	case DASD_ECKD_CCW_READ_TRACK_DATA:
		data->mask.perm = 0x1;
		data->attributes.operation = private->attrib.operation;
		data->blk_size = 0;
		break;
	/* write-type commands: write permission plus time stamp */
	case DASD_ECKD_CCW_WRITE:
	case DASD_ECKD_CCW_WRITE_MT:
	case DASD_ECKD_CCW_WRITE_KD:
	case DASD_ECKD_CCW_WRITE_KD_MT:
		data->mask.perm = 0x02;
		data->attributes.operation = private->attrib.operation;
		rc = set_timestamp(ccw, data, device);
		break;
	case DASD_ECKD_CCW_WRITE_CKD:
	case DASD_ECKD_CCW_WRITE_CKD_MT:
		data->attributes.operation = DASD_BYPASS_CACHE;
		rc = set_timestamp(ccw, data, device);
		break;
	/* format-type commands additionally need the authorization bit */
	case DASD_ECKD_CCW_ERASE:
	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
		data->mask.perm = 0x3;
		data->mask.auth = 0x1;
		data->attributes.operation = DASD_BYPASS_CACHE;
		rc = set_timestamp(ccw, data, device);
		break;
	case DASD_ECKD_CCW_WRITE_FULL_TRACK:
		data->mask.perm = 0x03;
		data->attributes.operation = private->attrib.operation;
		data->blk_size = 0;
		break;
	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
		data->mask.perm = 0x02;
		data->attributes.operation = private->attrib.operation;
		data->blk_size = blksize;
		rc = set_timestamp(ccw, data, device);
		break;
	default:
		dev_err(&device->cdev->dev,
			"0x%x is not a known command\n", cmd);
		break;
	}

	data->attributes.mode = 0x3;	/* ECKD */

	/* Regular Data Format Mode on control unit types 0x2105/0x2107/
	 * 0x1750, except for the CDL special blocks on the first two
	 * tracks */
	if ((private->rdc_data.cu_type == 0x2105 ||
	     private->rdc_data.cu_type == 0x2107 ||
	     private->rdc_data.cu_type == 0x1750)
	    && !(private->uses_cdl && trk < 2))
		data->ga_extended |= 0x40; /* Regular Data Format Mode */

	/* translate linear track numbers into cylinder/head pairs */
	heads = private->rdc_data.trk_per_cyl;
	begcyl = trk / heads;
	beghead = trk % heads;
	endcyl = totrk / heads;
	endhead = totrk % heads;

	/* check for sequential prestage - enhance cylinder range */
	if (data->attributes.operation == DASD_SEQ_PRESTAGE ||
	    data->attributes.operation == DASD_SEQ_ACCESS) {

		if (endcyl + private->attrib.nr_cyl < private->real_cyl)
			endcyl += private->attrib.nr_cyl;
		else
			endcyl = (private->real_cyl - 1);
	}

	set_ch_t(&data->beg_ext, begcyl, beghead);
	set_ch_t(&data->end_ext, endcyl, endhead);
	return rc;
}
378
379
/*
 * Initialize a Locate Record Extended (LRE) payload for count records
 * (or tracks -- see the note at data->count) starting at record
 * rec_on_trk of track trk and, if ccw is non-NULL, set up the CCW
 * that transfers it.
 */
static void locate_record_ext(struct ccw1 *ccw, struct LRE_eckd_data *data,
			      unsigned int trk, unsigned int rec_on_trk,
			      int count, int cmd, struct dasd_device *device,
			      unsigned int reclen, unsigned int tlf)
{
	struct dasd_eckd_private *private = device->private;
	int sector;
	int dn, d;

	if (ccw) {
		ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD_EXT;
		ccw->flags = 0;
		/* full track writes carry two extra bytes of extended
		 * parameter (the track bit mask built below) */
		if (cmd == DASD_ECKD_CCW_WRITE_FULL_TRACK)
			ccw->count = 22;
		else
			ccw->count = 20;
		ccw->cda = (__u32)virt_to_phys(data);
	}

	memset(data, 0, sizeof(*data));
	sector = 0;
	if (rec_on_trk) {
		/* per-geometry sector estimate for the record position
		 * (presumably to reduce rotational delay -- the
		 * constants mirror those in recs_per_track()) */
		switch (private->rdc_data.dev_type) {
		case 0x3390:
			dn = ceil_quot(reclen + 6, 232);
			d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
			break;
		case 0x3380:
			d = 7 + ceil_quot(reclen + 12, 32);
			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
			break;
		}
	}
	data->sector = sector;
	/* note: meaning of count depends on the operation
	 *	 for record based I/O it's the number of records, but for
	 *	 track based I/O it's the number of tracks
	 */
	data->count = count;
	switch (cmd) {
	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
		data->operation.orientation = 0x1;
		data->operation.operation = 0x03;
		/* record zero is transferred in addition to count records */
		data->count++;
		break;
	case DASD_ECKD_CCW_READ_RECORD_ZERO:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		data->count++;
		break;
	case DASD_ECKD_CCW_WRITE:
	case DASD_ECKD_CCW_WRITE_MT:
	case DASD_ECKD_CCW_WRITE_KD:
	case DASD_ECKD_CCW_WRITE_KD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x01;
		break;
	case DASD_ECKD_CCW_WRITE_CKD:
	case DASD_ECKD_CCW_WRITE_CKD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_WRITE_FULL_TRACK:
		data->operation.orientation = 0x0;
		data->operation.operation = 0x3F;
		data->extended_operation = 0x11;
		data->length = 0;
		data->extended_parameter_length = 0x02;
		/* build a left-aligned bit mask with one bit per track
		 * to be written (count <= 16) */
		if (data->count > 8) {
			data->extended_parameter[0] = 0xFF;
			data->extended_parameter[1] = 0xFF;
			data->extended_parameter[1] <<= (16 - count);
		} else {
			data->extended_parameter[0] = 0xFF;
			data->extended_parameter[0] <<= (8 - count);
			data->extended_parameter[1] = 0x00;
		}
		data->sector = 0xFF;
		break;
	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;	/* not tlf, as one might think */
		data->operation.operation = 0x3F;
		data->extended_operation = 0x23;
		break;
	case DASD_ECKD_CCW_READ:
	case DASD_ECKD_CCW_READ_MT:
	case DASD_ECKD_CCW_READ_KD:
	case DASD_ECKD_CCW_READ_KD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_READ_CKD:
	case DASD_ECKD_CCW_READ_CKD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_READ_COUNT:
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_READ_TRACK:
		data->operation.orientation = 0x1;
		data->operation.operation = 0x0C;
		data->extended_parameter_length = 0;
		data->sector = 0xFF;
		break;
	case DASD_ECKD_CCW_READ_TRACK_DATA:
		data->auxiliary.length_valid = 0x1;
		data->length = tlf;
		data->operation.operation = 0x0C;
		break;
	case DASD_ECKD_CCW_ERASE:
		data->length = reclen;
		data->auxiliary.length_valid = 0x1;
		data->operation.operation = 0x0b;
		break;
	default:
		/* unknown opcodes are a driver bug, not a runtime error */
		DBF_DEV_EVENT(DBF_ERR, device,
			    "fill LRE unknown opcode 0x%x", cmd);
		BUG();
	}
	set_ch_t(&data->seek_addr,
		 trk / private->rdc_data.trk_per_cyl,
		 trk % private->rdc_data.trk_per_cyl);
	data->search_arg.cyl = data->seek_addr.cyl;
	data->search_arg.head = data->seek_addr.head;
	data->search_arg.record = rec_on_trk;
}
521
/*
 * Build a Prefix (PFX) CCW: a combined Define Extent + Locate Record
 * Extended payload addressed for the basedev/startdev pair (the two
 * differ when I/O is started on a PAV alias of the base device).
 *
 * format 0 uses only the define extent part, format 1 additionally
 * fills the locate record extended part; other values are a bug.
 *
 * Returns the define_extent() result.
 */
static int prefix_LRE(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
		      unsigned int trk, unsigned int totrk, int cmd,
		      struct dasd_device *basedev, struct dasd_device *startdev,
		      unsigned int format, unsigned int rec_on_trk, int count,
		      unsigned int blksize, unsigned int tlf)
{
	struct dasd_eckd_private *basepriv, *startpriv;
	struct LRE_eckd_data *lredata;
	struct DE_eckd_data *dedata;
	int rc = 0;

	basepriv = basedev->private;
	startpriv = startdev->private;
	dedata = &pfxdata->define_extent;
	lredata = &pfxdata->locate_record;

	ccw->cmd_code = DASD_ECKD_CCW_PFX;
	ccw->flags = 0;
	/* full track writes carry two extra bytes (cf. locate_record_ext) */
	if (cmd == DASD_ECKD_CCW_WRITE_FULL_TRACK) {
		ccw->count = sizeof(*pfxdata) + 2;
		ccw->cda = (__u32)virt_to_phys(pfxdata);
		memset(pfxdata, 0, sizeof(*pfxdata) + 2);
	} else {
		ccw->count = sizeof(*pfxdata);
		ccw->cda = (__u32)virt_to_phys(pfxdata);
		memset(pfxdata, 0, sizeof(*pfxdata));
	}

	/* prefix data */
	if (format > 1) {
		DBF_DEV_EVENT(DBF_ERR, basedev,
			      "PFX LRE unknown format 0x%x", format);
		BUG();
		return -EINVAL;
	}
	pfxdata->format = format;
	/* address the base device, not the (possibly alias) startdev */
	pfxdata->base_address = basepriv->conf.ned->unit_addr;
	pfxdata->base_lss = basepriv->conf.ned->ID;
	pfxdata->validity.define_extent = 1;

	/* private uid is kept up to date, conf_data may be outdated */
	if (startpriv->uid.type == UA_BASE_PAV_ALIAS)
		pfxdata->validity.verify_base = 1;

	if (startpriv->uid.type == UA_HYPER_PAV_ALIAS) {
		pfxdata->validity.verify_base = 1;
		pfxdata->validity.hyper_pav = 1;
	}

	rc = define_extent(NULL, dedata, trk, totrk, cmd, basedev, blksize);

	/*
	 * For some commands the System Time Stamp is set in the define extent
	 * data when XRC is supported. The validity of the time stamp must be
	 * reflected in the prefix data as well.
	 */
	if (dedata->ga_extended & 0x08 && dedata->ga_extended & 0x02)
		pfxdata->validity.time_stamp = 1; /* 'Time Stamp Valid'   */

	if (format == 1) {
		locate_record_ext(NULL, lredata, trk, rec_on_trk, count, cmd,
				  basedev, blksize, tlf);
	}

	return rc;
}
588
/*
 * Convenience wrapper around prefix_LRE() for a format 0 prefix
 * (define extent part only, no locate record extended part).
 */
static int prefix(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
		  unsigned int trk, unsigned int totrk, int cmd,
		  struct dasd_device *basedev, struct dasd_device *startdev)
{
	return prefix_LRE(ccw, pfxdata, trk, totrk, cmd, basedev, startdev,
			  0, 0, 0, 0, 0);
}
596
/*
 * Initialize a (classic) Locate Record payload for no_rec records
 * starting at record rec_on_trk of track trk, and set up the CCW that
 * transfers it.  Legacy counterpart of locate_record_ext().
 */
static void
locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, unsigned int trk,
	      unsigned int rec_on_trk, int no_rec, int cmd,
	      struct dasd_device * device, int reclen)
{
	struct dasd_eckd_private *private = device->private;
	int sector;
	int dn, d;

	DBF_DEV_EVENT(DBF_INFO, device,
		  "Locate: trk %d, rec %d, no_rec %d, cmd %d, reclen %d",
		  trk, rec_on_trk, no_rec, cmd, reclen);

	ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD;
	ccw->flags = 0;
	ccw->count = 16;
	ccw->cda = (__u32)virt_to_phys(data);

	memset(data, 0, sizeof(struct LO_eckd_data));
	sector = 0;
	if (rec_on_trk) {
		/* per-geometry sector estimate for the record position
		 * (same formulas as in locate_record_ext()) */
		switch (private->rdc_data.dev_type) {
		case 0x3390:
			dn = ceil_quot(reclen + 6, 232);
			d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
			break;
		case 0x3380:
			d = 7 + ceil_quot(reclen + 12, 32);
			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
			break;
		}
	}
	data->sector = sector;
	data->count = no_rec;
	switch (cmd) {
	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
		data->operation.orientation = 0x1;
		data->operation.operation = 0x03;
		/* record zero is transferred in addition to no_rec records */
		data->count++;
		break;
	case DASD_ECKD_CCW_READ_RECORD_ZERO:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		data->count++;
		break;
	case DASD_ECKD_CCW_WRITE:
	case DASD_ECKD_CCW_WRITE_MT:
	case DASD_ECKD_CCW_WRITE_KD:
	case DASD_ECKD_CCW_WRITE_KD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x01;
		break;
	case DASD_ECKD_CCW_WRITE_CKD:
	case DASD_ECKD_CCW_WRITE_CKD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_READ:
	case DASD_ECKD_CCW_READ_MT:
	case DASD_ECKD_CCW_READ_KD:
	case DASD_ECKD_CCW_READ_KD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_READ_CKD:
	case DASD_ECKD_CCW_READ_CKD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_READ_COUNT:
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_ERASE:
		data->length = reclen;
		data->auxiliary.last_bytes_used = 0x1;
		data->operation.operation = 0x0b;
		break;
	default:
		/* unlike locate_record_ext(), unknown opcodes are only
		 * logged here, not treated as a fatal bug */
		DBF_DEV_EVENT(DBF_ERR, device, "unknown locate record "
			      "opcode 0x%x", cmd);
	}
	set_ch_t(&data->seek_addr,
		 trk / private->rdc_data.trk_per_cyl,
		 trk % private->rdc_data.trk_per_cyl);
	data->search_arg.cyl = data->seek_addr.cyl;
	data->search_arg.head = data->seek_addr.head;
	data->search_arg.record = rec_on_trk;
}
698
699 /*
700  * Returns 1 if the block is one of the special blocks that needs
701  * to get read/written with the KD variant of the command.
702  * That is DASD_ECKD_READ_KD_MT instead of DASD_ECKD_READ_MT and
703  * DASD_ECKD_WRITE_KD_MT instead of DASD_ECKD_WRITE_MT.
704  * Luckily the KD variants differ only by one bit (0x08) from the
705  * normal variant. So don't wonder about code like:
706  * if (dasd_eckd_cdl_special(blk_per_trk, recid))
707  *         ccw->cmd_code |= 0x8;
708  */
static inline int
dasd_eckd_cdl_special(int blk_per_trk, int recid)
{
	/* the first three records of track 0 and all records of track 1
	 * (blk_per_trk <= recid < 2 * blk_per_trk) are special */
	return recid < 3 ||
	       (recid >= blk_per_trk && recid < 2 * blk_per_trk);
}
720
721 /*
722  * Returns the record size for the special blocks of the cdl format.
723  * Only returns something useful if dasd_eckd_cdl_special is true
724  * for the recid.
725  */
726 static inline int
727 dasd_eckd_cdl_reclen(int recid)
728 {
729         if (recid < 3)
730                 return sizes_trk0[recid];
731         return LABEL_SIZE;
732 }
/* create unique id from private structure. */
static void create_uid(struct dasd_conf *conf, struct dasd_uid *uid)
{
	int count;

	memset(uid, 0, sizeof(struct dasd_uid));
	/* vendor and serial come from the NED in EBCDIC; convert to ASCII,
	 * leaving the last byte as NUL terminator */
	memcpy(uid->vendor, conf->ned->HDA_manufacturer,
	       sizeof(uid->vendor) - 1);
	EBCASC(uid->vendor, sizeof(uid->vendor) - 1);
	memcpy(uid->serial, &conf->ned->serial,
	       sizeof(uid->serial) - 1);
	EBCASC(uid->serial, sizeof(uid->serial) - 1);
	uid->ssid = conf->gneq->subsystemID;
	uid->real_unit_addr = conf->ned->unit_addr;
	if (conf->sneq) {
		/* the SNEQ tells base device vs. (hyper) PAV alias */
		uid->type = conf->sneq->sua_flags;
		if (uid->type == UA_BASE_PAV_ALIAS)
			uid->base_unit_addr = conf->sneq->base_unit_addr;
	} else {
		uid->type = UA_BASE_DEVICE;
	}
	if (conf->vdsneq) {
		/* render the 16-byte virtual-device token as hex string */
		for (count = 0; count < 16; count++) {
			sprintf(uid->vduit+2*count, "%02x",
				conf->vdsneq->uit[count]);
		}
	}
}
761
762 /*
763  * Generate device unique id that specifies the physical device.
764  */
765 static int dasd_eckd_generate_uid(struct dasd_device *device)
766 {
767         struct dasd_eckd_private *private = device->private;
768         unsigned long flags;
769
770         if (!private)
771                 return -ENODEV;
772         if (!private->conf.ned || !private->conf.gneq)
773                 return -ENODEV;
774         spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
775         create_uid(&private->conf, &private->uid);
776         spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
777         return 0;
778 }
779
780 static int dasd_eckd_get_uid(struct dasd_device *device, struct dasd_uid *uid)
781 {
782         struct dasd_eckd_private *private = device->private;
783         unsigned long flags;
784
785         if (private) {
786                 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
787                 *uid = private->uid;
788                 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
789                 return 0;
790         }
791         return -EINVAL;
792 }
793
794 /*
795  * compare device UID with data of a given dasd_eckd_private structure
796  * return 0 for match
797  */
798 static int dasd_eckd_compare_path_uid(struct dasd_device *device,
799                                       struct dasd_conf *path_conf)
800 {
801         struct dasd_uid device_uid;
802         struct dasd_uid path_uid;
803
804         create_uid(path_conf, &path_uid);
805         dasd_eckd_get_uid(device, &device_uid);
806
807         return memcmp(&device_uid, &path_uid, sizeof(struct dasd_uid));
808 }
809
810 static void dasd_eckd_fill_rcd_cqr(struct dasd_device *device,
811                                    struct dasd_ccw_req *cqr,
812                                    __u8 *rcd_buffer,
813                                    __u8 lpm)
814 {
815         struct ccw1 *ccw;
816         /*
817          * buffer has to start with EBCDIC "V1.0" to show
818          * support for virtual device SNEQ
819          */
820         rcd_buffer[0] = 0xE5;
821         rcd_buffer[1] = 0xF1;
822         rcd_buffer[2] = 0x4B;
823         rcd_buffer[3] = 0xF0;
824
825         ccw = cqr->cpaddr;
826         ccw->cmd_code = DASD_ECKD_CCW_RCD;
827         ccw->flags = 0;
828         ccw->cda = (__u32)virt_to_phys(rcd_buffer);
829         ccw->count = DASD_ECKD_RCD_DATA_SIZE;
830         cqr->magic = DASD_ECKD_MAGIC;
831
832         cqr->startdev = device;
833         cqr->memdev = device;
834         cqr->block = NULL;
835         cqr->expires = 10*HZ;
836         cqr->lpm = lpm;
837         cqr->retries = 256;
838         cqr->buildclk = get_tod_clock();
839         cqr->status = DASD_CQR_FILLED;
840         set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
841 }
842
843 /*
844  * Wakeup helper for read_conf
845  * if the cqr is not done and needs some error recovery
846  * the buffer has to be re-initialized with the EBCDIC "V1.0"
847  * to show support for virtual device SNEQ
848  */
849 static void read_conf_cb(struct dasd_ccw_req *cqr, void *data)
850 {
851         struct ccw1 *ccw;
852         __u8 *rcd_buffer;
853
854         if (cqr->status !=  DASD_CQR_DONE) {
855                 ccw = cqr->cpaddr;
856                 rcd_buffer = phys_to_virt(ccw->cda);
857                 memset(rcd_buffer, 0, sizeof(*rcd_buffer));
858
859                 rcd_buffer[0] = 0xE5;
860                 rcd_buffer[1] = 0xF1;
861                 rcd_buffer[2] = 0x4B;
862                 rcd_buffer[3] = 0xF0;
863         }
864         dasd_wakeup_cb(cqr, data);
865 }
866
867 static int dasd_eckd_read_conf_immediately(struct dasd_device *device,
868                                            struct dasd_ccw_req *cqr,
869                                            __u8 *rcd_buffer,
870                                            __u8 lpm)
871 {
872         struct ciw *ciw;
873         int rc;
874         /*
875          * sanity check: scan for RCD command in extended SenseID data
876          * some devices do not support RCD
877          */
878         ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD);
879         if (!ciw || ciw->cmd != DASD_ECKD_CCW_RCD)
880                 return -EOPNOTSUPP;
881
882         dasd_eckd_fill_rcd_cqr(device, cqr, rcd_buffer, lpm);
883         clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
884         set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
885         cqr->retries = 5;
886         cqr->callback = read_conf_cb;
887         rc = dasd_sleep_on_immediatly(cqr);
888         return rc;
889 }
890
891 static int dasd_eckd_read_conf_lpm(struct dasd_device *device,
892                                    void **rcd_buffer,
893                                    int *rcd_buffer_size, __u8 lpm)
894 {
895         struct ciw *ciw;
896         char *rcd_buf = NULL;
897         int ret;
898         struct dasd_ccw_req *cqr;
899
900         /*
901          * sanity check: scan for RCD command in extended SenseID data
902          * some devices do not support RCD
903          */
904         ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD);
905         if (!ciw || ciw->cmd != DASD_ECKD_CCW_RCD) {
906                 ret = -EOPNOTSUPP;
907                 goto out_error;
908         }
909         rcd_buf = kzalloc(DASD_ECKD_RCD_DATA_SIZE, GFP_KERNEL | GFP_DMA);
910         if (!rcd_buf) {
911                 ret = -ENOMEM;
912                 goto out_error;
913         }
914         cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* RCD */,
915                                    0, /* use rcd_buf as data ara */
916                                    device, NULL);
917         if (IS_ERR(cqr)) {
918                 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
919                               "Could not allocate RCD request");
920                 ret = -ENOMEM;
921                 goto out_error;
922         }
923         dasd_eckd_fill_rcd_cqr(device, cqr, rcd_buf, lpm);
924         cqr->callback = read_conf_cb;
925         ret = dasd_sleep_on(cqr);
926         /*
927          * on success we update the user input parms
928          */
929         dasd_sfree_request(cqr, cqr->memdev);
930         if (ret)
931                 goto out_error;
932
933         *rcd_buffer_size = DASD_ECKD_RCD_DATA_SIZE;
934         *rcd_buffer = rcd_buf;
935         return 0;
936 out_error:
937         kfree(rcd_buf);
938         *rcd_buffer = NULL;
939         *rcd_buffer_size = 0;
940         return ret;
941 }
942
943 static int dasd_eckd_identify_conf_parts(struct dasd_conf *conf)
944 {
945
946         struct dasd_sneq *sneq;
947         int i, count;
948
949         conf->ned = NULL;
950         conf->sneq = NULL;
951         conf->vdsneq = NULL;
952         conf->gneq = NULL;
953         count = conf->len / sizeof(struct dasd_sneq);
954         sneq = (struct dasd_sneq *)conf->data;
955         for (i = 0; i < count; ++i) {
956                 if (sneq->flags.identifier == 1 && sneq->format == 1)
957                         conf->sneq = sneq;
958                 else if (sneq->flags.identifier == 1 && sneq->format == 4)
959                         conf->vdsneq = (struct vd_sneq *)sneq;
960                 else if (sneq->flags.identifier == 2)
961                         conf->gneq = (struct dasd_gneq *)sneq;
962                 else if (sneq->flags.identifier == 3 && sneq->res1 == 1)
963                         conf->ned = (struct dasd_ned *)sneq;
964                 sneq++;
965         }
966         if (!conf->ned || !conf->gneq) {
967                 conf->ned = NULL;
968                 conf->sneq = NULL;
969                 conf->vdsneq = NULL;
970                 conf->gneq = NULL;
971                 return -EINVAL;
972         }
973         return 0;
974
975 };
976
977 static unsigned char dasd_eckd_path_access(void *conf_data, int conf_len)
978 {
979         struct dasd_gneq *gneq;
980         int i, count, found;
981
982         count = conf_len / sizeof(*gneq);
983         gneq = (struct dasd_gneq *)conf_data;
984         found = 0;
985         for (i = 0; i < count; ++i) {
986                 if (gneq->flags.identifier == 2) {
987                         found = 1;
988                         break;
989                 }
990                 gneq++;
991         }
992         if (found)
993                 return ((char *)gneq)[18] & 0x07;
994         else
995                 return 0;
996 }
997
/*
 * Store @conf_data as the configuration data of channel path position
 * @chp, refresh the cached subchannel/channel-path ids for that path
 * and free the previously stored per-path buffer.
 * Takes ownership of @conf_data.
 */
static void dasd_eckd_store_conf_data(struct dasd_device *device,
				      struct dasd_conf_data *conf_data, int chp)
{
	struct dasd_eckd_private *private = device->private;
	struct channel_path_desc_fmt0 *chp_desc;
	struct subchannel_id sch_id;
	void *cdp;

	/*
	 * path handling and read_conf allocate data
	 * free it before replacing the pointer
	 * also replace the old private->conf_data pointer
	 * with the new one if this points to the same data
	 */
	cdp = device->path[chp].conf_data;
	if (private->conf.data == cdp) {
		/*
		 * device-wide conf data pointed at this path's buffer:
		 * re-point it to the new buffer and re-parse the records
		 */
		private->conf.data = (void *)conf_data;
		dasd_eckd_identify_conf_parts(&private->conf);
	}
	ccw_device_get_schid(device->cdev, &sch_id);
	device->path[chp].conf_data = conf_data;
	device->path[chp].cssid = sch_id.cssid;
	device->path[chp].ssid = sch_id.ssid;
	chp_desc = ccw_device_get_chp_desc(device->cdev, chp);
	if (chp_desc)
		device->path[chp].chpid = chp_desc->chpid;
	kfree(chp_desc);	/* kfree(NULL) is a no-op */
	kfree(cdp);		/* free the replaced buffer last */
}
1027
1028 static void dasd_eckd_clear_conf_data(struct dasd_device *device)
1029 {
1030         struct dasd_eckd_private *private = device->private;
1031         int i;
1032
1033         private->conf.data = NULL;
1034         private->conf.len = 0;
1035         for (i = 0; i < 8; i++) {
1036                 kfree(device->path[i].conf_data);
1037                 device->path[i].conf_data = NULL;
1038                 device->path[i].cssid = 0;
1039                 device->path[i].ssid = 0;
1040                 device->path[i].chpid = 0;
1041                 dasd_path_notoper(device, i);
1042         }
1043 }
1044
1045 static void dasd_eckd_read_fc_security(struct dasd_device *device)
1046 {
1047         struct dasd_eckd_private *private = device->private;
1048         u8 esm_valid;
1049         u8 esm[8];
1050         int chp;
1051         int rc;
1052
1053         rc = chsc_scud(private->uid.ssid, (u64 *)esm, &esm_valid);
1054         if (rc) {
1055                 for (chp = 0; chp < 8; chp++)
1056                         device->path[chp].fc_security = 0;
1057                 return;
1058         }
1059
1060         for (chp = 0; chp < 8; chp++) {
1061                 if (esm_valid & (0x80 >> chp))
1062                         device->path[chp].fc_security = esm[chp];
1063                 else
1064                         device->path[chp].fc_security = 0;
1065         }
1066 }
1067
1068 static void dasd_eckd_get_uid_string(struct dasd_conf *conf, char *print_uid)
1069 {
1070         struct dasd_uid uid;
1071
1072         create_uid(conf, &uid);
1073         snprintf(print_uid, DASD_UID_STRLEN, "%s.%s.%04x.%02x%s%s",
1074                  uid.vendor, uid.serial, uid.ssid, uid.real_unit_addr,
1075                  uid.vduit[0] ? "." : "", uid.vduit);
1076 }
1077
1078 static int dasd_eckd_check_cabling(struct dasd_device *device,
1079                                    void *conf_data, __u8 lpm)
1080 {
1081         char print_path_uid[DASD_UID_STRLEN], print_device_uid[DASD_UID_STRLEN];
1082         struct dasd_eckd_private *private = device->private;
1083         struct dasd_conf path_conf;
1084
1085         path_conf.data = conf_data;
1086         path_conf.len = DASD_ECKD_RCD_DATA_SIZE;
1087         if (dasd_eckd_identify_conf_parts(&path_conf))
1088                 return 1;
1089
1090         if (dasd_eckd_compare_path_uid(device, &path_conf)) {
1091                 dasd_eckd_get_uid_string(&path_conf, print_path_uid);
1092                 dasd_eckd_get_uid_string(&private->conf, print_device_uid);
1093                 dev_err(&device->cdev->dev,
1094                         "Not all channel paths lead to the same device, path %02X leads to device %s instead of %s\n",
1095                         lpm, print_path_uid, print_device_uid);
1096                 return 1;
1097         }
1098
1099         return 0;
1100 }
1101
/*
 * Read configuration data over every operational channel path, store
 * it per path, verify all paths lead to the same device and classify
 * each path (preferred/non-preferred, operational).
 * Returns 0 on success or -EINVAL when at least one path was cabled to
 * a different device; a failing RCD read aborts with its error code.
 */
static int dasd_eckd_read_conf(struct dasd_device *device)
{
	void *conf_data;
	int conf_len, conf_data_saved;
	int rc, path_err, pos;
	__u8 lpm, opm;
	struct dasd_eckd_private *private;

	private = device->private;
	opm = ccw_device_get_path_mask(device->cdev);
	conf_data_saved = 0;
	path_err = 0;
	/* get configuration data per operational path */
	for (lpm = 0x80; lpm; lpm>>= 1) {
		if (!(lpm & opm))
			continue;
		rc = dasd_eckd_read_conf_lpm(device, &conf_data,
					     &conf_len, lpm);
		if (rc && rc != -EOPNOTSUPP) {	/* -EOPNOTSUPP is ok */
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
					"Read configuration data returned "
					"error %d", rc);
			return rc;
		}
		if (conf_data == NULL) {
			/* device does not support RCD (-EOPNOTSUPP) */
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
					"No configuration data "
					"retrieved");
			/* no further analysis possible */
			dasd_path_add_opm(device, opm);
			continue;	/* no error */
		}
		/* save first valid configuration data */
		if (!conf_data_saved) {
			/* initially clear previously stored conf_data */
			dasd_eckd_clear_conf_data(device);
			private->conf.data = conf_data;
			private->conf.len = conf_len;
			if (dasd_eckd_identify_conf_parts(&private->conf)) {
				/* unusable data: drop it and try next path */
				private->conf.data = NULL;
				private->conf.len = 0;
				kfree(conf_data);
				continue;
			}
			/*
			 * build device UID that other path data
			 * can be compared to it
			 */
			dasd_eckd_generate_uid(device);
			conf_data_saved++;
		} else if (dasd_eckd_check_cabling(device, conf_data, lpm)) {
			/* path leads to a different device: mark as cabling
			 * error and keep going with the remaining paths */
			dasd_path_add_cablepm(device, lpm);
			path_err = -EINVAL;
			kfree(conf_data);
			continue;
		}

		pos = pathmask_to_pos(lpm);
		dasd_eckd_store_conf_data(device, conf_data, pos);

		/* classify the path from the access info in the GNEQ */
		switch (dasd_eckd_path_access(conf_data, conf_len)) {
		case 0x02:
			dasd_path_add_nppm(device, lpm);
			break;
		case 0x03:
			dasd_path_add_ppm(device, lpm);
			break;
		}
		/* first verified path makes the device operational */
		if (!dasd_path_get_opm(device)) {
			dasd_path_set_opm(device, lpm);
			dasd_generic_path_operational(device);
		} else {
			dasd_path_add_opm(device, lpm);
		}
	}

	return path_err;
}
1180
1181 static u32 get_fcx_max_data(struct dasd_device *device)
1182 {
1183         struct dasd_eckd_private *private = device->private;
1184         int fcx_in_css, fcx_in_gneq, fcx_in_features;
1185         unsigned int mdc;
1186         int tpm;
1187
1188         if (dasd_nofcx)
1189                 return 0;
1190         /* is transport mode supported? */
1191         fcx_in_css = css_general_characteristics.fcx;
1192         fcx_in_gneq = private->conf.gneq->reserved2[7] & 0x04;
1193         fcx_in_features = private->features.feature[40] & 0x80;
1194         tpm = fcx_in_css && fcx_in_gneq && fcx_in_features;
1195
1196         if (!tpm)
1197                 return 0;
1198
1199         mdc = ccw_device_get_mdc(device->cdev, 0);
1200         if (mdc == 0) {
1201                 dev_warn(&device->cdev->dev, "Detecting the maximum supported data size for zHPF requests failed\n");
1202                 return 0;
1203         } else {
1204                 return (u32)mdc * FCX_MAX_DATA_FACTOR;
1205         }
1206 }
1207
/*
 * Verify that the zHPF maximum data size of the new path @lpm is not
 * below the maximum currently in use by the device.
 * Returns 0 when the path is acceptable (or zHPF is not in use) and
 * -EACCES when the new path's limit is below the active maximum.
 */
static int verify_fcx_max_data(struct dasd_device *device, __u8 lpm)
{
	struct dasd_eckd_private *private = device->private;
	unsigned int mdc;
	u32 fcx_max_data;

	if (private->fcx_max_data) {
		mdc = ccw_device_get_mdc(device->cdev, lpm);
		if (mdc == 0) {
			dev_warn(&device->cdev->dev,
				 "Detecting the maximum data size for zHPF "
				 "requests failed (rc=%d) for a new path %x\n",
				 mdc, lpm);
			/*
			 * NOTE(review): mdc is 0 here, so this returns 0 and
			 * the caller treats the path as usable despite the
			 * failed detection; the "%d" also prints the unsigned
			 * mdc. Confirm this is the intended behavior.
			 */
			return mdc;
		}
		fcx_max_data = (u32)mdc * FCX_MAX_DATA_FACTOR;
		if (fcx_max_data < private->fcx_max_data) {
			dev_warn(&device->cdev->dev,
				 "The maximum data size for zHPF requests %u "
				 "on a new path %x is below the active maximum "
				 "%u\n", fcx_max_data, lpm,
				 private->fcx_max_data);
			return -EACCES;
		}
	}
	return 0;
}
1235
/*
 * Re-read configuration data over the operational paths and rebuild
 * the device UID from the first path that delivers usable data.
 * Used when a path UID mismatch suggests the device itself changed
 * (e.g. after a z/VM hyperswap). Returns 0 on success or a negative
 * error code (-ENODEV when no path produced valid data).
 */
static int rebuild_device_uid(struct dasd_device *device,
			      struct pe_handler_work_data *data)
{
	struct dasd_eckd_private *private = device->private;
	__u8 lpm, opm = dasd_path_get_opm(device);
	int rc = -ENODEV;

	for (lpm = 0x80; lpm; lpm >>= 1) {
		if (!(lpm & opm))
			continue;
		/* reuse the work data's buffer and cqr for each attempt */
		memset(&data->rcd_buffer, 0, sizeof(data->rcd_buffer));
		memset(&data->cqr, 0, sizeof(data->cqr));
		data->cqr.cpaddr = &data->ccw;
		rc = dasd_eckd_read_conf_immediately(device, &data->cqr,
						     data->rcd_buffer,
						     lpm);

		if (rc) {
			if (rc == -EOPNOTSUPP) /* -EOPNOTSUPP is ok */
				continue;
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
					"Read configuration data "
					"returned error %d", rc);
			break;
		}
		/* overwrite the stored conf data and re-parse the records */
		memcpy(private->conf.data, data->rcd_buffer,
		       DASD_ECKD_RCD_DATA_SIZE);
		if (dasd_eckd_identify_conf_parts(&private->conf)) {
			rc = -ENODEV;
		} else /* first valid path is enough */
			break;
	}

	if (!rc)
		rc = dasd_eckd_generate_uid(device);

	return rc;
}
1274
/*
 * Verify the paths in data->tbvpm that became available: read their
 * configuration data, check zHPF limits and UID consistency, store the
 * per-path conf data and merge the resulting path masks into the
 * device under the ccw device lock.
 */
static void dasd_eckd_path_available_action(struct dasd_device *device,
					    struct pe_handler_work_data *data)
{
	__u8 path_rcd_buf[DASD_ECKD_RCD_DATA_SIZE];
	__u8 lpm, opm, npm, ppm, epm, hpfpm, cablepm;
	struct dasd_conf_data *conf_data;
	char print_uid[DASD_UID_STRLEN];
	struct dasd_conf path_conf;
	unsigned long flags;
	int rc, pos;

	/* masks collected during verification:
	 * opm - operational, npm - non-preferred, ppm - preferred,
	 * epm - to be retried, hpfpm - unusable for zHPF,
	 * cablepm - miscabled */
	opm = 0;
	npm = 0;
	ppm = 0;
	epm = 0;
	hpfpm = 0;
	cablepm = 0;

	for (lpm = 0x80; lpm; lpm >>= 1) {
		if (!(lpm & data->tbvpm))
			continue;
		/* reuse the work data's buffer and cqr for each path */
		memset(&data->rcd_buffer, 0, sizeof(data->rcd_buffer));
		memset(&data->cqr, 0, sizeof(data->cqr));
		data->cqr.cpaddr = &data->ccw;
		rc = dasd_eckd_read_conf_immediately(device, &data->cqr,
						     data->rcd_buffer,
						     lpm);
		if (!rc) {
			/* classify the path from the GNEQ access info */
			switch (dasd_eckd_path_access(data->rcd_buffer,
						      DASD_ECKD_RCD_DATA_SIZE)
				) {
			case 0x02:
				npm |= lpm;
				break;
			case 0x03:
				ppm |= lpm;
				break;
			}
			opm |= lpm;
		} else if (rc == -EOPNOTSUPP) {
			/* RCD not supported: accept the path as-is */
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
					"path verification: No configuration "
					"data retrieved");
			opm |= lpm;
		} else if (rc == -EAGAIN) {
			/* device stopped: remember the path for a retry */
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
					"path verification: device is stopped,"
					" try again later");
			epm |= lpm;
		} else {
			dev_warn(&device->cdev->dev,
				 "Reading device feature codes failed "
				 "(rc=%d) for new path %x\n", rc, lpm);
			continue;
		}
		/* exclude paths whose zHPF limit is below the active one */
		if (verify_fcx_max_data(device, lpm)) {
			opm &= ~lpm;
			npm &= ~lpm;
			ppm &= ~lpm;
			hpfpm |= lpm;
			continue;
		}

		/*
		 * save conf_data for comparison after
		 * rebuild_device_uid may have changed
		 * the original data
		 */
		memcpy(&path_rcd_buf, data->rcd_buffer,
		       DASD_ECKD_RCD_DATA_SIZE);
		path_conf.data = (void *)&path_rcd_buf;
		path_conf.len = DASD_ECKD_RCD_DATA_SIZE;
		if (dasd_eckd_identify_conf_parts(&path_conf)) {
			path_conf.data = NULL;
			path_conf.len = 0;
			continue;
		}

		/*
		 * compare path UID with device UID only if at least
		 * one valid path is left
		 * in other case the device UID may have changed and
		 * the first working path UID will be used as device UID
		 */
		if (dasd_path_get_opm(device) &&
		    dasd_eckd_compare_path_uid(device, &path_conf)) {
			/*
			 * the comparison was not successful
			 * rebuild the device UID with at least one
			 * known path in case a z/VM hyperswap command
			 * has changed the device
			 *
			 * after this compare again
			 *
			 * if either the rebuild or the recompare fails
			 * the path can not be used
			 */
			if (rebuild_device_uid(device, data) ||
			    dasd_eckd_compare_path_uid(
				    device, &path_conf)) {
				dasd_eckd_get_uid_string(&path_conf, print_uid);
				dev_err(&device->cdev->dev,
					"The newly added channel path %02X "
					"will not be used because it leads "
					"to a different device %s\n",
					lpm, print_uid);
				opm &= ~lpm;
				npm &= ~lpm;
				ppm &= ~lpm;
				cablepm |= lpm;
				continue;
			}
		}

		conf_data = kzalloc(DASD_ECKD_RCD_DATA_SIZE, GFP_KERNEL);
		if (conf_data) {
			memcpy(conf_data, data->rcd_buffer,
			       DASD_ECKD_RCD_DATA_SIZE);
		} else {
			/*
			 * path is operational but path config data could not
			 * be stored due to low mem condition
			 * add it to the error path mask and schedule a path
			 * verification later that this could be added again
			 */
			epm |= lpm;
		}
		pos = pathmask_to_pos(lpm);
		/* stores even a NULL conf_data, freeing the old buffer */
		dasd_eckd_store_conf_data(device, conf_data, pos);

		/*
		 * There is a small chance that a path is lost again between
		 * above path verification and the following modification of
		 * the device opm mask. We could avoid that race here by using
		 * yet another path mask, but we rather deal with this unlikely
		 * situation in dasd_start_IO.
		 */
		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
		if (!dasd_path_get_opm(device) && opm) {
			/* first operational path brings the device up */
			dasd_path_set_opm(device, opm);
			dasd_generic_path_operational(device);
		} else {
			dasd_path_add_opm(device, opm);
		}
		dasd_path_add_nppm(device, npm);
		dasd_path_add_ppm(device, ppm);
		if (epm) {
			/* retry the error paths via the device timer */
			dasd_path_add_tbvpm(device, epm);
			dasd_device_set_timer(device, 50);
		}
		dasd_path_add_cablepm(device, cablepm);
		dasd_path_add_nohpfpm(device, hpfpm);
		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);

		dasd_path_create_kobj(device, pos);
	}
}
1432
/*
 * Work function for the path event handler: runs path verification
 * and/or the FC security read for the device referenced by the work
 * data, then drops the device reference and releases the work data
 * (kfree for dynamically allocated data, mutex unlock for the static
 * global worker).
 */
static void do_pe_handler_work(struct work_struct *work)
{
	struct pe_handler_work_data *data;
	struct dasd_device *device;

	data = container_of(work, struct pe_handler_work_data, worker);
	device = data->device;

	/* delay path verification until device was resumed */
	if (test_bit(DASD_FLAG_SUSPENDED, &device->flags)) {
		schedule_work(work);
		return;
	}
	/* check if path verification already running and delay if so */
	if (test_and_set_bit(DASD_FLAG_PATH_VERIFY, &device->flags)) {
		schedule_work(work);
		return;
	}

	if (data->tbvpm)
		dasd_eckd_path_available_action(device, data);
	if (data->fcsecpm)
		dasd_eckd_read_fc_security(device);

	clear_bit(DASD_FLAG_PATH_VERIFY, &device->flags);
	dasd_put_device(device);
	if (data->isglobal)
		mutex_unlock(&dasd_pe_handler_mutex);
	else
		kfree(data);
}
1464
1465 static int dasd_eckd_pe_handler(struct dasd_device *device,
1466                                 __u8 tbvpm, __u8 fcsecpm)
1467 {
1468         struct pe_handler_work_data *data;
1469
1470         data = kzalloc(sizeof(*data), GFP_ATOMIC | GFP_DMA);
1471         if (!data) {
1472                 if (mutex_trylock(&dasd_pe_handler_mutex)) {
1473                         data = pe_handler_worker;
1474                         data->isglobal = 1;
1475                 } else {
1476                         return -ENOMEM;
1477                 }
1478         }
1479         INIT_WORK(&data->worker, do_pe_handler_work);
1480         dasd_get_device(device);
1481         data->device = device;
1482         data->tbvpm = tbvpm;
1483         data->fcsecpm = fcsecpm;
1484         schedule_work(&data->worker);
1485         return 0;
1486 }
1487
1488 static void dasd_eckd_reset_path(struct dasd_device *device, __u8 pm)
1489 {
1490         struct dasd_eckd_private *private = device->private;
1491         unsigned long flags;
1492
1493         if (!private->fcx_max_data)
1494                 private->fcx_max_data = get_fcx_max_data(device);
1495         spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
1496         dasd_path_set_tbvpm(device, pm ? : dasd_path_get_notoperpm(device));
1497         dasd_schedule_device_bh(device);
1498         spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
1499 }
1500
/*
 * Read the device feature codes via a PSF/RSSD channel program
 * (Perform Subsystem Function "Prepare for Read Subsystem Data" with
 * suborder 0x41, followed by Read Subsystem Data) and cache them in
 * private->features. Returns 0 on success or a negative error code;
 * private->features stays zeroed on failure.
 */
static int dasd_eckd_read_features(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_rssd_features *features;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;

	memset(&private->features, 0, sizeof(struct dasd_rssd_features));
	/* two CCWs; data area holds the PSF parameters and the RSSD result */
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
				   (sizeof(struct dasd_psf_prssd_data) +
				    sizeof(struct dasd_rssd_features)),
				   device, NULL);
	if (IS_ERR(cqr)) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", "Could not "
				"allocate initialization request");
		return PTR_ERR(cqr);
	}
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 256;
	cqr->expires = 10 * HZ;

	/* Prepare for Read Subsystem Data */
	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = 0x41;	/* Read Feature Codes */
	/* all other bytes of prssdp must be zero */

	/* first CCW: PSF, command-chained to the RSSD CCW */
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(struct dasd_psf_prssd_data);
	ccw->flags |= CCW_FLAG_CC;
	ccw->cda = (__u32)virt_to_phys(prssdp);

	/* Read Subsystem Data - feature codes */
	features = (struct dasd_rssd_features *) (prssdp + 1);
	memset(features, 0, sizeof(struct dasd_rssd_features));

	/* second CCW: RSSD reading into the area behind the PSF data */
	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(struct dasd_rssd_features);
	ccw->cda = (__u32)virt_to_phys(features);

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	rc = dasd_sleep_on(cqr);
	if (rc == 0) {
		/* copy the result out before the request is freed */
		prssdp = (struct dasd_psf_prssd_data *) cqr->data;
		features = (struct dasd_rssd_features *) (prssdp + 1);
		memcpy(&private->features, features,
		       sizeof(struct dasd_rssd_features));
	} else
		dev_warn(&device->cdev->dev, "Reading device feature codes"
			 " failed with rc=%d\n", rc);
	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}
1562
/* Read Volume Information - Volume Storage Query */
static int dasd_eckd_read_vol_info(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_rssd_vsq *vsq;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int useglobal;
	int rc;

	/* This command cannot be executed on an alias device */
	if (private->uid.type == UA_BASE_PAV_ALIAS ||
	    private->uid.type == UA_HYPER_PAV_ALIAS)
		return 0;

	useglobal = 0;
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2 /* PSF + RSSD */,
				   sizeof(*prssdp) + sizeof(*vsq), device, NULL);
	if (IS_ERR(cqr)) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate initialization request");
		/*
		 * Allocation failed: fall back to the preallocated global
		 * request. Access to it is serialized by dasd_vol_info_mutex,
		 * which is held until the request has completed below.
		 */
		mutex_lock(&dasd_vol_info_mutex);
		useglobal = 1;
		cqr = &dasd_vol_info_req->cqr;
		memset(cqr, 0, sizeof(*cqr));
		memset(dasd_vol_info_req, 0, sizeof(*dasd_vol_info_req));
		cqr->cpaddr = &dasd_vol_info_req->ccw;
		cqr->data = &dasd_vol_info_req->data;
		cqr->magic = DASD_ECKD_MAGIC;
	}

	/* Prepare for Read Subsystem Data */
	prssdp = cqr->data;
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = PSF_SUBORDER_VSQ;	/* Volume Storage Query */
	prssdp->lss = private->conf.ned->ID;
	prssdp->volume = private->conf.ned->unit_addr;

	/* First CCW: PSF that requests the subsystem data */
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(*prssdp);
	ccw->flags |= CCW_FLAG_CC;	/* command-chain to the RSSD CCW */
	ccw->cda = (__u32)virt_to_phys(prssdp);

	/* Read Subsystem Data - Volume Storage Query */
	vsq = (struct dasd_rssd_vsq *)(prssdp + 1);
	memset(vsq, 0, sizeof(*vsq));

	/* Second CCW: read the VSQ data into the buffer after the prssdp */
	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(*vsq);
	ccw->flags |= CCW_FLAG_SLI;	/* tolerate shorter-than-count reads */
	ccw->cda = (__u32)virt_to_phys(vsq);

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 256;
	cqr->expires = device->default_expires * HZ;
	/* The command might not be supported. Suppress the error output */
	__set_bit(DASD_CQR_SUPPRESS_CR, &cqr->flags);

	rc = dasd_sleep_on_interruptible(cqr);
	if (rc == 0) {
		/* cache the query result for the accessors below */
		memcpy(&private->vsq, vsq, sizeof(*vsq));
	} else {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
				"Reading the volume storage information failed with rc=%d", rc);
	}

	if (useglobal)
		mutex_unlock(&dasd_vol_info_mutex);
	else
		dasd_sfree_request(cqr, cqr->memdev);

	return rc;
}
1643
1644 static int dasd_eckd_is_ese(struct dasd_device *device)
1645 {
1646         struct dasd_eckd_private *private = device->private;
1647
1648         return private->vsq.vol_info.ese;
1649 }
1650
1651 static int dasd_eckd_ext_pool_id(struct dasd_device *device)
1652 {
1653         struct dasd_eckd_private *private = device->private;
1654
1655         return private->vsq.extent_pool_id;
1656 }
1657
1658 /*
1659  * This value represents the total amount of available space. As more space is
1660  * allocated by ESE volumes, this value will decrease.
1661  * The data for this value is therefore updated on any call.
1662  */
1663 static int dasd_eckd_space_configured(struct dasd_device *device)
1664 {
1665         struct dasd_eckd_private *private = device->private;
1666         int rc;
1667
1668         rc = dasd_eckd_read_vol_info(device);
1669
1670         return rc ? : private->vsq.space_configured;
1671 }
1672
1673 /*
1674  * The value of space allocated by an ESE volume may have changed and is
1675  * therefore updated on any call.
1676  */
1677 static int dasd_eckd_space_allocated(struct dasd_device *device)
1678 {
1679         struct dasd_eckd_private *private = device->private;
1680         int rc;
1681
1682         rc = dasd_eckd_read_vol_info(device);
1683
1684         return rc ? : private->vsq.space_allocated;
1685 }
1686
1687 static int dasd_eckd_logical_capacity(struct dasd_device *device)
1688 {
1689         struct dasd_eckd_private *private = device->private;
1690
1691         return private->vsq.logical_capacity;
1692 }
1693
1694 static void dasd_eckd_ext_pool_exhaust_work(struct work_struct *work)
1695 {
1696         struct ext_pool_exhaust_work_data *data;
1697         struct dasd_device *device;
1698         struct dasd_device *base;
1699
1700         data = container_of(work, struct ext_pool_exhaust_work_data, worker);
1701         device = data->device;
1702         base = data->base;
1703
1704         if (!base)
1705                 base = device;
1706         if (dasd_eckd_space_configured(base) != 0) {
1707                 dasd_generic_space_avail(device);
1708         } else {
1709                 dev_warn(&device->cdev->dev, "No space left in the extent pool\n");
1710                 DBF_DEV_EVENT(DBF_WARNING, device, "%s", "out of space");
1711         }
1712
1713         dasd_put_device(device);
1714         kfree(data);
1715 }
1716
1717 static int dasd_eckd_ext_pool_exhaust(struct dasd_device *device,
1718                                       struct dasd_ccw_req *cqr)
1719 {
1720         struct ext_pool_exhaust_work_data *data;
1721
1722         data = kzalloc(sizeof(*data), GFP_ATOMIC);
1723         if (!data)
1724                 return -ENOMEM;
1725         INIT_WORK(&data->worker, dasd_eckd_ext_pool_exhaust_work);
1726         dasd_get_device(device);
1727         data->device = device;
1728
1729         if (cqr->block)
1730                 data->base = cqr->block->base;
1731         else if (cqr->basedev)
1732                 data->base = cqr->basedev;
1733         else
1734                 data->base = NULL;
1735
1736         schedule_work(&data->worker);
1737
1738         return 0;
1739 }
1740
1741 static void dasd_eckd_cpy_ext_pool_data(struct dasd_device *device,
1742                                         struct dasd_rssd_lcq *lcq)
1743 {
1744         struct dasd_eckd_private *private = device->private;
1745         int pool_id = dasd_eckd_ext_pool_id(device);
1746         struct dasd_ext_pool_sum eps;
1747         int i;
1748
1749         for (i = 0; i < lcq->pool_count; i++) {
1750                 eps = lcq->ext_pool_sum[i];
1751                 if (eps.pool_id == pool_id) {
1752                         memcpy(&private->eps, &eps,
1753                                sizeof(struct dasd_ext_pool_sum));
1754                 }
1755         }
1756 }
1757
/* Read Extent Pool Information - Logical Configuration Query */
static int dasd_eckd_read_ext_pool_info(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_rssd_lcq *lcq;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;

	/* This command cannot be executed on an alias device */
	if (private->uid.type == UA_BASE_PAV_ALIAS ||
	    private->uid.type == UA_HYPER_PAV_ALIAS)
		return 0;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2 /* PSF + RSSD */,
				   sizeof(*prssdp) + sizeof(*lcq), device, NULL);
	if (IS_ERR(cqr)) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate initialization request");
		return PTR_ERR(cqr);
	}

	/* Prepare for Read Subsystem Data */
	prssdp = cqr->data;
	memset(prssdp, 0, sizeof(*prssdp));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = PSF_SUBORDER_LCQ;	/* Logical Configuration Query */

	/* First CCW: PSF that requests the subsystem data */
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(*prssdp);
	ccw->flags |= CCW_FLAG_CC;	/* command-chain to the RSSD CCW */
	ccw->cda = (__u32)virt_to_phys(prssdp);

	/* LCQ result buffer immediately follows the prssdp in cqr->data */
	lcq = (struct dasd_rssd_lcq *)(prssdp + 1);
	memset(lcq, 0, sizeof(*lcq));

	/* Second CCW: read the Logical Configuration Query data */
	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(*lcq);
	ccw->flags |= CCW_FLAG_SLI;	/* tolerate shorter-than-count reads */
	ccw->cda = (__u32)virt_to_phys(lcq);

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 256;
	cqr->expires = device->default_expires * HZ;
	/* The command might not be supported. Suppress the error output */
	__set_bit(DASD_CQR_SUPPRESS_CR, &cqr->flags);

	rc = dasd_sleep_on_interruptible(cqr);
	if (rc == 0) {
		/* cache the matching extent pool summary in private->eps */
		dasd_eckd_cpy_ext_pool_data(device, lcq);
	} else {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
				"Reading the logical configuration failed with rc=%d", rc);
	}

	dasd_sfree_request(cqr, cqr->memdev);

	return rc;
}
1824
1825 /*
1826  * Depending on the device type, the extent size is specified either as
1827  * cylinders per extent (CKD) or size per extent (FBA)
1828  * A 1GB size corresponds to 1113cyl, and 16MB to 21cyl.
1829  */
1830 static int dasd_eckd_ext_size(struct dasd_device *device)
1831 {
1832         struct dasd_eckd_private *private = device->private;
1833         struct dasd_ext_pool_sum eps = private->eps;
1834
1835         if (!eps.flags.extent_size_valid)
1836                 return 0;
1837         if (eps.extent_size.size_1G)
1838                 return 1113;
1839         if (eps.extent_size.size_16M)
1840                 return 21;
1841
1842         return 0;
1843 }
1844
1845 static int dasd_eckd_ext_pool_warn_thrshld(struct dasd_device *device)
1846 {
1847         struct dasd_eckd_private *private = device->private;
1848
1849         return private->eps.warn_thrshld;
1850 }
1851
1852 static int dasd_eckd_ext_pool_cap_at_warnlevel(struct dasd_device *device)
1853 {
1854         struct dasd_eckd_private *private = device->private;
1855
1856         return private->eps.flags.capacity_at_warnlevel;
1857 }
1858
1859 /*
1860  * Extent Pool out of space
1861  */
1862 static int dasd_eckd_ext_pool_oos(struct dasd_device *device)
1863 {
1864         struct dasd_eckd_private *private = device->private;
1865
1866         return private->eps.flags.pool_oos;
1867 }
1868
1869 /*
1870  * Build CP for Perform Subsystem Function - SSC.
1871  */
1872 static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device,
1873                                                     int enable_pav)
1874 {
1875         struct dasd_ccw_req *cqr;
1876         struct dasd_psf_ssc_data *psf_ssc_data;
1877         struct ccw1 *ccw;
1878
1879         cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ ,
1880                                   sizeof(struct dasd_psf_ssc_data),
1881                                    device, NULL);
1882
1883         if (IS_ERR(cqr)) {
1884                 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1885                            "Could not allocate PSF-SSC request");
1886                 return cqr;
1887         }
1888         psf_ssc_data = (struct dasd_psf_ssc_data *)cqr->data;
1889         psf_ssc_data->order = PSF_ORDER_SSC;
1890         psf_ssc_data->suborder = 0xc0;
1891         if (enable_pav) {
1892                 psf_ssc_data->suborder |= 0x08;
1893                 psf_ssc_data->reserved[0] = 0x88;
1894         }
1895         ccw = cqr->cpaddr;
1896         ccw->cmd_code = DASD_ECKD_CCW_PSF;
1897         ccw->cda = (__u32)virt_to_phys(psf_ssc_data);
1898         ccw->count = 66;
1899
1900         cqr->startdev = device;
1901         cqr->memdev = device;
1902         cqr->block = NULL;
1903         cqr->retries = 256;
1904         cqr->expires = 10*HZ;
1905         cqr->buildclk = get_tod_clock();
1906         cqr->status = DASD_CQR_FILLED;
1907         return cqr;
1908 }
1909
1910 /*
1911  * Perform Subsystem Function.
1912  * It is necessary to trigger CIO for channel revalidation since this
1913  * call might change behaviour of DASD devices.
1914  */
1915 static int
1916 dasd_eckd_psf_ssc(struct dasd_device *device, int enable_pav,
1917                   unsigned long flags)
1918 {
1919         struct dasd_ccw_req *cqr;
1920         int rc;
1921
1922         cqr = dasd_eckd_build_psf_ssc(device, enable_pav);
1923         if (IS_ERR(cqr))
1924                 return PTR_ERR(cqr);
1925
1926         /*
1927          * set flags e.g. turn on failfast, to prevent blocking
1928          * the calling function should handle failed requests
1929          */
1930         cqr->flags |= flags;
1931
1932         rc = dasd_sleep_on(cqr);
1933         if (!rc)
1934                 /* trigger CIO to reprobe devices */
1935                 css_schedule_reprobe();
1936         else if (cqr->intrc == -EAGAIN)
1937                 rc = -EAGAIN;
1938
1939         dasd_sfree_request(cqr, cqr->memdev);
1940         return rc;
1941 }
1942
1943 /*
1944  * Valide storage server of current device.
1945  */
1946 static int dasd_eckd_validate_server(struct dasd_device *device,
1947                                      unsigned long flags)
1948 {
1949         struct dasd_eckd_private *private = device->private;
1950         int enable_pav, rc;
1951
1952         if (private->uid.type == UA_BASE_PAV_ALIAS ||
1953             private->uid.type == UA_HYPER_PAV_ALIAS)
1954                 return 0;
1955         if (dasd_nopav || MACHINE_IS_VM)
1956                 enable_pav = 0;
1957         else
1958                 enable_pav = 1;
1959         rc = dasd_eckd_psf_ssc(device, enable_pav, flags);
1960
1961         /* may be requested feature is not available on server,
1962          * therefore just report error and go ahead */
1963         DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "PSF-SSC for SSID %04x "
1964                         "returned rc=%d", private->uid.ssid, rc);
1965         return rc;
1966 }
1967
1968 /*
1969  * worker to do a validate server in case of a lost pathgroup
1970  */
1971 static void dasd_eckd_do_validate_server(struct work_struct *work)
1972 {
1973         struct dasd_device *device = container_of(work, struct dasd_device,
1974                                                   kick_validate);
1975         unsigned long flags = 0;
1976
1977         set_bit(DASD_CQR_FLAGS_FAILFAST, &flags);
1978         if (dasd_eckd_validate_server(device, flags)
1979             == -EAGAIN) {
1980                 /* schedule worker again if failed */
1981                 schedule_work(&device->kick_validate);
1982                 return;
1983         }
1984
1985         dasd_put_device(device);
1986 }
1987
1988 static void dasd_eckd_kick_validate_server(struct dasd_device *device)
1989 {
1990         dasd_get_device(device);
1991         /* exit if device not online or in offline processing */
1992         if (test_bit(DASD_FLAG_OFFLINE, &device->flags) ||
1993            device->state < DASD_STATE_ONLINE) {
1994                 dasd_put_device(device);
1995                 return;
1996         }
1997         /* queue call to do_validate_server to the kernel event daemon. */
1998         if (!schedule_work(&device->kick_validate))
1999                 dasd_put_device(device);
2000 }
2001
2002 /*
2003  * return if the device is the copy relation primary if a copy relation is active
2004  */
2005 static int dasd_device_is_primary(struct dasd_device *device)
2006 {
2007         if (!device->copy)
2008                 return 1;
2009
2010         if (device->copy->active->device == device)
2011                 return 1;
2012
2013         return 0;
2014 }
2015
2016 static int dasd_eckd_alloc_block(struct dasd_device *device)
2017 {
2018         struct dasd_block *block;
2019         struct dasd_uid temp_uid;
2020
2021         if (!dasd_device_is_primary(device))
2022                 return 0;
2023
2024         dasd_eckd_get_uid(device, &temp_uid);
2025         if (temp_uid.type == UA_BASE_DEVICE) {
2026                 block = dasd_alloc_block();
2027                 if (IS_ERR(block)) {
2028                         DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
2029                                         "could not allocate dasd block structure");
2030                         return PTR_ERR(block);
2031                 }
2032                 device->block = block;
2033                 block->base = device;
2034         }
2035         return 0;
2036 }
2037
2038 static bool dasd_eckd_pprc_enabled(struct dasd_device *device)
2039 {
2040         struct dasd_eckd_private *private = device->private;
2041
2042         return private->rdc_data.facilities.PPRC_enabled;
2043 }
2044
2045 /*
2046  * Check device characteristics.
2047  * If the device is accessible using ECKD discipline, the device is enabled.
2048  */
2049 static int
2050 dasd_eckd_check_characteristics(struct dasd_device *device)
2051 {
2052         struct dasd_eckd_private *private = device->private;
2053         int rc, i;
2054         int readonly;
2055         unsigned long value;
2056
2057         /* setup work queue for validate server*/
2058         INIT_WORK(&device->kick_validate, dasd_eckd_do_validate_server);
2059         /* setup work queue for summary unit check */
2060         INIT_WORK(&device->suc_work, dasd_alias_handle_summary_unit_check);
2061
2062         if (!ccw_device_is_pathgroup(device->cdev)) {
2063                 dev_warn(&device->cdev->dev,
2064                          "A channel path group could not be established\n");
2065                 return -EIO;
2066         }
2067         if (!ccw_device_is_multipath(device->cdev)) {
2068                 dev_info(&device->cdev->dev,
2069                          "The DASD is not operating in multipath mode\n");
2070         }
2071         if (!private) {
2072                 private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
2073                 if (!private) {
2074                         dev_warn(&device->cdev->dev,
2075                                  "Allocating memory for private DASD data "
2076                                  "failed\n");
2077                         return -ENOMEM;
2078                 }
2079                 device->private = private;
2080         } else {
2081                 memset(private, 0, sizeof(*private));
2082         }
2083         /* Invalidate status of initial analysis. */
2084         private->init_cqr_status = -1;
2085         /* Set default cache operations. */
2086         private->attrib.operation = DASD_NORMAL_CACHE;
2087         private->attrib.nr_cyl = 0;
2088
2089         /* Read Configuration Data */
2090         rc = dasd_eckd_read_conf(device);
2091         if (rc)
2092                 goto out_err1;
2093
2094         /* set some default values */
2095         device->default_expires = DASD_EXPIRES;
2096         device->default_retries = DASD_RETRIES;
2097         device->path_thrhld = DASD_ECKD_PATH_THRHLD;
2098         device->path_interval = DASD_ECKD_PATH_INTERVAL;
2099         device->aq_timeouts = DASD_RETRIES_MAX;
2100
2101         if (private->conf.gneq) {
2102                 value = 1;
2103                 for (i = 0; i < private->conf.gneq->timeout.value; i++)
2104                         value = 10 * value;
2105                 value = value * private->conf.gneq->timeout.number;
2106                 /* do not accept useless values */
2107                 if (value != 0 && value <= DASD_EXPIRES_MAX)
2108                         device->default_expires = value;
2109         }
2110
2111         /* Read Device Characteristics */
2112         rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
2113                                          &private->rdc_data, 64);
2114         if (rc) {
2115                 DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
2116                                 "Read device characteristic failed, rc=%d", rc);
2117                 goto out_err1;
2118         }
2119
2120         /* setup PPRC for device from devmap */
2121         rc = dasd_devmap_set_device_copy_relation(device->cdev,
2122                                                   dasd_eckd_pprc_enabled(device));
2123         if (rc) {
2124                 DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
2125                                 "copy relation setup failed, rc=%d", rc);
2126                 goto out_err1;
2127         }
2128
2129         /* check if block device is needed and allocate in case */
2130         rc = dasd_eckd_alloc_block(device);
2131         if (rc)
2132                 goto out_err1;
2133
2134         /* register lcu with alias handling, enable PAV */
2135         rc = dasd_alias_make_device_known_to_lcu(device);
2136         if (rc)
2137                 goto out_err2;
2138
2139         dasd_eckd_validate_server(device, 0);
2140
2141         /* device may report different configuration data after LCU setup */
2142         rc = dasd_eckd_read_conf(device);
2143         if (rc)
2144                 goto out_err3;
2145
2146         dasd_eckd_read_fc_security(device);
2147         dasd_path_create_kobjects(device);
2148
2149         /* Read Feature Codes */
2150         dasd_eckd_read_features(device);
2151
2152         /* Read Volume Information */
2153         dasd_eckd_read_vol_info(device);
2154
2155         /* Read Extent Pool Information */
2156         dasd_eckd_read_ext_pool_info(device);
2157
2158         if ((device->features & DASD_FEATURE_USERAW) &&
2159             !(private->rdc_data.facilities.RT_in_LR)) {
2160                 dev_err(&device->cdev->dev, "The storage server does not "
2161                         "support raw-track access\n");
2162                 rc = -EINVAL;
2163                 goto out_err3;
2164         }
2165
2166         /* find the valid cylinder size */
2167         if (private->rdc_data.no_cyl == LV_COMPAT_CYL &&
2168             private->rdc_data.long_no_cyl)
2169                 private->real_cyl = private->rdc_data.long_no_cyl;
2170         else
2171                 private->real_cyl = private->rdc_data.no_cyl;
2172
2173         private->fcx_max_data = get_fcx_max_data(device);
2174
2175         readonly = dasd_device_is_ro(device);
2176         if (readonly)
2177                 set_bit(DASD_FLAG_DEVICE_RO, &device->flags);
2178
2179         dev_info(&device->cdev->dev, "New DASD %04X/%02X (CU %04X/%02X) "
2180                  "with %d cylinders, %d heads, %d sectors%s\n",
2181                  private->rdc_data.dev_type,
2182                  private->rdc_data.dev_model,
2183                  private->rdc_data.cu_type,
2184                  private->rdc_data.cu_model.model,
2185                  private->real_cyl,
2186                  private->rdc_data.trk_per_cyl,
2187                  private->rdc_data.sec_per_trk,
2188                  readonly ? ", read-only device" : "");
2189         return 0;
2190
2191 out_err3:
2192         dasd_alias_disconnect_device_from_lcu(device);
2193 out_err2:
2194         dasd_free_block(device->block);
2195         device->block = NULL;
2196 out_err1:
2197         dasd_eckd_clear_conf_data(device);
2198         dasd_path_remove_kobjects(device);
2199         kfree(device->private);
2200         device->private = NULL;
2201         return rc;
2202 }
2203
2204 static void dasd_eckd_uncheck_device(struct dasd_device *device)
2205 {
2206         struct dasd_eckd_private *private = device->private;
2207
2208         if (!private)
2209                 return;
2210
2211         dasd_alias_disconnect_device_from_lcu(device);
2212         private->conf.ned = NULL;
2213         private->conf.sneq = NULL;
2214         private->conf.vdsneq = NULL;
2215         private->conf.gneq = NULL;
2216         dasd_eckd_clear_conf_data(device);
2217         dasd_path_remove_kobjects(device);
2218 }
2219
/*
 * Build the initial-analysis channel program: read the record-count fields
 * of the first 4 records on track 0 and the first record on track 1 into
 * private->count_area, so the disk layout can be determined.
 */
static struct dasd_ccw_req *
dasd_eckd_analysis_ccw(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	struct eckd_count *count_data;
	struct LO_eckd_data *LO_data;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int cplength, datasize;
	int i;

	/* 8 CCWs: DE + LO + 4x read count + LO + read count */
	cplength = 8;
	datasize = sizeof(struct DE_eckd_data) + 2*sizeof(struct LO_eckd_data);
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device,
				   NULL);
	if (IS_ERR(cqr))
		return cqr;
	ccw = cqr->cpaddr;
	/* Define extent for the first 2 tracks. */
	define_extent(ccw++, cqr->data, 0, 1,
		      DASD_ECKD_CCW_READ_COUNT, device, 0);
	LO_data = cqr->data + sizeof(struct DE_eckd_data);
	/* Locate record for the first 4 records on track 0. */
	ccw[-1].flags |= CCW_FLAG_CC;	/* chain previous CCW to this one */
	locate_record(ccw++, LO_data++, 0, 0, 4,
		      DASD_ECKD_CCW_READ_COUNT, device, 0);

	count_data = private->count_area;
	for (i = 0; i < 4; i++) {
		ccw[-1].flags |= CCW_FLAG_CC;
		ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
		ccw->flags = 0;
		ccw->count = 8;
		ccw->cda = (__u32)virt_to_phys(count_data);
		ccw++;
		count_data++;
	}

	/* Locate record for the first record on track 1. */
	ccw[-1].flags |= CCW_FLAG_CC;
	locate_record(ccw++, LO_data++, 1, 0, 1,
		      DASD_ECKD_CCW_READ_COUNT, device, 0);
	/* Read count ccw. */
	ccw[-1].flags |= CCW_FLAG_CC;
	ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
	ccw->flags = 0;
	ccw->count = 8;
	ccw->cda = (__u32)virt_to_phys(count_data);

	cqr->block = NULL;
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->retries = 255;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	/* Set flags to suppress output for expected errors */
	set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);

	return cqr;
}
2280
2281 /* differentiate between 'no record found' and any other error */
2282 static int dasd_eckd_analysis_evaluation(struct dasd_ccw_req *init_cqr)
2283 {
2284         char *sense;
2285         if (init_cqr->status == DASD_CQR_DONE)
2286                 return INIT_CQR_OK;
2287         else if (init_cqr->status == DASD_CQR_NEED_ERP ||
2288                  init_cqr->status == DASD_CQR_FAILED) {
2289                 sense = dasd_get_sense(&init_cqr->irb);
2290                 if (sense && (sense[1] & SNS1_NO_REC_FOUND))
2291                         return INIT_CQR_UNFORMATTED;
2292                 else
2293                         return INIT_CQR_ERROR;
2294         } else
2295                 return INIT_CQR_ERROR;
2296 }
2297
2298 /*
2299  * This is the callback function for the init_analysis cqr. It saves
2300  * the status of the initial analysis ccw before it frees it and kicks
2301  * the device to continue the startup sequence. This will call
2302  * dasd_eckd_do_analysis again (if the devices has not been marked
2303  * for deletion in the meantime).
2304  */
2305 static void dasd_eckd_analysis_callback(struct dasd_ccw_req *init_cqr,
2306                                         void *data)
2307 {
2308         struct dasd_device *device = init_cqr->startdev;
2309         struct dasd_eckd_private *private = device->private;
2310
2311         private->init_cqr_status = dasd_eckd_analysis_evaluation(init_cqr);
2312         dasd_sfree_request(init_cqr, device);
2313         dasd_kick_device(device);
2314 }
2315
2316 static int dasd_eckd_start_analysis(struct dasd_block *block)
2317 {
2318         struct dasd_ccw_req *init_cqr;
2319
2320         init_cqr = dasd_eckd_analysis_ccw(block->base);
2321         if (IS_ERR(init_cqr))
2322                 return PTR_ERR(init_cqr);
2323         init_cqr->callback = dasd_eckd_analysis_callback;
2324         init_cqr->callback_data = NULL;
2325         init_cqr->expires = 5*HZ;
2326         /* first try without ERP, so we can later handle unformatted
2327          * devices as special case
2328          */
2329         clear_bit(DASD_CQR_FLAGS_USE_ERP, &init_cqr->flags);
2330         init_cqr->retries = 0;
2331         dasd_add_request_head(init_cqr);
2332         return -EAGAIN;
2333 }
2334
2335 static int dasd_eckd_end_analysis(struct dasd_block *block)
2336 {
2337         struct dasd_device *device = block->base;
2338         struct dasd_eckd_private *private = device->private;
2339         struct eckd_count *count_area;
2340         unsigned int sb, blk_per_trk;
2341         int status, i;
2342         struct dasd_ccw_req *init_cqr;
2343
2344         status = private->init_cqr_status;
2345         private->init_cqr_status = -1;
2346         if (status == INIT_CQR_ERROR) {
2347                 /* try again, this time with full ERP */
2348                 init_cqr = dasd_eckd_analysis_ccw(device);
2349                 dasd_sleep_on(init_cqr);
2350                 status = dasd_eckd_analysis_evaluation(init_cqr);
2351                 dasd_sfree_request(init_cqr, device);
2352         }
2353
2354         if (device->features & DASD_FEATURE_USERAW) {
2355                 block->bp_block = DASD_RAW_BLOCKSIZE;
2356                 blk_per_trk = DASD_RAW_BLOCK_PER_TRACK;
2357                 block->s2b_shift = 3;
2358                 goto raw;
2359         }
2360
2361         if (status == INIT_CQR_UNFORMATTED) {
2362                 dev_warn(&device->cdev->dev, "The DASD is not formatted\n");
2363                 return -EMEDIUMTYPE;
2364         } else if (status == INIT_CQR_ERROR) {
2365                 dev_err(&device->cdev->dev,
2366                         "Detecting the DASD disk layout failed because "
2367                         "of an I/O error\n");
2368                 return -EIO;
2369         }
2370
2371         private->uses_cdl = 1;
2372         /* Check Track 0 for Compatible Disk Layout */
2373         count_area = NULL;
2374         for (i = 0; i < 3; i++) {
2375                 if (private->count_area[i].kl != 4 ||
2376                     private->count_area[i].dl != dasd_eckd_cdl_reclen(i) - 4 ||
2377                     private->count_area[i].cyl != 0 ||
2378                     private->count_area[i].head != count_area_head[i] ||
2379                     private->count_area[i].record != count_area_rec[i]) {
2380                         private->uses_cdl = 0;
2381                         break;
2382                 }
2383         }
2384         if (i == 3)
2385                 count_area = &private->count_area[3];
2386
2387         if (private->uses_cdl == 0) {
2388                 for (i = 0; i < 5; i++) {
2389                         if ((private->count_area[i].kl != 0) ||
2390                             (private->count_area[i].dl !=
2391                              private->count_area[0].dl) ||
2392                             private->count_area[i].cyl !=  0 ||
2393                             private->count_area[i].head != count_area_head[i] ||
2394                             private->count_area[i].record != count_area_rec[i])
2395                                 break;
2396                 }
2397                 if (i == 5)
2398                         count_area = &private->count_area[0];
2399         } else {
2400                 if (private->count_area[3].record == 1)
2401                         dev_warn(&device->cdev->dev,
2402                                  "Track 0 has no records following the VTOC\n");
2403         }
2404
2405         if (count_area != NULL && count_area->kl == 0) {
2406                 /* we found notthing violating our disk layout */
2407                 if (dasd_check_blocksize(count_area->dl) == 0)
2408                         block->bp_block = count_area->dl;
2409         }
2410         if (block->bp_block == 0) {
2411                 dev_warn(&device->cdev->dev,
2412                          "The disk layout of the DASD is not supported\n");
2413                 return -EMEDIUMTYPE;
2414         }
2415         block->s2b_shift = 0;   /* bits to shift 512 to get a block */
2416         for (sb = 512; sb < block->bp_block; sb = sb << 1)
2417                 block->s2b_shift++;
2418
2419         blk_per_trk = recs_per_track(&private->rdc_data, 0, block->bp_block);
2420
2421 raw:
2422         block->blocks = ((unsigned long) private->real_cyl *
2423                           private->rdc_data.trk_per_cyl *
2424                           blk_per_trk);
2425
2426         dev_info(&device->cdev->dev,
2427                  "DASD with %u KB/block, %lu KB total size, %u KB/track, "
2428                  "%s\n", (block->bp_block >> 10),
2429                  (((unsigned long) private->real_cyl *
2430                    private->rdc_data.trk_per_cyl *
2431                    blk_per_trk * (block->bp_block >> 9)) >> 1),
2432                  ((blk_per_trk * block->bp_block) >> 10),
2433                  private->uses_cdl ?
2434                  "compatible disk layout" : "linux disk layout");
2435
2436         return 0;
2437 }
2438
2439 static int dasd_eckd_do_analysis(struct dasd_block *block)
2440 {
2441         struct dasd_eckd_private *private = block->base->private;
2442
2443         if (private->init_cqr_status < 0)
2444                 return dasd_eckd_start_analysis(block);
2445         else
2446                 return dasd_eckd_end_analysis(block);
2447 }
2448
/*
 * Transition a device from basic to ready state: register it with the
 * PAV alias management. Drop the stray semicolon after the function
 * body (it formed an empty file-scope declaration).
 */
static int dasd_eckd_basic_to_ready(struct dasd_device *device)
{
	return dasd_alias_add_device(device);
}
2453
2454 static int dasd_eckd_online_to_ready(struct dasd_device *device)
2455 {
2456         if (cancel_work_sync(&device->reload_device))
2457                 dasd_put_device(device);
2458         if (cancel_work_sync(&device->kick_validate))
2459                 dasd_put_device(device);
2460
2461         return 0;
2462 };
2463
/*
 * Transition a device from basic to known state: deregister it from
 * the PAV alias management. Stray trailing semicolon after the
 * function body removed (empty file-scope declaration).
 */
static int dasd_eckd_basic_to_known(struct dasd_device *device)
{
	return dasd_alias_remove_device(device);
}
2468
2469 static int
2470 dasd_eckd_fill_geometry(struct dasd_block *block, struct hd_geometry *geo)
2471 {
2472         struct dasd_eckd_private *private = block->base->private;
2473
2474         if (dasd_check_blocksize(block->bp_block) == 0) {
2475                 geo->sectors = recs_per_track(&private->rdc_data,
2476                                               0, block->bp_block);
2477         }
2478         geo->cylinders = private->rdc_data.no_cyl;
2479         geo->heads = private->rdc_data.trk_per_cyl;
2480         return 0;
2481 }
2482
2483 /*
2484  * Build the TCW request for the format check
2485  */
2486 static struct dasd_ccw_req *
2487 dasd_eckd_build_check_tcw(struct dasd_device *base, struct format_data_t *fdata,
2488                           int enable_pav, struct eckd_count *fmt_buffer,
2489                           int rpt)
2490 {
2491         struct dasd_eckd_private *start_priv;
2492         struct dasd_device *startdev = NULL;
2493         struct tidaw *last_tidaw = NULL;
2494         struct dasd_ccw_req *cqr;
2495         struct itcw *itcw;
2496         int itcw_size;
2497         int count;
2498         int rc;
2499         int i;
2500
2501         if (enable_pav)
2502                 startdev = dasd_alias_get_start_dev(base);
2503
2504         if (!startdev)
2505                 startdev = base;
2506
2507         start_priv = startdev->private;
2508
2509         count = rpt * (fdata->stop_unit - fdata->start_unit + 1);
2510
2511         /*
2512          * we're adding 'count' amount of tidaw to the itcw.
2513          * calculate the corresponding itcw_size
2514          */
2515         itcw_size = itcw_calc_size(0, count, 0);
2516
2517         cqr = dasd_fmalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev);
2518         if (IS_ERR(cqr))
2519                 return cqr;
2520
2521         start_priv->count++;
2522
2523         itcw = itcw_init(cqr->data, itcw_size, ITCW_OP_READ, 0, count, 0);
2524         if (IS_ERR(itcw)) {
2525                 rc = -EINVAL;
2526                 goto out_err;
2527         }
2528
2529         cqr->cpaddr = itcw_get_tcw(itcw);
2530         rc = prepare_itcw(itcw, fdata->start_unit, fdata->stop_unit,
2531                           DASD_ECKD_CCW_READ_COUNT_MT, base, startdev, 0, count,
2532                           sizeof(struct eckd_count),
2533                           count * sizeof(struct eckd_count), 0, rpt);
2534         if (rc)
2535                 goto out_err;
2536
2537         for (i = 0; i < count; i++) {
2538                 last_tidaw = itcw_add_tidaw(itcw, 0, fmt_buffer++,
2539                                             sizeof(struct eckd_count));
2540                 if (IS_ERR(last_tidaw)) {
2541                         rc = -EINVAL;
2542                         goto out_err;
2543                 }
2544         }
2545
2546         last_tidaw->flags |= TIDAW_FLAGS_LAST;
2547         itcw_finalize(itcw);
2548
2549         cqr->cpmode = 1;
2550         cqr->startdev = startdev;
2551         cqr->memdev = startdev;
2552         cqr->basedev = base;
2553         cqr->retries = startdev->default_retries;
2554         cqr->expires = startdev->default_expires * HZ;
2555         cqr->buildclk = get_tod_clock();
2556         cqr->status = DASD_CQR_FILLED;
2557         /* Set flags to suppress output for expected errors */
2558         set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
2559         set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags);
2560
2561         return cqr;
2562
2563 out_err:
2564         dasd_sfree_request(cqr, startdev);
2565
2566         return ERR_PTR(rc);
2567 }
2568
2569 /*
2570  * Build the CCW request for the format check
2571  */
2572 static struct dasd_ccw_req *
2573 dasd_eckd_build_check(struct dasd_device *base, struct format_data_t *fdata,
2574                       int enable_pav, struct eckd_count *fmt_buffer, int rpt)
2575 {
2576         struct dasd_eckd_private *start_priv;
2577         struct dasd_eckd_private *base_priv;
2578         struct dasd_device *startdev = NULL;
2579         struct dasd_ccw_req *cqr;
2580         struct ccw1 *ccw;
2581         void *data;
2582         int cplength, datasize;
2583         int use_prefix;
2584         int count;
2585         int i;
2586
2587         if (enable_pav)
2588                 startdev = dasd_alias_get_start_dev(base);
2589
2590         if (!startdev)
2591                 startdev = base;
2592
2593         start_priv = startdev->private;
2594         base_priv = base->private;
2595
2596         count = rpt * (fdata->stop_unit - fdata->start_unit + 1);
2597
2598         use_prefix = base_priv->features.feature[8] & 0x01;
2599
2600         if (use_prefix) {
2601                 cplength = 1;
2602                 datasize = sizeof(struct PFX_eckd_data);
2603         } else {
2604                 cplength = 2;
2605                 datasize = sizeof(struct DE_eckd_data) +
2606                         sizeof(struct LO_eckd_data);
2607         }
2608         cplength += count;
2609
2610         cqr = dasd_fmalloc_request(DASD_ECKD_MAGIC, cplength, datasize, startdev);
2611         if (IS_ERR(cqr))
2612                 return cqr;
2613
2614         start_priv->count++;
2615         data = cqr->data;
2616         ccw = cqr->cpaddr;
2617
2618         if (use_prefix) {
2619                 prefix_LRE(ccw++, data, fdata->start_unit, fdata->stop_unit,
2620                            DASD_ECKD_CCW_READ_COUNT, base, startdev, 1, 0,
2621                            count, 0, 0);
2622         } else {
2623                 define_extent(ccw++, data, fdata->start_unit, fdata->stop_unit,
2624                               DASD_ECKD_CCW_READ_COUNT, startdev, 0);
2625
2626                 data += sizeof(struct DE_eckd_data);
2627                 ccw[-1].flags |= CCW_FLAG_CC;
2628
2629                 locate_record(ccw++, data, fdata->start_unit, 0, count,
2630                               DASD_ECKD_CCW_READ_COUNT, base, 0);
2631         }
2632
2633         for (i = 0; i < count; i++) {
2634                 ccw[-1].flags |= CCW_FLAG_CC;
2635                 ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
2636                 ccw->flags = CCW_FLAG_SLI;
2637                 ccw->count = 8;
2638                 ccw->cda = (__u32)virt_to_phys(fmt_buffer);
2639                 ccw++;
2640                 fmt_buffer++;
2641         }
2642
2643         cqr->startdev = startdev;
2644         cqr->memdev = startdev;
2645         cqr->basedev = base;
2646         cqr->retries = DASD_RETRIES;
2647         cqr->expires = startdev->default_expires * HZ;
2648         cqr->buildclk = get_tod_clock();
2649         cqr->status = DASD_CQR_FILLED;
2650         /* Set flags to suppress output for expected errors */
2651         set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
2652
2653         return cqr;
2654 }
2655
2656 static struct dasd_ccw_req *
2657 dasd_eckd_build_format(struct dasd_device *base, struct dasd_device *startdev,
2658                        struct format_data_t *fdata, int enable_pav)
2659 {
2660         struct dasd_eckd_private *base_priv;
2661         struct dasd_eckd_private *start_priv;
2662         struct dasd_ccw_req *fcp;
2663         struct eckd_count *ect;
2664         struct ch_t address;
2665         struct ccw1 *ccw;
2666         void *data;
2667         int rpt;
2668         int cplength, datasize;
2669         int i, j;
2670         int intensity = 0;
2671         int r0_perm;
2672         int nr_tracks;
2673         int use_prefix;
2674
2675         if (enable_pav)
2676                 startdev = dasd_alias_get_start_dev(base);
2677
2678         if (!startdev)
2679                 startdev = base;
2680
2681         start_priv = startdev->private;
2682         base_priv = base->private;
2683
2684         rpt = recs_per_track(&base_priv->rdc_data, 0, fdata->blksize);
2685
2686         nr_tracks = fdata->stop_unit - fdata->start_unit + 1;
2687
2688         /*
2689          * fdata->intensity is a bit string that tells us what to do:
2690          *   Bit 0: write record zero
2691          *   Bit 1: write home address, currently not supported
2692          *   Bit 2: invalidate tracks
2693          *   Bit 3: use OS/390 compatible disk layout (cdl)
2694          *   Bit 4: do not allow storage subsystem to modify record zero
2695          * Only some bit combinations do make sense.
2696          */
2697         if (fdata->intensity & 0x10) {
2698                 r0_perm = 0;
2699                 intensity = fdata->intensity & ~0x10;
2700         } else {
2701                 r0_perm = 1;
2702                 intensity = fdata->intensity;
2703         }
2704
2705         use_prefix = base_priv->features.feature[8] & 0x01;
2706
2707         switch (intensity) {
2708         case 0x00:      /* Normal format */
2709         case 0x08:      /* Normal format, use cdl. */
2710                 cplength = 2 + (rpt*nr_tracks);
2711                 if (use_prefix)
2712                         datasize = sizeof(struct PFX_eckd_data) +
2713                                 sizeof(struct LO_eckd_data) +
2714                                 rpt * nr_tracks * sizeof(struct eckd_count);
2715                 else
2716                         datasize = sizeof(struct DE_eckd_data) +
2717                                 sizeof(struct LO_eckd_data) +
2718                                 rpt * nr_tracks * sizeof(struct eckd_count);
2719                 break;
2720         case 0x01:      /* Write record zero and format track. */
2721         case 0x09:      /* Write record zero and format track, use cdl. */
2722                 cplength = 2 + rpt * nr_tracks;
2723                 if (use_prefix)
2724                         datasize = sizeof(struct PFX_eckd_data) +
2725                                 sizeof(struct LO_eckd_data) +
2726                                 sizeof(struct eckd_count) +
2727                                 rpt * nr_tracks * sizeof(struct eckd_count);
2728                 else
2729                         datasize = sizeof(struct DE_eckd_data) +
2730                                 sizeof(struct LO_eckd_data) +
2731                                 sizeof(struct eckd_count) +
2732                                 rpt * nr_tracks * sizeof(struct eckd_count);
2733                 break;
2734         case 0x04:      /* Invalidate track. */
2735         case 0x0c:      /* Invalidate track, use cdl. */
2736                 cplength = 3;
2737                 if (use_prefix)
2738                         datasize = sizeof(struct PFX_eckd_data) +
2739                                 sizeof(struct LO_eckd_data) +
2740                                 sizeof(struct eckd_count);
2741                 else
2742                         datasize = sizeof(struct DE_eckd_data) +
2743                                 sizeof(struct LO_eckd_data) +
2744                                 sizeof(struct eckd_count);
2745                 break;
2746         default:
2747                 dev_warn(&startdev->cdev->dev,
2748                          "An I/O control call used incorrect flags 0x%x\n",
2749                          fdata->intensity);
2750                 return ERR_PTR(-EINVAL);
2751         }
2752
2753         fcp = dasd_fmalloc_request(DASD_ECKD_MAGIC, cplength, datasize, startdev);
2754         if (IS_ERR(fcp))
2755                 return fcp;
2756
2757         start_priv->count++;
2758         data = fcp->data;
2759         ccw = fcp->cpaddr;
2760
2761         switch (intensity & ~0x08) {
2762         case 0x00: /* Normal format. */
2763                 if (use_prefix) {
2764                         prefix(ccw++, (struct PFX_eckd_data *) data,
2765                                fdata->start_unit, fdata->stop_unit,
2766                                DASD_ECKD_CCW_WRITE_CKD, base, startdev);
2767                         /* grant subsystem permission to format R0 */
2768                         if (r0_perm)
2769                                 ((struct PFX_eckd_data *)data)
2770                                         ->define_extent.ga_extended |= 0x04;
2771                         data += sizeof(struct PFX_eckd_data);
2772                 } else {
2773                         define_extent(ccw++, (struct DE_eckd_data *) data,
2774                                       fdata->start_unit, fdata->stop_unit,
2775                                       DASD_ECKD_CCW_WRITE_CKD, startdev, 0);
2776                         /* grant subsystem permission to format R0 */
2777                         if (r0_perm)
2778                                 ((struct DE_eckd_data *) data)
2779                                         ->ga_extended |= 0x04;
2780                         data += sizeof(struct DE_eckd_data);
2781                 }
2782                 ccw[-1].flags |= CCW_FLAG_CC;
2783                 locate_record(ccw++, (struct LO_eckd_data *) data,
2784                               fdata->start_unit, 0, rpt*nr_tracks,
2785                               DASD_ECKD_CCW_WRITE_CKD, base,
2786                               fdata->blksize);
2787                 data += sizeof(struct LO_eckd_data);
2788                 break;
2789         case 0x01: /* Write record zero + format track. */
2790                 if (use_prefix) {
2791                         prefix(ccw++, (struct PFX_eckd_data *) data,
2792                                fdata->start_unit, fdata->stop_unit,
2793                                DASD_ECKD_CCW_WRITE_RECORD_ZERO,
2794                                base, startdev);
2795                         data += sizeof(struct PFX_eckd_data);
2796                 } else {
2797                         define_extent(ccw++, (struct DE_eckd_data *) data,
2798                                fdata->start_unit, fdata->stop_unit,
2799                                DASD_ECKD_CCW_WRITE_RECORD_ZERO, startdev, 0);
2800                         data += sizeof(struct DE_eckd_data);
2801                 }
2802                 ccw[-1].flags |= CCW_FLAG_CC;
2803                 locate_record(ccw++, (struct LO_eckd_data *) data,
2804                               fdata->start_unit, 0, rpt * nr_tracks + 1,
2805                               DASD_ECKD_CCW_WRITE_RECORD_ZERO, base,
2806                               base->block->bp_block);
2807                 data += sizeof(struct LO_eckd_data);
2808                 break;
2809         case 0x04: /* Invalidate track. */
2810                 if (use_prefix) {
2811                         prefix(ccw++, (struct PFX_eckd_data *) data,
2812                                fdata->start_unit, fdata->stop_unit,
2813                                DASD_ECKD_CCW_WRITE_CKD, base, startdev);
2814                         data += sizeof(struct PFX_eckd_data);
2815                 } else {
2816                         define_extent(ccw++, (struct DE_eckd_data *) data,
2817                                fdata->start_unit, fdata->stop_unit,
2818                                DASD_ECKD_CCW_WRITE_CKD, startdev, 0);
2819                         data += sizeof(struct DE_eckd_data);
2820                 }
2821                 ccw[-1].flags |= CCW_FLAG_CC;
2822                 locate_record(ccw++, (struct LO_eckd_data *) data,
2823                               fdata->start_unit, 0, 1,
2824                               DASD_ECKD_CCW_WRITE_CKD, base, 8);
2825                 data += sizeof(struct LO_eckd_data);
2826                 break;
2827         }
2828
2829         for (j = 0; j < nr_tracks; j++) {
2830                 /* calculate cylinder and head for the current track */
2831                 set_ch_t(&address,
2832                          (fdata->start_unit + j) /
2833                          base_priv->rdc_data.trk_per_cyl,
2834                          (fdata->start_unit + j) %
2835                          base_priv->rdc_data.trk_per_cyl);
2836                 if (intensity & 0x01) { /* write record zero */
2837                         ect = (struct eckd_count *) data;
2838                         data += sizeof(struct eckd_count);
2839                         ect->cyl = address.cyl;
2840                         ect->head = address.head;
2841                         ect->record = 0;
2842                         ect->kl = 0;
2843                         ect->dl = 8;
2844                         ccw[-1].flags |= CCW_FLAG_CC;
2845                         ccw->cmd_code = DASD_ECKD_CCW_WRITE_RECORD_ZERO;
2846                         ccw->flags = CCW_FLAG_SLI;
2847                         ccw->count = 8;
2848                         ccw->cda = (__u32)virt_to_phys(ect);
2849                         ccw++;
2850                 }
2851                 if ((intensity & ~0x08) & 0x04) {       /* erase track */
2852                         ect = (struct eckd_count *) data;
2853                         data += sizeof(struct eckd_count);
2854                         ect->cyl = address.cyl;
2855                         ect->head = address.head;
2856                         ect->record = 1;
2857                         ect->kl = 0;
2858                         ect->dl = 0;
2859                         ccw[-1].flags |= CCW_FLAG_CC;
2860                         ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD;
2861                         ccw->flags = CCW_FLAG_SLI;
2862                         ccw->count = 8;
2863                         ccw->cda = (__u32)virt_to_phys(ect);
2864                 } else {                /* write remaining records */
2865                         for (i = 0; i < rpt; i++) {
2866                                 ect = (struct eckd_count *) data;
2867                                 data += sizeof(struct eckd_count);
2868                                 ect->cyl = address.cyl;
2869                                 ect->head = address.head;
2870                                 ect->record = i + 1;
2871                                 ect->kl = 0;
2872                                 ect->dl = fdata->blksize;
2873                                 /*
2874                                  * Check for special tracks 0-1
2875                                  * when formatting CDL
2876                                  */
2877                                 if ((intensity & 0x08) &&
2878                                     address.cyl == 0 && address.head == 0) {
2879                                         if (i < 3) {
2880                                                 ect->kl = 4;
2881                                                 ect->dl = sizes_trk0[i] - 4;
2882                                         }
2883                                 }
2884                                 if ((intensity & 0x08) &&
2885                                     address.cyl == 0 && address.head == 1) {
2886                                         ect->kl = 44;
2887                                         ect->dl = LABEL_SIZE - 44;
2888                                 }
2889                                 ccw[-1].flags |= CCW_FLAG_CC;
2890                                 if (i != 0 || j == 0)
2891                                         ccw->cmd_code =
2892                                                 DASD_ECKD_CCW_WRITE_CKD;
2893                                 else
2894                                         ccw->cmd_code =
2895                                                 DASD_ECKD_CCW_WRITE_CKD_MT;
2896                                 ccw->flags = CCW_FLAG_SLI;
2897                                 ccw->count = 8;
2898                                 ccw->cda = (__u32)virt_to_phys(ect);
2899                                 ccw++;
2900                         }
2901                 }
2902         }
2903
2904         fcp->startdev = startdev;
2905         fcp->memdev = startdev;
2906         fcp->basedev = base;
2907         fcp->retries = 256;
2908         fcp->expires = startdev->default_expires * HZ;
2909         fcp->buildclk = get_tod_clock();
2910         fcp->status = DASD_CQR_FILLED;
2911
2912         return fcp;
2913 }
2914
2915 /*
2916  * Wrapper function to build a CCW request depending on input data
2917  */
2918 static struct dasd_ccw_req *
2919 dasd_eckd_format_build_ccw_req(struct dasd_device *base,
2920                                struct format_data_t *fdata, int enable_pav,
2921                                int tpm, struct eckd_count *fmt_buffer, int rpt)
2922 {
2923         struct dasd_ccw_req *ccw_req;
2924
2925         if (!fmt_buffer) {
2926                 ccw_req = dasd_eckd_build_format(base, NULL, fdata, enable_pav);
2927         } else {
2928                 if (tpm)
2929                         ccw_req = dasd_eckd_build_check_tcw(base, fdata,
2930                                                             enable_pav,
2931                                                             fmt_buffer, rpt);
2932                 else
2933                         ccw_req = dasd_eckd_build_check(base, fdata, enable_pav,
2934                                                         fmt_buffer, rpt);
2935         }
2936
2937         return ccw_req;
2938 }
2939
2940 /*
2941  * Sanity checks on format_data
2942  */
2943 static int dasd_eckd_format_sanity_checks(struct dasd_device *base,
2944                                           struct format_data_t *fdata)
2945 {
2946         struct dasd_eckd_private *private = base->private;
2947
2948         if (fdata->start_unit >=
2949             (private->real_cyl * private->rdc_data.trk_per_cyl)) {
2950                 dev_warn(&base->cdev->dev,
2951                          "Start track number %u used in formatting is too big\n",
2952                          fdata->start_unit);
2953                 return -EINVAL;
2954         }
2955         if (fdata->stop_unit >=
2956             (private->real_cyl * private->rdc_data.trk_per_cyl)) {
2957                 dev_warn(&base->cdev->dev,
2958                          "Stop track number %u used in formatting is too big\n",
2959                          fdata->stop_unit);
2960                 return -EINVAL;
2961         }
2962         if (fdata->start_unit > fdata->stop_unit) {
2963                 dev_warn(&base->cdev->dev,
2964                          "Start track %u used in formatting exceeds end track\n",
2965                          fdata->start_unit);
2966                 return -EINVAL;
2967         }
2968         if (dasd_check_blocksize(fdata->blksize) != 0) {
2969                 dev_warn(&base->cdev->dev,
2970                          "The DASD cannot be formatted with block size %u\n",
2971                          fdata->blksize);
2972                 return -EINVAL;
2973         }
2974         return 0;
2975 }
2976
2977 /*
2978  * This function will process format_data originally coming from an IOCTL
2979  */
2980 static int dasd_eckd_format_process_data(struct dasd_device *base,
2981                                          struct format_data_t *fdata,
2982                                          int enable_pav, int tpm,
2983                                          struct eckd_count *fmt_buffer, int rpt,
2984                                          struct irb *irb)
2985 {
2986         struct dasd_eckd_private *private = base->private;
2987         struct dasd_ccw_req *cqr, *n;
2988         struct list_head format_queue;
2989         struct dasd_device *device;
2990         char *sense = NULL;
2991         int old_start, old_stop, format_step;
2992         int step, retry;
2993         int rc;
2994
2995         rc = dasd_eckd_format_sanity_checks(base, fdata);
2996         if (rc)
2997                 return rc;
2998
2999         INIT_LIST_HEAD(&format_queue);
3000
3001         old_start = fdata->start_unit;
3002         old_stop = fdata->stop_unit;
3003
3004         if (!tpm && fmt_buffer != NULL) {
3005                 /* Command Mode / Format Check */
3006                 format_step = 1;
3007         } else if (tpm && fmt_buffer != NULL) {
3008                 /* Transport Mode / Format Check */
3009                 format_step = DASD_CQR_MAX_CCW / rpt;
3010         } else {
3011                 /* Normal Formatting */
3012                 format_step = DASD_CQR_MAX_CCW /
3013                         recs_per_track(&private->rdc_data, 0, fdata->blksize);
3014         }
3015
3016         do {
3017                 retry = 0;
3018                 while (fdata->start_unit <= old_stop) {
3019                         step = fdata->stop_unit - fdata->start_unit + 1;
3020                         if (step > format_step) {
3021                                 fdata->stop_unit =
3022                                         fdata->start_unit + format_step - 1;
3023                         }
3024
3025                         cqr = dasd_eckd_format_build_ccw_req(base, fdata,
3026                                                              enable_pav, tpm,
3027                                                              fmt_buffer, rpt);
3028                         if (IS_ERR(cqr)) {
3029                                 rc = PTR_ERR(cqr);
3030                                 if (rc == -ENOMEM) {
3031                                         if (list_empty(&format_queue))
3032                                                 goto out;
3033                                         /*
3034                                          * not enough memory available, start
3035                                          * requests retry after first requests
3036                                          * were finished
3037                                          */
3038                                         retry = 1;
3039                                         break;
3040                                 }
3041                                 goto out_err;
3042                         }
3043                         list_add_tail(&cqr->blocklist, &format_queue);
3044
3045                         if (fmt_buffer) {
3046                                 step = fdata->stop_unit - fdata->start_unit + 1;
3047                                 fmt_buffer += rpt * step;
3048                         }
3049                         fdata->start_unit = fdata->stop_unit + 1;
3050                         fdata->stop_unit = old_stop;
3051                 }
3052
3053                 rc = dasd_sleep_on_queue(&format_queue);
3054
3055 out_err:
3056                 list_for_each_entry_safe(cqr, n, &format_queue, blocklist) {
3057                         device = cqr->startdev;
3058                         private = device->private;
3059
3060                         if (cqr->status == DASD_CQR_FAILED) {
3061                                 /*
3062                                  * Only get sense data if called by format
3063                                  * check
3064                                  */
3065                                 if (fmt_buffer && irb) {
3066                                         sense = dasd_get_sense(&cqr->irb);
3067                                         memcpy(irb, &cqr->irb, sizeof(*irb));
3068                                 }
3069                                 rc = -EIO;
3070                         }
3071                         list_del_init(&cqr->blocklist);
3072                         dasd_ffree_request(cqr, device);
3073                         private->count--;
3074                 }
3075
3076                 if (rc && rc != -EIO)
3077                         goto out;
3078                 if (rc == -EIO) {
3079                         /*
3080                          * In case fewer than the expected records are on the
3081                          * track, we will most likely get a 'No Record Found'
3082                          * error (in command mode) or a 'File Protected' error
3083                          * (in transport mode). Those particular cases shouldn't
3084                          * pass the -EIO to the IOCTL, therefore reset the rc
3085                          * and continue.
3086                          */
3087                         if (sense &&
3088                             (sense[1] & SNS1_NO_REC_FOUND ||
3089                              sense[1] & SNS1_FILE_PROTECTED))
3090                                 retry = 1;
3091                         else
3092                                 goto out;
3093                 }
3094
3095         } while (retry);
3096
3097 out:
3098         fdata->start_unit = old_start;
3099         fdata->stop_unit = old_stop;
3100
3101         return rc;
3102 }
3103
3104 static int dasd_eckd_format_device(struct dasd_device *base,
3105                                    struct format_data_t *fdata, int enable_pav)
3106 {
3107         return dasd_eckd_format_process_data(base, fdata, enable_pav, 0, NULL,
3108                                              0, NULL);
3109 }
3110
3111 static bool test_and_set_format_track(struct dasd_format_entry *to_format,
3112                                       struct dasd_ccw_req *cqr)
3113 {
3114         struct dasd_block *block = cqr->block;
3115         struct dasd_format_entry *format;
3116         unsigned long flags;
3117         bool rc = false;
3118
3119         spin_lock_irqsave(&block->format_lock, flags);
3120         if (cqr->trkcount != atomic_read(&block->trkcount)) {
3121                 /*
3122                  * The number of formatted tracks has changed after request
3123                  * start and we can not tell if the current track was involved.
3124                  * To avoid data corruption treat it as if the current track is
3125                  * involved
3126                  */
3127                 rc = true;
3128                 goto out;
3129         }
3130         list_for_each_entry(format, &block->format_list, list) {
3131                 if (format->track == to_format->track) {
3132                         rc = true;
3133                         goto out;
3134                 }
3135         }
3136         list_add_tail(&to_format->list, &block->format_list);
3137
3138 out:
3139         spin_unlock_irqrestore(&block->format_lock, flags);
3140         return rc;
3141 }
3142
3143 static void clear_format_track(struct dasd_format_entry *format,
3144                               struct dasd_block *block)
3145 {
3146         unsigned long flags;
3147
3148         spin_lock_irqsave(&block->format_lock, flags);
3149         atomic_inc(&block->trkcount);
3150         list_del_init(&format->list);
3151         spin_unlock_irqrestore(&block->format_lock, flags);
3152 }
3153
3154 /*
3155  * Callback function to free ESE format requests.
3156  */
3157 static void dasd_eckd_ese_format_cb(struct dasd_ccw_req *cqr, void *data)
3158 {
3159         struct dasd_device *device = cqr->startdev;
3160         struct dasd_eckd_private *private = device->private;
3161         struct dasd_format_entry *format = data;
3162
3163         clear_format_track(format, cqr->basedev->block);
3164         private->count--;
3165         dasd_ffree_request(cqr, device);
3166 }
3167
3168 static struct dasd_ccw_req *
3169 dasd_eckd_ese_format(struct dasd_device *startdev, struct dasd_ccw_req *cqr,
3170                      struct irb *irb)
3171 {
3172         struct dasd_eckd_private *private;
3173         struct dasd_format_entry *format;
3174         struct format_data_t fdata;
3175         unsigned int recs_per_trk;
3176         struct dasd_ccw_req *fcqr;
3177         struct dasd_device *base;
3178         struct dasd_block *block;
3179         unsigned int blksize;
3180         struct request *req;
3181         sector_t first_trk;
3182         sector_t last_trk;
3183         sector_t curr_trk;
3184         int rc;
3185
3186         req = dasd_get_callback_data(cqr);
3187         block = cqr->block;
3188         base = block->base;
3189         private = base->private;
3190         blksize = block->bp_block;
3191         recs_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
3192         format = &startdev->format_entry;
3193
3194         first_trk = blk_rq_pos(req) >> block->s2b_shift;
3195         sector_div(first_trk, recs_per_trk);
3196         last_trk =
3197                 (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
3198         sector_div(last_trk, recs_per_trk);
3199         rc = dasd_eckd_track_from_irb(irb, base, &curr_trk);
3200         if (rc)
3201                 return ERR_PTR(rc);
3202
3203         if (curr_trk < first_trk || curr_trk > last_trk) {
3204                 DBF_DEV_EVENT(DBF_WARNING, startdev,
3205                               "ESE error track %llu not within range %llu - %llu\n",
3206                               curr_trk, first_trk, last_trk);
3207                 return ERR_PTR(-EINVAL);
3208         }
3209         format->track = curr_trk;
3210         /* test if track is already in formatting by another thread */
3211         if (test_and_set_format_track(format, cqr)) {
3212                 /* this is no real error so do not count down retries */
3213                 cqr->retries++;
3214                 return ERR_PTR(-EEXIST);
3215         }
3216
3217         fdata.start_unit = curr_trk;
3218         fdata.stop_unit = curr_trk;
3219         fdata.blksize = blksize;
3220         fdata.intensity = private->uses_cdl ? DASD_FMT_INT_COMPAT : 0;
3221
3222         rc = dasd_eckd_format_sanity_checks(base, &fdata);
3223         if (rc)
3224                 return ERR_PTR(-EINVAL);
3225
3226         /*
3227          * We're building the request with PAV disabled as we're reusing
3228          * the former startdev.
3229          */
3230         fcqr = dasd_eckd_build_format(base, startdev, &fdata, 0);
3231         if (IS_ERR(fcqr))
3232                 return fcqr;
3233
3234         fcqr->callback = dasd_eckd_ese_format_cb;
3235         fcqr->callback_data = (void *) format;
3236
3237         return fcqr;
3238 }
3239
/*
 * When data is read from an unformatted area of an ESE volume, this function
 * returns zeroed data and thereby mimics a read of zero data.
 *
 * The first unformatted track is the one that got the NRF error, the address is
 * encoded in the sense data.
 *
 * All tracks before have returned valid data and should not be touched.
 * All tracks after the unformatted track might be formatted or not. This is
 * currently not known, remember the processed data and return the remainder of
 * the request to the blocklayer in __dasd_cleanup_cqr().
 */
static int dasd_eckd_ese_read(struct dasd_ccw_req *cqr, struct irb *irb)
{
	struct dasd_eckd_private *private;
	sector_t first_trk, last_trk;
	sector_t first_blk, last_blk;
	unsigned int blksize, off;
	unsigned int recs_per_trk;
	struct dasd_device *base;
	struct req_iterator iter;
	struct dasd_block *block;
	unsigned int skip_block;	/* leading valid blocks left untouched */
	unsigned int blk_count;		/* blocks processed so far */
	struct request *req;
	struct bio_vec bv;
	sector_t curr_trk;
	sector_t end_blk;
	char *dst;
	int rc;

	req = (struct request *) cqr->callback_data;
	base = cqr->block->base;
	blksize = base->block->bp_block;
	block =  cqr->block;
	private = base->private;
	skip_block = 0;
	blk_count = 0;

	/* translate the request's sector range into track/block numbers */
	recs_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
	first_trk = first_blk = blk_rq_pos(req) >> block->s2b_shift;
	sector_div(first_trk, recs_per_trk);
	last_trk = last_blk =
		(blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
	sector_div(last_trk, recs_per_trk);
	/* the failing (unformatted) track is encoded in the sense data */
	rc = dasd_eckd_track_from_irb(irb, base, &curr_trk);
	if (rc)
		return rc;

	/* sanity check if the current track from sense data is valid */
	if (curr_trk < first_trk || curr_trk > last_trk) {
		DBF_DEV_EVENT(DBF_WARNING, base,
			      "ESE error track %llu not within range %llu - %llu\n",
			      curr_trk, first_trk, last_trk);
		return -EINVAL;
	}

	/*
	 * if not the first track got the NRF error we have to skip over valid
	 * blocks
	 */
	if (curr_trk != first_trk)
		skip_block = curr_trk * recs_per_trk - first_blk;

	/* we have no information beyond the current track */
	end_blk = (curr_trk + 1) * recs_per_trk;

	rq_for_each_segment(bv, req, iter) {
		dst = bvec_virt(&bv);
		for (off = 0; off < bv.bv_len; off += blksize) {
			if (first_blk + blk_count >= end_blk) {
				/*
				 * End of the unformatted track reached:
				 * record how many bytes were handled so the
				 * remainder of the request can be requeued.
				 */
				cqr->proc_bytes = blk_count * blksize;
				return 0;
			}
			/*
			 * NOTE(review): the else branch decrements the
			 * unsigned skip_block; if bvec_virt() could return
			 * NULL while skip_block is already 0 this would
			 * wrap around -- confirm dst is never NULL here.
			 */
			if (dst && !skip_block)
				memset(dst, 0, blksize);
			else
				skip_block--;
			dst += blksize;
			blk_count++;
		}
	}
	return 0;
}
3324
3325 /*
3326  * Helper function to count consecutive records of a single track.
3327  */
3328 static int dasd_eckd_count_records(struct eckd_count *fmt_buffer, int start,
3329                                    int max)
3330 {
3331         int head;
3332         int i;
3333
3334         head = fmt_buffer[start].head;
3335
3336         /*
3337          * There are 3 conditions where we stop counting:
3338          * - if data reoccurs (same head and record may reoccur), which may
3339          *   happen due to the way DASD_ECKD_CCW_READ_COUNT works
3340          * - when the head changes, because we're iterating over several tracks
3341          *   then (DASD_ECKD_CCW_READ_COUNT_MT)
3342          * - when we've reached the end of sensible data in the buffer (the
3343          *   record will be 0 then)
3344          */
3345         for (i = start; i < max; i++) {
3346                 if (i > start) {
3347                         if ((fmt_buffer[i].head == head &&
3348                             fmt_buffer[i].record == 1) ||
3349                             fmt_buffer[i].head != head ||
3350                             fmt_buffer[i].record == 0)
3351                                 break;
3352                 }
3353         }
3354
3355         return i - start;
3356 }
3357
/*
 * Evaluate a given range of tracks. Data like number of records, blocksize,
 * record ids, and key length are compared with expected data.
 *
 * If a mismatch occurs, the corresponding error bit is set, as well as
 * additional information, depending on the error.
 */
static void dasd_eckd_format_evaluate_tracks(struct eckd_count *fmt_buffer,
					     struct format_check_t *cdata,
					     int rpt_max, int rpt_exp,
					     int trk_per_cyl, int tpm)
{
	struct ch_t geo;	/* expected cylinder/head for the track */
	int max_entries;	/* total number of entries in fmt_buffer */
	int count = 0;		/* records counted on the current track */
	int trkcount;
	int blksize;		/* expected data length of a record */
	int pos = 0;		/* current read position in fmt_buffer */
	int i, j;
	int kl;			/* expected key length */

	trkcount = cdata->expect.stop_unit - cdata->expect.start_unit + 1;
	max_entries = trkcount * rpt_max;

	for (i = cdata->expect.start_unit; i <= cdata->expect.stop_unit; i++) {
		/* Calculate the correct next starting position in the buffer */
		if (tpm) {
			while (fmt_buffer[pos].record == 0 &&
			       fmt_buffer[pos].dl == 0) {
				/*
				 * NOTE(review): fmt_buffer[pos] is read
				 * before this bounds check, and '>' with a
				 * post-increment lets pos reach max_entries
				 * + 1, so up to two entries past the end of
				 * the buffer may be read when the buffer is
				 * entirely zero -- confirm whether this is
				 * reachable in practice.
				 */
				if (pos++ > max_entries)
					break;
			}
		} else {
			if (i != cdata->expect.start_unit)
				pos += rpt_max - count;
		}

		/* Calculate the expected geo values for the current track */
		set_ch_t(&geo, i / trk_per_cyl, i % trk_per_cyl);

		/* Count and check number of records */
		count = dasd_eckd_count_records(fmt_buffer, pos, pos + rpt_max);

		if (count < rpt_exp) {
			cdata->result = DASD_FMT_ERR_TOO_FEW_RECORDS;
			break;
		}
		if (count > rpt_exp) {
			cdata->result = DASD_FMT_ERR_TOO_MANY_RECORDS;
			break;
		}

		for (j = 0; j < count; j++, pos++) {
			blksize = cdata->expect.blksize;
			kl = 0;

			/*
			 * Set special values when checking CDL formatted
			 * devices.
			 */
			if ((cdata->expect.intensity & 0x08) &&
			    geo.cyl == 0 && geo.head == 0) {
				if (j < 3) {
					blksize = sizes_trk0[j] - 4;
					kl = 4;
				}
			}
			if ((cdata->expect.intensity & 0x08) &&
			    geo.cyl == 0 && geo.head == 1) {
				blksize = LABEL_SIZE - 44;
				kl = 44;
			}

			/* Check blocksize */
			if (fmt_buffer[pos].dl != blksize) {
				cdata->result = DASD_FMT_ERR_BLKSIZE;
				goto out;
			}
			/* Check if key length is 0 */
			if (fmt_buffer[pos].kl != kl) {
				cdata->result = DASD_FMT_ERR_KEY_LENGTH;
				goto out;
			}
			/* Check if record_id is correct */
			if (fmt_buffer[pos].cyl != geo.cyl ||
			    fmt_buffer[pos].head != geo.head ||
			    fmt_buffer[pos].record != (j + 1)) {
				cdata->result = DASD_FMT_ERR_RECORD_ID;
				goto out;
			}
		}
	}

out:
	/*
	 * In case of no errors, we need to decrease by one
	 * to get the correct positions.
	 */
	if (!cdata->result) {
		i--;
		pos--;
	}

	/* report the position of the mismatch (or of the last record) */
	cdata->unit = i;
	cdata->num_records = count;
	cdata->rec = fmt_buffer[pos].record;
	cdata->blksize = fmt_buffer[pos].dl;
	cdata->key_length = fmt_buffer[pos].kl;
}
3467
3468 /*
3469  * Check the format of a range of tracks of a DASD.
3470  */
3471 static int dasd_eckd_check_device_format(struct dasd_device *base,
3472                                          struct format_check_t *cdata,
3473                                          int enable_pav)
3474 {
3475         struct dasd_eckd_private *private = base->private;
3476         struct eckd_count *fmt_buffer;
3477         struct irb irb;
3478         int rpt_max, rpt_exp;
3479         int fmt_buffer_size;
3480         int trk_per_cyl;
3481         int trkcount;
3482         int tpm = 0;
3483         int rc;
3484
3485         trk_per_cyl = private->rdc_data.trk_per_cyl;
3486
3487         /* Get maximum and expected amount of records per track */
3488         rpt_max = recs_per_track(&private->rdc_data, 0, 512) + 1;
3489         rpt_exp = recs_per_track(&private->rdc_data, 0, cdata->expect.blksize);
3490
3491         trkcount = cdata->expect.stop_unit - cdata->expect.start_unit + 1;
3492         fmt_buffer_size = trkcount * rpt_max * sizeof(struct eckd_count);
3493
3494         fmt_buffer = kzalloc(fmt_buffer_size, GFP_KERNEL | GFP_DMA);
3495         if (!fmt_buffer)
3496                 return -ENOMEM;
3497
3498         /*
3499          * A certain FICON feature subset is needed to operate in transport
3500          * mode. Additionally, the support for transport mode is implicitly
3501          * checked by comparing the buffer size with fcx_max_data. As long as
3502          * the buffer size is smaller we can operate in transport mode and
3503          * process multiple tracks. If not, only one track at once is being
3504          * processed using command mode.
3505          */
3506         if ((private->features.feature[40] & 0x04) &&
3507             fmt_buffer_size <= private->fcx_max_data)
3508                 tpm = 1;
3509
3510         rc = dasd_eckd_format_process_data(base, &cdata->expect, enable_pav,
3511                                            tpm, fmt_buffer, rpt_max, &irb);
3512         if (rc && rc != -EIO)
3513                 goto out;
3514         if (rc == -EIO) {
3515                 /*
3516                  * If our first attempt with transport mode enabled comes back
3517                  * with an incorrect length error, we're going to retry the
3518                  * check with command mode.
3519                  */
3520                 if (tpm && scsw_cstat(&irb.scsw) == 0x40) {
3521                         tpm = 0;
3522                         rc = dasd_eckd_format_process_data(base, &cdata->expect,
3523                                                            enable_pav, tpm,
3524                                                            fmt_buffer, rpt_max,
3525                                                            &irb);
3526                         if (rc)
3527                                 goto out;
3528                 } else {
3529                         goto out;
3530                 }
3531         }
3532
3533         dasd_eckd_format_evaluate_tracks(fmt_buffer, cdata, rpt_max, rpt_exp,
3534                                          trk_per_cyl, tpm);
3535
3536 out:
3537         kfree(fmt_buffer);
3538
3539         return rc;
3540 }
3541
3542 static void dasd_eckd_handle_terminated_request(struct dasd_ccw_req *cqr)
3543 {
3544         if (cqr->retries < 0) {
3545                 cqr->status = DASD_CQR_FAILED;
3546                 return;
3547         }
3548         cqr->status = DASD_CQR_FILLED;
3549         if (cqr->block && (cqr->startdev != cqr->block->base)) {
3550                 dasd_eckd_reset_ccw_to_base_io(cqr);
3551                 cqr->startdev = cqr->block->base;
3552                 cqr->lpm = dasd_path_get_opm(cqr->block->base);
3553         }
3554 };
3555
3556 static dasd_erp_fn_t
3557 dasd_eckd_erp_action(struct dasd_ccw_req * cqr)
3558 {
3559         struct dasd_device *device = (struct dasd_device *) cqr->startdev;
3560         struct ccw_device *cdev = device->cdev;
3561
3562         switch (cdev->id.cu_type) {
3563         case 0x3990:
3564         case 0x2105:
3565         case 0x2107:
3566         case 0x1750:
3567                 return dasd_3990_erp_action;
3568         case 0x9343:
3569         case 0x3880:
3570         default:
3571                 return dasd_default_erp_action;
3572         }
3573 }
3574
3575 static dasd_erp_fn_t
3576 dasd_eckd_erp_postaction(struct dasd_ccw_req * cqr)
3577 {
3578         return dasd_default_erp_postaction;
3579 }
3580
3581 static void dasd_eckd_check_for_device_change(struct dasd_device *device,
3582                                               struct dasd_ccw_req *cqr,
3583                                               struct irb *irb)
3584 {
3585         char mask;
3586         char *sense = NULL;
3587         struct dasd_eckd_private *private = device->private;
3588
3589         /* first of all check for state change pending interrupt */
3590         mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
3591         if ((scsw_dstat(&irb->scsw) & mask) == mask) {
3592                 /*
3593                  * for alias only, not in offline processing
3594                  * and only if not suspended
3595                  */
3596                 if (!device->block && private->lcu &&
3597                     device->state == DASD_STATE_ONLINE &&
3598                     !test_bit(DASD_FLAG_OFFLINE, &device->flags) &&
3599                     !test_bit(DASD_FLAG_SUSPENDED, &device->flags)) {
3600                         /* schedule worker to reload device */
3601                         dasd_reload_device(device);
3602                 }
3603                 dasd_generic_handle_state_change(device);
3604                 return;
3605         }
3606
3607         sense = dasd_get_sense(irb);
3608         if (!sense)
3609                 return;
3610
3611         /* summary unit check */
3612         if ((sense[27] & DASD_SENSE_BIT_0) && (sense[7] == 0x0D) &&
3613             (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK)) {
3614                 if (test_and_set_bit(DASD_FLAG_SUC, &device->flags)) {
3615                         DBF_DEV_EVENT(DBF_WARNING, device, "%s",
3616                                       "eckd suc: device already notified");
3617                         return;
3618                 }
3619                 sense = dasd_get_sense(irb);
3620                 if (!sense) {
3621                         DBF_DEV_EVENT(DBF_WARNING, device, "%s",
3622                                       "eckd suc: no reason code available");
3623                         clear_bit(DASD_FLAG_SUC, &device->flags);
3624                         return;
3625
3626                 }
3627                 private->suc_reason = sense[8];
3628                 DBF_DEV_EVENT(DBF_NOTICE, device, "%s %x",
3629                               "eckd handle summary unit check: reason",
3630                               private->suc_reason);
3631                 dasd_get_device(device);
3632                 if (!schedule_work(&device->suc_work))
3633                         dasd_put_device(device);
3634
3635                 return;
3636         }
3637
3638         /* service information message SIM */
3639         if (!cqr && !(sense[27] & DASD_SENSE_BIT_0) &&
3640             ((sense[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE)) {
3641                 dasd_3990_erp_handle_sim(device, sense);
3642                 return;
3643         }
3644
3645         /* loss of device reservation is handled via base devices only
3646          * as alias devices may be used with several bases
3647          */
3648         if (device->block && (sense[27] & DASD_SENSE_BIT_0) &&
3649             (sense[7] == 0x3F) &&
3650             (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK) &&
3651             test_bit(DASD_FLAG_IS_RESERVED, &device->flags)) {
3652                 if (device->features & DASD_FEATURE_FAILONSLCK)
3653                         set_bit(DASD_FLAG_LOCK_STOLEN, &device->flags);
3654                 clear_bit(DASD_FLAG_IS_RESERVED, &device->flags);
3655                 dev_err(&device->cdev->dev,
3656                         "The device reservation was lost\n");
3657         }
3658 }
3659
3660 static int dasd_eckd_ras_sanity_checks(struct dasd_device *device,
3661                                        unsigned int first_trk,
3662                                        unsigned int last_trk)
3663 {
3664         struct dasd_eckd_private *private = device->private;
3665         unsigned int trks_per_vol;
3666         int rc = 0;
3667
3668         trks_per_vol = private->real_cyl * private->rdc_data.trk_per_cyl;
3669
3670         if (first_trk >= trks_per_vol) {
3671                 dev_warn(&device->cdev->dev,
3672                          "Start track number %u used in the space release command is too big\n",
3673                          first_trk);
3674                 rc = -EINVAL;
3675         } else if (last_trk >= trks_per_vol) {
3676                 dev_warn(&device->cdev->dev,
3677                          "Stop track number %u used in the space release command is too big\n",
3678                          last_trk);
3679                 rc = -EINVAL;
3680         } else if (first_trk > last_trk) {
3681                 dev_warn(&device->cdev->dev,
3682                          "Start track %u used in the space release command exceeds the end track\n",
3683                          first_trk);
3684                 rc = -EINVAL;
3685         }
3686         return rc;
3687 }
3688
/*
 * Helper function to count the amount of involved extents within a given range
 * with extent alignment in mind.
 *
 * Extents are fixed-size, aligned blocks of trks_per_ext tracks, so the
 * number of extents touched by [from, to] is the distance between the
 * extent indices of the two boundary tracks plus one.
 *
 * The previous open-coded walk over-counted by one whenever the first
 * partial extent already covered the complete range (e.g. from=5, to=9,
 * trks_per_ext=10 yielded 2 instead of 1), because the trailing-extent
 * check compared a relative track count against an absolute track number.
 * That made dasd_eckd_dso_ras() emit a bogus trailing extent range.
 */
static int count_exts(unsigned int from, unsigned int to, int trks_per_ext)
{
	return to / trks_per_ext - from / trks_per_ext + 1;
}
3722
3723 static int dasd_in_copy_relation(struct dasd_device *device)
3724 {
3725         struct dasd_pprc_data_sc4 *temp;
3726         int rc;
3727
3728         if (!dasd_eckd_pprc_enabled(device))
3729                 return 0;
3730
3731         temp = kzalloc(sizeof(*temp), GFP_KERNEL);
3732         if (!temp)
3733                 return -ENOMEM;
3734
3735         rc = dasd_eckd_query_pprc_status(device, temp);
3736         if (!rc)
3737                 rc = temp->dev_info[0].state;
3738
3739         kfree(temp);
3740         return rc;
3741 }
3742
/*
 * Release allocated space for a given range or an entire volume.
 *
 * Builds a Data Set Operation (DSO) CCW request with order "Release
 * Allocated Space" (RAS). When by_extent is set, the track range
 * [first_trk, last_trk] is split into per-extent sub-ranges appended to
 * the request payload; otherwise the whole volume is released. Returns
 * the built request or an ERR_PTR() on failure.
 */
static struct dasd_ccw_req *
dasd_eckd_dso_ras(struct dasd_device *device, struct dasd_block *block,
		  struct request *req, unsigned int first_trk,
		  unsigned int last_trk, int by_extent)
{
	struct dasd_eckd_private *private = device->private;
	struct dasd_dso_ras_ext_range *ras_range;
	struct dasd_rssd_features *features;
	struct dasd_dso_ras_data *ras_data;
	u16 heads, beg_head, end_head;
	int cur_to_trk, cur_from_trk;
	struct dasd_ccw_req *cqr;
	u32 beg_cyl, end_cyl;
	int copy_relation;
	struct ccw1 *ccw;
	int trks_per_ext;
	size_t ras_size;
	size_t size;
	int nr_exts;
	void *rq;
	int i;

	/* reject ranges that exceed the volume or are inverted */
	if (dasd_eckd_ras_sanity_checks(device, first_trk, last_trk))
		return ERR_PTR(-EINVAL);

	copy_relation = dasd_in_copy_relation(device);
	if (copy_relation < 0)
		return ERR_PTR(copy_relation);

	rq = req ? blk_mq_rq_to_pdu(req) : NULL;

	features = &private->features;

	trks_per_ext = dasd_eckd_ext_size(device) * private->rdc_data.trk_per_cyl;
	nr_exts = 0;
	if (by_extent)
		nr_exts = count_exts(first_trk, last_trk, trks_per_ext);
	/* extent ranges are stored directly behind the RAS data */
	ras_size = sizeof(*ras_data);
	size = ras_size + (nr_exts * sizeof(*ras_range));

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, size, device, rq);
	if (IS_ERR(cqr)) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate RAS request");
		return cqr;
	}

	ras_data = cqr->data;
	memset(ras_data, 0, size);

	ras_data->order = DSO_ORDER_RAS;
	ras_data->flags.vol_type = 0; /* CKD volume */
	/* Release specified extents or entire volume */
	ras_data->op_flags.by_extent = by_extent;
	/*
	 * This bit guarantees initialisation of tracks within an extent that is
	 * not fully specified, but is only supported with a certain feature
	 * subset and for devices not in a copy relation.
	 */
	if (features->feature[56] & 0x01 && !copy_relation)
		ras_data->op_flags.guarantee_init = 1;

	ras_data->lss = private->conf.ned->ID;
	ras_data->dev_addr = private->conf.ned->unit_addr;
	ras_data->nr_exts = nr_exts;

	if (by_extent) {
		heads = private->rdc_data.trk_per_cyl;
		/* first sub-range: first_trk up to the end of its extent */
		cur_from_trk = first_trk;
		cur_to_trk = first_trk + trks_per_ext -
			(first_trk % trks_per_ext) - 1;
		if (cur_to_trk > last_trk)
			cur_to_trk = last_trk;
		ras_range = (struct dasd_dso_ras_ext_range *)(cqr->data + ras_size);

		for (i = 0; i < nr_exts; i++) {
			/* convert track numbers to cylinder/head addresses */
			beg_cyl = cur_from_trk / heads;
			beg_head = cur_from_trk % heads;
			end_cyl = cur_to_trk / heads;
			end_head = cur_to_trk % heads;

			set_ch_t(&ras_range->beg_ext, beg_cyl, beg_head);
			set_ch_t(&ras_range->end_ext, end_cyl, end_head);

			/* advance to the next extent, clamped to last_trk */
			cur_from_trk = cur_to_trk + 1;
			cur_to_trk = cur_from_trk + trks_per_ext - 1;
			if (cur_to_trk > last_trk)
				cur_to_trk = last_trk;
			ras_range++;
		}
	}

	/* single DSO CCW transferring the RAS data plus extent ranges */
	ccw = cqr->cpaddr;
	ccw->cda = (__u32)virt_to_phys(cqr->data);
	ccw->cmd_code = DASD_ECKD_CCW_DSO;
	ccw->count = size;

	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = block;
	cqr->retries = 256;
	cqr->expires = device->default_expires * HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

	return cqr;
}
3853
3854 static int dasd_eckd_release_space_full(struct dasd_device *device)
3855 {
3856         struct dasd_ccw_req *cqr;
3857         int rc;
3858
3859         cqr = dasd_eckd_dso_ras(device, NULL, NULL, 0, 0, 0);
3860         if (IS_ERR(cqr))
3861                 return PTR_ERR(cqr);
3862
3863         rc = dasd_sleep_on_interruptible(cqr);
3864
3865         dasd_sfree_request(cqr, cqr->memdev);
3866
3867         return rc;
3868 }
3869
/*
 * Release allocated space for the track range [from, to] by building a
 * series of DSO RAS requests, queueing them on a local list and waiting
 * for the whole queue.  Each request's end position is aligned to an
 * extent boundary and the per-request span is capped so device limits
 * (DASD_ECKD_RAS_EXTS_MAX extents) are not exceeded.
 *
 * Returns 0 on success or a negative error code from request
 * allocation or from the interruptible wait.
 */
static int dasd_eckd_release_space_trks(struct dasd_device *device,
					unsigned int from, unsigned int to)
{
	struct dasd_eckd_private *private = device->private;
	struct dasd_block *block = device->block;
	struct dasd_ccw_req *cqr, *n;
	struct list_head ras_queue;
	unsigned int device_exts;
	int trks_per_ext;
	int stop, step;
	int cur_pos;
	int rc = 0;
	int retry;

	INIT_LIST_HEAD(&ras_queue);

	device_exts = private->real_cyl / dasd_eckd_ext_size(device);
	trks_per_ext = dasd_eckd_ext_size(device) * private->rdc_data.trk_per_cyl;

	/* Make sure device limits are not exceeded */
	step = trks_per_ext * min(device_exts, DASD_ECKD_RAS_EXTS_MAX);
	cur_pos = from;

	do {
		retry = 0;
		while (cur_pos < to) {
			/* End this request one track before the next extent
			 * boundary, but never beyond the requested range. */
			stop = cur_pos + step -
				((cur_pos + step) % trks_per_ext) - 1;
			if (stop > to)
				stop = to;

			cqr = dasd_eckd_dso_ras(device, NULL, NULL, cur_pos, stop, 1);
			if (IS_ERR(cqr)) {
				rc = PTR_ERR(cqr);
				if (rc == -ENOMEM) {
					/* Out of memory with nothing queued:
					 * give up.  Otherwise submit what we
					 * have, free it, and retry the rest. */
					if (list_empty(&ras_queue))
						goto out;
					retry = 1;
					break;
				}
				/* Any other error: skip the wait and tear
				 * down whatever is already queued. */
				goto err_out;
			}

			spin_lock_irq(&block->queue_lock);
			list_add_tail(&cqr->blocklist, &ras_queue);
			spin_unlock_irq(&block->queue_lock);
			cur_pos = stop + 1;
		}

		rc = dasd_sleep_on_queue_interruptible(&ras_queue);

err_out:
		/* Unlink and free every queued request, whether it completed
		 * or not.  NOTE(review): device/private are re-read from each
		 * cqr->startdev — presumably the start device can differ from
		 * the base device (e.g. PAV alias); confirm before relying on
		 * it.  The meaning of private->count is not visible here. */
		list_for_each_entry_safe(cqr, n, &ras_queue, blocklist) {
			device = cqr->startdev;
			private = device->private;

			spin_lock_irq(&block->queue_lock);
			list_del_init(&cqr->blocklist);
			spin_unlock_irq(&block->queue_lock);
			dasd_sfree_request(cqr, device);
			private->count--;
		}
	} while (retry);

out:
	return rc;
}
3937
3938 static int dasd_eckd_release_space(struct dasd_device *device,
3939                                    struct format_data_t *rdata)
3940 {
3941         if (rdata->intensity & DASD_FMT_INT_ESE_FULL)
3942                 return dasd_eckd_release_space_full(device);
3943         else if (rdata->intensity == 0)
3944                 return dasd_eckd_release_space_trks(device, rdata->start_unit,
3945                                                     rdata->stop_unit);
3946         else
3947                 return -EINVAL;
3948 }
3949
/*
 * Build a command-mode channel program for a block layer request using
 * single-block transfers: one Define Extent (or Prefix, if the control
 * unit supports it) followed by one Locate Record and one read/write
 * CCW per block.  Extra Locate Record CCWs are inserted for the special
 * compatible-disk-layout (CDL) records at the start of the volume,
 * which have non-standard block lengths.
 *
 * Returns the filled request, or ERR_PTR(-EINVAL) for malformed
 * requests, ERR_PTR(-EAGAIN) when the clock is not in sync and XRC is
 * enabled, or the error from request allocation.
 */
static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
					       struct dasd_device *startdev,
					       struct dasd_block *block,
					       struct request *req,
					       sector_t first_rec,
					       sector_t last_rec,
					       sector_t first_trk,
					       sector_t last_trk,
					       unsigned int first_offs,
					       unsigned int last_offs,
					       unsigned int blk_per_trk,
					       unsigned int blksize)
{
	struct dasd_eckd_private *private;
	unsigned long *idaws;
	struct LO_eckd_data *LO_data;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	struct req_iterator iter;
	struct bio_vec bv;
	char *dst;
	unsigned int off;
	int count, cidaw, cplength, datasize;
	sector_t recid;
	unsigned char cmd, rcmd;
	int use_prefix;
	struct dasd_device *basedev;

	basedev = block->base;
	private = basedev->private;
	if (rq_data_dir(req) == READ)
		cmd = DASD_ECKD_CCW_READ_MT;
	else if (rq_data_dir(req) == WRITE)
		cmd = DASD_ECKD_CCW_WRITE_MT;
	else
		return ERR_PTR(-EINVAL);

	/* Check struct bio and count the number of blocks for the request. */
	count = 0;
	cidaw = 0;
	rq_for_each_segment(bv, req, iter) {
		if (bv.bv_len & (blksize - 1))
			/* Eckd can only do full blocks. */
			return ERR_PTR(-EINVAL);
		count += bv.bv_len >> (block->s2b_shift + 9);
		/* Segments above the IDA boundary need indirect addressing. */
		if (idal_is_needed (page_address(bv.bv_page), bv.bv_len))
			cidaw += bv.bv_len >> (block->s2b_shift + 9);
	}
	/* Paranoia. */
	if (count != last_rec - first_rec + 1)
		return ERR_PTR(-EINVAL);

	/* use the prefix command if available */
	use_prefix = private->features.feature[8] & 0x01;
	if (use_prefix) {
		/* 1x prefix + number of blocks */
		cplength = 2 + count;
		/* 1x prefix + cidaws*sizeof(long) */
		datasize = sizeof(struct PFX_eckd_data) +
			sizeof(struct LO_eckd_data) +
			cidaw * sizeof(unsigned long);
	} else {
		/* 1x define extent + 1x locate record + number of blocks */
		cplength = 2 + count;
		/* 1x define extent + 1x locate record + cidaws*sizeof(long) */
		datasize = sizeof(struct DE_eckd_data) +
			sizeof(struct LO_eckd_data) +
			cidaw * sizeof(unsigned long);
	}
	/* Find out the number of additional locate record ccws for cdl. */
	if (private->uses_cdl && first_rec < 2*blk_per_trk) {
		if (last_rec >= 2*blk_per_trk)
			count = 2*blk_per_trk - first_rec;
		cplength += count;
		datasize += count*sizeof(struct LO_eckd_data);
	}
	/* Allocate the ccw request. */
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
				   startdev, blk_mq_rq_to_pdu(req));
	if (IS_ERR(cqr))
		return cqr;
	ccw = cqr->cpaddr;
	/* First ccw is define extent or prefix. */
	if (use_prefix) {
		if (prefix(ccw++, cqr->data, first_trk,
			   last_trk, cmd, basedev, startdev) == -EAGAIN) {
			/* Clock not in sync and XRC is enabled.
			 * Try again later.
			 */
			dasd_sfree_request(cqr, startdev);
			return ERR_PTR(-EAGAIN);
		}
		idaws = (unsigned long *) (cqr->data +
					   sizeof(struct PFX_eckd_data));
	} else {
		if (define_extent(ccw++, cqr->data, first_trk,
				  last_trk, cmd, basedev, 0) == -EAGAIN) {
			/* Clock not in sync and XRC is enabled.
			 * Try again later.
			 */
			dasd_sfree_request(cqr, startdev);
			return ERR_PTR(-EAGAIN);
		}
		idaws = (unsigned long *) (cqr->data +
					   sizeof(struct DE_eckd_data));
	}
	/* Build locate_record+read/write/ccws.
	 * The LO_eckd_data area starts right after the cidaw IDAL words. */
	LO_data = (struct LO_eckd_data *) (idaws + cidaw);
	recid = first_rec;
	if (private->uses_cdl == 0 || recid > 2*blk_per_trk) {
		/* Only standard blocks so there is just one locate record. */
		ccw[-1].flags |= CCW_FLAG_CC;
		locate_record(ccw++, LO_data++, first_trk, first_offs + 1,
			      last_rec - recid + 1, cmd, basedev, blksize);
	}
	rq_for_each_segment(bv, req, iter) {
		dst = bvec_virt(&bv);
		if (dasd_page_cache) {
			/* Optional bounce buffering through the dasd page
			 * cache; best effort, fall back to the bio page on
			 * allocation failure. */
			char *copy = kmem_cache_alloc(dasd_page_cache,
						      GFP_DMA | __GFP_NOWARN);
			if (copy && rq_data_dir(req) == WRITE)
				memcpy(copy + bv.bv_offset, dst, bv.bv_len);
			if (copy)
				dst = copy + bv.bv_offset;
		}
		for (off = 0; off < bv.bv_len; off += blksize) {
			sector_t trkid = recid;
			unsigned int recoffs = sector_div(trkid, blk_per_trk);
			rcmd = cmd;
			count = blksize;
			/* Locate record for cdl special block ? */
			if (private->uses_cdl && recid < 2*blk_per_trk) {
				if (dasd_eckd_cdl_special(blk_per_trk, recid)){
					rcmd |= 0x8;
					count = dasd_eckd_cdl_reclen(recid);
					/* Pad short CDL records read from disk
					 * with 0xe5 up to the block size. */
					if (count < blksize &&
					    rq_data_dir(req) == READ)
						memset(dst + count, 0xe5,
						       blksize - count);
				}
				ccw[-1].flags |= CCW_FLAG_CC;
				locate_record(ccw++, LO_data++,
					      trkid, recoffs + 1,
					      1, rcmd, basedev, count);
			}
			/* Locate record for standard blocks ? */
			if (private->uses_cdl && recid == 2*blk_per_trk) {
				ccw[-1].flags |= CCW_FLAG_CC;
				locate_record(ccw++, LO_data++,
					      trkid, recoffs + 1,
					      last_rec - recid + 1,
					      cmd, basedev, count);
			}
			/* Read/write ccw. */
			ccw[-1].flags |= CCW_FLAG_CC;
			ccw->cmd_code = rcmd;
			ccw->count = count;
			if (idal_is_needed(dst, blksize)) {
				ccw->cda = (__u32)virt_to_phys(idaws);
				ccw->flags = CCW_FLAG_IDA;
				idaws = idal_create_words(idaws, dst, blksize);
			} else {
				ccw->cda = (__u32)virt_to_phys(dst);
				ccw->flags = 0;
			}
			ccw++;
			dst += blksize;
			recid++;
		}
	}
	if (blk_noretry_request(req) ||
	    block->base->features & DASD_FEATURE_FAILFAST)
		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	cqr->startdev = startdev;
	cqr->memdev = startdev;
	cqr->block = block;
	cqr->expires = startdev->default_expires * HZ;	/* default 5 minutes */
	cqr->lpm = dasd_path_get_ppm(startdev);
	cqr->retries = startdev->default_retries;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

	/* Set flags to suppress output for expected errors */
	if (dasd_eckd_is_ese(basedev)) {
		set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
		set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags);
		set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
	}

	return cqr;
}
4141
/*
 * Build a command-mode channel program for a block layer request using
 * track-based transfers: one Prefix (with embedded Locate Record
 * Extended) followed by one read-track/write-track CCW per track, each
 * addressing its data through an IDAL built on the fly.
 *
 * Returns the filled request, or ERR_PTR(-EINVAL) for unsupported data
 * directions, ERR_PTR(-EAGAIN) when the clock is not in sync and XRC
 * is enabled, ERR_PTR(-ERANGE) when the request's memory layout cannot
 * be expressed with the IDAW alignment rules, or the error from
 * request allocation.
 */
static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
					       struct dasd_device *startdev,
					       struct dasd_block *block,
					       struct request *req,
					       sector_t first_rec,
					       sector_t last_rec,
					       sector_t first_trk,
					       sector_t last_trk,
					       unsigned int first_offs,
					       unsigned int last_offs,
					       unsigned int blk_per_trk,
					       unsigned int blksize)
{
	unsigned long *idaws;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	struct req_iterator iter;
	struct bio_vec bv;
	char *dst, *idaw_dst;
	unsigned int cidaw, cplength, datasize;
	unsigned int tlf;
	sector_t recid;
	unsigned char cmd;
	struct dasd_device *basedev;
	unsigned int trkcount, count, count_to_trk_end;
	unsigned int idaw_len, seg_len, part_len, len_to_track_end;
	unsigned char new_track, end_idaw;
	sector_t trkid;
	unsigned int recoffs;

	basedev = block->base;
	if (rq_data_dir(req) == READ)
		cmd = DASD_ECKD_CCW_READ_TRACK_DATA;
	else if (rq_data_dir(req) == WRITE)
		cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA;
	else
		return ERR_PTR(-EINVAL);

	/* Track based I/O needs IDAWs for each page, and not just for
	 * 64 bit addresses. We need additional idals for pages
	 * that get filled from two tracks, so we use the number
	 * of records as upper limit.
	 */
	cidaw = last_rec - first_rec + 1;
	trkcount = last_trk - first_trk + 1;

	/* 1x prefix + one read/write ccw per track */
	cplength = 1 + trkcount;

	datasize = sizeof(struct PFX_eckd_data) + cidaw * sizeof(unsigned long);

	/* Allocate the ccw request. */
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
				   startdev, blk_mq_rq_to_pdu(req));
	if (IS_ERR(cqr))
		return cqr;
	ccw = cqr->cpaddr;
	/* transfer length factor: how many bytes to read from the last track */
	if (first_trk == last_trk)
		tlf = last_offs - first_offs + 1;
	else
		tlf = last_offs + 1;
	tlf *= blksize;

	if (prefix_LRE(ccw++, cqr->data, first_trk,
		       last_trk, cmd, basedev, startdev,
		       1 /* format */, first_offs + 1,
		       trkcount, blksize,
		       tlf) == -EAGAIN) {
		/* Clock not in sync and XRC is enabled.
		 * Try again later.
		 */
		dasd_sfree_request(cqr, startdev);
		return ERR_PTR(-EAGAIN);
	}

	/*
	 * The translation of request into ccw programs must meet the
	 * following conditions:
	 * - all idaws but the first and the last must address full pages
	 *   (or 2K blocks on 31-bit)
	 * - the scope of a ccw and it's idal ends with the track boundaries
	 */
	idaws = (unsigned long *) (cqr->data + sizeof(struct PFX_eckd_data));
	recid = first_rec;
	new_track = 1;
	end_idaw = 0;
	len_to_track_end = 0;
	idaw_dst = NULL;
	idaw_len = 0;
	rq_for_each_segment(bv, req, iter) {
		dst = bvec_virt(&bv);
		seg_len = bv.bv_len;
		while (seg_len) {
			if (new_track) {
				/* Start a new read/write CCW covering the
				 * remainder of the current track. */
				trkid = recid;
				recoffs = sector_div(trkid, blk_per_trk);
				count_to_trk_end = blk_per_trk - recoffs;
				count = min((last_rec - recid + 1),
					    (sector_t)count_to_trk_end);
				len_to_track_end = count * blksize;
				ccw[-1].flags |= CCW_FLAG_CC;
				ccw->cmd_code = cmd;
				ccw->count = len_to_track_end;
				ccw->cda = (__u32)virt_to_phys(idaws);
				ccw->flags = CCW_FLAG_IDA;
				ccw++;
				recid += count;
				new_track = 0;
				/* first idaw for a ccw may start anywhere */
				if (!idaw_dst)
					idaw_dst = dst;
			}
			/* If we start a new idaw, we must make sure that it
			 * starts on an IDA_BLOCK_SIZE boundary.
			 * If we continue an idaw, we must make sure that the
			 * current segment begins where the so far accumulated
			 * idaw ends
			 */
			if (!idaw_dst) {
				if ((__u32)virt_to_phys(dst) & (IDA_BLOCK_SIZE - 1)) {
					dasd_sfree_request(cqr, startdev);
					return ERR_PTR(-ERANGE);
				} else
					idaw_dst = dst;
			}
			if ((idaw_dst + idaw_len) != dst) {
				/* Non-contiguous continuation cannot be
				 * described by a single idaw. */
				dasd_sfree_request(cqr, startdev);
				return ERR_PTR(-ERANGE);
			}
			part_len = min(seg_len, len_to_track_end);
			seg_len -= part_len;
			dst += part_len;
			idaw_len += part_len;
			len_to_track_end -= part_len;
			/* collected memory area ends on an IDA_BLOCK border,
			 * -> create an idaw
			 * idal_create_words will handle cases where idaw_len
			 * is larger then IDA_BLOCK_SIZE
			 */
			if (!((__u32)virt_to_phys(idaw_dst + idaw_len) & (IDA_BLOCK_SIZE - 1)))
				end_idaw = 1;
			/* We also need to end the idaw at track end */
			if (!len_to_track_end) {
				new_track = 1;
				end_idaw = 1;
			}
			if (end_idaw) {
				idaws = idal_create_words(idaws, idaw_dst,
							  idaw_len);
				idaw_dst = NULL;
				idaw_len = 0;
				end_idaw = 0;
			}
		}
	}

	if (blk_noretry_request(req) ||
	    block->base->features & DASD_FEATURE_FAILFAST)
		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	cqr->startdev = startdev;
	cqr->memdev = startdev;
	cqr->block = block;
	cqr->expires = startdev->default_expires * HZ;	/* default 5 minutes */
	cqr->lpm = dasd_path_get_ppm(startdev);
	cqr->retries = startdev->default_retries;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

	/* Set flags to suppress output for expected errors */
	if (dasd_eckd_is_ese(basedev))
		set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);

	return cqr;
}
4317
/*
 * Fill a Prefix (PFX with embedded Locate Record Extended) data area
 * for a transport-mode request covering tracks [trk, totrk] and add it
 * as a DCW to the given itcw.
 *
 * Only DASD_ECKD_CCW_READ_TRACK_DATA, DASD_ECKD_CCW_WRITE_TRACK_DATA
 * and DASD_ECKD_CCW_READ_COUNT_MT are supported; any other cmd BUGs.
 * Returns 0 on success, the error from set_timestamp(), or the error
 * from itcw_add_dcw().
 */
static int prepare_itcw(struct itcw *itcw,
			unsigned int trk, unsigned int totrk, int cmd,
			struct dasd_device *basedev,
			struct dasd_device *startdev,
			unsigned int rec_on_trk, int count,
			unsigned int blksize,
			unsigned int total_data_size,
			unsigned int tlf,
			unsigned int blk_per_trk)
{
	struct PFX_eckd_data pfxdata;
	struct dasd_eckd_private *basepriv, *startpriv;
	struct DE_eckd_data *dedata;
	struct LRE_eckd_data *lredata;
	struct dcw *dcw;

	u32 begcyl, endcyl;
	u16 heads, beghead, endhead;
	u8 pfx_cmd;

	int rc = 0;
	int sector = 0;
	int dn, d;


	/* setup prefix data */
	basepriv = basedev->private;
	startpriv = startdev->private;
	dedata = &pfxdata.define_extent;
	lredata = &pfxdata.locate_record;

	memset(&pfxdata, 0, sizeof(pfxdata));
	pfxdata.format = 1; /* PFX with LRE */
	pfxdata.base_address = basepriv->conf.ned->unit_addr;
	pfxdata.base_lss = basepriv->conf.ned->ID;
	pfxdata.validity.define_extent = 1;

	/* private uid is kept up to date, conf_data may be outdated */
	if (startpriv->uid.type == UA_BASE_PAV_ALIAS)
		pfxdata.validity.verify_base = 1;

	if (startpriv->uid.type == UA_HYPER_PAV_ALIAS) {
		pfxdata.validity.verify_base = 1;
		pfxdata.validity.hyper_pav = 1;
	}

	/* Per-opcode extent permissions and LRE operation codes.
	 * NOTE(review): the mask/operation/check_bytes values appear to
	 * come from the ECKD command specification — confirm against the
	 * control unit documentation before changing them. */
	switch (cmd) {
	case DASD_ECKD_CCW_READ_TRACK_DATA:
		dedata->mask.perm = 0x1;
		dedata->attributes.operation = basepriv->attrib.operation;
		dedata->blk_size = blksize;
		dedata->ga_extended |= 0x42;
		lredata->operation.orientation = 0x0;
		lredata->operation.operation = 0x0C;
		lredata->auxiliary.check_bytes = 0x01;
		pfx_cmd = DASD_ECKD_CCW_PFX_READ;
		break;
	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
		dedata->mask.perm = 0x02;
		dedata->attributes.operation = basepriv->attrib.operation;
		dedata->blk_size = blksize;
		/* Writes may need an XRC timestamp in the DE data. */
		rc = set_timestamp(NULL, dedata, basedev);
		dedata->ga_extended |= 0x42;
		lredata->operation.orientation = 0x0;
		lredata->operation.operation = 0x3F;
		lredata->extended_operation = 0x23;
		lredata->auxiliary.check_bytes = 0x2;
		/*
		 * If XRC is supported the System Time Stamp is set. The
		 * validity of the time stamp must be reflected in the prefix
		 * data as well.
		 */
		if (dedata->ga_extended & 0x08 && dedata->ga_extended & 0x02)
			pfxdata.validity.time_stamp = 1; /* 'Time Stamp Valid' */
		pfx_cmd = DASD_ECKD_CCW_PFX;
		break;
	case DASD_ECKD_CCW_READ_COUNT_MT:
		dedata->mask.perm = 0x1;
		dedata->attributes.operation = DASD_BYPASS_CACHE;
		dedata->ga_extended |= 0x42;
		dedata->blk_size = blksize;
		lredata->operation.orientation = 0x2;
		lredata->operation.operation = 0x16;
		lredata->auxiliary.check_bytes = 0x01;
		pfx_cmd = DASD_ECKD_CCW_PFX_READ;
		break;
	default:
		DBF_DEV_EVENT(DBF_ERR, basedev,
			      "prepare itcw, unknown opcode 0x%x", cmd);
		BUG();
		break;
	}
	if (rc)
		return rc;

	dedata->attributes.mode = 0x3;	/* ECKD */

	/* Translate linear track numbers into cylinder/head addresses. */
	heads = basepriv->rdc_data.trk_per_cyl;
	begcyl = trk / heads;
	beghead = trk % heads;
	endcyl = totrk / heads;
	endhead = totrk % heads;

	/* check for sequential prestage - enhance cylinder range */
	if (dedata->attributes.operation == DASD_SEQ_PRESTAGE ||
	    dedata->attributes.operation == DASD_SEQ_ACCESS) {

		if (endcyl + basepriv->attrib.nr_cyl < basepriv->real_cyl)
			endcyl += basepriv->attrib.nr_cyl;
		else
			endcyl = (basepriv->real_cyl - 1);
	}

	set_ch_t(&dedata->beg_ext, begcyl, beghead);
	set_ch_t(&dedata->end_ext, endcyl, endhead);

	dedata->ep_format = 0x20; /* records per track is valid */
	dedata->ep_rec_per_track = blk_per_trk;

	if (rec_on_trk) {
		/* Compute the set-sector value from device geometry;
		 * the formulas are per device type (3390 vs 3380). */
		switch (basepriv->rdc_data.dev_type) {
		case 0x3390:
			dn = ceil_quot(blksize + 6, 232);
			d = 9 + ceil_quot(blksize + 6 * (dn + 1), 34);
			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
			break;
		case 0x3380:
			d = 7 + ceil_quot(blksize + 12, 32);
			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
			break;
		}
	}

	if (cmd == DASD_ECKD_CCW_READ_COUNT_MT) {
		lredata->auxiliary.length_valid = 0;
		lredata->auxiliary.length_scope = 0;
		lredata->sector = 0xff;
	} else {
		lredata->auxiliary.length_valid = 1;
		lredata->auxiliary.length_scope = 1;
		lredata->sector = sector;
	}
	lredata->auxiliary.imbedded_ccw_valid = 1;
	lredata->length = tlf;
	lredata->imbedded_ccw = cmd;
	lredata->count = count;
	set_ch_t(&lredata->seek_addr, begcyl, beghead);
	lredata->search_arg.cyl = lredata->seek_addr.cyl;
	lredata->search_arg.head = lredata->seek_addr.head;
	lredata->search_arg.record = rec_on_trk;

	dcw = itcw_add_dcw(itcw, pfx_cmd, 0,
		     &pfxdata, sizeof(pfxdata), total_data_size);
	return PTR_ERR_OR_ZERO(dcw);
}
4473
4474 static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
4475                                                struct dasd_device *startdev,
4476                                                struct dasd_block *block,
4477                                                struct request *req,
4478                                                sector_t first_rec,
4479                                                sector_t last_rec,
4480                                                sector_t first_trk,
4481                                                sector_t last_trk,
4482                                                unsigned int first_offs,
4483                                                unsigned int last_offs,
4484                                                unsigned int blk_per_trk,
4485                                                unsigned int blksize)
4486 {
4487         struct dasd_ccw_req *cqr;
4488         struct req_iterator iter;
4489         struct bio_vec bv;
4490         char *dst;
4491         unsigned int trkcount, ctidaw;
4492         unsigned char cmd;
4493         struct dasd_device *basedev;
4494         unsigned int tlf;
4495         struct itcw *itcw;
4496         struct tidaw *last_tidaw = NULL;
4497         int itcw_op;
4498         size_t itcw_size;
4499         u8 tidaw_flags;
4500         unsigned int seg_len, part_len, len_to_track_end;
4501         unsigned char new_track;
4502         sector_t recid, trkid;
4503         unsigned int offs;
4504         unsigned int count, count_to_trk_end;
4505         int ret;
4506
4507         basedev = block->base;
4508         if (rq_data_dir(req) == READ) {
4509                 cmd = DASD_ECKD_CCW_READ_TRACK_DATA;
4510                 itcw_op = ITCW_OP_READ;
4511         } else if (rq_data_dir(req) == WRITE) {
4512                 cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA;
4513                 itcw_op = ITCW_OP_WRITE;
4514         } else
4515                 return ERR_PTR(-EINVAL);
4516
4517         /* trackbased I/O needs address all memory via TIDAWs,
4518          * not just for 64 bit addresses. This allows us to map
4519          * each segment directly to one tidaw.
4520          * In the case of write requests, additional tidaws may
4521          * be needed when a segment crosses a track boundary.
4522          */
4523         trkcount = last_trk - first_trk + 1;
4524         ctidaw = 0;
4525         rq_for_each_segment(bv, req, iter) {
4526                 ++ctidaw;
4527         }
4528         if (rq_data_dir(req) == WRITE)
4529                 ctidaw += (last_trk - first_trk);
4530
4531         /* Allocate the ccw request. */
4532         itcw_size = itcw_calc_size(0, ctidaw, 0);
4533         cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev,
4534                                    blk_mq_rq_to_pdu(req));
4535         if (IS_ERR(cqr))
4536                 return cqr;
4537
4538         /* transfer length factor: how many bytes to read from the last track */
4539         if (first_trk == last_trk)
4540                 tlf = last_offs - first_offs + 1;
4541         else
4542                 tlf = last_offs + 1;
4543         tlf *= blksize;
4544
4545         itcw = itcw_init(cqr->data, itcw_size, itcw_op, 0, ctidaw, 0);
4546         if (IS_ERR(itcw)) {
4547                 ret = -EINVAL;
4548                 goto out_error;
4549         }
4550         cqr->cpaddr = itcw_get_tcw(itcw);
4551         if (prepare_itcw(itcw, first_trk, last_trk,
4552                          cmd, basedev, startdev,
4553                          first_offs + 1,
4554                          trkcount, blksize,
4555                          (last_rec - first_rec + 1) * blksize,
4556                          tlf, blk_per_trk) == -EAGAIN) {
4557                 /* Clock not in sync and XRC is enabled.
4558                  * Try again later.
4559                  */
4560                 ret = -EAGAIN;
4561                 goto out_error;
4562         }
4563         len_to_track_end = 0;
4564         /*
4565          * A tidaw can address 4k of memory, but must not cross page boundaries
4566          * We can let the block layer handle this by setting
4567          * blk_queue_segment_boundary to page boundaries and
4568          * blk_max_segment_size to page size when setting up the request queue.
4569          * For write requests, a TIDAW must not cross track boundaries, because
4570          * we have to set the CBC flag on the last tidaw for each track.
4571          */
4572         if (rq_data_dir(req) == WRITE) {
4573                 new_track = 1;
4574                 recid = first_rec;
4575                 rq_for_each_segment(bv, req, iter) {
4576                         dst = bvec_virt(&bv);
4577                         seg_len = bv.bv_len;
4578                         while (seg_len) {
4579                                 if (new_track) {
4580                                         trkid = recid;
4581                                         offs = sector_div(trkid, blk_per_trk);
4582                                         count_to_trk_end = blk_per_trk - offs;
4583                                         count = min((last_rec - recid + 1),
4584                                                     (sector_t)count_to_trk_end);
4585                                         len_to_track_end = count * blksize;
4586                                         recid += count;
4587                                         new_track = 0;
4588                                 }
4589                                 part_len = min(seg_len, len_to_track_end);
4590                                 seg_len -= part_len;
4591                                 len_to_track_end -= part_len;
4592                                 /* We need to end the tidaw at track end */
4593                                 if (!len_to_track_end) {
4594                                         new_track = 1;
4595                                         tidaw_flags = TIDAW_FLAGS_INSERT_CBC;
4596                                 } else
4597                                         tidaw_flags = 0;
4598                                 last_tidaw = itcw_add_tidaw(itcw, tidaw_flags,
4599                                                             dst, part_len);
4600                                 if (IS_ERR(last_tidaw)) {
4601                                         ret = -EINVAL;
4602                                         goto out_error;
4603                                 }
4604                                 dst += part_len;
4605                         }
4606                 }
4607         } else {
4608                 rq_for_each_segment(bv, req, iter) {
4609                         dst = bvec_virt(&bv);
4610                         last_tidaw = itcw_add_tidaw(itcw, 0x00,
4611                                                     dst, bv.bv_len);
4612                         if (IS_ERR(last_tidaw)) {
4613                                 ret = -EINVAL;
4614                                 goto out_error;
4615                         }
4616                 }
4617         }
4618         last_tidaw->flags |= TIDAW_FLAGS_LAST;
4619         last_tidaw->flags &= ~TIDAW_FLAGS_INSERT_CBC;
4620         itcw_finalize(itcw);
4621
4622         if (blk_noretry_request(req) ||
4623             block->base->features & DASD_FEATURE_FAILFAST)
4624                 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
4625         cqr->cpmode = 1;
4626         cqr->startdev = startdev;
4627         cqr->memdev = startdev;
4628         cqr->block = block;
4629         cqr->expires = startdev->default_expires * HZ;  /* default 5 minutes */
4630         cqr->lpm = dasd_path_get_ppm(startdev);
4631         cqr->retries = startdev->default_retries;
4632         cqr->buildclk = get_tod_clock();
4633         cqr->status = DASD_CQR_FILLED;
4634
4635         /* Set flags to suppress output for expected errors */
4636         if (dasd_eckd_is_ese(basedev)) {
4637                 set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
4638                 set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags);
4639                 set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
4640         }
4641
4642         return cqr;
4643 out_error:
4644         dasd_sfree_request(cqr, startdev);
4645         return ERR_PTR(ret);
4646 }
4647
4648 static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
4649                                                struct dasd_block *block,
4650                                                struct request *req)
4651 {
4652         int cmdrtd, cmdwtd;
4653         int use_prefix;
4654         int fcx_multitrack;
4655         struct dasd_eckd_private *private;
4656         struct dasd_device *basedev;
4657         sector_t first_rec, last_rec;
4658         sector_t first_trk, last_trk;
4659         unsigned int first_offs, last_offs;
4660         unsigned int blk_per_trk, blksize;
4661         int cdlspecial;
4662         unsigned int data_size;
4663         struct dasd_ccw_req *cqr;
4664
4665         basedev = block->base;
4666         private = basedev->private;
4667
4668         /* Calculate number of blocks/records per track. */
4669         blksize = block->bp_block;
4670         blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
4671         if (blk_per_trk == 0)
4672                 return ERR_PTR(-EINVAL);
4673         /* Calculate record id of first and last block. */
4674         first_rec = first_trk = blk_rq_pos(req) >> block->s2b_shift;
4675         first_offs = sector_div(first_trk, blk_per_trk);
4676         last_rec = last_trk =
4677                 (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
4678         last_offs = sector_div(last_trk, blk_per_trk);
4679         cdlspecial = (private->uses_cdl && first_rec < 2*blk_per_trk);
4680
4681         fcx_multitrack = private->features.feature[40] & 0x20;
4682         data_size = blk_rq_bytes(req);
4683         if (data_size % blksize)
4684                 return ERR_PTR(-EINVAL);
4685         /* tpm write request add CBC data on each track boundary */
4686         if (rq_data_dir(req) == WRITE)
4687                 data_size += (last_trk - first_trk) * 4;
4688
4689         /* is read track data and write track data in command mode supported? */
4690         cmdrtd = private->features.feature[9] & 0x20;
4691         cmdwtd = private->features.feature[12] & 0x40;
4692         use_prefix = private->features.feature[8] & 0x01;
4693
4694         cqr = NULL;
4695         if (cdlspecial || dasd_page_cache) {
4696                 /* do nothing, just fall through to the cmd mode single case */
4697         } else if ((data_size <= private->fcx_max_data)
4698                    && (fcx_multitrack || (first_trk == last_trk))) {
4699                 cqr = dasd_eckd_build_cp_tpm_track(startdev, block, req,
4700                                                     first_rec, last_rec,
4701                                                     first_trk, last_trk,
4702                                                     first_offs, last_offs,
4703                                                     blk_per_trk, blksize);
4704                 if (IS_ERR(cqr) && (PTR_ERR(cqr) != -EAGAIN) &&
4705                     (PTR_ERR(cqr) != -ENOMEM))
4706                         cqr = NULL;
4707         } else if (use_prefix &&
4708                    (((rq_data_dir(req) == READ) && cmdrtd) ||
4709                     ((rq_data_dir(req) == WRITE) && cmdwtd))) {
4710                 cqr = dasd_eckd_build_cp_cmd_track(startdev, block, req,
4711                                                    first_rec, last_rec,
4712                                                    first_trk, last_trk,
4713                                                    first_offs, last_offs,
4714                                                    blk_per_trk, blksize);
4715                 if (IS_ERR(cqr) && (PTR_ERR(cqr) != -EAGAIN) &&
4716                     (PTR_ERR(cqr) != -ENOMEM))
4717                         cqr = NULL;
4718         }
4719         if (!cqr)
4720                 cqr = dasd_eckd_build_cp_cmd_single(startdev, block, req,
4721                                                     first_rec, last_rec,
4722                                                     first_trk, last_trk,
4723                                                     first_offs, last_offs,
4724                                                     blk_per_trk, blksize);
4725         return cqr;
4726 }
4727
/*
 * Build a channel program for raw track access: one CCW per 64k track
 * image. Write requests must be track aligned; misaligned reads are
 * padded with dummy pages (rawpadpage) at the front and/or back.
 */
static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev,
						   struct dasd_block *block,
						   struct request *req)
{
	sector_t start_padding_sectors, end_sector_offset, end_padding_sectors;
	unsigned int seg_len, len_to_track_end;
	unsigned int cidaw, cplength, datasize;
	sector_t first_trk, last_trk, sectors;
	struct dasd_eckd_private *base_priv;
	struct dasd_device *basedev;
	struct req_iterator iter;
	struct dasd_ccw_req *cqr;
	unsigned int trkcount;
	unsigned long *idaws;
	unsigned int size;
	unsigned char cmd;
	struct bio_vec bv;
	struct ccw1 *ccw;
	int use_prefix;
	void *data;
	char *dst;

	/*
	 * raw track access needs to be a multiple of 64k and on a 64k boundary
	 * For read requests we can fix an incorrect alignment by padding
	 * the request with dummy pages.
	 */
	start_padding_sectors = blk_rq_pos(req) % DASD_RAW_SECTORS_PER_TRACK;
	end_sector_offset = (blk_rq_pos(req) + blk_rq_sectors(req)) %
		DASD_RAW_SECTORS_PER_TRACK;
	end_padding_sectors = (DASD_RAW_SECTORS_PER_TRACK - end_sector_offset) %
		DASD_RAW_SECTORS_PER_TRACK;
	basedev = block->base;
	/* Misaligned writes cannot be padded and are rejected outright. */
	if ((start_padding_sectors || end_padding_sectors) &&
	    (rq_data_dir(req) == WRITE)) {
		DBF_DEV_EVENT(DBF_ERR, basedev,
			      "raw write not track aligned (%llu,%llu) req %p",
			      start_padding_sectors, end_padding_sectors, req);
		return ERR_PTR(-EINVAL);
	}

	first_trk = blk_rq_pos(req) / DASD_RAW_SECTORS_PER_TRACK;
	last_trk = (blk_rq_pos(req) + blk_rq_sectors(req) - 1) /
		DASD_RAW_SECTORS_PER_TRACK;
	trkcount = last_trk - first_trk + 1;

	if (rq_data_dir(req) == READ)
		cmd = DASD_ECKD_CCW_READ_TRACK;
	else if (rq_data_dir(req) == WRITE)
		cmd = DASD_ECKD_CCW_WRITE_FULL_TRACK;
	else
		return ERR_PTR(-EINVAL);

	/*
	 * Raw track based I/O needs IDAWs for each page,
	 * and not just for 64 bit addresses.
	 */
	cidaw = trkcount * DASD_RAW_BLOCK_PER_TRACK;

	/*
	 * struct PFX_eckd_data and struct LRE_eckd_data can have up to 2 bytes
	 * of extended parameter. This is needed for write full track.
	 */
	base_priv = basedev->private;
	use_prefix = base_priv->features.feature[8] & 0x01;
	if (use_prefix) {
		/* one prefix CCW plus one CCW per track */
		cplength = 1 + trkcount;
		size = sizeof(struct PFX_eckd_data) + 2;
	} else {
		/* define extent + locate record ext + one CCW per track */
		cplength = 2 + trkcount;
		size = sizeof(struct DE_eckd_data) +
			sizeof(struct LRE_eckd_data) + 2;
	}
	size = ALIGN(size, 8);

	/* IDAW list is placed in cqr->data right behind the prefix data. */
	datasize = size + cidaw * sizeof(unsigned long);

	/* Allocate the ccw request. */
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength,
				   datasize, startdev, blk_mq_rq_to_pdu(req));
	if (IS_ERR(cqr))
		return cqr;

	ccw = cqr->cpaddr;
	data = cqr->data;

	/* Set up the leading prefix (or define extent + locate record). */
	if (use_prefix) {
		prefix_LRE(ccw++, data, first_trk, last_trk, cmd, basedev,
			   startdev, 1, 0, trkcount, 0, 0);
	} else {
		define_extent(ccw++, data, first_trk, last_trk, cmd, basedev, 0);
		ccw[-1].flags |= CCW_FLAG_CC;

		data += sizeof(struct DE_eckd_data);
		locate_record_ext(ccw++, data, first_trk, 0,
				  trkcount, cmd, basedev, 0, 0);
	}

	idaws = (unsigned long *)(cqr->data + size);
	len_to_track_end = 0;
	/* Leading partial track of a read: start with dummy-page IDAWs. */
	if (start_padding_sectors) {
		ccw[-1].flags |= CCW_FLAG_CC;
		ccw->cmd_code = cmd;
		/* maximum 3390 track size */
		ccw->count = 57326;
		/* 64k map to one track */
		len_to_track_end = 65536 - start_padding_sectors * 512;
		ccw->cda = (__u32)virt_to_phys(idaws);
		ccw->flags |= CCW_FLAG_IDA;
		ccw->flags |= CCW_FLAG_SLI;
		ccw++;
		for (sectors = 0; sectors < start_padding_sectors; sectors += 8)
			idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE);
	}
	/* Chain one CCW per track; each segment becomes IDAW entries. */
	rq_for_each_segment(bv, req, iter) {
		dst = bvec_virt(&bv);
		seg_len = bv.bv_len;
		if (cmd == DASD_ECKD_CCW_READ_TRACK)
			memset(dst, 0, seg_len);
		if (!len_to_track_end) {
			/* track boundary reached: start a new track CCW */
			ccw[-1].flags |= CCW_FLAG_CC;
			ccw->cmd_code = cmd;
			/* maximum 3390 track size */
			ccw->count = 57326;
			/* 64k map to one track */
			len_to_track_end = 65536;
			ccw->cda = (__u32)virt_to_phys(idaws);
			ccw->flags |= CCW_FLAG_IDA;
			ccw->flags |= CCW_FLAG_SLI;
			ccw++;
		}
		len_to_track_end -= seg_len;
		idaws = idal_create_words(idaws, dst, seg_len);
	}
	/* Trailing partial track of a read: pad with dummy pages. */
	for (sectors = 0; sectors < end_padding_sectors; sectors += 8)
		idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE);
	if (blk_noretry_request(req) ||
	    block->base->features & DASD_FEATURE_FAILFAST)
		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	cqr->startdev = startdev;
	cqr->memdev = startdev;
	cqr->block = block;
	cqr->expires = startdev->default_expires * HZ;
	cqr->lpm = dasd_path_get_ppm(startdev);
	cqr->retries = startdev->default_retries;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

	return cqr;
}
4878
4879
/*
 * Tear down a channel program built by dasd_eckd_build_cp.
 * If dasd_page_cache was used at build time, read data is copied back
 * from the cache buffers into the request's bio segments and the cache
 * pages are released. Returns 1 if the request completed successfully
 * (DASD_CQR_DONE), 0 otherwise.
 */
static int
dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
{
	struct dasd_eckd_private *private;
	struct ccw1 *ccw;
	struct req_iterator iter;
	struct bio_vec bv;
	char *dst, *cda;
	unsigned int blksize, blk_per_trk, off;
	sector_t recid;
	int status;

	/* Without the page cache there is nothing to copy back or free. */
	if (!dasd_page_cache)
		goto out;
	private = cqr->block->base->private;
	blksize = cqr->block->bp_block;
	blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
	recid = blk_rq_pos(req) >> cqr->block->s2b_shift;
	ccw = cqr->cpaddr;
	/* Skip over define extent & locate record. */
	ccw++;
	if (private->uses_cdl == 0 || recid > 2*blk_per_trk)
		ccw++;
	rq_for_each_segment(bv, req, iter) {
		dst = bvec_virt(&bv);
		for (off = 0; off < bv.bv_len; off += blksize) {
			/* Skip locate record. */
			/* (CDL inserts one per record in the first 2 tracks.) */
			if (private->uses_cdl && recid <= 2*blk_per_trk)
				ccw++;
			if (dst) {
				/* resolve the CCW data address, indirect or not */
				if (ccw->flags & CCW_FLAG_IDA)
					cda = *((char **)phys_to_virt(ccw->cda));
				else
					cda = phys_to_virt(ccw->cda);
				if (dst != cda) {
					/* data went through a cache buffer: */
					/* copy back on read, then free it */
					if (rq_data_dir(req) == READ)
						memcpy(dst, cda, bv.bv_len);
					kmem_cache_free(dasd_page_cache,
					    (void *)((addr_t)cda & PAGE_MASK));
				}
				/* only the first block of a segment can differ */
				dst = NULL;
			}
			ccw++;
			recid++;
		}
	}
out:
	status = cqr->status == DASD_CQR_DONE;
	dasd_sfree_request(cqr, cqr->memdev);
	return status;
}
4931
4932 /*
4933  * Modify ccw/tcw in cqr so it can be started on a base device.
4934  *
4935  * Note that this is not enough to restart the cqr!
4936  * Either reset cqr->startdev as well (summary unit check handling)
4937  * or restart via separate cqr (as in ERP handling).
4938  */
4939 void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *cqr)
4940 {
4941         struct ccw1 *ccw;
4942         struct PFX_eckd_data *pfxdata;
4943         struct tcw *tcw;
4944         struct tccb *tccb;
4945         struct dcw *dcw;
4946
4947         if (cqr->cpmode == 1) {
4948                 tcw = cqr->cpaddr;
4949                 tccb = tcw_get_tccb(tcw);
4950                 dcw = (struct dcw *)&tccb->tca[0];
4951                 pfxdata = (struct PFX_eckd_data *)&dcw->cd[0];
4952                 pfxdata->validity.verify_base = 0;
4953                 pfxdata->validity.hyper_pav = 0;
4954         } else {
4955                 ccw = cqr->cpaddr;
4956                 pfxdata = cqr->data;
4957                 if (ccw->cmd_code == DASD_ECKD_CCW_PFX) {
4958                         pfxdata->validity.verify_base = 0;
4959                         pfxdata->validity.hyper_pav = 0;
4960                 }
4961         }
4962 }
4963
4964 #define DASD_ECKD_CHANQ_MAX_SIZE 4
4965
4966 static struct dasd_ccw_req *dasd_eckd_build_alias_cp(struct dasd_device *base,
4967                                                      struct dasd_block *block,
4968                                                      struct request *req)
4969 {
4970         struct dasd_eckd_private *private;
4971         struct dasd_device *startdev;
4972         unsigned long flags;
4973         struct dasd_ccw_req *cqr;
4974
4975         startdev = dasd_alias_get_start_dev(base);
4976         if (!startdev)
4977                 startdev = base;
4978         private = startdev->private;
4979         if (private->count >= DASD_ECKD_CHANQ_MAX_SIZE)
4980                 return ERR_PTR(-EBUSY);
4981
4982         spin_lock_irqsave(get_ccwdev_lock(startdev->cdev), flags);
4983         private->count++;
4984         if ((base->features & DASD_FEATURE_USERAW))
4985                 cqr = dasd_eckd_build_cp_raw(startdev, block, req);
4986         else
4987                 cqr = dasd_eckd_build_cp(startdev, block, req);
4988         if (IS_ERR(cqr))
4989                 private->count--;
4990         spin_unlock_irqrestore(get_ccwdev_lock(startdev->cdev), flags);
4991         return cqr;
4992 }
4993
4994 static int dasd_eckd_free_alias_cp(struct dasd_ccw_req *cqr,
4995                                    struct request *req)
4996 {
4997         struct dasd_eckd_private *private;
4998         unsigned long flags;
4999
5000         spin_lock_irqsave(get_ccwdev_lock(cqr->memdev->cdev), flags);
5001         private = cqr->memdev->private;
5002         private->count--;
5003         spin_unlock_irqrestore(get_ccwdev_lock(cqr->memdev->cdev), flags);
5004         return dasd_eckd_free_cp(cqr, req);
5005 }
5006
5007 static int
5008 dasd_eckd_fill_info(struct dasd_device * device,
5009                     struct dasd_information2_t * info)
5010 {
5011         struct dasd_eckd_private *private = device->private;
5012
5013         info->label_block = 2;
5014         info->FBA_layout = private->uses_cdl ? 0 : 1;
5015         info->format = private->uses_cdl ? DASD_FORMAT_CDL : DASD_FORMAT_LDL;
5016         info->characteristics_size = sizeof(private->rdc_data);
5017         memcpy(info->characteristics, &private->rdc_data,
5018                sizeof(private->rdc_data));
5019         info->confdata_size = min_t(unsigned long, private->conf.len,
5020                                     sizeof(info->configuration_data));
5021         memcpy(info->configuration_data, private->conf.data,
5022                info->confdata_size);
5023         return 0;
5024 }
5025
5026 /*
5027  * SECTION: ioctl functions for eckd devices.
5028  */
5029
5030 /*
5031  * Release device ioctl.
5032  * Buils a channel programm to releases a prior reserved
5033  * (see dasd_eckd_reserve) device.
5034  */
5035 static int
5036 dasd_eckd_release(struct dasd_device *device)
5037 {
5038         struct dasd_ccw_req *cqr;
5039         int rc;
5040         struct ccw1 *ccw;
5041         int useglobal;
5042
5043         if (!capable(CAP_SYS_ADMIN))
5044                 return -EACCES;
5045
5046         useglobal = 0;
5047         cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
5048         if (IS_ERR(cqr)) {
5049                 mutex_lock(&dasd_reserve_mutex);
5050                 useglobal = 1;
5051                 cqr = &dasd_reserve_req->cqr;
5052                 memset(cqr, 0, sizeof(*cqr));
5053                 memset(&dasd_reserve_req->ccw, 0,
5054                        sizeof(dasd_reserve_req->ccw));
5055                 cqr->cpaddr = &dasd_reserve_req->ccw;
5056                 cqr->data = &dasd_reserve_req->data;
5057                 cqr->magic = DASD_ECKD_MAGIC;
5058         }
5059         ccw = cqr->cpaddr;
5060         ccw->cmd_code = DASD_ECKD_CCW_RELEASE;
5061         ccw->flags |= CCW_FLAG_SLI;
5062         ccw->count = 32;
5063         ccw->cda = (__u32)virt_to_phys(cqr->data);
5064         cqr->startdev = device;
5065         cqr->memdev = device;
5066         clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
5067         set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
5068         cqr->retries = 2;       /* set retry counter to enable basic ERP */
5069         cqr->expires = 2 * HZ;
5070         cqr->buildclk = get_tod_clock();
5071         cqr->status = DASD_CQR_FILLED;
5072
5073         rc = dasd_sleep_on_immediatly(cqr);
5074         if (!rc)
5075                 clear_bit(DASD_FLAG_IS_RESERVED, &device->flags);
5076
5077         if (useglobal)
5078                 mutex_unlock(&dasd_reserve_mutex);
5079         else
5080                 dasd_sfree_request(cqr, cqr->memdev);
5081         return rc;
5082 }
5083
5084 /*
5085  * Reserve device ioctl.
5086  * Options are set to 'synchronous wait for interrupt' and
5087  * 'timeout the request'. This leads to a terminate IO if
5088  * the interrupt is outstanding for a certain time.
5089  */
5090 static int
5091 dasd_eckd_reserve(struct dasd_device *device)
5092 {
5093         struct dasd_ccw_req *cqr;
5094         int rc;
5095         struct ccw1 *ccw;
5096         int useglobal;
5097
5098         if (!capable(CAP_SYS_ADMIN))
5099                 return -EACCES;
5100
5101         useglobal = 0;
5102         cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
5103         if (IS_ERR(cqr)) {
5104                 mutex_lock(&dasd_reserve_mutex);
5105                 useglobal = 1;
5106                 cqr = &dasd_reserve_req->cqr;
5107                 memset(cqr, 0, sizeof(*cqr));
5108                 memset(&dasd_reserve_req->ccw, 0,
5109                        sizeof(dasd_reserve_req->ccw));
5110                 cqr->cpaddr = &dasd_reserve_req->ccw;
5111                 cqr->data = &dasd_reserve_req->data;
5112                 cqr->magic = DASD_ECKD_MAGIC;
5113         }
5114         ccw = cqr->cpaddr;
5115         ccw->cmd_code = DASD_ECKD_CCW_RESERVE;
5116         ccw->flags |= CCW_FLAG_SLI;
5117         ccw->count = 32;
5118         ccw->cda = (__u32)virt_to_phys(cqr->data);
5119         cqr->startdev = device;
5120         cqr->memdev = device;
5121         clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
5122         set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
5123         cqr->retries = 2;       /* set retry counter to enable basic ERP */
5124         cqr->expires = 2 * HZ;
5125         cqr->buildclk = get_tod_clock();
5126         cqr->status = DASD_CQR_FILLED;
5127
5128         rc = dasd_sleep_on_immediatly(cqr);
5129         if (!rc)
5130                 set_bit(DASD_FLAG_IS_RESERVED, &device->flags);
5131
5132         if (useglobal)
5133                 mutex_unlock(&dasd_reserve_mutex);
5134         else
5135                 dasd_sfree_request(cqr, cqr->memdev);
5136         return rc;
5137 }
5138
5139 /*
5140  * Steal lock ioctl - unconditional reserve device.
5141  * Buils a channel programm to break a device's reservation.
5142  * (unconditional reserve)
5143  */
5144 static int
5145 dasd_eckd_steal_lock(struct dasd_device *device)
5146 {
5147         struct dasd_ccw_req *cqr;
5148         int rc;
5149         struct ccw1 *ccw;
5150         int useglobal;
5151
5152         if (!capable(CAP_SYS_ADMIN))
5153                 return -EACCES;
5154
5155         useglobal = 0;
5156         cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
5157         if (IS_ERR(cqr)) {
5158                 mutex_lock(&dasd_reserve_mutex);
5159                 useglobal = 1;
5160                 cqr = &dasd_reserve_req->cqr;
5161                 memset(cqr, 0, sizeof(*cqr));
5162                 memset(&dasd_reserve_req->ccw, 0,
5163                        sizeof(dasd_reserve_req->ccw));
5164                 cqr->cpaddr = &dasd_reserve_req->ccw;
5165                 cqr->data = &dasd_reserve_req->data;
5166                 cqr->magic = DASD_ECKD_MAGIC;
5167         }
5168         ccw = cqr->cpaddr;
5169         ccw->cmd_code = DASD_ECKD_CCW_SLCK;
5170         ccw->flags |= CCW_FLAG_SLI;
5171         ccw->count = 32;
5172         ccw->cda = (__u32)virt_to_phys(cqr->data);
5173         cqr->startdev = device;
5174         cqr->memdev = device;
5175         clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
5176         set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
5177         cqr->retries = 2;       /* set retry counter to enable basic ERP */
5178         cqr->expires = 2 * HZ;
5179         cqr->buildclk = get_tod_clock();
5180         cqr->status = DASD_CQR_FILLED;
5181
5182         rc = dasd_sleep_on_immediatly(cqr);
5183         if (!rc)
5184                 set_bit(DASD_FLAG_IS_RESERVED, &device->flags);
5185
5186         if (useglobal)
5187                 mutex_unlock(&dasd_reserve_mutex);
5188         else
5189                 dasd_sfree_request(cqr, cqr->memdev);
5190         return rc;
5191 }
5192
5193 /*
5194  * SNID - Sense Path Group ID
5195  * This ioctl may be used in situations where I/O is stalled due to
5196  * a reserve, so if the normal dasd_smalloc_request fails, we use the
5197  * preallocated dasd_reserve_req.
5198  */
static int dasd_eckd_snid(struct dasd_device *device,
			  void __user *argp)
{
	struct dasd_ccw_req *cqr;
	int rc;
	struct ccw1 *ccw;
	int useglobal;
	struct dasd_snid_ioctl_data usrparm;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (copy_from_user(&usrparm, argp, sizeof(usrparm)))
		return -EFAULT;

	useglobal = 0;
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1,
				   sizeof(struct dasd_snid_data), device,
				   NULL);
	if (IS_ERR(cqr)) {
		/*
		 * Allocation may fail while I/O is stalled by a reserve;
		 * fall back to the preallocated static request, which is
		 * serialized by dasd_reserve_mutex.
		 */
		mutex_lock(&dasd_reserve_mutex);
		useglobal = 1;
		cqr = &dasd_reserve_req->cqr;
		memset(cqr, 0, sizeof(*cqr));
		memset(&dasd_reserve_req->ccw, 0,
		       sizeof(dasd_reserve_req->ccw));
		cqr->cpaddr = &dasd_reserve_req->ccw;
		cqr->data = &dasd_reserve_req->data;
		cqr->magic = DASD_ECKD_MAGIC;
	}
	/* Single Sense Path Group ID CCW. */
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_SNID;
	ccw->flags |= CCW_FLAG_SLI;
	ccw->count = 12;
	ccw->cda = (__u32)virt_to_phys(cqr->data);
	cqr->startdev = device;
	cqr->memdev = device;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	/* allow the request even while the device is slock'ed */
	set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
	cqr->retries = 5;
	cqr->expires = 10 * HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	/* restrict the I/O to the user-requested paths */
	cqr->lpm = usrparm.path_mask;

	rc = dasd_sleep_on_immediatly(cqr);
	/* verify that I/O processing didn't modify the path mask */
	if (!rc && usrparm.path_mask && (cqr->lpm != usrparm.path_mask))
		rc = -EIO;
	if (!rc) {
		/* hand the sensed SNID data back to user space */
		usrparm.data = *((struct dasd_snid_data *)cqr->data);
		if (copy_to_user(argp, &usrparm, sizeof(usrparm)))
			rc = -EFAULT;
	}

	if (useglobal)
		mutex_unlock(&dasd_reserve_mutex);
	else
		dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}
5261
5262 /*
5263  * Read performance statistics
5264  */
static int
dasd_eckd_performance(struct dasd_device *device, void __user *argp)
{
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_rssd_perf_stats_t *stats;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;

	/*
	 * Build a single request of two chained CCWs: a PSF (Perform
	 * Subsystem Function) that orders "Prepare for Read Subsystem
	 * Data", followed by an RSSD (Read Subsystem Data) that reads
	 * the performance statistics back.  Both payloads live in the
	 * request's data area, PSF data first, statistics right after.
	 */
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */  + 1 /* RSSD */,
				   (sizeof(struct dasd_psf_prssd_data) +
				    sizeof(struct dasd_rssd_perf_stats_t)),
				   device, NULL);
	if (IS_ERR(cqr)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			    "Could not allocate initialization request");
		return PTR_ERR(cqr);
	}
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->retries = 0;	/* do not retry a failed query */
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	cqr->expires = 10 * HZ;

	/* Prepare for Read Subsystem Data */
	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = 0x01;	/* Performance Statistics */
	prssdp->varies[1] = 0x01;	/* Perf Statistics for the Subsystem */

	/* first CCW: PSF, command-chained to the RSSD CCW below */
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(struct dasd_psf_prssd_data);
	ccw->flags |= CCW_FLAG_CC;
	ccw->cda = (__u32)virt_to_phys(prssdp);

	/* Read Subsystem Data - Performance Statistics */
	stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
	memset(stats, 0, sizeof(struct dasd_rssd_perf_stats_t));

	/* second CCW: RSSD reads the statistics into 'stats' */
	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(struct dasd_rssd_perf_stats_t);
	ccw->cda = (__u32)virt_to_phys(stats);

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	rc = dasd_sleep_on(cqr);
	if (rc == 0) {
		/* on success hand the statistics block to user space */
		prssdp = (struct dasd_psf_prssd_data *) cqr->data;
		stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
		if (copy_to_user(argp, stats,
				 sizeof(struct dasd_rssd_perf_stats_t)))
			rc = -EFAULT;
	}
	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}
5324
/*
 * Get attributes (cache operations)
 * Returns the cache attributes used in Define Extent (DE).
 */
5329 static int
5330 dasd_eckd_get_attrib(struct dasd_device *device, void __user *argp)
5331 {
5332         struct dasd_eckd_private *private = device->private;
5333         struct attrib_data_t attrib = private->attrib;
5334         int rc;
5335
5336         if (!capable(CAP_SYS_ADMIN))
5337                 return -EACCES;
5338         if (!argp)
5339                 return -EINVAL;
5340
5341         rc = 0;
5342         if (copy_to_user(argp, (long *) &attrib,
5343                          sizeof(struct attrib_data_t)))
5344                 rc = -EFAULT;
5345
5346         return rc;
5347 }
5348
/*
 * Set attributes (cache operations)
 * Stores the attributes for cache operation to be used in Define Extent (DE).
 */
5353 static int
5354 dasd_eckd_set_attrib(struct dasd_device *device, void __user *argp)
5355 {
5356         struct dasd_eckd_private *private = device->private;
5357         struct attrib_data_t attrib;
5358
5359         if (!capable(CAP_SYS_ADMIN))
5360                 return -EACCES;
5361         if (!argp)
5362                 return -EINVAL;
5363
5364         if (copy_from_user(&attrib, argp, sizeof(struct attrib_data_t)))
5365                 return -EFAULT;
5366         private->attrib = attrib;
5367
5368         dev_info(&device->cdev->dev,
5369                  "The DASD cache mode was set to %x (%i cylinder prestage)\n",
5370                  private->attrib.operation, private->attrib.nr_cyl);
5371         return 0;
5372 }
5373
5374 /*
5375  * Issue syscall I/O to EMC Symmetrix array.
5376  * CCWs are PSF and RSSD
5377  */
static int dasd_symm_io(struct dasd_device *device, void __user *argp)
{
	struct dasd_symmio_parms usrparm;
	char *psf_data, *rssd_result;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	char psf0, psf1;
	int rc;

	/* either capability is sufficient for this pass-through ioctl */
	if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RAWIO))
		return -EACCES;
	psf0 = psf1 = 0;

	/* Copy parms from caller */
	rc = -EFAULT;
	if (copy_from_user(&usrparm, argp, sizeof(usrparm)))
		goto out;
	if (is_compat_task()) {
		/* Make sure pointers are sane even on 31 bit. */
		rc = -EINVAL;
		if ((usrparm.psf_data >> 32) != 0)
			goto out;
		if ((usrparm.rssd_result >> 32) != 0)
			goto out;
		usrparm.psf_data &= 0x7fffffffULL;
		usrparm.rssd_result &= 0x7fffffffULL;
	}
	/* at least 2 bytes are accessed and should be allocated */
	if (usrparm.psf_data_len < 2) {
		DBF_DEV_EVENT(DBF_WARNING, device,
			      "Symmetrix ioctl invalid data length %d",
			      usrparm.psf_data_len);
		rc = -EINVAL;
		goto out;
	}
	/*
	 * alloc I/O data area; GFP_DMA because ccw->cda below is only
	 * 32 bits wide, so the buffers must be 31-bit addressable
	 */
	psf_data = kzalloc(usrparm.psf_data_len, GFP_KERNEL | GFP_DMA);
	rssd_result = kzalloc(usrparm.rssd_result_len, GFP_KERNEL | GFP_DMA);
	if (!psf_data || !rssd_result) {
		rc = -ENOMEM;
		goto out_free;
	}

	/* get syscall header from user space */
	rc = -EFAULT;
	if (copy_from_user(psf_data,
			   (void __user *)(unsigned long) usrparm.psf_data,
			   usrparm.psf_data_len))
		goto out_free;
	/* remember the first two PSF bytes for the trace message below */
	psf0 = psf_data[0];
	psf1 = psf_data[1];

	/* setup CCWs for PSF + RSSD */
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2, 0, device, NULL);
	if (IS_ERR(cqr)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			"Could not allocate initialization request");
		rc = PTR_ERR(cqr);
		goto out_free;
	}

	cqr->startdev = device;
	cqr->memdev = device;
	cqr->retries = 3;
	cqr->expires = 10 * HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

	/* Build the ccws */
	ccw = cqr->cpaddr;

	/* PSF ccw: command-chained to the RSSD that reads the answer */
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = usrparm.psf_data_len;
	ccw->flags |= CCW_FLAG_CC;
	ccw->cda = (__u32)virt_to_phys(psf_data);

	ccw++;

	/* RSSD ccw: SLI tolerates a shorter-than-count response */
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = usrparm.rssd_result_len;
	ccw->flags = CCW_FLAG_SLI ;
	ccw->cda = (__u32)virt_to_phys(rssd_result);

	rc = dasd_sleep_on(cqr);
	if (rc)
		goto out_sfree;

	/* hand the RSSD result back to the caller */
	rc = -EFAULT;
	if (copy_to_user((void __user *)(unsigned long) usrparm.rssd_result,
			   rssd_result, usrparm.rssd_result_len))
		goto out_sfree;
	rc = 0;

out_sfree:
	dasd_sfree_request(cqr, cqr->memdev);
out_free:
	kfree(rssd_result);
	kfree(psf_data);
out:
	/* always trace the PSF order bytes and the outcome */
	DBF_DEV_EVENT(DBF_WARNING, device,
		      "Symmetrix ioctl (0x%02x 0x%02x): rc=%d",
		      (int) psf0, (int) psf1, rc);
	return rc;
}
5484
5485 static int
5486 dasd_eckd_ioctl(struct dasd_block *block, unsigned int cmd, void __user *argp)
5487 {
5488         struct dasd_device *device = block->base;
5489
5490         switch (cmd) {
5491         case BIODASDGATTR:
5492                 return dasd_eckd_get_attrib(device, argp);
5493         case BIODASDSATTR:
5494                 return dasd_eckd_set_attrib(device, argp);
5495         case BIODASDPSRD:
5496                 return dasd_eckd_performance(device, argp);
5497         case BIODASDRLSE:
5498                 return dasd_eckd_release(device);
5499         case BIODASDRSRV:
5500                 return dasd_eckd_reserve(device);
5501         case BIODASDSLCK:
5502                 return dasd_eckd_steal_lock(device);
5503         case BIODASDSNID:
5504                 return dasd_eckd_snid(device, argp);
5505         case BIODASDSYMMIO:
5506                 return dasd_symm_io(device, argp);
5507         default:
5508                 return -ENOTTY;
5509         }
5510 }
5511
/*
 * Format the given range of CCWs (plus up to 128 data bytes per CCW)
 * into the 'page' buffer and emit the result via dev_err().
 */
static void
dasd_eckd_dump_ccw_range(struct dasd_device *device, struct ccw1 *from,
			 struct ccw1 *to, char *page)
{
	int len, count;
	char *datap;

	len = 0;
	while (from <= to) {
		/* print the CCW address and its two raw 32-bit words */
		len += sprintf(page + len, "CCW %px: %08X %08X DAT:",
			       from, ((int *) from)[0], ((int *) from)[1]);

		/* get pointer to data (consider IDALs) */
		if (from->flags & CCW_FLAG_IDA)
			/* cda points at an IDA word that holds the address */
			datap = (char *)*((addr_t *)phys_to_virt(from->cda));
		else
			datap = phys_to_virt(from->cda);

		/* dump data (max 128 bytes) */
		for (count = 0; count < from->count && count < 128; count++) {
			/* 32 bytes per line, extra blanks every 4 and 8 */
			if (count % 32 == 0)
				len += sprintf(page + len, "\n");
			if (count % 8 == 0)
				len += sprintf(page + len, " ");
			if (count % 4 == 0)
				len += sprintf(page + len, " ");
			len += sprintf(page + len, "%02x", datap[count]);
		}
		len += sprintf(page + len, "\n");
		from++;
	}
	/* emit the whole formatted range at once */
	if (len > 0)
		dev_err(&device->cdev->dev, "%s", page);
}
5550
/*
 * Write a compact trace record (scsw words plus the first 32 sense
 * bytes, if any) to the s390 debug feature, tagged with 'reason'.
 */
static void
dasd_eckd_dump_sense_dbf(struct dasd_device *device, struct irb *irb,
			 char *reason)
{
	u64 *sense;
	u64 *stat;

	/* view the sense buffer and scsw as 64-bit words for hex logging */
	sense = (u64 *) dasd_get_sense(irb);
	stat = (u64 *) &irb->scsw;
	if (sense) {
		DBF_DEV_EVENT(DBF_EMERG, device, "%s: %016llx %08x : "
			      "%016llx %016llx %016llx %016llx",
			      reason, *stat, *((u32 *) (stat + 1)),
			      sense[0], sense[1], sense[2], sense[3]);
	} else {
		DBF_DEV_EVENT(DBF_EMERG, device, "%s: %016llx %08x : %s",
			      reason, *stat, *((u32 *) (stat + 1)),
			      "NO VALID SENSE");
	}
}
5571
5572 /*
5573  * Print sense data and related channel program.
5574  * Parts are printed because printk buffer is only 1024 bytes.
5575  */
static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
				 struct dasd_ccw_req *req, struct irb *irb)
{
	struct ccw1 *first, *last, *fail, *from, *to;
	struct device *dev;
	int len, sl, sct;
	char *page;

	dev = &device->cdev->dev;

	/* GFP_ATOMIC: may be called from interrupt context */
	page = (char *) get_zeroed_page(GFP_ATOMIC);
	if (page == NULL) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "No memory to dump sense data\n");
		return;
	}
	/* dump the sense data */
	len = sprintf(page, "I/O status report:\n");
	len += sprintf(page + len,
		       "in req: %px CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X CS:%02X RC:%d\n",
		       req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw),
		       scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw),
		       scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw),
		       req ? req->intrc : 0);
	len += sprintf(page + len, "Failing CCW: %px\n",
		       phys_to_virt(irb->scsw.cmd.cpa));
	if (irb->esw.esw0.erw.cons) {
		/* sense data available: dump all 32 bytes, 8 per line */
		for (sl = 0; sl < 4; sl++) {
			len += sprintf(page + len, "Sense(hex) %2d-%2d:",
				       (8 * sl), ((8 * sl) + 7));

			for (sct = 0; sct < 8; sct++) {
				len += sprintf(page + len, " %02x",
					       irb->ecw[8 * sl + sct]);
			}
			len += sprintf(page + len, "\n");
		}

		if (irb->ecw[27] & DASD_SENSE_BIT_0) {
			/* 24 Byte Sense Data */
			sprintf(page + len,
				"24 Byte: %x MSG %x, %s MSGb to SYSOP\n",
				irb->ecw[7] >> 4, irb->ecw[7] & 0x0f,
				irb->ecw[1] & 0x10 ? "" : "no");
		} else {
			/* 32 Byte Sense Data */
			sprintf(page + len,
				"32 Byte: Format: %x Exception class %x\n",
				irb->ecw[6] & 0x0f, irb->ecw[22] >> 4);
		}
	} else {
		sprintf(page + len, "SORRY - NO VALID SENSE AVAILABLE\n");
	}
	dev_err(dev, "%s", page);

	if (req) {
		/* req == NULL for unsolicited interrupts */
		/* dump the Channel Program (max 140 Bytes per line) */
		/* Count CCW and print first CCWs (maximum 7) */
		first = req->cpaddr;
		/* 'last' ends up on the final CCW of the chain */
		for (last = first; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++);
		to = min(first + 6, last);
		dev_err(dev, "Related CP in req: %px\n", req);
		dasd_eckd_dump_ccw_range(device, first, to, page);

		/* print failing CCW area (maximum 4) */
		/* scsw->cda is either valid or zero  */
		from = ++to;
		fail = phys_to_virt(irb->scsw.cmd.cpa); /* failing CCW */
		if (from <  fail - 2) {
			from = fail - 2;     /* there is a gap - print header */
			dev_err(dev, "......\n");
		}
		to = min(fail + 1, last);
		dasd_eckd_dump_ccw_range(device, from, to, page + len);

		/* print last CCWs (maximum 2) */
		len = 0;
		from = max(from, ++to);
		if (from < last - 1) {
			from = last - 1;     /* there is a gap - print header */
			dev_err(dev, "......\n");
		}
		dasd_eckd_dump_ccw_range(device, from, last, page + len);
	}
	free_page((unsigned long) page);
}
5663
5664
5665 /*
5666  * Print sense data from a tcw.
5667  */
static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
				 struct dasd_ccw_req *req, struct irb *irb)
{
	char *page;
	int len, sl, sct, residual;
	struct tsb *tsb;
	u8 *sense, *rcq;

	/* GFP_ATOMIC: may be called from interrupt context */
	page = (char *) get_zeroed_page(GFP_ATOMIC);
	if (page == NULL) {
		DBF_DEV_EVENT(DBF_WARNING, device, " %s",
			    "No memory to dump sense data");
		return;
	}
	/* dump the sense data */
	len = sprintf(page, "I/O status report:\n");
	len += sprintf(page + len,
		       "in req: %px CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X "
		       "CS:%02X fcxs:%02X schxs:%02X RC:%d\n",
		       req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw),
		       scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw),
		       scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw),
		       irb->scsw.tm.fcxs,
		       (irb->scsw.tm.ifob << 7) | irb->scsw.tm.sesq,
		       req ? req->intrc : 0);
	len += sprintf(page + len, "Failing TCW: %px\n",
		       phys_to_virt(irb->scsw.tm.tcw));

	/* the TSB is only valid if a TCW address and fcxs bit 0 are set */
	tsb = NULL;
	sense = NULL;
	if (irb->scsw.tm.tcw && (irb->scsw.tm.fcxs & 0x01))
		tsb = tcw_get_tsb(phys_to_virt(irb->scsw.tm.tcw));

	if (tsb) {
		len += sprintf(page + len, "tsb->length %d\n", tsb->length);
		len += sprintf(page + len, "tsb->flags %x\n", tsb->flags);
		len += sprintf(page + len, "tsb->dcw_offset %d\n", tsb->dcw_offset);
		len += sprintf(page + len, "tsb->count %d\n", tsb->count);
		residual = tsb->count - 28;
		len += sprintf(page + len, "residual %d\n", residual);

		/* the low three flag bits select the TSA format */
		switch (tsb->flags & 0x07) {
		case 1: /* tsa_iostat */
			len += sprintf(page + len, "tsb->tsa.iostat.dev_time %d\n",
				       tsb->tsa.iostat.dev_time);
			len += sprintf(page + len, "tsb->tsa.iostat.def_time %d\n",
				       tsb->tsa.iostat.def_time);
			len += sprintf(page + len, "tsb->tsa.iostat.queue_time %d\n",
				       tsb->tsa.iostat.queue_time);
			len += sprintf(page + len, "tsb->tsa.iostat.dev_busy_time %d\n",
				       tsb->tsa.iostat.dev_busy_time);
			len += sprintf(page + len, "tsb->tsa.iostat.dev_act_time %d\n",
				       tsb->tsa.iostat.dev_act_time);
			sense = tsb->tsa.iostat.sense;
			break;
		case 2: /* ts_ddpc */
			len += sprintf(page + len, "tsb->tsa.ddpc.rc %d\n",
				       tsb->tsa.ddpc.rc);
			for (sl = 0; sl < 2; sl++) {
				len += sprintf(page + len,
					       "tsb->tsa.ddpc.rcq %2d-%2d: ",
					       (8 * sl), ((8 * sl) + 7));
				rcq = tsb->tsa.ddpc.rcq;
				for (sct = 0; sct < 8; sct++) {
					len += sprintf(page + len, "%02x",
						       rcq[8 * sl + sct]);
				}
				len += sprintf(page + len, "\n");
			}
			sense = tsb->tsa.ddpc.sense;
			break;
		case 3: /* tsa_intrg */
			len += sprintf(page + len,
				      "tsb->tsa.intrg.: not supported yet\n");
			break;
		}

		if (sense) {
			/* dump the 32 sense bytes, 8 per line */
			for (sl = 0; sl < 4; sl++) {
				len += sprintf(page + len,
					       "Sense(hex) %2d-%2d:",
					       (8 * sl), ((8 * sl) + 7));
				for (sct = 0; sct < 8; sct++) {
					len += sprintf(page + len, " %02x",
						       sense[8 * sl + sct]);
				}
				len += sprintf(page + len, "\n");
			}

			if (sense[27] & DASD_SENSE_BIT_0) {
				/* 24 Byte Sense Data */
				sprintf(page + len,
					"24 Byte: %x MSG %x, %s MSGb to SYSOP\n",
					sense[7] >> 4, sense[7] & 0x0f,
					sense[1] & 0x10 ? "" : "no");
			} else {
				/* 32 Byte Sense Data */
				sprintf(page + len,
					"32 Byte: Format: %x Exception class %x\n",
					sense[6] & 0x0f, sense[22] >> 4);
			}
		} else {
			sprintf(page + len, "SORRY - NO VALID SENSE AVAILABLE\n");
		}
	} else {
		sprintf(page + len, "SORRY - NO TSB DATA AVAILABLE\n");
	}
	dev_err(&device->cdev->dev, "%s", page);
	free_page((unsigned long) page);
}
5778
5779 static void dasd_eckd_dump_sense(struct dasd_device *device,
5780                                  struct dasd_ccw_req *req, struct irb *irb)
5781 {
5782         u8 *sense = dasd_get_sense(irb);
5783
5784         if (scsw_is_tm(&irb->scsw)) {
5785                 /*
5786                  * In some cases the 'File Protected' or 'Incorrect Length'
5787                  * error might be expected and log messages shouldn't be written
5788                  * then. Check if the according suppress bit is set.
5789                  */
5790                 if (sense && (sense[1] & SNS1_FILE_PROTECTED) &&
5791                     test_bit(DASD_CQR_SUPPRESS_FP, &req->flags))
5792                         return;
5793                 if (scsw_cstat(&irb->scsw) == 0x40 &&
5794                     test_bit(DASD_CQR_SUPPRESS_IL, &req->flags))
5795                         return;
5796
5797                 dasd_eckd_dump_sense_tcw(device, req, irb);
5798         } else {
5799                 /*
5800                  * In some cases the 'Command Reject' or 'No Record Found'
5801                  * error might be expected and log messages shouldn't be
5802                  * written then. Check if the according suppress bit is set.
5803                  */
5804                 if (sense && sense[0] & SNS0_CMD_REJECT &&
5805                     test_bit(DASD_CQR_SUPPRESS_CR, &req->flags))
5806                         return;
5807
5808                 if (sense && sense[1] & SNS1_NO_REC_FOUND &&
5809                     test_bit(DASD_CQR_SUPPRESS_NRF, &req->flags))
5810                         return;
5811
5812                 dasd_eckd_dump_sense_ccw(device, req, irb);
5813         }
5814 }
5815
/*
 * Re-read the configuration data of a device and re-register it with
 * alias management.  Reports a reassignment when the base unit address
 * changed.  Returns 0 on success, -1 on any failure.
 */
static int dasd_eckd_reload_device(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	char print_uid[DASD_UID_STRLEN];
	int rc, old_base;
	struct dasd_uid uid;
	unsigned long flags;

	/*
	 * remove device from alias handling to prevent new requests
	 * from being scheduled on the wrong alias device
	 */
	dasd_alias_remove_device(device);

	/* read the old base unit address under the ccw device lock */
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	old_base = private->uid.base_unit_addr;
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);

	/* Read Configuration Data */
	rc = dasd_eckd_read_conf(device);
	if (rc)
		goto out_err;

	dasd_eckd_read_fc_security(device);

	/* recompute the UID from the freshly read configuration data */
	rc = dasd_eckd_generate_uid(device);
	if (rc)
		goto out_err;
	/*
	 * update unit address configuration and
	 * add device to alias management
	 */
	dasd_alias_update_add_device(device);

	dasd_eckd_get_uid(device, &uid);

	if (old_base != uid.base_unit_addr) {
		dasd_eckd_get_uid_string(&private->conf, print_uid);
		dev_info(&device->cdev->dev,
			 "An Alias device was reassigned to a new base device "
			 "with UID: %s\n", print_uid);
	}
	return 0;

out_err:
	return -1;
}
5863
/*
 * Read the subsystem message buffer via PSF/RSSD into 'messages'.
 * lpum selects the path to use; if I/O on that path fails the request
 * is retried once with an open path mask (needed e.g. under z/VM).
 */
static int dasd_eckd_read_message_buffer(struct dasd_device *device,
					 struct dasd_rssd_messages *messages,
					 __u8 lpum)
{
	struct dasd_rssd_messages *message_buf;
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
				   (sizeof(struct dasd_psf_prssd_data) +
				    sizeof(struct dasd_rssd_messages)),
				   device, NULL);
	if (IS_ERR(cqr)) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate read message buffer request");
		return PTR_ERR(cqr);
	}

	cqr->lpm = lpum;
retry:
	/* (re)initialize the request; reached again with cqr->lpm == 0 */
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->expires = 10 * HZ;
	set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
	/* dasd_sleep_on_immediatly does not do complex error
	 * recovery so clear erp flag and set retry counter to
	 * do basic erp */
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	cqr->retries = 256;

	/* Prepare for Read Subsystem Data */
	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = 0x03;	/* Message Buffer */
	/* all other bytes of prssdp must be zero */

	/* first CCW: PSF, command-chained to the RSSD below */
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(struct dasd_psf_prssd_data);
	ccw->flags |= CCW_FLAG_CC;
	ccw->flags |= CCW_FLAG_SLI;
	ccw->cda = (__u32)virt_to_phys(prssdp);

	/* Read Subsystem Data - message buffer */
	message_buf = (struct dasd_rssd_messages *) (prssdp + 1);
	memset(message_buf, 0, sizeof(struct dasd_rssd_messages));

	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(struct dasd_rssd_messages);
	ccw->flags |= CCW_FLAG_SLI;
	ccw->cda = (__u32)virt_to_phys(message_buf);

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	rc = dasd_sleep_on_immediatly(cqr);
	if (rc == 0) {
		/* success: copy the message buffer to the caller */
		prssdp = (struct dasd_psf_prssd_data *) cqr->data;
		message_buf = (struct dasd_rssd_messages *)
			(prssdp + 1);
		memcpy(messages, message_buf,
		       sizeof(struct dasd_rssd_messages));
	} else if (cqr->lpm) {
		/*
		 * on z/VM we might not be able to do I/O on the requested path
		 * but instead we get the required information on any path
		 * so retry with open path mask
		 */
		cqr->lpm = 0;
		goto retry;
	} else
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
				"Reading messages failed with rc=%d\n"
				, rc);
	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}
5945
/*
 * Query which hosts have access to this volume (PSF suborder QHA) and
 * copy the result into *data.  Returns 0 on success, -EOPNOTSUPP when
 * the device or storage server cannot answer, other negative errno on
 * allocation failure.
 */
static int dasd_eckd_query_host_access(struct dasd_device *device,
				       struct dasd_psf_query_host_access *data)
{
	struct dasd_eckd_private *private = device->private;
	struct dasd_psf_query_host_access *host_access;
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;

	/* not available for HYPER PAV alias devices */
	if (!device->block && private->lcu->pav == HYPER_PAV)
		return -EOPNOTSUPP;

	/* may not be supported by the storage server */
	if (!(private->features.feature[14] & 0x80))
		return -EOPNOTSUPP;

	/* the cqr holds only the PSF data; the result goes to host_access */
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
				   sizeof(struct dasd_psf_prssd_data) + 1,
				   device, NULL);
	if (IS_ERR(cqr)) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate read message buffer request");
		return PTR_ERR(cqr);
	}
	/* separate GFP_DMA buffer for the (large) query result */
	host_access = kzalloc(sizeof(*host_access), GFP_KERNEL | GFP_DMA);
	if (!host_access) {
		dasd_sfree_request(cqr, device);
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate host_access buffer");
		return -ENOMEM;
	}
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 256;
	cqr->expires = 10 * HZ;

	/* Prepare for Read Subsystem Data */
	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = PSF_SUBORDER_QHA;	/* query host access */
	/* LSS and Volume that will be queried */
	prssdp->lss = private->conf.ned->ID;
	prssdp->volume = private->conf.ned->unit_addr;
	/* all other bytes of prssdp must be zero */

	/* first CCW: PSF, command-chained to the RSSD below */
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(struct dasd_psf_prssd_data);
	ccw->flags |= CCW_FLAG_CC;
	ccw->flags |= CCW_FLAG_SLI;
	ccw->cda = (__u32)virt_to_phys(prssdp);

	/* Read Subsystem Data - query host access */
	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(struct dasd_psf_query_host_access);
	ccw->flags |= CCW_FLAG_SLI;
	ccw->cda = (__u32)virt_to_phys(host_access);

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	/* the command might not be supported, suppress error message */
	__set_bit(DASD_CQR_SUPPRESS_CR, &cqr->flags);
	rc = dasd_sleep_on_interruptible(cqr);
	if (rc == 0) {
		*data = *host_access;
	} else {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
				"Reading host access data failed with rc=%d\n",
				rc);
		rc = -EOPNOTSUPP;
	}

	dasd_sfree_request(cqr, cqr->memdev);
	kfree(host_access);
	return rc;
}
6027 /*
6028  * return number of grouped devices
6029  */
6030 static int dasd_eckd_host_access_count(struct dasd_device *device)
6031 {
6032         struct dasd_psf_query_host_access *access;
6033         struct dasd_ckd_path_group_entry *entry;
6034         struct dasd_ckd_host_information *info;
6035         int count = 0;
6036         int rc, i;
6037
6038         access = kzalloc(sizeof(*access), GFP_NOIO);
6039         if (!access) {
6040                 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
6041                                 "Could not allocate access buffer");
6042                 return -ENOMEM;
6043         }
6044         rc = dasd_eckd_query_host_access(device, access);
6045         if (rc) {
6046                 kfree(access);
6047                 return rc;
6048         }
6049
6050         info = (struct dasd_ckd_host_information *)
6051                 access->host_access_information;
6052         for (i = 0; i < info->entry_count; i++) {
6053                 entry = (struct dasd_ckd_path_group_entry *)
6054                         (info->entry + i * info->entry_size);
6055                 if (entry->status_flags & DASD_ECKD_PG_GROUPED)
6056                         count++;
6057         }
6058
6059         kfree(access);
6060         return count;
6061 }
6062
6063 /*
6064  * write host access information to a sequential file
6065  */
6066 static int dasd_hosts_print(struct dasd_device *device, struct seq_file *m)
6067 {
6068         struct dasd_psf_query_host_access *access;
6069         struct dasd_ckd_path_group_entry *entry;
6070         struct dasd_ckd_host_information *info;
6071         char sysplex[9] = "";
6072         int rc, i;
6073
6074         access = kzalloc(sizeof(*access), GFP_NOIO);
6075         if (!access) {
6076                 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
6077                                 "Could not allocate access buffer");
6078                 return -ENOMEM;
6079         }
6080         rc = dasd_eckd_query_host_access(device, access);
6081         if (rc) {
6082                 kfree(access);
6083                 return rc;
6084         }
6085
6086         info = (struct dasd_ckd_host_information *)
6087                 access->host_access_information;
6088         for (i = 0; i < info->entry_count; i++) {
6089                 entry = (struct dasd_ckd_path_group_entry *)
6090                         (info->entry + i * info->entry_size);
6091                 /* PGID */
6092                 seq_printf(m, "pgid %*phN\n", 11, entry->pgid);
6093                 /* FLAGS */
6094                 seq_printf(m, "status_flags %02x\n", entry->status_flags);
6095                 /* SYSPLEX NAME */
6096                 memcpy(&sysplex, &entry->sysplex_name, sizeof(sysplex) - 1);
6097                 EBCASC(sysplex, sizeof(sysplex));
6098                 seq_printf(m, "sysplex_name %8s\n", sysplex);
6099                 /* SUPPORTED CYLINDER */
6100                 seq_printf(m, "supported_cylinder %d\n", entry->cylinder);
6101                 /* TIMESTAMP */
6102                 seq_printf(m, "timestamp %lu\n", (unsigned long)
6103                            entry->timestamp);
6104         }
6105         kfree(access);
6106
6107         return 0;
6108 }
6109
6110 static struct dasd_device
6111 *copy_relation_find_device(struct dasd_copy_relation *copy,
6112                            char *busid)
6113 {
6114         int i;
6115
6116         for (i = 0; i < DASD_CP_ENTRIES; i++) {
6117                 if (copy->entry[i].configured &&
6118                     strncmp(copy->entry[i].busid, busid, DASD_BUS_ID_SIZE) == 0)
6119                         return copy->entry[i].device;
6120         }
6121         return NULL;
6122 }
6123
6124 /*
6125  * set the new active/primary device
6126  */
6127 static void copy_pair_set_active(struct dasd_copy_relation *copy, char *new_busid,
6128                                  char *old_busid)
6129 {
6130         int i;
6131
6132         for (i = 0; i < DASD_CP_ENTRIES; i++) {
6133                 if (copy->entry[i].configured &&
6134                     strncmp(copy->entry[i].busid, new_busid,
6135                             DASD_BUS_ID_SIZE) == 0) {
6136                         copy->active = &copy->entry[i];
6137                         copy->entry[i].primary = true;
6138                 } else if (copy->entry[i].configured &&
6139                            strncmp(copy->entry[i].busid, old_busid,
6140                                    DASD_BUS_ID_SIZE) == 0) {
6141                         copy->entry[i].primary = false;
6142                 }
6143         }
6144 }
6145
6146 /*
6147  * The function will swap the role of a given copy pair.
6148  * During the swap operation the relation of the blockdevice is disconnected
6149  * from the old primary and connected to the new.
6150  *
6151  * IO is paused on the block queue before swap and may be resumed afterwards.
6152  */
6153 static int dasd_eckd_copy_pair_swap(struct dasd_device *device, char *prim_busid,
6154                                     char *sec_busid)
6155 {
6156         struct dasd_device *primary, *secondary;
6157         struct dasd_copy_relation *copy;
6158         struct dasd_block *block;
6159         struct gendisk *gdp;
6160
6161         copy = device->copy;
6162         if (!copy)
6163                 return DASD_COPYPAIRSWAP_INVALID;
6164         primary = copy->active->device;
6165         if (!primary)
6166                 return DASD_COPYPAIRSWAP_INVALID;
6167         /* double check if swap has correct primary */
6168         if (strncmp(dev_name(&primary->cdev->dev), prim_busid, DASD_BUS_ID_SIZE) != 0)
6169                 return DASD_COPYPAIRSWAP_PRIMARY;
6170
6171         secondary = copy_relation_find_device(copy, sec_busid);
6172         if (!secondary)
6173                 return DASD_COPYPAIRSWAP_SECONDARY;
6174
6175         /*
6176          * usually the device should be quiesced for swap
6177          * for paranoia stop device and requeue requests again
6178          */
6179         dasd_device_set_stop_bits(primary, DASD_STOPPED_PPRC);
6180         dasd_device_set_stop_bits(secondary, DASD_STOPPED_PPRC);
6181         dasd_generic_requeue_all_requests(primary);
6182
6183         /* swap DASD internal device <> block assignment */
6184         block = primary->block;
6185         primary->block = NULL;
6186         secondary->block = block;
6187         block->base = secondary;
6188         /* set new primary device in COPY relation */
6189         copy_pair_set_active(copy, sec_busid, prim_busid);
6190
6191         /* swap blocklayer device link */
6192         gdp = block->gdp;
6193         dasd_add_link_to_gendisk(gdp, secondary);
6194
6195         /* re-enable device */
6196         dasd_device_remove_stop_bits(primary, DASD_STOPPED_PPRC);
6197         dasd_device_remove_stop_bits(secondary, DASD_STOPPED_PPRC);
6198         dasd_schedule_device_bh(secondary);
6199
6200         return DASD_COPYPAIRSWAP_SUCCESS;
6201 }
6202
6203 /*
6204  * Perform Subsystem Function - Peer-to-Peer Remote Copy Extended Query
6205  */
6206 static int dasd_eckd_query_pprc_status(struct dasd_device *device,
6207                                        struct dasd_pprc_data_sc4 *data)
6208 {
6209         struct dasd_pprc_data_sc4 *pprc_data;
6210         struct dasd_psf_prssd_data *prssdp;
6211         struct dasd_ccw_req *cqr;
6212         struct ccw1 *ccw;
6213         int rc;
6214
6215         cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
6216                                    sizeof(*prssdp) + sizeof(*pprc_data) + 1,
6217                                    device, NULL);
6218         if (IS_ERR(cqr)) {
6219                 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
6220                                 "Could not allocate query PPRC status request");
6221                 return PTR_ERR(cqr);
6222         }
6223         cqr->startdev = device;
6224         cqr->memdev = device;
6225         cqr->block = NULL;
6226         cqr->retries = 256;
6227         cqr->expires = 10 * HZ;
6228
6229         /* Prepare for Read Subsystem Data */
6230         prssdp = (struct dasd_psf_prssd_data *)cqr->data;
6231         memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
6232         prssdp->order = PSF_ORDER_PRSSD;
6233         prssdp->suborder = PSF_SUBORDER_PPRCEQ;
6234         prssdp->varies[0] = PPRCEQ_SCOPE_4;
6235         pprc_data = (struct dasd_pprc_data_sc4 *)(prssdp + 1);
6236
6237         ccw = cqr->cpaddr;
6238         ccw->cmd_code = DASD_ECKD_CCW_PSF;
6239         ccw->count = sizeof(struct dasd_psf_prssd_data);
6240         ccw->flags |= CCW_FLAG_CC;
6241         ccw->flags |= CCW_FLAG_SLI;
6242         ccw->cda = (__u32)(addr_t)prssdp;
6243
6244         /* Read Subsystem Data - query host access */
6245         ccw++;
6246         ccw->cmd_code = DASD_ECKD_CCW_RSSD;
6247         ccw->count = sizeof(*pprc_data);
6248         ccw->flags |= CCW_FLAG_SLI;
6249         ccw->cda = (__u32)(addr_t)pprc_data;
6250
6251         cqr->buildclk = get_tod_clock();
6252         cqr->status = DASD_CQR_FILLED;
6253
6254         rc = dasd_sleep_on_interruptible(cqr);
6255         if (rc == 0) {
6256                 *data = *pprc_data;
6257         } else {
6258                 DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
6259                                 "PPRC Extended Query failed with rc=%d\n",
6260                                 rc);
6261                 rc = -EOPNOTSUPP;
6262         }
6263
6264         dasd_sfree_request(cqr, cqr->memdev);
6265         return rc;
6266 }
6267
6268 /*
6269  * ECKD NOP - no operation
6270  */
6271 static int dasd_eckd_nop(struct dasd_device *device)
6272 {
6273         struct dasd_ccw_req *cqr;
6274         struct ccw1 *ccw;
6275         int rc;
6276
6277         cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 1, device, NULL);
6278         if (IS_ERR(cqr)) {
6279                 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
6280                                 "Could not allocate NOP request");
6281                 return PTR_ERR(cqr);
6282         }
6283         cqr->startdev = device;
6284         cqr->memdev = device;
6285         cqr->block = NULL;
6286         cqr->retries = 1;
6287         cqr->expires = 10 * HZ;
6288
6289         ccw = cqr->cpaddr;
6290         ccw->cmd_code = DASD_ECKD_CCW_NOP;
6291         ccw->flags |= CCW_FLAG_SLI;
6292
6293         cqr->buildclk = get_tod_clock();
6294         cqr->status = DASD_CQR_FILLED;
6295
6296         rc = dasd_sleep_on_interruptible(cqr);
6297         if (rc != 0) {
6298                 DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
6299                                 "NOP failed with rc=%d\n", rc);
6300                 rc = -EOPNOTSUPP;
6301         }
6302         dasd_sfree_request(cqr, cqr->memdev);
6303         return rc;
6304 }
6305
/* ping the device: issue a NOP and report whether it completed */
static int dasd_eckd_device_ping(struct dasd_device *device)
{
        return dasd_eckd_nop(device);
}
6310
6311 /*
6312  * Perform Subsystem Function - CUIR response
6313  */
6314 static int
6315 dasd_eckd_psf_cuir_response(struct dasd_device *device, int response,
6316                             __u32 message_id, __u8 lpum)
6317 {
6318         struct dasd_psf_cuir_response *psf_cuir;
6319         int pos = pathmask_to_pos(lpum);
6320         struct dasd_ccw_req *cqr;
6321         struct ccw1 *ccw;
6322         int rc;
6323
6324         cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ ,
6325                                    sizeof(struct dasd_psf_cuir_response),
6326                                    device, NULL);
6327
6328         if (IS_ERR(cqr)) {
6329                 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
6330                            "Could not allocate PSF-CUIR request");
6331                 return PTR_ERR(cqr);
6332         }
6333
6334         psf_cuir = (struct dasd_psf_cuir_response *)cqr->data;
6335         psf_cuir->order = PSF_ORDER_CUIR_RESPONSE;
6336         psf_cuir->cc = response;
6337         psf_cuir->chpid = device->path[pos].chpid;
6338         psf_cuir->message_id = message_id;
6339         psf_cuir->cssid = device->path[pos].cssid;
6340         psf_cuir->ssid = device->path[pos].ssid;
6341         ccw = cqr->cpaddr;
6342         ccw->cmd_code = DASD_ECKD_CCW_PSF;
6343         ccw->cda = (__u32)virt_to_phys(psf_cuir);
6344         ccw->flags = CCW_FLAG_SLI;
6345         ccw->count = sizeof(struct dasd_psf_cuir_response);
6346
6347         cqr->startdev = device;
6348         cqr->memdev = device;
6349         cqr->block = NULL;
6350         cqr->retries = 256;
6351         cqr->expires = 10*HZ;
6352         cqr->buildclk = get_tod_clock();
6353         cqr->status = DASD_CQR_FILLED;
6354         set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
6355
6356         rc = dasd_sleep_on(cqr);
6357
6358         dasd_sfree_request(cqr, cqr->memdev);
6359         return rc;
6360 }
6361
6362 /*
6363  * return configuration data that is referenced by record selector
6364  * if a record selector is specified or per default return the
6365  * conf_data pointer for the path specified by lpum
6366  */
6367 static struct dasd_conf_data *dasd_eckd_get_ref_conf(struct dasd_device *device,
6368                                                      __u8 lpum,
6369                                                      struct dasd_cuir_message *cuir)
6370 {
6371         struct dasd_conf_data *conf_data;
6372         int path, pos;
6373
6374         if (cuir->record_selector == 0)
6375                 goto out;
6376         for (path = 0x80, pos = 0; path; path >>= 1, pos++) {
6377                 conf_data = device->path[pos].conf_data;
6378                 if (conf_data->gneq.record_selector ==
6379                     cuir->record_selector)
6380                         return conf_data;
6381         }
6382 out:
6383         return device->path[pathmask_to_pos(lpum)].conf_data;
6384 }
6385
6386 /*
6387  * This function determines the scope of a reconfiguration request by
6388  * analysing the path and device selection data provided in the CUIR request.
6389  * Returns a path mask containing CUIR affected paths for the give device.
6390  *
6391  * If the CUIR request does not contain the required information return the
6392  * path mask of the path the attention message for the CUIR request was reveived
6393  * on.
6394  */
6395 static int dasd_eckd_cuir_scope(struct dasd_device *device, __u8 lpum,
6396                                 struct dasd_cuir_message *cuir)
6397 {
6398         struct dasd_conf_data *ref_conf_data;
6399         unsigned long bitmask = 0, mask = 0;
6400         struct dasd_conf_data *conf_data;
6401         unsigned int pos, path;
6402         char *ref_gneq, *gneq;
6403         char *ref_ned, *ned;
6404         int tbcpm = 0;
6405
6406         /* if CUIR request does not specify the scope use the path
6407            the attention message was presented on */
6408         if (!cuir->ned_map ||
6409             !(cuir->neq_map[0] | cuir->neq_map[1] | cuir->neq_map[2]))
6410                 return lpum;
6411
6412         /* get reference conf data */
6413         ref_conf_data = dasd_eckd_get_ref_conf(device, lpum, cuir);
6414         /* reference ned is determined by ned_map field */
6415         pos = 8 - ffs(cuir->ned_map);
6416         ref_ned = (char *)&ref_conf_data->neds[pos];
6417         ref_gneq = (char *)&ref_conf_data->gneq;
6418         /* transfer 24 bit neq_map to mask */
6419         mask = cuir->neq_map[2];
6420         mask |= cuir->neq_map[1] << 8;
6421         mask |= cuir->neq_map[0] << 16;
6422
6423         for (path = 0; path < 8; path++) {
6424                 /* initialise data per path */
6425                 bitmask = mask;
6426                 conf_data = device->path[path].conf_data;
6427                 pos = 8 - ffs(cuir->ned_map);
6428                 ned = (char *) &conf_data->neds[pos];
6429                 /* compare reference ned and per path ned */
6430                 if (memcmp(ref_ned, ned, sizeof(*ned)) != 0)
6431                         continue;
6432                 gneq = (char *)&conf_data->gneq;
6433                 /* compare reference gneq and per_path gneq under
6434                    24 bit mask where mask bit 0 equals byte 7 of
6435                    the gneq and mask bit 24 equals byte 31 */
6436                 while (bitmask) {
6437                         pos = ffs(bitmask) - 1;
6438                         if (memcmp(&ref_gneq[31 - pos], &gneq[31 - pos], 1)
6439                             != 0)
6440                                 break;
6441                         clear_bit(pos, &bitmask);
6442                 }
6443                 if (bitmask)
6444                         continue;
6445                 /* device and path match the reference values
6446                    add path to CUIR scope */
6447                 tbcpm |= 0x80 >> path;
6448         }
6449         return tbcpm;
6450 }
6451
6452 static void dasd_eckd_cuir_notify_user(struct dasd_device *device,
6453                                        unsigned long paths, int action)
6454 {
6455         int pos;
6456
6457         while (paths) {
6458                 /* get position of bit in mask */
6459                 pos = 8 - ffs(paths);
6460                 /* get channel path descriptor from this position */
6461                 if (action == CUIR_QUIESCE)
6462                         pr_warn("Service on the storage server caused path %x.%02x to go offline",
6463                                 device->path[pos].cssid,
6464                                 device->path[pos].chpid);
6465                 else if (action == CUIR_RESUME)
6466                         pr_info("Path %x.%02x is back online after service on the storage server",
6467                                 device->path[pos].cssid,
6468                                 device->path[pos].chpid);
6469                 clear_bit(7 - pos, &paths);
6470         }
6471 }
6472
6473 static int dasd_eckd_cuir_remove_path(struct dasd_device *device, __u8 lpum,
6474                                       struct dasd_cuir_message *cuir)
6475 {
6476         unsigned long tbcpm;
6477
6478         tbcpm = dasd_eckd_cuir_scope(device, lpum, cuir);
6479         /* nothing to do if path is not in use */
6480         if (!(dasd_path_get_opm(device) & tbcpm))
6481                 return 0;
6482         if (!(dasd_path_get_opm(device) & ~tbcpm)) {
6483                 /* no path would be left if the CUIR action is taken
6484                    return error */
6485                 return -EINVAL;
6486         }
6487         /* remove device from operational path mask */
6488         dasd_path_remove_opm(device, tbcpm);
6489         dasd_path_add_cuirpm(device, tbcpm);
6490         return tbcpm;
6491 }
6492
6493 /*
6494  * walk through all devices and build a path mask to quiesce them
6495  * return an error if the last path to a device would be removed
6496  *
6497  * if only part of the devices are quiesced and an error
6498  * occurs no onlining necessary, the storage server will
6499  * notify the already set offline devices again
6500  */
6501 static int dasd_eckd_cuir_quiesce(struct dasd_device *device, __u8 lpum,
6502                                   struct dasd_cuir_message *cuir)
6503 {
6504         struct dasd_eckd_private *private = device->private;
6505         struct alias_pav_group *pavgroup, *tempgroup;
6506         struct dasd_device *dev, *n;
6507         unsigned long paths = 0;
6508         unsigned long flags;
6509         int tbcpm;
6510
6511         /* active devices */
6512         list_for_each_entry_safe(dev, n, &private->lcu->active_devices,
6513                                  alias_list) {
6514                 spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
6515                 tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
6516                 spin_unlock_irqrestore(get_ccwdev_lock(dev->cdev), flags);
6517                 if (tbcpm < 0)
6518                         goto out_err;
6519                 paths |= tbcpm;
6520         }
6521         /* inactive devices */
6522         list_for_each_entry_safe(dev, n, &private->lcu->inactive_devices,
6523                                  alias_list) {
6524                 spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
6525                 tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
6526                 spin_unlock_irqrestore(get_ccwdev_lock(dev->cdev), flags);
6527                 if (tbcpm < 0)
6528                         goto out_err;
6529                 paths |= tbcpm;
6530         }
6531         /* devices in PAV groups */
6532         list_for_each_entry_safe(pavgroup, tempgroup,
6533                                  &private->lcu->grouplist, group) {
6534                 list_for_each_entry_safe(dev, n, &pavgroup->baselist,
6535                                          alias_list) {
6536                         spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
6537                         tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
6538                         spin_unlock_irqrestore(
6539                                 get_ccwdev_lock(dev->cdev), flags);
6540                         if (tbcpm < 0)
6541                                 goto out_err;
6542                         paths |= tbcpm;
6543                 }
6544                 list_for_each_entry_safe(dev, n, &pavgroup->aliaslist,
6545                                          alias_list) {
6546                         spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
6547                         tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
6548                         spin_unlock_irqrestore(
6549                                 get_ccwdev_lock(dev->cdev), flags);
6550                         if (tbcpm < 0)
6551                                 goto out_err;
6552                         paths |= tbcpm;
6553                 }
6554         }
6555         /* notify user about all paths affected by CUIR action */
6556         dasd_eckd_cuir_notify_user(device, paths, CUIR_QUIESCE);
6557         return 0;
6558 out_err:
6559         return tbcpm;
6560 }
6561
/*
 * walk through all devices of the LCU and trigger path verification for
 * every path in CUIR scope that is not already operational
 */
static int dasd_eckd_cuir_resume(struct dasd_device *device, __u8 lpum,
                                 struct dasd_cuir_message *cuir)
{
        struct dasd_eckd_private *private = device->private;
        struct alias_pav_group *pavgroup, *tempgroup;
        struct dasd_device *dev, *n;
        unsigned long paths = 0;
        int tbcpm;

        /*
         * the path may have been added through a generic path event before
         * only trigger path verification if the path is not already in use
         */
        list_for_each_entry_safe(dev, n,
                                 &private->lcu->active_devices,
                                 alias_list) {
                tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
                paths |= tbcpm;
                if (!(dasd_path_get_opm(dev) & tbcpm)) {
                        dasd_path_add_tbvpm(dev, tbcpm);
                        dasd_schedule_device_bh(dev);
                }
        }
        list_for_each_entry_safe(dev, n,
                                 &private->lcu->inactive_devices,
                                 alias_list) {
                tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
                paths |= tbcpm;
                if (!(dasd_path_get_opm(dev) & tbcpm)) {
                        dasd_path_add_tbvpm(dev, tbcpm);
                        dasd_schedule_device_bh(dev);
                }
        }
        /* devices in PAV groups */
        list_for_each_entry_safe(pavgroup, tempgroup,
                                 &private->lcu->grouplist,
                                 group) {
                list_for_each_entry_safe(dev, n,
                                         &pavgroup->baselist,
                                         alias_list) {
                        tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
                        paths |= tbcpm;
                        if (!(dasd_path_get_opm(dev) & tbcpm)) {
                                dasd_path_add_tbvpm(dev, tbcpm);
                                dasd_schedule_device_bh(dev);
                        }
                }
                list_for_each_entry_safe(dev, n,
                                         &pavgroup->aliaslist,
                                         alias_list) {
                        tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
                        paths |= tbcpm;
                        if (!(dasd_path_get_opm(dev) & tbcpm)) {
                                dasd_path_add_tbvpm(dev, tbcpm);
                                dasd_schedule_device_bh(dev);
                        }
                }
        }
        /* notify user about all paths affected by CUIR action */
        dasd_eckd_cuir_notify_user(device, paths, CUIR_RESUME);
        return 0;
}
6624
6625 static void dasd_eckd_handle_cuir(struct dasd_device *device, void *messages,
6626                                  __u8 lpum)
6627 {
6628         struct dasd_cuir_message *cuir = messages;
6629         int response;
6630
6631         DBF_DEV_EVENT(DBF_WARNING, device,
6632                       "CUIR request: %016llx %016llx %016llx %08x",
6633                       ((u64 *)cuir)[0], ((u64 *)cuir)[1], ((u64 *)cuir)[2],
6634                       ((u32 *)cuir)[3]);
6635
6636         if (cuir->code == CUIR_QUIESCE) {
6637                 /* quiesce */
6638                 if (dasd_eckd_cuir_quiesce(device, lpum, cuir))
6639                         response = PSF_CUIR_LAST_PATH;
6640                 else
6641                         response = PSF_CUIR_COMPLETED;
6642         } else if (cuir->code == CUIR_RESUME) {
6643                 /* resume */
6644                 dasd_eckd_cuir_resume(device, lpum, cuir);
6645                 response = PSF_CUIR_COMPLETED;
6646         } else
6647                 response = PSF_CUIR_NOT_SUPPORTED;
6648
6649         dasd_eckd_psf_cuir_response(device, response,
6650                                     cuir->message_id, lpum);
6651         DBF_DEV_EVENT(DBF_WARNING, device,
6652                       "CUIR response: %d on message ID %08x", response,
6653                       cuir->message_id);
6654         /* to make sure there is no attention left schedule work again */
6655         device->discipline->check_attention(device, lpum);
6656 }
6657
/*
 * resume all devices of the LCU that were stopped because the extent
 * pool ran out of space (DASD_STOPPED_NOSPC)
 */
static void dasd_eckd_oos_resume(struct dasd_device *device)
{
        struct dasd_eckd_private *private = device->private;
        struct alias_pav_group *pavgroup, *tempgroup;
        struct dasd_device *dev, *n;
        unsigned long flags;

        /* walk all device lists of the LCU under the lcu lock */
        spin_lock_irqsave(&private->lcu->lock, flags);
        list_for_each_entry_safe(dev, n, &private->lcu->active_devices,
                                 alias_list) {
                if (dev->stopped & DASD_STOPPED_NOSPC)
                        dasd_generic_space_avail(dev);
        }
        list_for_each_entry_safe(dev, n, &private->lcu->inactive_devices,
                                 alias_list) {
                if (dev->stopped & DASD_STOPPED_NOSPC)
                        dasd_generic_space_avail(dev);
        }
        /* devices in PAV groups */
        list_for_each_entry_safe(pavgroup, tempgroup,
                                 &private->lcu->grouplist,
                                 group) {
                list_for_each_entry_safe(dev, n, &pavgroup->baselist,
                                         alias_list) {
                        if (dev->stopped & DASD_STOPPED_NOSPC)
                                dasd_generic_space_avail(dev);
                }
                list_for_each_entry_safe(dev, n, &pavgroup->aliaslist,
                                         alias_list) {
                        if (dev->stopped & DASD_STOPPED_NOSPC)
                                dasd_generic_space_avail(dev);
                }
        }
        spin_unlock_irqrestore(&private->lcu->lock, flags);
}
6693
/*
 * handle an out-of-space (OOS) attention message from the storage
 * server: log the reported extent pool state and resume stopped
 * devices once space is usable again
 */
static void dasd_eckd_handle_oos(struct dasd_device *device, void *messages,
                                 __u8 lpum)
{
        struct dasd_oos_message *oos = messages;

        switch (oos->code) {
        case REPO_WARN:
        case POOL_WARN:
                /* pool is low but still usable - resume stopped devices */
                dev_warn(&device->cdev->dev,
                         "Extent pool usage has reached a critical value\n");
                dasd_eckd_oos_resume(device);
                break;
        case REPO_EXHAUST:
        case POOL_EXHAUST:
                dev_warn(&device->cdev->dev,
                         "Extent pool is exhausted\n");
                break;
        case REPO_RELIEVE:
        case POOL_RELIEVE:
                dev_info(&device->cdev->dev,
                         "Extent pool physical space constraint has been relieved\n");
                break;
        }

        /* In any case, update related data */
        dasd_eckd_read_ext_pool_info(device);

        /* to make sure there is no attention left schedule work again */
        device->discipline->check_attention(device, lpum);
}
6724
/*
 * worker for deferred attention handling: read the message buffer of
 * the device and dispatch CUIR or OOS messages to their handlers
 */
static void dasd_eckd_check_attention_work(struct work_struct *work)
{
        struct check_attention_work_data *data;
        struct dasd_rssd_messages *messages;
        struct dasd_device *device;
        int rc;

        data = container_of(work, struct check_attention_work_data, worker);
        device = data->device;
        messages = kzalloc(sizeof(*messages), GFP_KERNEL);
        if (!messages) {
                DBF_DEV_EVENT(DBF_WARNING, device, "%s",
                              "Could not allocate attention message buffer");
                goto out;
        }
        rc = dasd_eckd_read_message_buffer(device, messages, data->lpum);
        if (rc)
                goto out;

        /* dispatch on message length and format */
        if (messages->length == ATTENTION_LENGTH_CUIR &&
            messages->format == ATTENTION_FORMAT_CUIR)
                dasd_eckd_handle_cuir(device, messages, data->lpum);
        if (messages->length == ATTENTION_LENGTH_OOS &&
            messages->format == ATTENTION_FORMAT_OOS)
                dasd_eckd_handle_oos(device, messages, data->lpum);

out:
        /* drop the reference taken in dasd_eckd_check_attention */
        dasd_put_device(device);
        kfree(messages);
        kfree(data);
}
6756
/*
 * schedule deferred handling of an attention interrupt on path lpum;
 * uses GFP_ATOMIC so it is safe to call from atomic context and takes
 * a device reference that the worker releases
 */
static int dasd_eckd_check_attention(struct dasd_device *device, __u8 lpum)
{
        struct check_attention_work_data *data;

        data = kzalloc(sizeof(*data), GFP_ATOMIC);
        if (!data)
                return -ENOMEM;
        INIT_WORK(&data->worker, dasd_eckd_check_attention_work);
        dasd_get_device(device);
        data->device = device;
        data->lpum = lpum;
        schedule_work(&data->worker);
        return 0;
}
6771
6772 static int dasd_eckd_disable_hpf_path(struct dasd_device *device, __u8 lpum)
6773 {
6774         if (~lpum & dasd_path_get_opm(device)) {
6775                 dasd_path_add_nohpfpm(device, lpum);
6776                 dasd_path_remove_opm(device, lpum);
6777                 dev_err(&device->cdev->dev,
6778                         "Channel path %02X lost HPF functionality and is disabled\n",
6779                         lpum);
6780                 return 1;
6781         }
6782         return 0;
6783 }
6784
6785 static void dasd_eckd_disable_hpf_device(struct dasd_device *device)
6786 {
6787         struct dasd_eckd_private *private = device->private;
6788
6789         dev_err(&device->cdev->dev,
6790                 "High Performance FICON disabled\n");
6791         private->fcx_max_data = 0;
6792 }
6793
6794 static int dasd_eckd_hpf_enabled(struct dasd_device *device)
6795 {
6796         struct dasd_eckd_private *private = device->private;
6797
6798         return private->fcx_max_data ? 1 : 0;
6799 }
6800
6801 static void dasd_eckd_handle_hpf_error(struct dasd_device *device,
6802                                        struct irb *irb)
6803 {
6804         struct dasd_eckd_private *private = device->private;
6805
6806         if (!private->fcx_max_data) {
6807                 /* sanity check for no HPF, the error makes no sense */
6808                 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
6809                               "Trying to disable HPF for a non HPF device");
6810                 return;
6811         }
6812         if (irb->scsw.tm.sesq == SCSW_SESQ_DEV_NOFCX) {
6813                 dasd_eckd_disable_hpf_device(device);
6814         } else if (irb->scsw.tm.sesq == SCSW_SESQ_PATH_NOFCX) {
6815                 if (dasd_eckd_disable_hpf_path(device, irb->esw.esw1.lpum))
6816                         return;
6817                 dasd_eckd_disable_hpf_device(device);
6818                 dasd_path_set_tbvpm(device,
6819                                   dasd_path_get_hpfpm(device));
6820         }
6821         /*
6822          * prevent that any new I/O ist started on the device and schedule a
6823          * requeue of existing requests
6824          */
6825         dasd_device_set_stop_bits(device, DASD_STOPPED_NOT_ACC);
6826         dasd_schedule_requeue(device);
6827 }
6828
6829 static unsigned int dasd_eckd_max_sectors(struct dasd_block *block)
6830 {
6831         if (block->base->features & DASD_FEATURE_USERAW) {
6832                 /*
6833                  * the max_blocks value for raw_track access is 256
6834                  * it is higher than the native ECKD value because we
6835                  * only need one ccw per track
6836                  * so the max_hw_sectors are
6837                  * 2048 x 512B = 1024kB = 16 tracks
6838                  */
6839                 return DASD_ECKD_MAX_BLOCKS_RAW << block->s2b_shift;
6840         }
6841
6842         return DASD_ECKD_MAX_BLOCKS << block->s2b_shift;
6843 }
6844
/*
 * CCW bus driver glue: matches the device types listed in dasd_eckd_ids
 * and routes bus callbacks either to ECKD-specific entry points (probe,
 * set_online) or to the generic DASD handlers shared with other
 * disciplines.
 */
static struct ccw_driver dasd_eckd_driver = {
	.driver = {
		.name	= "dasd-eckd",
		.owner	= THIS_MODULE,
		.dev_groups = dasd_dev_groups,
	},
	.ids	     = dasd_eckd_ids,
	.probe	     = dasd_eckd_probe,
	.remove      = dasd_generic_remove,
	.set_offline = dasd_generic_set_offline,
	.set_online  = dasd_eckd_set_online,
	.notify      = dasd_generic_notify,
	.path_event  = dasd_generic_path_event,
	.shutdown    = dasd_generic_shutdown,
	.uc_handler  = dasd_generic_uc_handler,
	.int_class   = IRQIO_DAS,
};
6862
/*
 * ECKD discipline operations table. The DASD core calls through these
 * pointers for everything device-type specific: device recognition and
 * state transitions, channel program construction, error recovery,
 * sense-data dumping, and the thin-provisioning (ESE/extent pool) and
 * PPRC copy-pair hooks.
 */
static struct dasd_discipline dasd_eckd_discipline = {
	.owner = THIS_MODULE,
	.name = "ECKD",
	/* ebcname is converted in place to EBCDIC by dasd_eckd_init(). */
	.ebcname = "ECKD",
	.check_device = dasd_eckd_check_characteristics,
	.uncheck_device = dasd_eckd_uncheck_device,
	.do_analysis = dasd_eckd_do_analysis,
	.pe_handler = dasd_eckd_pe_handler,
	.basic_to_ready = dasd_eckd_basic_to_ready,
	.online_to_ready = dasd_eckd_online_to_ready,
	.basic_to_known = dasd_eckd_basic_to_known,
	.max_sectors = dasd_eckd_max_sectors,
	.fill_geometry = dasd_eckd_fill_geometry,
	.start_IO = dasd_start_IO,
	.term_IO = dasd_term_IO,
	.handle_terminated_request = dasd_eckd_handle_terminated_request,
	.format_device = dasd_eckd_format_device,
	.check_device_format = dasd_eckd_check_device_format,
	.erp_action = dasd_eckd_erp_action,
	.erp_postaction = dasd_eckd_erp_postaction,
	.check_for_device_change = dasd_eckd_check_for_device_change,
	.build_cp = dasd_eckd_build_alias_cp,
	.free_cp = dasd_eckd_free_alias_cp,
	.dump_sense = dasd_eckd_dump_sense,
	.dump_sense_dbf = dasd_eckd_dump_sense_dbf,
	.fill_info = dasd_eckd_fill_info,
	.ioctl = dasd_eckd_ioctl,
	.reload = dasd_eckd_reload_device,
	.get_uid = dasd_eckd_get_uid,
	.kick_validate = dasd_eckd_kick_validate_server,
	.check_attention = dasd_eckd_check_attention,
	.host_access_count = dasd_eckd_host_access_count,
	.hosts_print = dasd_hosts_print,
	.handle_hpf_error = dasd_eckd_handle_hpf_error,
	.disable_hpf = dasd_eckd_disable_hpf_device,
	.hpf_enabled = dasd_eckd_hpf_enabled,
	.reset_path = dasd_eckd_reset_path,
	/* Thin-provisioning (ESE) and extent pool hooks. */
	.is_ese = dasd_eckd_is_ese,
	.space_allocated = dasd_eckd_space_allocated,
	.space_configured = dasd_eckd_space_configured,
	.logical_capacity = dasd_eckd_logical_capacity,
	.release_space = dasd_eckd_release_space,
	.ext_pool_id = dasd_eckd_ext_pool_id,
	.ext_size = dasd_eckd_ext_size,
	.ext_pool_cap_at_warnlevel = dasd_eckd_ext_pool_cap_at_warnlevel,
	.ext_pool_warn_thrshld = dasd_eckd_ext_pool_warn_thrshld,
	.ext_pool_oos = dasd_eckd_ext_pool_oos,
	.ext_pool_exhaust = dasd_eckd_ext_pool_exhaust,
	.ese_format = dasd_eckd_ese_format,
	.ese_read = dasd_eckd_ese_read,
	/* Peer-to-Peer Remote Copy (PPRC) support. */
	.pprc_status = dasd_eckd_query_pprc_status,
	.pprc_enabled = dasd_eckd_pprc_enabled,
	.copy_pair_swap = dasd_eckd_copy_pair_swap,
	.device_ping = dasd_eckd_device_ping,
};
6918
6919 static int __init
6920 dasd_eckd_init(void)
6921 {
6922         int ret;
6923
6924         ASCEBC(dasd_eckd_discipline.ebcname, 4);
6925         dasd_reserve_req = kmalloc(sizeof(*dasd_reserve_req),
6926                                    GFP_KERNEL | GFP_DMA);
6927         if (!dasd_reserve_req)
6928                 return -ENOMEM;
6929         dasd_vol_info_req = kmalloc(sizeof(*dasd_vol_info_req),
6930                                     GFP_KERNEL | GFP_DMA);
6931         if (!dasd_vol_info_req) {
6932                 kfree(dasd_reserve_req);
6933                 return -ENOMEM;
6934         }
6935         pe_handler_worker = kmalloc(sizeof(*pe_handler_worker),
6936                                     GFP_KERNEL | GFP_DMA);
6937         if (!pe_handler_worker) {
6938                 kfree(dasd_reserve_req);
6939                 kfree(dasd_vol_info_req);
6940                 return -ENOMEM;
6941         }
6942         rawpadpage = (void *)__get_free_page(GFP_KERNEL);
6943         if (!rawpadpage) {
6944                 kfree(pe_handler_worker);
6945                 kfree(dasd_reserve_req);
6946                 kfree(dasd_vol_info_req);
6947                 return -ENOMEM;
6948         }
6949         ret = ccw_driver_register(&dasd_eckd_driver);
6950         if (!ret)
6951                 wait_for_device_probe();
6952         else {
6953                 kfree(pe_handler_worker);
6954                 kfree(dasd_reserve_req);
6955                 kfree(dasd_vol_info_req);
6956                 free_page((unsigned long)rawpadpage);
6957         }
6958         return ret;
6959 }
6960
6961 static void __exit
6962 dasd_eckd_cleanup(void)
6963 {
6964         ccw_driver_unregister(&dasd_eckd_driver);
6965         kfree(pe_handler_worker);
6966         kfree(dasd_reserve_req);
6967         free_page((unsigned long)rawpadpage);
6968 }
6969
6970 module_init(dasd_eckd_init);
6971 module_exit(dasd_eckd_cleanup);