[linux.git] drivers/s390/net/qeth_core_main.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *    Copyright IBM Corp. 2007, 2009
4  *    Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
5  *               Frank Pavlic <fpavlic@de.ibm.com>,
6  *               Thomas Spatzier <tspat@de.ibm.com>,
7  *               Frank Blaschka <frank.blaschka@de.ibm.com>
8  */
9
10 #define KMSG_COMPONENT "qeth"
11 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
12
13 #include <linux/compat.h>
14 #include <linux/module.h>
15 #include <linux/moduleparam.h>
16 #include <linux/string.h>
17 #include <linux/errno.h>
18 #include <linux/kernel.h>
19 #include <linux/log2.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/mii.h>
23 #include <linux/mm.h>
24 #include <linux/kthread.h>
25 #include <linux/slab.h>
26 #include <linux/if_vlan.h>
27 #include <linux/netdevice.h>
28 #include <linux/netdev_features.h>
29 #include <linux/skbuff.h>
30 #include <linux/vmalloc.h>
31
32 #include <net/iucv/af_iucv.h>
33 #include <net/dsfield.h>
34
35 #include <asm/ebcdic.h>
36 #include <asm/chpid.h>
37 #include <asm/io.h>
38 #include <asm/sysinfo.h>
39 #include <asm/diag.h>
40 #include <asm/cio.h>
41 #include <asm/ccwdev.h>
42 #include <asm/cpcmd.h>
43
44 #include "qeth_core.h"
45
46 struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = {
47         /* define dbf - Name, Pages, Areas, Maxlen, Level, View, Handle */
48         /*                   N  P  A    M  L  V                      H  */
49         [QETH_DBF_SETUP] = {"qeth_setup",
50                                 8, 1,   8, 5, &debug_hex_ascii_view, NULL},
51         [QETH_DBF_MSG]   = {"qeth_msg", 8, 1, 11 * sizeof(long), 3,
52                             &debug_sprintf_view, NULL},
53         [QETH_DBF_CTRL]  = {"qeth_control",
54                 8, 1, QETH_DBF_CTRL_LEN, 5, &debug_hex_ascii_view, NULL},
55 };
56 EXPORT_SYMBOL_GPL(qeth_dbf);
57
58 struct kmem_cache *qeth_core_header_cache;
59 EXPORT_SYMBOL_GPL(qeth_core_header_cache);
60 static struct kmem_cache *qeth_qdio_outbuf_cache;
61
62 static struct device *qeth_core_root_dev;
63 static struct lock_class_key qdio_out_skb_queue_key;
64
65 static void qeth_issue_next_read_cb(struct qeth_card *card,
66                                     struct qeth_cmd_buffer *iob);
67 static void qeth_free_buffer_pool(struct qeth_card *);
68 static int qeth_qdio_establish(struct qeth_card *);
69 static void qeth_free_qdio_queues(struct qeth_card *card);
70 static void qeth_notify_skbs(struct qeth_qdio_out_q *queue,
71                 struct qeth_qdio_out_buffer *buf,
72                 enum iucv_tx_notify notification);
73 static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf);
74 static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int);
75
76 static void qeth_close_dev_handler(struct work_struct *work)
77 {
78         struct qeth_card *card;
79
80         card = container_of(work, struct qeth_card, close_dev_work);
81         QETH_CARD_TEXT(card, 2, "cldevhdl");
82         ccwgroup_set_offline(card->gdev);
83 }
84
85 static const char *qeth_get_cardname(struct qeth_card *card)
86 {
87         if (IS_VM_NIC(card)) {
88                 switch (card->info.type) {
89                 case QETH_CARD_TYPE_OSD:
90                         return " Virtual NIC QDIO";
91                 case QETH_CARD_TYPE_IQD:
92                         return " Virtual NIC Hiper";
93                 case QETH_CARD_TYPE_OSM:
94                         return " Virtual NIC QDIO - OSM";
95                 case QETH_CARD_TYPE_OSX:
96                         return " Virtual NIC QDIO - OSX";
97                 default:
98                         return " unknown";
99                 }
100         } else {
101                 switch (card->info.type) {
102                 case QETH_CARD_TYPE_OSD:
103                         return " OSD Express";
104                 case QETH_CARD_TYPE_IQD:
105                         return " HiperSockets";
106                 case QETH_CARD_TYPE_OSN:
107                         return " OSN QDIO";
108                 case QETH_CARD_TYPE_OSM:
109                         return " OSM QDIO";
110                 case QETH_CARD_TYPE_OSX:
111                         return " OSX QDIO";
112                 default:
113                         return " unknown";
114                 }
115         }
116         return " n/a";
117 }
118
119 /* max length to be returned: 14 */
120 const char *qeth_get_cardname_short(struct qeth_card *card)
121 {
122         if (IS_VM_NIC(card)) {
123                 switch (card->info.type) {
124                 case QETH_CARD_TYPE_OSD:
125                         return "Virt.NIC QDIO";
126                 case QETH_CARD_TYPE_IQD:
127                         return "Virt.NIC Hiper";
128                 case QETH_CARD_TYPE_OSM:
129                         return "Virt.NIC OSM";
130                 case QETH_CARD_TYPE_OSX:
131                         return "Virt.NIC OSX";
132                 default:
133                         return "unknown";
134                 }
135         } else {
136                 switch (card->info.type) {
137                 case QETH_CARD_TYPE_OSD:
138                         switch (card->info.link_type) {
139                         case QETH_LINK_TYPE_FAST_ETH:
140                                 return "OSD_100";
141                         case QETH_LINK_TYPE_HSTR:
142                                 return "HSTR";
143                         case QETH_LINK_TYPE_GBIT_ETH:
144                                 return "OSD_1000";
145                         case QETH_LINK_TYPE_10GBIT_ETH:
146                                 return "OSD_10GIG";
147                         case QETH_LINK_TYPE_25GBIT_ETH:
148                                 return "OSD_25GIG";
149                         case QETH_LINK_TYPE_LANE_ETH100:
150                                 return "OSD_FE_LANE";
151                         case QETH_LINK_TYPE_LANE_TR:
152                                 return "OSD_TR_LANE";
153                         case QETH_LINK_TYPE_LANE_ETH1000:
154                                 return "OSD_GbE_LANE";
155                         case QETH_LINK_TYPE_LANE:
156                                 return "OSD_ATM_LANE";
157                         default:
158                                 return "OSD_Express";
159                         }
160                 case QETH_CARD_TYPE_IQD:
161                         return "HiperSockets";
162                 case QETH_CARD_TYPE_OSN:
163                         return "OSN";
164                 case QETH_CARD_TYPE_OSM:
165                         return "OSM_1000";
166                 case QETH_CARD_TYPE_OSX:
167                         return "OSX_10GIG";
168                 default:
169                         return "unknown";
170                 }
171         }
172         return "n/a";
173 }
174
175 void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
176                          int clear_start_mask)
177 {
178         unsigned long flags;
179
180         spin_lock_irqsave(&card->thread_mask_lock, flags);
181         card->thread_allowed_mask = threads;
182         if (clear_start_mask)
183                 card->thread_start_mask &= threads;
184         spin_unlock_irqrestore(&card->thread_mask_lock, flags);
185         wake_up(&card->wait_q);
186 }
187 EXPORT_SYMBOL_GPL(qeth_set_allowed_threads);
188
189 int qeth_threads_running(struct qeth_card *card, unsigned long threads)
190 {
191         unsigned long flags;
192         int rc = 0;
193
194         spin_lock_irqsave(&card->thread_mask_lock, flags);
195         rc = (card->thread_running_mask & threads);
196         spin_unlock_irqrestore(&card->thread_mask_lock, flags);
197         return rc;
198 }
199 EXPORT_SYMBOL_GPL(qeth_threads_running);
200
201 void qeth_clear_working_pool_list(struct qeth_card *card)
202 {
203         struct qeth_buffer_pool_entry *pool_entry, *tmp;
204
205         QETH_CARD_TEXT(card, 5, "clwrklst");
206         list_for_each_entry_safe(pool_entry, tmp,
207                              &card->qdio.in_buf_pool.entry_list, list) {
208                 list_del(&pool_entry->list);
209         }
210 }
211 EXPORT_SYMBOL_GPL(qeth_clear_working_pool_list);
212
213 static int qeth_alloc_buffer_pool(struct qeth_card *card)
214 {
215         struct qeth_buffer_pool_entry *pool_entry;
216         void *ptr;
217         int i, j;
218
219         QETH_CARD_TEXT(card, 5, "alocpool");
220         for (i = 0; i < card->qdio.init_pool.buf_count; ++i) {
221                 pool_entry = kzalloc(sizeof(*pool_entry), GFP_KERNEL);
222                 if (!pool_entry) {
223                         qeth_free_buffer_pool(card);
224                         return -ENOMEM;
225                 }
226                 for (j = 0; j < QETH_MAX_BUFFER_ELEMENTS(card); ++j) {
227                         ptr = (void *) __get_free_page(GFP_KERNEL);
228                         if (!ptr) {
229                                 while (j > 0)
230                                         free_page((unsigned long)
231                                                   pool_entry->elements[--j]);
232                                 kfree(pool_entry);
233                                 qeth_free_buffer_pool(card);
234                                 return -ENOMEM;
235                         }
236                         pool_entry->elements[j] = ptr;
237                 }
238                 list_add(&pool_entry->init_list,
239                          &card->qdio.init_pool.entry_list);
240         }
241         return 0;
242 }
243
244 int qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt)
245 {
246         QETH_CARD_TEXT(card, 2, "realcbp");
247
248         if (card->state != CARD_STATE_DOWN)
249                 return -EPERM;
250
251         /* TODO: steal/add buffers from/to a running card's buffer pool (?) */
252         qeth_clear_working_pool_list(card);
253         qeth_free_buffer_pool(card);
254         card->qdio.in_buf_pool.buf_count = bufcnt;
255         card->qdio.init_pool.buf_count = bufcnt;
256         return qeth_alloc_buffer_pool(card);
257 }
258 EXPORT_SYMBOL_GPL(qeth_realloc_buffer_pool);
259
260 static void qeth_free_qdio_queue(struct qeth_qdio_q *q)
261 {
262         if (!q)
263                 return;
264
265         qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
266         kfree(q);
267 }
268
269 static struct qeth_qdio_q *qeth_alloc_qdio_queue(void)
270 {
271         struct qeth_qdio_q *q = kzalloc(sizeof(*q), GFP_KERNEL);
272         int i;
273
274         if (!q)
275                 return NULL;
276
277         if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) {
278                 kfree(q);
279                 return NULL;
280         }
281
282         for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
283                 q->bufs[i].buffer = q->qdio_bufs[i];
284
285         QETH_DBF_HEX(SETUP, 2, &q, sizeof(void *));
286         return q;
287 }
288
289 static int qeth_cq_init(struct qeth_card *card)
290 {
291         int rc;
292
293         if (card->options.cq == QETH_CQ_ENABLED) {
294                 QETH_CARD_TEXT(card, 2, "cqinit");
295                 qdio_reset_buffers(card->qdio.c_q->qdio_bufs,
296                                    QDIO_MAX_BUFFERS_PER_Q);
297                 card->qdio.c_q->next_buf_to_init = 127;
298                 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT,
299                              card->qdio.no_in_queues - 1, 0,
300                              127);
301                 if (rc) {
302                         QETH_CARD_TEXT_(card, 2, "1err%d", rc);
303                         goto out;
304                 }
305         }
306         rc = 0;
307 out:
308         return rc;
309 }
310
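/* With completion queueing enabled, allocate the completion queue as an
 * additional input queue plus one qdio_outbuf_state entry per outbound
 * buffer; otherwise fall back to a single input queue.
 */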
311 static int qeth_alloc_cq(struct qeth_card *card)
312 {
313         int rc;
314
315         if (card->options.cq == QETH_CQ_ENABLED) {
316                 int i;
317                 struct qdio_outbuf_state *outbuf_states;
318
319                 QETH_CARD_TEXT(card, 2, "cqon");
320                 card->qdio.c_q = qeth_alloc_qdio_queue();
321                 if (!card->qdio.c_q) {
322                         rc = -1;
323                         goto kmsg_out;
324                 }
325                 card->qdio.no_in_queues = 2;
326                 card->qdio.out_bufstates =
327                         kcalloc(card->qdio.no_out_queues *
328                                         QDIO_MAX_BUFFERS_PER_Q,
329                                 sizeof(struct qdio_outbuf_state),
330                                 GFP_KERNEL);
331                 outbuf_states = card->qdio.out_bufstates;
332                 if (outbuf_states == NULL) {
333                         rc = -1;
334                         goto free_cq_out;
335                 }
336                 for (i = 0; i < card->qdio.no_out_queues; ++i) {
337                         card->qdio.out_qs[i]->bufstates = outbuf_states;
338                         outbuf_states += QDIO_MAX_BUFFERS_PER_Q;
339                 }
340         } else {
341                 QETH_CARD_TEXT(card, 2, "nocq");
342                 card->qdio.c_q = NULL;
343                 card->qdio.no_in_queues = 1;
344         }
345         QETH_CARD_TEXT_(card, 2, "iqc%d", card->qdio.no_in_queues);
346         rc = 0;
347 out:
348         return rc;
349 free_cq_out:
350         qeth_free_qdio_queue(card->qdio.c_q);
351         card->qdio.c_q = NULL;
352 kmsg_out:
353         dev_err(&card->gdev->dev, "Failed to create completion queue\n");
354         goto out;
355 }
356
357 static void qeth_free_cq(struct qeth_card *card)
358 {
359         if (card->qdio.c_q) {
360                 --card->qdio.no_in_queues;
361                 qeth_free_qdio_queue(card->qdio.c_q);
362                 card->qdio.c_q = NULL;
363         }
364         kfree(card->qdio.out_bufstates);
365         card->qdio.out_bufstates = NULL;
366 }
367
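/* Map the SBALF 15 completion code of a buffer that finished via the
 * completion queue to the iucv_tx_notify value reported to AF_IUCV sockets.
 */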
368 static enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15,
369                                                         int delayed)
370 {
371         enum iucv_tx_notify n;
372
373         switch (sbalf15) {
374         case 0:
375                 n = delayed ? TX_NOTIFY_DELAYED_OK : TX_NOTIFY_OK;
376                 break;
377         case 4:
378         case 16:
379         case 17:
380         case 18:
381                 n = delayed ? TX_NOTIFY_DELAYED_UNREACHABLE :
382                         TX_NOTIFY_UNREACHABLE;
383                 break;
384         default:
385                 n = delayed ? TX_NOTIFY_DELAYED_GENERALERROR :
386                         TX_NOTIFY_GENERALERROR;
387                 break;
388         }
389
390         return n;
391 }
392
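/* Walk the chain of "pending" TX buffers attached to q->bufs[bidx] and, for
 * every buffer already marked as handled by the completion path (or for all
 * of them when forced_cleanup is set), release its skbs, unlink it and free
 * the descriptor. On forced cleanup the slot itself is re-initialized if
 * needed, so recovery can reuse it.
 */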
393 static void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q, int bidx,
394                                          int forced_cleanup)
395 {
396         if (q->card->options.cq != QETH_CQ_ENABLED)
397                 return;
398
399         if (q->bufs[bidx]->next_pending != NULL) {
400                 struct qeth_qdio_out_buffer *head = q->bufs[bidx];
401                 struct qeth_qdio_out_buffer *c = q->bufs[bidx]->next_pending;
402
403                 while (c) {
404                         if (forced_cleanup ||
405                             atomic_read(&c->state) ==
406                               QETH_QDIO_BUF_HANDLED_DELAYED) {
407                                 struct qeth_qdio_out_buffer *f = c;
408                                 QETH_CARD_TEXT(f->q->card, 5, "fp");
409                                 QETH_CARD_TEXT_(f->q->card, 5, "%lx", (long) f);
410                                 /* release here to avoid interleaving between
411                                    outbound tasklet and inbound tasklet
412                                    regarding notifications and lifecycle */
413                                 qeth_release_skbs(c);
414
415                                 c = f->next_pending;
416                                 WARN_ON_ONCE(head->next_pending != f);
417                                 head->next_pending = c;
418                                 kmem_cache_free(qeth_qdio_outbuf_cache, f);
419                         } else {
420                                 head = c;
421                                 c = c->next_pending;
422                         }
423
424                 }
425         }
426         if (forced_cleanup && (atomic_read(&(q->bufs[bidx]->state)) ==
427                                         QETH_QDIO_BUF_HANDLED_DELAYED)) {
428                 /* for recovery situations */
429                 qeth_init_qdio_out_buf(q, bidx);
430                 QETH_CARD_TEXT(q->card, 2, "clprecov");
431         }
432 }
433
434
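/* Process the asynchronous completion (QAOB) of a TX buffer that finished
 * with "pending" status: update the buffer state, notify any AF_IUCV sockets
 * of the outcome, free header-cache elements still referenced by the AOB and
 * finally release the AOB itself.
 */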
435 static void qeth_qdio_handle_aob(struct qeth_card *card,
436                                  unsigned long phys_aob_addr)
437 {
438         struct qaob *aob;
439         struct qeth_qdio_out_buffer *buffer;
440         enum iucv_tx_notify notification;
441         unsigned int i;
442
443         aob = (struct qaob *) phys_to_virt(phys_aob_addr);
444         QETH_CARD_TEXT(card, 5, "haob");
445         QETH_CARD_TEXT_(card, 5, "%lx", phys_aob_addr);
446         buffer = (struct qeth_qdio_out_buffer *) aob->user1;
447         QETH_CARD_TEXT_(card, 5, "%lx", aob->user1);
448
449         if (atomic_cmpxchg(&buffer->state, QETH_QDIO_BUF_PRIMED,
450                            QETH_QDIO_BUF_IN_CQ) == QETH_QDIO_BUF_PRIMED) {
451                 notification = TX_NOTIFY_OK;
452         } else {
453                 WARN_ON_ONCE(atomic_read(&buffer->state) !=
454                                                         QETH_QDIO_BUF_PENDING);
455                 atomic_set(&buffer->state, QETH_QDIO_BUF_IN_CQ);
456                 notification = TX_NOTIFY_DELAYED_OK;
457         }
458
459         if (aob->aorc != 0)  {
460                 QETH_CARD_TEXT_(card, 2, "aorc%02X", aob->aorc);
461                 notification = qeth_compute_cq_notification(aob->aorc, 1);
462         }
463         qeth_notify_skbs(buffer->q, buffer, notification);
464
465         /* Free dangling allocations. The attached skbs are handled by
466          * qeth_cleanup_handled_pending().
467          */
468         for (i = 0;
469              i < aob->sb_count && i < QETH_MAX_BUFFER_ELEMENTS(card);
470              i++) {
471                 if (aob->sba[i] && buffer->is_header[i])
472                         kmem_cache_free(qeth_core_header_cache,
473                                         (void *) aob->sba[i]);
474         }
475         atomic_set(&buffer->state, QETH_QDIO_BUF_HANDLED_DELAYED);
476
477         qdio_release_aob(aob);
478 }
479
480 static inline int qeth_is_cq(struct qeth_card *card, unsigned int queue)
481 {
482         return card->options.cq == QETH_CQ_ENABLED &&
483             card->qdio.c_q != NULL &&
484             queue != 0 &&
485             queue == card->qdio.no_in_queues - 1;
486 }
487
488 static void qeth_setup_ccw(struct ccw1 *ccw, u8 cmd_code, u8 flags, u32 len,
489                            void *data)
490 {
491         ccw->cmd_code = cmd_code;
492         ccw->flags = flags | CCW_FLAG_SLI;
493         ccw->count = len;
494         ccw->cda = (__u32) __pa(data);
495 }
496
497 static int __qeth_issue_next_read(struct qeth_card *card)
498 {
499         struct qeth_cmd_buffer *iob = card->read_cmd;
500         struct qeth_channel *channel = iob->channel;
501         struct ccw1 *ccw = __ccw_from_cmd(iob);
502         int rc;
503
504         QETH_CARD_TEXT(card, 5, "issnxrd");
505         if (channel->state != CH_STATE_UP)
506                 return -EIO;
507
508         memset(iob->data, 0, iob->length);
509         qeth_setup_ccw(ccw, CCW_CMD_READ, 0, iob->length, iob->data);
510         iob->callback = qeth_issue_next_read_cb;
511         /* keep the cmd alive after completion: */
512         qeth_get_cmd(iob);
513
514         QETH_CARD_TEXT(card, 6, "noirqpnd");
515         rc = ccw_device_start(channel->ccwdev, ccw, (addr_t) iob, 0, 0);
516         if (rc) {
517                 QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n",
518                                  rc, CARD_DEVID(card));
519                 atomic_set(&channel->irq_pending, 0);
520                 qeth_put_cmd(iob);
521                 card->read_or_write_problem = 1;
522                 qeth_schedule_recovery(card);
523                 wake_up(&card->wait_q);
524         }
525         return rc;
526 }
527
528 static int qeth_issue_next_read(struct qeth_card *card)
529 {
530         int ret;
531
532         spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card)));
533         ret = __qeth_issue_next_read(card);
534         spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card)));
535
536         return ret;
537 }
538
539 static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card)
540 {
541         struct qeth_reply *reply;
542
543         reply = kzalloc(sizeof(*reply), GFP_KERNEL);
544         if (reply) {
545                 refcount_set(&reply->refcnt, 1);
546                 init_completion(&reply->received);
547         }
548         return reply;
549 }
550
551 static void qeth_get_reply(struct qeth_reply *reply)
552 {
553         refcount_inc(&reply->refcnt);
554 }
555
556 static void qeth_put_reply(struct qeth_reply *reply)
557 {
558         if (refcount_dec_and_test(&reply->refcnt))
559                 kfree(reply);
560 }
561
562 static void qeth_enqueue_reply(struct qeth_card *card, struct qeth_reply *reply)
563 {
564         spin_lock_irq(&card->lock);
565         list_add_tail(&reply->list, &card->cmd_waiter_list);
566         spin_unlock_irq(&card->lock);
567 }
568
569 static void qeth_dequeue_reply(struct qeth_card *card, struct qeth_reply *reply)
570 {
571         spin_lock_irq(&card->lock);
572         list_del(&reply->list);
573         spin_unlock_irq(&card->lock);
574 }
575
576 void qeth_notify_reply(struct qeth_reply *reply, int reason)
577 {
578         reply->rc = reason;
579         complete(&reply->received);
580 }
581 EXPORT_SYMBOL_GPL(qeth_notify_reply);
582
583 static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
584                 struct qeth_card *card)
585 {
586         const char *ipa_name;
587         int com = cmd->hdr.command;
588         ipa_name = qeth_get_ipa_cmd_name(com);
589
590         if (rc)
591                 QETH_DBF_MESSAGE(2, "IPA: %s(%#x) for device %x returned %#x \"%s\"\n",
592                                  ipa_name, com, CARD_DEVID(card), rc,
593                                  qeth_get_ipa_msg(rc));
594         else
595                 QETH_DBF_MESSAGE(5, "IPA: %s(%#x) for device %x succeeded\n",
596                                  ipa_name, com, CARD_DEVID(card));
597 }
598
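/* Inspect an incoming IPA PDU: command replies are passed back to the caller
 * for reply matching, while unsolicited events (STOPLAN/STARTLAN link
 * changes, bridgeport and address-change notifications, local address
 * registration events) are handled or logged here. A NULL return means the
 * PDU needs no further processing.
 */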
599 static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
600                                                 struct qeth_ipa_cmd *cmd)
601 {
602         QETH_CARD_TEXT(card, 5, "chkipad");
603
604         if (IS_IPA_REPLY(cmd)) {
605                 if (cmd->hdr.command != IPA_CMD_SETCCID &&
606                     cmd->hdr.command != IPA_CMD_DELCCID &&
607                     cmd->hdr.command != IPA_CMD_MODCCID &&
608                     cmd->hdr.command != IPA_CMD_SET_DIAG_ASS)
609                         qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
610                 return cmd;
611         }
612
613         /* handle unsolicited event: */
614         switch (cmd->hdr.command) {
615         case IPA_CMD_STOPLAN:
616                 if (cmd->hdr.return_code == IPA_RC_VEPA_TO_VEB_TRANSITION) {
617                         dev_err(&card->gdev->dev,
618                                 "Interface %s is down because the adjacent port is no longer in reflective relay mode\n",
619                                 QETH_CARD_IFNAME(card));
620                         schedule_work(&card->close_dev_work);
621                 } else {
622                         dev_warn(&card->gdev->dev,
623                                  "The link for interface %s on CHPID 0x%X failed\n",
624                                  QETH_CARD_IFNAME(card), card->info.chpid);
625                         qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
626                         netif_carrier_off(card->dev);
627                 }
628                 return NULL;
629         case IPA_CMD_STARTLAN:
630                 dev_info(&card->gdev->dev,
631                          "The link for %s on CHPID 0x%X has been restored\n",
632                          QETH_CARD_IFNAME(card), card->info.chpid);
633                 if (card->info.hwtrap)
634                         card->info.hwtrap = 2;
635                 qeth_schedule_recovery(card);
636                 return NULL;
637         case IPA_CMD_SETBRIDGEPORT_IQD:
638         case IPA_CMD_SETBRIDGEPORT_OSA:
639         case IPA_CMD_ADDRESS_CHANGE_NOTIF:
640                 if (card->discipline->control_event_handler(card, cmd))
641                         return cmd;
642                 return NULL;
643         case IPA_CMD_MODCCID:
644                 return cmd;
645         case IPA_CMD_REGISTER_LOCAL_ADDR:
646                 QETH_CARD_TEXT(card, 3, "irla");
647                 return NULL;
648         case IPA_CMD_UNREGISTER_LOCAL_ADDR:
649                 QETH_CARD_TEXT(card, 3, "urla");
650                 return NULL;
651         default:
652                 QETH_DBF_MESSAGE(2, "Received data is IPA but not a reply!\n");
653                 return cmd;
654         }
655 }
656
657 void qeth_clear_ipacmd_list(struct qeth_card *card)
658 {
659         struct qeth_reply *reply;
660         unsigned long flags;
661
662         QETH_CARD_TEXT(card, 4, "clipalst");
663
664         spin_lock_irqsave(&card->lock, flags);
665         list_for_each_entry(reply, &card->cmd_waiter_list, list)
666                 qeth_notify_reply(reply, -EIO);
667         spin_unlock_irqrestore(&card->lock, flags);
668 }
669 EXPORT_SYMBOL_GPL(qeth_clear_ipacmd_list);
670
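/* Check a control-channel buffer for an IDX TERMINATE indication and map its
 * cause code to an errno: -EPERM for the z/VM OSI-layer mismatch (0xf6),
 * -EIO for any other termination.
 */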
671 static int qeth_check_idx_response(struct qeth_card *card,
672         unsigned char *buffer)
673 {
674         QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN);
675         if ((buffer[2] & 0xc0) == 0xc0) {
676                 QETH_DBF_MESSAGE(2, "received an IDX TERMINATE with cause code %#04x\n",
677                                  buffer[4]);
678                 QETH_CARD_TEXT(card, 2, "ckidxres");
679                 QETH_CARD_TEXT(card, 2, " idxterm");
680                 QETH_CARD_TEXT_(card, 2, "  rc%d", -EIO);
681                 if (buffer[4] == 0xf6) {
682                         dev_err(&card->gdev->dev,
683                         "The qeth device is not configured "
684                         "for the OSI layer required by z/VM\n");
685                         return -EPERM;
686                 }
687                 return -EIO;
688         }
689         return 0;
690 }
691
692 void qeth_put_cmd(struct qeth_cmd_buffer *iob)
693 {
694         if (refcount_dec_and_test(&iob->ref_count)) {
695                 if (iob->reply)
696                         qeth_put_reply(iob->reply);
697                 kfree(iob->data);
698                 kfree(iob);
699         }
700 }
701 EXPORT_SYMBOL_GPL(qeth_put_cmd);
702
703 static void qeth_release_buffer_cb(struct qeth_card *card,
704                                    struct qeth_cmd_buffer *iob)
705 {
706         qeth_put_cmd(iob);
707 }
708
709 static void qeth_cancel_cmd(struct qeth_cmd_buffer *iob, int rc)
710 {
711         struct qeth_reply *reply = iob->reply;
712
713         if (reply)
714                 qeth_notify_reply(reply, rc);
715         qeth_put_cmd(iob);
716 }
717
718 struct qeth_cmd_buffer *qeth_alloc_cmd(struct qeth_channel *channel,
719                                        unsigned int length, unsigned int ccws,
720                                        long timeout)
721 {
722         struct qeth_cmd_buffer *iob;
723
724         if (length > QETH_BUFSIZE)
725                 return NULL;
726
727         iob = kzalloc(sizeof(*iob), GFP_KERNEL);
728         if (!iob)
729                 return NULL;
730
731         iob->data = kzalloc(ALIGN(length, 8) + ccws * sizeof(struct ccw1),
732                             GFP_KERNEL | GFP_DMA);
733         if (!iob->data) {
734                 kfree(iob);
735                 return NULL;
736         }
737
738         refcount_set(&iob->ref_count, 1);
739         iob->channel = channel;
740         iob->timeout = timeout;
741         iob->length = length;
742         return iob;
743 }
744 EXPORT_SYMBOL_GPL(qeth_alloc_cmd);
745
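/* Completion callback for the READ channel: validate the IDX header, filter
 * out unsolicited IPA events, match the remaining data against the pending
 * requests on cmd_waiter_list, run the matching reply callback and then
 * re-issue the next read so the channel keeps listening.
 */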
746 static void qeth_issue_next_read_cb(struct qeth_card *card,
747                                     struct qeth_cmd_buffer *iob)
748 {
749         struct qeth_ipa_cmd *cmd = NULL;
750         struct qeth_reply *reply = NULL;
751         struct qeth_reply *r;
752         unsigned long flags;
753         int rc = 0;
754
755         QETH_CARD_TEXT(card, 4, "sndctlcb");
756         rc = qeth_check_idx_response(card, iob->data);
757         switch (rc) {
758         case 0:
759                 break;
760         case -EIO:
761                 qeth_clear_ipacmd_list(card);
762                 qeth_schedule_recovery(card);
763                 /* fall through */
764         default:
765                 goto out;
766         }
767
768         if (IS_IPA(iob->data)) {
769                 cmd = (struct qeth_ipa_cmd *) PDU_ENCAPSULATION(iob->data);
770                 cmd = qeth_check_ipa_data(card, cmd);
771                 if (!cmd)
772                         goto out;
773                 if (IS_OSN(card) && card->osn_info.assist_cb &&
774                     cmd->hdr.command != IPA_CMD_STARTLAN) {
775                         card->osn_info.assist_cb(card->dev, cmd);
776                         goto out;
777                 }
778         } else {
779                 /* non-IPA commands should only flow during initialization */
780                 if (card->state != CARD_STATE_DOWN)
781                         goto out;
782         }
783
784         /* match against pending cmd requests */
785         spin_lock_irqsave(&card->lock, flags);
786         list_for_each_entry(r, &card->cmd_waiter_list, list) {
787                 if ((r->seqno == QETH_IDX_COMMAND_SEQNO) ||
788                     (cmd && (r->seqno == cmd->hdr.seqno))) {
789                         reply = r;
790                         /* take the object outside the lock */
791                         qeth_get_reply(reply);
792                         break;
793                 }
794         }
795         spin_unlock_irqrestore(&card->lock, flags);
796
797         if (!reply)
798                 goto out;
799
800         if (!reply->callback) {
801                 rc = 0;
802         } else {
803                 if (cmd) {
804                         reply->offset = (u16)((char *)cmd - (char *)iob->data);
805                         rc = reply->callback(card, reply, (unsigned long)cmd);
806                 } else {
807                         rc = reply->callback(card, reply, (unsigned long)iob);
808                 }
809         }
810
811         if (rc <= 0)
812                 qeth_notify_reply(reply, rc);
813         qeth_put_reply(reply);
814
815 out:
816         memcpy(&card->seqno.pdu_hdr_ack,
817                 QETH_PDU_HEADER_SEQ_NO(iob->data),
818                 QETH_SEQ_NO_LENGTH);
819         qeth_put_cmd(iob);
820         __qeth_issue_next_read(card);
821 }
822
823 static int qeth_set_thread_start_bit(struct qeth_card *card,
824                 unsigned long thread)
825 {
826         unsigned long flags;
827
828         spin_lock_irqsave(&card->thread_mask_lock, flags);
829         if (!(card->thread_allowed_mask & thread) ||
830               (card->thread_start_mask & thread)) {
831                 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
832                 return -EPERM;
833         }
834         card->thread_start_mask |= thread;
835         spin_unlock_irqrestore(&card->thread_mask_lock, flags);
836         return 0;
837 }
838
839 void qeth_clear_thread_start_bit(struct qeth_card *card, unsigned long thread)
840 {
841         unsigned long flags;
842
843         spin_lock_irqsave(&card->thread_mask_lock, flags);
844         card->thread_start_mask &= ~thread;
845         spin_unlock_irqrestore(&card->thread_mask_lock, flags);
846         wake_up(&card->wait_q);
847 }
848 EXPORT_SYMBOL_GPL(qeth_clear_thread_start_bit);
849
850 void qeth_clear_thread_running_bit(struct qeth_card *card, unsigned long thread)
851 {
852         unsigned long flags;
853
854         spin_lock_irqsave(&card->thread_mask_lock, flags);
855         card->thread_running_mask &= ~thread;
856         spin_unlock_irqrestore(&card->thread_mask_lock, flags);
857         wake_up_all(&card->wait_q);
858 }
859 EXPORT_SYMBOL_GPL(qeth_clear_thread_running_bit);
860
861 static int __qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
862 {
863         unsigned long flags;
864         int rc = 0;
865
866         spin_lock_irqsave(&card->thread_mask_lock, flags);
867         if (card->thread_start_mask & thread) {
868                 if ((card->thread_allowed_mask & thread) &&
869                     !(card->thread_running_mask & thread)) {
870                         rc = 1;
871                         card->thread_start_mask &= ~thread;
872                         card->thread_running_mask |= thread;
873                 } else
874                         rc = -EPERM;
875         }
876         spin_unlock_irqrestore(&card->thread_mask_lock, flags);
877         return rc;
878 }
879
880 int qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
881 {
882         int rc = 0;
883
884         wait_event(card->wait_q,
885                    (rc = __qeth_do_run_thread(card, thread)) >= 0);
886         return rc;
887 }
888 EXPORT_SYMBOL_GPL(qeth_do_run_thread);
889
890 void qeth_schedule_recovery(struct qeth_card *card)
891 {
892         QETH_CARD_TEXT(card, 2, "startrec");
893         if (qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD) == 0)
894                 schedule_work(&card->kernel_thread_starter);
895 }
896 EXPORT_SYMBOL_GPL(qeth_schedule_recovery);
897
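/* Evaluate channel status, device status and sense data from an IRB.
 * Returns 1 when the condition requires recovery, 0 when it can be tolerated
 * (e.g. a unit check with all-zero sense data).
 */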
898 static int qeth_get_problem(struct qeth_card *card, struct ccw_device *cdev,
899                             struct irb *irb)
900 {
901         int dstat, cstat;
902         char *sense;
903
904         sense = (char *) irb->ecw;
905         cstat = irb->scsw.cmd.cstat;
906         dstat = irb->scsw.cmd.dstat;
907
908         if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK |
909                      SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK |
910                      SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) {
911                 QETH_CARD_TEXT(card, 2, "CGENCHK");
912                 dev_warn(&cdev->dev, "The qeth device driver "
913                         "failed to recover an error on the device\n");
914                 QETH_DBF_MESSAGE(2, "check on channel %x with dstat=%#x, cstat=%#x\n",
915                                  CCW_DEVID(cdev), dstat, cstat);
916                 print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET,
917                                 16, 1, irb, 64, 1);
918                 return 1;
919         }
920
921         if (dstat & DEV_STAT_UNIT_CHECK) {
922                 if (sense[SENSE_RESETTING_EVENT_BYTE] &
923                     SENSE_RESETTING_EVENT_FLAG) {
924                         QETH_CARD_TEXT(card, 2, "REVIND");
925                         return 1;
926                 }
927                 if (sense[SENSE_COMMAND_REJECT_BYTE] &
928                     SENSE_COMMAND_REJECT_FLAG) {
929                         QETH_CARD_TEXT(card, 2, "CMDREJi");
930                         return 1;
931                 }
932                 if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) {
933                         QETH_CARD_TEXT(card, 2, "AFFE");
934                         return 1;
935                 }
936                 if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) {
937                         QETH_CARD_TEXT(card, 2, "ZEROSEN");
938                         return 0;
939                 }
940                 QETH_CARD_TEXT(card, 2, "DGENCHK");
941                 return 1;
942         }
943         return 0;
944 }
945
946 static int qeth_check_irb_error(struct qeth_card *card, struct ccw_device *cdev,
947                                 struct irb *irb)
948 {
949         if (!IS_ERR(irb))
950                 return 0;
951
952         switch (PTR_ERR(irb)) {
953         case -EIO:
954                 QETH_DBF_MESSAGE(2, "i/o-error on channel %x\n",
955                                  CCW_DEVID(cdev));
956                 QETH_CARD_TEXT(card, 2, "ckirberr");
957                 QETH_CARD_TEXT_(card, 2, "  rc%d", -EIO);
958                 return -EIO;
959         case -ETIMEDOUT:
960                 dev_warn(&cdev->dev, "A hardware operation timed out"
961                         " on the device\n");
962                 QETH_CARD_TEXT(card, 2, "ckirberr");
963                 QETH_CARD_TEXT_(card, 2, "  rc%d", -ETIMEDOUT);
964                 return -ETIMEDOUT;
965         default:
966                 QETH_DBF_MESSAGE(2, "unknown error %ld on channel %x\n",
967                                  PTR_ERR(irb), CCW_DEVID(cdev));
968                 QETH_CARD_TEXT(card, 2, "ckirberr");
969                 QETH_CARD_TEXT(card, 2, "  rc???");
970                 return PTR_ERR(irb);
971         }
972 }
973
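/* Common interrupt handler for the read, write and data channels: determine
 * which channel raised the interrupt, clear its irq_pending state, track
 * clear/halt completion, escalate errors via qeth_get_problem() and finally
 * run the callback of the command buffer (if any) that started the I/O.
 */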
974 static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
975                 struct irb *irb)
976 {
977         int rc;
978         int cstat, dstat;
979         struct qeth_cmd_buffer *iob = NULL;
980         struct ccwgroup_device *gdev;
981         struct qeth_channel *channel;
982         struct qeth_card *card;
983
984         /* while we hold the ccwdev lock, this stays valid: */
985         gdev = dev_get_drvdata(&cdev->dev);
986         card = dev_get_drvdata(&gdev->dev);
987         if (!card)
988                 return;
989
990         QETH_CARD_TEXT(card, 5, "irq");
991
992         if (card->read.ccwdev == cdev) {
993                 channel = &card->read;
994                 QETH_CARD_TEXT(card, 5, "read");
995         } else if (card->write.ccwdev == cdev) {
996                 channel = &card->write;
997                 QETH_CARD_TEXT(card, 5, "write");
998         } else {
999                 channel = &card->data;
1000                 QETH_CARD_TEXT(card, 5, "data");
1001         }
1002
1003         if (qeth_intparm_is_iob(intparm))
1004                 iob = (struct qeth_cmd_buffer *) __va((addr_t)intparm);
1005
1006         rc = qeth_check_irb_error(card, cdev, irb);
1007         if (rc) {
1008                 /* IO was terminated, free its resources. */
1009                 if (iob)
1010                         qeth_cancel_cmd(iob, rc);
1011                 atomic_set(&channel->irq_pending, 0);
1012                 wake_up(&card->wait_q);
1013                 return;
1014         }
1015
1016         atomic_set(&channel->irq_pending, 0);
1017
1018         if (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC))
1019                 channel->state = CH_STATE_STOPPED;
1020
1021         if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC))
1022                 channel->state = CH_STATE_HALTED;
1023
1024         if (intparm == QETH_CLEAR_CHANNEL_PARM) {
1025                 QETH_CARD_TEXT(card, 6, "clrchpar");
1026                 /* we don't have to handle this further */
1027                 intparm = 0;
1028         }
1029         if (intparm == QETH_HALT_CHANNEL_PARM) {
1030                 QETH_CARD_TEXT(card, 6, "hltchpar");
1031                 /* we don't have to handle this further */
1032                 intparm = 0;
1033         }
1034
1035         cstat = irb->scsw.cmd.cstat;
1036         dstat = irb->scsw.cmd.dstat;
1037
1038         if ((dstat & DEV_STAT_UNIT_EXCEP) ||
1039             (dstat & DEV_STAT_UNIT_CHECK) ||
1040             (cstat)) {
1041                 if (irb->esw.esw0.erw.cons) {
1042                         dev_warn(&channel->ccwdev->dev,
1043                                 "The qeth device driver failed to recover "
1044                                 "an error on the device\n");
1045                         QETH_DBF_MESSAGE(2, "sense data available on channel %x: cstat %#X dstat %#X\n",
1046                                          CCW_DEVID(channel->ccwdev), cstat,
1047                                          dstat);
1048                         print_hex_dump(KERN_WARNING, "qeth: irb ",
1049                                 DUMP_PREFIX_OFFSET, 16, 1, irb, 32, 1);
1050                         print_hex_dump(KERN_WARNING, "qeth: sense data ",
1051                                 DUMP_PREFIX_OFFSET, 16, 1, irb->ecw, 32, 1);
1052                 }
1053
1054                 rc = qeth_get_problem(card, cdev, irb);
1055                 if (rc) {
1056                         card->read_or_write_problem = 1;
1057                         if (iob)
1058                                 qeth_cancel_cmd(iob, rc);
1059                         qeth_clear_ipacmd_list(card);
1060                         qeth_schedule_recovery(card);
1061                         goto out;
1062                 }
1063         }
1064
1065         if (iob && iob->callback)
1066                 iob->callback(card, iob);
1067
1068 out:
1069         wake_up(&card->wait_q);
1070         return;
1071 }
1072
1073 static void qeth_notify_skbs(struct qeth_qdio_out_q *q,
1074                 struct qeth_qdio_out_buffer *buf,
1075                 enum iucv_tx_notify notification)
1076 {
1077         struct sk_buff *skb;
1078
1079         skb_queue_walk(&buf->skb_list, skb) {
1080                 QETH_CARD_TEXT_(q->card, 5, "skbn%d", notification);
1081                 QETH_CARD_TEXT_(q->card, 5, "%lx", (long) skb);
1082                 if (skb->protocol == htons(ETH_P_AF_IUCV) && skb->sk)
1083                         iucv_sk(skb->sk)->sk_txnotify(skb, notification);
1084         }
1085 }
1086
1087 static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf)
1088 {
1089         struct sk_buff *skb;
1090
1091         /* release may never happen from within CQ tasklet scope */
1092         WARN_ON_ONCE(atomic_read(&buf->state) == QETH_QDIO_BUF_IN_CQ);
1093
1094         if (atomic_read(&buf->state) == QETH_QDIO_BUF_PENDING)
1095                 qeth_notify_skbs(buf->q, buf, TX_NOTIFY_GENERALERROR);
1096
1097         while ((skb = __skb_dequeue(&buf->skb_list)) != NULL)
1098                 consume_skb(skb);
1099 }
1100
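/* Return a TX buffer to the EMPTY state: drop the PCI-request accounting,
 * release its skbs, free any header-cache elements and scrub the SBAL so the
 * buffer can be refilled.
 */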
1101 static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
1102                                      struct qeth_qdio_out_buffer *buf)
1103 {
1104         int i;
1105
1106         /* is PCI flag set on buffer? */
1107         if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ)
1108                 atomic_dec(&queue->set_pci_flags_count);
1109
1110         qeth_release_skbs(buf);
1111
1112         for (i = 0; i < queue->max_elements; ++i) {
1113                 if (buf->buffer->element[i].addr && buf->is_header[i])
1114                         kmem_cache_free(qeth_core_header_cache,
1115                                 buf->buffer->element[i].addr);
1116                 buf->is_header[i] = 0;
1117         }
1118
1119         qeth_scrub_qdio_buffer(buf->buffer, queue->max_elements);
1120         buf->next_element_to_fill = 0;
1121         atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
1122 }
1123
1124 static void qeth_drain_output_queue(struct qeth_qdio_out_q *q, bool free)
1125 {
1126         int j;
1127
1128         for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
1129                 if (!q->bufs[j])
1130                         continue;
1131                 qeth_cleanup_handled_pending(q, j, 1);
1132                 qeth_clear_output_buffer(q, q->bufs[j]);
1133                 if (free) {
1134                         kmem_cache_free(qeth_qdio_outbuf_cache, q->bufs[j]);
1135                         q->bufs[j] = NULL;
1136                 }
1137         }
1138 }
1139
1140 void qeth_drain_output_queues(struct qeth_card *card)
1141 {
1142         int i;
1143
1144         QETH_CARD_TEXT(card, 2, "clearqdbf");
1145         /* clear outbound buffers to free skbs */
1146         for (i = 0; i < card->qdio.no_out_queues; ++i) {
1147                 if (card->qdio.out_qs[i])
1148                         qeth_drain_output_queue(card->qdio.out_qs[i], false);
1149         }
1150 }
1151 EXPORT_SYMBOL_GPL(qeth_drain_output_queues);
1152
1153 static void qeth_free_buffer_pool(struct qeth_card *card)
1154 {
1155         struct qeth_buffer_pool_entry *pool_entry, *tmp;
1156         int i = 0;
1157         list_for_each_entry_safe(pool_entry, tmp,
1158                                  &card->qdio.init_pool.entry_list, init_list) {
1159                 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i)
1160                         free_page((unsigned long)pool_entry->elements[i]);
1161                 list_del(&pool_entry->init_list);
1162                 kfree(pool_entry);
1163         }
1164 }
1165
1166 static void qeth_clean_channel(struct qeth_channel *channel)
1167 {
1168         struct ccw_device *cdev = channel->ccwdev;
1169
1170         QETH_DBF_TEXT(SETUP, 2, "freech");
1171
1172         spin_lock_irq(get_ccwdev_lock(cdev));
1173         cdev->handler = NULL;
1174         spin_unlock_irq(get_ccwdev_lock(cdev));
1175 }
1176
1177 static void qeth_setup_channel(struct qeth_channel *channel)
1178 {
1179         struct ccw_device *cdev = channel->ccwdev;
1180
1181         QETH_DBF_TEXT(SETUP, 2, "setupch");
1182
1183         channel->state = CH_STATE_DOWN;
1184         atomic_set(&channel->irq_pending, 0);
1185
1186         spin_lock_irq(get_ccwdev_lock(cdev));
1187         cdev->handler = qeth_irq;
1188         spin_unlock_irq(get_ccwdev_lock(cdev));
1189 }
1190
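/* Set the number of TX queues that an OSA device exposes to the stack,
 * either a single queue or one per netdev TX queue. If the count changes
 * while QDIO queues are already allocated, they are freed first.
 */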
1191 static int qeth_osa_set_output_queues(struct qeth_card *card, bool single)
1192 {
1193         unsigned int count = single ? 1 : card->dev->num_tx_queues;
1194         int rc;
1195
1196         rtnl_lock();
1197         rc = netif_set_real_num_tx_queues(card->dev, count);
1198         rtnl_unlock();
1199
1200         if (rc)
1201                 return rc;
1202
1203         if (card->qdio.no_out_queues == count)
1204                 return 0;
1205
1206         if (atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED)
1207                 qeth_free_qdio_queues(card);
1208
1209         if (count == 1)
1210                 dev_info(&card->gdev->dev, "Priority Queueing not supported\n");
1211
1212         card->qdio.default_out_queue = single ? 0 : QETH_DEFAULT_QUEUE;
1213         card->qdio.no_out_queues = count;
1214         return 0;
1215 }
1216
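/* Read the format-0 channel-path descriptor of the data device to derive the
 * function level and, for OSD/OSX devices, whether the CHPID restricts the
 * adapter to a single output queue.
 */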
1217 static int qeth_update_from_chp_desc(struct qeth_card *card)
1218 {
1219         struct ccw_device *ccwdev;
1220         struct channel_path_desc_fmt0 *chp_dsc;
1221         int rc = 0;
1222
1223         QETH_CARD_TEXT(card, 2, "chp_desc");
1224
1225         ccwdev = card->data.ccwdev;
1226         chp_dsc = ccw_device_get_chp_desc(ccwdev, 0);
1227         if (!chp_dsc)
1228                 return -ENOMEM;
1229
1230         card->info.func_level = 0x4100 + chp_dsc->desc;
1231
1232         if (IS_OSD(card) || IS_OSX(card))
1233                 /* CHPP field bit 6 == 1 -> single queue */
1234                 rc = qeth_osa_set_output_queues(card, chp_dsc->chpp & 0x02);
1235
1236         kfree(chp_dsc);
1237         QETH_CARD_TEXT_(card, 2, "nr:%x", card->qdio.no_out_queues);
1238         QETH_CARD_TEXT_(card, 2, "lvl:%02x", card->info.func_level);
1239         return rc;
1240 }
1241
1242 static void qeth_init_qdio_info(struct qeth_card *card)
1243 {
1244         QETH_CARD_TEXT(card, 4, "intqdinf");
1245         atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
1246         card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
1247         card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
1248
1249         /* inbound */
1250         card->qdio.no_in_queues = 1;
1251         card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
1252         if (IS_IQD(card))
1253                 card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_HSDEFAULT;
1254         else
1255                 card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT;
1256         card->qdio.in_buf_pool.buf_count = card->qdio.init_pool.buf_count;
1257         INIT_LIST_HEAD(&card->qdio.in_buf_pool.entry_list);
1258         INIT_LIST_HEAD(&card->qdio.init_pool.entry_list);
1259 }
1260
1261 static void qeth_set_initial_options(struct qeth_card *card)
1262 {
1263         card->options.route4.type = NO_ROUTER;
1264         card->options.route6.type = NO_ROUTER;
1265         card->options.rx_sg_cb = QETH_RX_SG_CB;
1266         card->options.isolation = ISOLATION_MODE_NONE;
1267         card->options.cq = QETH_CQ_DISABLED;
1268         card->options.layer = QETH_DISCIPLINE_UNDETERMINED;
1269 }
1270
1271 static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
1272 {
1273         unsigned long flags;
1274         int rc = 0;
1275
1276         spin_lock_irqsave(&card->thread_mask_lock, flags);
1277         QETH_CARD_TEXT_(card, 4, "  %02x%02x%02x",
1278                         (u8) card->thread_start_mask,
1279                         (u8) card->thread_allowed_mask,
1280                         (u8) card->thread_running_mask);
1281         rc = (card->thread_start_mask & thread);
1282         spin_unlock_irqrestore(&card->thread_mask_lock, flags);
1283         return rc;
1284 }
1285
1286 static void qeth_start_kernel_thread(struct work_struct *work)
1287 {
1288         struct task_struct *ts;
1289         struct qeth_card *card = container_of(work, struct qeth_card,
1290                                         kernel_thread_starter);
1291         QETH_CARD_TEXT(card, 2, "strthrd");
1292
1293         if (card->read.state != CH_STATE_UP &&
1294             card->write.state != CH_STATE_UP)
1295                 return;
1296         if (qeth_do_start_thread(card, QETH_RECOVER_THREAD)) {
1297                 ts = kthread_run(card->discipline->recover, (void *)card,
1298                                 "qeth_recover");
1299                 if (IS_ERR(ts)) {
1300                         qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
1301                         qeth_clear_thread_running_bit(card,
1302                                 QETH_RECOVER_THREAD);
1303                 }
1304         }
1305 }
1306
1307 static void qeth_buffer_reclaim_work(struct work_struct *);
1308 static void qeth_setup_card(struct qeth_card *card)
1309 {
1310         QETH_CARD_TEXT(card, 2, "setupcrd");
1311
1312         card->info.type = CARD_RDEV(card)->id.driver_info;
1313         card->state = CARD_STATE_DOWN;
1314         spin_lock_init(&card->lock);
1315         spin_lock_init(&card->thread_mask_lock);
1316         mutex_init(&card->conf_mutex);
1317         mutex_init(&card->discipline_mutex);
1318         INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread);
1319         INIT_LIST_HEAD(&card->cmd_waiter_list);
1320         init_waitqueue_head(&card->wait_q);
1321         qeth_set_initial_options(card);
1322         /* IP address takeover */
1323         INIT_LIST_HEAD(&card->ipato.entries);
1324         qeth_init_qdio_info(card);
1325         INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work);
1326         INIT_WORK(&card->close_dev_work, qeth_close_dev_handler);
1327 }
1328
1329 static void qeth_core_sl_print(struct seq_file *m, struct service_level *slr)
1330 {
1331         struct qeth_card *card = container_of(slr, struct qeth_card,
1332                                         qeth_service_level);
1333         if (card->info.mcl_level[0])
1334                 seq_printf(m, "qeth: %s firmware level %s\n",
1335                         CARD_BUS_ID(card), card->info.mcl_level);
1336 }
1337
1338 static struct qeth_card *qeth_alloc_card(struct ccwgroup_device *gdev)
1339 {
1340         struct qeth_card *card;
1341
1342         QETH_DBF_TEXT(SETUP, 2, "alloccrd");
1343         card = kzalloc(sizeof(*card), GFP_KERNEL);
1344         if (!card)
1345                 goto out;
1346         QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
1347
1348         card->gdev = gdev;
1349         dev_set_drvdata(&gdev->dev, card);
1350         CARD_RDEV(card) = gdev->cdev[0];
1351         CARD_WDEV(card) = gdev->cdev[1];
1352         CARD_DDEV(card) = gdev->cdev[2];
1353
1354         card->event_wq = alloc_ordered_workqueue("%s_event", 0,
1355                                                  dev_name(&gdev->dev));
1356         if (!card->event_wq)
1357                 goto out_wq;
1358
1359         card->read_cmd = qeth_alloc_cmd(&card->read, QETH_BUFSIZE, 1, 0);
1360         if (!card->read_cmd)
1361                 goto out_read_cmd;
1362
1363         qeth_setup_channel(&card->read);
1364         qeth_setup_channel(&card->write);
1365         qeth_setup_channel(&card->data);
1366         card->qeth_service_level.seq_print = qeth_core_sl_print;
1367         register_service_level(&card->qeth_service_level);
1368         return card;
1369
1370 out_read_cmd:
1371         destroy_workqueue(card->event_wq);
1372 out_wq:
1373         dev_set_drvdata(&gdev->dev, NULL);
1374         kfree(card);
1375 out:
1376         return NULL;
1377 }
1378
1379 static int qeth_clear_channel(struct qeth_card *card,
1380                               struct qeth_channel *channel)
1381 {
1382         int rc;
1383
1384         QETH_CARD_TEXT(card, 3, "clearch");
1385         spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
1386         rc = ccw_device_clear(channel->ccwdev, QETH_CLEAR_CHANNEL_PARM);
1387         spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
1388
1389         if (rc)
1390                 return rc;
1391         rc = wait_event_interruptible_timeout(card->wait_q,
1392                         channel->state == CH_STATE_STOPPED, QETH_TIMEOUT);
1393         if (rc == -ERESTARTSYS)
1394                 return rc;
1395         if (channel->state != CH_STATE_STOPPED)
1396                 return -ETIME;
1397         channel->state = CH_STATE_DOWN;
1398         return 0;
1399 }
1400
1401 static int qeth_halt_channel(struct qeth_card *card,
1402                              struct qeth_channel *channel)
1403 {
1404         int rc;
1405
1406         QETH_CARD_TEXT(card, 3, "haltch");
1407         spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
1408         rc = ccw_device_halt(channel->ccwdev, QETH_HALT_CHANNEL_PARM);
1409         spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
1410
1411         if (rc)
1412                 return rc;
1413         rc = wait_event_interruptible_timeout(card->wait_q,
1414                         channel->state == CH_STATE_HALTED, QETH_TIMEOUT);
1415         if (rc == -ERESTARTSYS)
1416                 return rc;
1417         if (channel->state != CH_STATE_HALTED)
1418                 return -ETIME;
1419         return 0;
1420 }
1421
1422 static int qeth_halt_channels(struct qeth_card *card)
1423 {
1424         int rc1 = 0, rc2 = 0, rc3 = 0;
1425
1426         QETH_CARD_TEXT(card, 3, "haltchs");
1427         rc1 = qeth_halt_channel(card, &card->read);
1428         rc2 = qeth_halt_channel(card, &card->write);
1429         rc3 = qeth_halt_channel(card, &card->data);
1430         if (rc1)
1431                 return rc1;
1432         if (rc2)
1433                 return rc2;
1434         return rc3;
1435 }
1436
1437 static int qeth_clear_channels(struct qeth_card *card)
1438 {
1439         int rc1 = 0, rc2 = 0, rc3 = 0;
1440
1441         QETH_CARD_TEXT(card, 3, "clearchs");
1442         rc1 = qeth_clear_channel(card, &card->read);
1443         rc2 = qeth_clear_channel(card, &card->write);
1444         rc3 = qeth_clear_channel(card, &card->data);
1445         if (rc1)
1446                 return rc1;
1447         if (rc2)
1448                 return rc2;
1449         return rc3;
1450 }
1451
1452 static int qeth_clear_halt_card(struct qeth_card *card, int halt)
1453 {
1454         int rc = 0;
1455
1456         QETH_CARD_TEXT(card, 3, "clhacrd");
1457
1458         if (halt)
1459                 rc = qeth_halt_channels(card);
1460         if (rc)
1461                 return rc;
1462         return qeth_clear_channels(card);
1463 }
1464
1465 int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
1466 {
1467         int rc = 0;
1468
1469         QETH_CARD_TEXT(card, 3, "qdioclr");
1470         switch (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ESTABLISHED,
1471                 QETH_QDIO_CLEANING)) {
1472         case QETH_QDIO_ESTABLISHED:
1473                 if (IS_IQD(card))
1474                         rc = qdio_shutdown(CARD_DDEV(card),
1475                                 QDIO_FLAG_CLEANUP_USING_HALT);
1476                 else
1477                         rc = qdio_shutdown(CARD_DDEV(card),
1478                                 QDIO_FLAG_CLEANUP_USING_CLEAR);
1479                 if (rc)
1480                         QETH_CARD_TEXT_(card, 3, "1err%d", rc);
1481                 atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
1482                 break;
1483         case QETH_QDIO_CLEANING:
1484                 return rc;
1485         default:
1486                 break;
1487         }
1488         rc = qeth_clear_halt_card(card, use_halt);
1489         if (rc)
1490                 QETH_CARD_TEXT_(card, 3, "2err%d", rc);
1491         card->state = CARD_STATE_DOWN;
1492         return rc;
1493 }
1494 EXPORT_SYMBOL_GPL(qeth_qdio_clear_card);
1495
1496 static enum qeth_discipline_id qeth_vm_detect_layer(struct qeth_card *card)
1497 {
1498         enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;
1499         struct diag26c_vnic_resp *response = NULL;
1500         struct diag26c_vnic_req *request = NULL;
1501         struct ccw_dev_id id;
1502         char userid[80];
1503         int rc = 0;
1504
1505         QETH_CARD_TEXT(card, 2, "vmlayer");
1506
1507         cpcmd("QUERY USERID", userid, sizeof(userid), &rc);
1508         if (rc)
1509                 goto out;
1510
1511         request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
1512         response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
1513         if (!request || !response) {
1514                 rc = -ENOMEM;
1515                 goto out;
1516         }
1517
1518         ccw_device_get_id(CARD_RDEV(card), &id);
1519         request->resp_buf_len = sizeof(*response);
1520         request->resp_version = DIAG26C_VERSION6_VM65918;
1521         request->req_format = DIAG26C_VNIC_INFO;
1522         ASCEBC(userid, 8);
1523         memcpy(&request->sys_name, userid, 8);
1524         request->devno = id.devno;
1525
1526         QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
1527         rc = diag26c(request, response, DIAG26C_PORT_VNIC);
1528         QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
1529         if (rc)
1530                 goto out;
1531         QETH_DBF_HEX(CTRL, 2, response, sizeof(*response));
1532
1533         if (request->resp_buf_len < sizeof(*response) ||
1534             response->version != request->resp_version) {
1535                 rc = -EIO;
1536                 goto out;
1537         }
1538
1539         if (response->protocol == VNIC_INFO_PROT_L2)
1540                 disc = QETH_DISCIPLINE_LAYER2;
1541         else if (response->protocol == VNIC_INFO_PROT_L3)
1542                 disc = QETH_DISCIPLINE_LAYER3;
1543
1544 out:
1545         kfree(response);
1546         kfree(request);
1547         if (rc)
1548                 QETH_CARD_TEXT_(card, 2, "err%x", rc);
1549         return disc;
1550 }
1551
1552 /* Determine whether the device requires a specific layer discipline */
1553 static enum qeth_discipline_id qeth_enforce_discipline(struct qeth_card *card)
1554 {
1555         enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;
1556
1557         if (IS_OSM(card) || IS_OSN(card))
1558                 disc = QETH_DISCIPLINE_LAYER2;
1559         else if (IS_VM_NIC(card))
1560                 disc = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
1561                                       qeth_vm_detect_layer(card);
1562
1563         switch (disc) {
1564         case QETH_DISCIPLINE_LAYER2:
1565                 QETH_CARD_TEXT(card, 3, "force l2");
1566                 break;
1567         case QETH_DISCIPLINE_LAYER3:
1568                 QETH_CARD_TEXT(card, 3, "force l3");
1569                 break;
1570         default:
1571                 QETH_CARD_TEXT(card, 3, "force no");
1572         }
1573
1574         return disc;
1575 }
1576
1577 static void qeth_set_blkt_defaults(struct qeth_card *card)
1578 {
1579         QETH_CARD_TEXT(card, 2, "cfgblkt");
1580
1581         if (card->info.use_v1_blkt) {
1582                 card->info.blkt.time_total = 0;
1583                 card->info.blkt.inter_packet = 0;
1584                 card->info.blkt.inter_packet_jumbo = 0;
1585         } else {
1586                 card->info.blkt.time_total = 250;
1587                 card->info.blkt.inter_packet = 5;
1588                 card->info.blkt.inter_packet_jumbo = 15;
1589         }
1590 }
1591
1592 static void qeth_init_tokens(struct qeth_card *card)
1593 {
1594         card->token.issuer_rm_w = 0x00010103UL;
1595         card->token.cm_filter_w = 0x00010108UL;
1596         card->token.cm_connection_w = 0x0001010aUL;
1597         card->token.ulp_filter_w = 0x0001010bUL;
1598         card->token.ulp_connection_w = 0x0001010dUL;
1599 }
1600
1601 static void qeth_init_func_level(struct qeth_card *card)
1602 {
1603         switch (card->info.type) {
1604         case QETH_CARD_TYPE_IQD:
1605                 card->info.func_level = QETH_IDX_FUNC_LEVEL_IQD;
1606                 break;
1607         case QETH_CARD_TYPE_OSD:
1608         case QETH_CARD_TYPE_OSN:
1609                 card->info.func_level = QETH_IDX_FUNC_LEVEL_OSD;
1610                 break;
1611         default:
1612                 break;
1613         }
1614 }
1615
1616 static void qeth_idx_finalize_cmd(struct qeth_card *card,
1617                                   struct qeth_cmd_buffer *iob)
1618 {
1619         memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data), &card->seqno.trans_hdr,
1620                QETH_SEQ_NO_LENGTH);
1621         if (iob->channel == &card->write)
1622                 card->seqno.trans_hdr++;
1623 }
1624
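/* Map the IDX function level we announce to the level we expect the peer
 * to report back.
 */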
1625 static int qeth_peer_func_level(int level)
1626 {
1627         if ((level & 0xff) == 8)
1628                 return (level & 0xff) + 0x400;
1629         if (((level >> 8) & 3) == 1)
1630                 return (level & 0xff) + 0x200;
1631         return level;
1632 }
1633
1634 static void qeth_mpc_finalize_cmd(struct qeth_card *card,
1635                                   struct qeth_cmd_buffer *iob)
1636 {
1637         qeth_idx_finalize_cmd(card, iob);
1638
1639         memcpy(QETH_PDU_HEADER_SEQ_NO(iob->data),
1640                &card->seqno.pdu_hdr, QETH_SEQ_NO_LENGTH);
1641         card->seqno.pdu_hdr++;
1642         memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data),
1643                &card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH);
1644
1645         iob->reply->seqno = QETH_IDX_COMMAND_SEQNO;
1646         iob->callback = qeth_release_buffer_cb;
1647 }
1648
1649 static struct qeth_cmd_buffer *qeth_mpc_alloc_cmd(struct qeth_card *card,
1650                                                   void *data,
1651                                                   unsigned int data_length)
1652 {
1653         struct qeth_cmd_buffer *iob;
1654
1655         iob = qeth_alloc_cmd(&card->write, data_length, 1, QETH_TIMEOUT);
1656         if (!iob)
1657                 return NULL;
1658
1659         memcpy(iob->data, data, data_length);
1660         qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, data_length,
1661                        iob->data);
1662         iob->finalize = qeth_mpc_finalize_cmd;
1663         return iob;
1664 }
1665
1666 /**
1667  * qeth_send_control_data() -   send control command to the card
1668  * @card:                       qeth_card structure pointer
1669  * @iob:                        qeth_cmd_buffer pointer
1670  * @reply_cb:                   callback function pointer
1671  * @cb_card:                    pointer to the qeth_card structure
1672  * @cb_reply:                   pointer to the qeth_reply structure
1673  * @cb_cmd:                     pointer to the original iob for non-IPA
1674  *                              commands, or to the qeth_ipa_cmd structure
1675  *                              for the IPA commands.
1676  * @reply_param:                private pointer passed to the callback
1677  *
1678  * Callback function gets called one or more times, with cb_cmd
1679  * pointing to the response returned by the hardware. Callback
1680  * function must return
1681  *   > 0 if more reply blocks are expected,
1682  *     0 if the last or only reply block is received, and
1683  *   < 0 on error.
1684  * Callback function can get the value of the reply_param pointer from the
1685  * field 'param' of the structure qeth_reply.
1686  */
1687
1688 static int qeth_send_control_data(struct qeth_card *card,
1689                                   struct qeth_cmd_buffer *iob,
1690                                   int (*reply_cb)(struct qeth_card *cb_card,
1691                                                   struct qeth_reply *cb_reply,
1692                                                   unsigned long cb_cmd),
1693                                   void *reply_param)
1694 {
1695         struct qeth_channel *channel = iob->channel;
1696         long timeout = iob->timeout;
1697         int rc;
1698         struct qeth_reply *reply = NULL;
1699
1700         QETH_CARD_TEXT(card, 2, "sendctl");
1701
1702         reply = qeth_alloc_reply(card);
1703         if (!reply) {
1704                 qeth_put_cmd(iob);
1705                 return -ENOMEM;
1706         }
1707         reply->callback = reply_cb;
1708         reply->param = reply_param;
1709
1710         /* pairs with qeth_put_cmd(): */
1711         qeth_get_reply(reply);
1712         iob->reply = reply;
1713
1714         timeout = wait_event_interruptible_timeout(card->wait_q,
1715                                                    qeth_trylock_channel(channel),
1716                                                    timeout);
1717         if (timeout <= 0) {
1718                 qeth_put_reply(reply);
1719                 qeth_put_cmd(iob);
1720                 return (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
1721         }
1722
1723         if (iob->finalize)
1724                 iob->finalize(card, iob);
1725         QETH_DBF_HEX(CTRL, 2, iob->data, min(iob->length, QETH_DBF_CTRL_LEN));
1726
1727         qeth_enqueue_reply(card, reply);
1728
1729         QETH_CARD_TEXT(card, 6, "noirqpnd");
1730         spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
1731         rc = ccw_device_start_timeout(channel->ccwdev, __ccw_from_cmd(iob),
1732                                       (addr_t) iob, 0, 0, timeout);
1733         spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
1734         if (rc) {
1735                 QETH_DBF_MESSAGE(2, "qeth_send_control_data on device %x: ccw_device_start rc = %i\n",
1736                                  CARD_DEVID(card), rc);
1737                 QETH_CARD_TEXT_(card, 2, " err%d", rc);
1738                 qeth_dequeue_reply(card, reply);
1739                 qeth_put_reply(reply);
1740                 qeth_put_cmd(iob);
1741                 atomic_set(&channel->irq_pending, 0);
1742                 wake_up(&card->wait_q);
1743                 return rc;
1744         }
1745
1746         timeout = wait_for_completion_interruptible_timeout(&reply->received,
1747                                                             timeout);
1748         if (timeout <= 0)
1749                 rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
1750
1751         qeth_dequeue_reply(card, reply);
1752         if (!rc)
1753                 rc = reply->rc;
1754         qeth_put_reply(reply);
1755         return rc;
1756 }
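
/*
 * Editor's illustration, not part of the driver: a minimal reply_cb sketch
 * for the callback contract documented above qeth_send_control_data().
 * The names my_probe_cb and struct my_probe_result are hypothetical and
 * exist only for this example; it assumes an IPA command, so cb_cmd points
 * to a struct qeth_ipa_cmd.
 */
#if 0   /* illustrative only, never compiled */
struct my_probe_result {
        u16 return_code;
};

static int my_probe_cb(struct qeth_card *cb_card, struct qeth_reply *cb_reply,
                       unsigned long cb_cmd)
{
        struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)cb_cmd;
        struct my_probe_result *res = cb_reply->param;  /* == reply_param */

        res->return_code = cmd->hdr.return_code;

        /* Return 0 for the last (or only) reply block, > 0 to keep waiting
         * for further reply blocks, or < 0 to fail the command.
         */
        return cmd->hdr.return_code ? -EIO : 0;
}
#endif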
1757
1758 static void qeth_read_conf_data_cb(struct qeth_card *card,
1759                                    struct qeth_cmd_buffer *iob)
1760 {
1761         unsigned char *prcd = iob->data;
1762
1763         QETH_CARD_TEXT(card, 2, "cfgunit");
1764         card->info.chpid = prcd[30];
1765         card->info.unit_addr2 = prcd[31];
1766         card->info.cula = prcd[63];
1767         card->info.is_vm_nic = ((prcd[0x10] == _ascebc['V']) &&
1768                                 (prcd[0x11] == _ascebc['M']));
1769         card->info.use_v1_blkt = prcd[74] == 0xF0 && prcd[75] == 0xF0 &&
1770                                  prcd[76] >= 0xF1 && prcd[76] <= 0xF4;
1771
1772         qeth_notify_reply(iob->reply, 0);
1773         qeth_put_cmd(iob);
1774 }
1775
1776 static int qeth_read_conf_data(struct qeth_card *card)
1777 {
1778         struct qeth_channel *channel = &card->data;
1779         struct qeth_cmd_buffer *iob;
1780         struct ciw *ciw;
1781
1782         /* scan for RCD command in extended SenseID data */
1783         ciw = ccw_device_get_ciw(channel->ccwdev, CIW_TYPE_RCD);
1784         if (!ciw || ciw->cmd == 0)
1785                 return -EOPNOTSUPP;
1786
1787         iob = qeth_alloc_cmd(channel, ciw->count, 1, QETH_RCD_TIMEOUT);
1788         if (!iob)
1789                 return -ENOMEM;
1790
1791         iob->callback = qeth_read_conf_data_cb;
1792         qeth_setup_ccw(__ccw_from_cmd(iob), ciw->cmd, 0, iob->length,
1793                        iob->data);
1794
1795         return qeth_send_control_data(card, iob, NULL, NULL);
1796 }
1797
1798 static int qeth_idx_check_activate_response(struct qeth_card *card,
1799                                             struct qeth_channel *channel,
1800                                             struct qeth_cmd_buffer *iob)
1801 {
1802         int rc;
1803
1804         rc = qeth_check_idx_response(card, iob->data);
1805         if (rc)
1806                 return rc;
1807
1808         if (QETH_IS_IDX_ACT_POS_REPLY(iob->data))
1809                 return 0;
1810
1811         /* negative reply: */
1812         QETH_CARD_TEXT_(card, 2, "idxneg%c",
1813                         QETH_IDX_ACT_CAUSE_CODE(iob->data));
1814
1815         switch (QETH_IDX_ACT_CAUSE_CODE(iob->data)) {
1816         case QETH_IDX_ACT_ERR_EXCL:
1817                 dev_err(&channel->ccwdev->dev,
1818                         "The adapter is used exclusively by another host\n");
1819                 return -EBUSY;
1820         case QETH_IDX_ACT_ERR_AUTH:
1821         case QETH_IDX_ACT_ERR_AUTH_USER:
1822                 dev_err(&channel->ccwdev->dev,
1823                         "Setting the device online failed because of insufficient authorization\n");
1824                 return -EPERM;
1825         default:
1826                 QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: negative reply\n",
1827                                  CCW_DEVID(channel->ccwdev));
1828                 return -EIO;
1829         }
1830 }
1831
1832 static void qeth_idx_activate_read_channel_cb(struct qeth_card *card,
1833                                               struct qeth_cmd_buffer *iob)
1834 {
1835         struct qeth_channel *channel = iob->channel;
1836         u16 peer_level;
1837         int rc;
1838
1839         QETH_CARD_TEXT(card, 2, "idxrdcb");
1840
1841         rc = qeth_idx_check_activate_response(card, channel, iob);
1842         if (rc)
1843                 goto out;
1844
1845         memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
1846         if (peer_level != qeth_peer_func_level(card->info.func_level)) {
1847                 QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
1848                                  CCW_DEVID(channel->ccwdev),
1849                                  card->info.func_level, peer_level);
1850                 rc = -EINVAL;
1851                 goto out;
1852         }
1853
1854         memcpy(&card->token.issuer_rm_r,
1855                QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
1856                QETH_MPC_TOKEN_LENGTH);
1857         memcpy(&card->info.mcl_level[0],
1858                QETH_IDX_REPLY_LEVEL(iob->data), QETH_MCL_LENGTH);
1859
1860 out:
1861         qeth_notify_reply(iob->reply, rc);
1862         qeth_put_cmd(iob);
1863 }
1864
1865 static void qeth_idx_activate_write_channel_cb(struct qeth_card *card,
1866                                                struct qeth_cmd_buffer *iob)
1867 {
1868         struct qeth_channel *channel = iob->channel;
1869         u16 peer_level;
1870         int rc;
1871
1872         QETH_CARD_TEXT(card, 2, "idxwrcb");
1873
1874         rc = qeth_idx_check_activate_response(card, channel, iob);
1875         if (rc)
1876                 goto out;
1877
1878         memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
1879         if ((peer_level & ~0x0100) !=
1880             qeth_peer_func_level(card->info.func_level)) {
1881                 QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
1882                                  CCW_DEVID(channel->ccwdev),
1883                                  card->info.func_level, peer_level);
1884                 rc = -EINVAL;
1885         }
1886
1887 out:
1888         qeth_notify_reply(iob->reply, rc);
1889         qeth_put_cmd(iob);
1890 }
1891
1892 static void qeth_idx_setup_activate_cmd(struct qeth_card *card,
1893                                         struct qeth_cmd_buffer *iob)
1894 {
1895         u16 addr = (card->info.cula << 8) + card->info.unit_addr2;
1896         u8 port = ((u8)card->dev->dev_port) | 0x80;
1897         struct ccw1 *ccw = __ccw_from_cmd(iob);
1898         struct ccw_dev_id dev_id;
1899
1900         qeth_setup_ccw(&ccw[0], CCW_CMD_WRITE, CCW_FLAG_CC, IDX_ACTIVATE_SIZE,
1901                        iob->data);
1902         qeth_setup_ccw(&ccw[1], CCW_CMD_READ, 0, iob->length, iob->data);
1903         ccw_device_get_id(CARD_DDEV(card), &dev_id);
1904         iob->finalize = qeth_idx_finalize_cmd;
1905
1906         memcpy(QETH_IDX_ACT_PNO(iob->data), &port, 1);
1907         memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
1908                &card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH);
1909         memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data),
1910                &card->info.func_level, 2);
1911         memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &dev_id.devno, 2);
1912         memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &addr, 2);
1913 }
1914
1915 static int qeth_idx_activate_read_channel(struct qeth_card *card)
1916 {
1917         struct qeth_channel *channel = &card->read;
1918         struct qeth_cmd_buffer *iob;
1919         int rc;
1920
1921         QETH_CARD_TEXT(card, 2, "idxread");
1922
1923         iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT);
1924         if (!iob)
1925                 return -ENOMEM;
1926
1927         memcpy(iob->data, IDX_ACTIVATE_READ, IDX_ACTIVATE_SIZE);
1928         qeth_idx_setup_activate_cmd(card, iob);
1929         iob->callback = qeth_idx_activate_read_channel_cb;
1930
1931         rc = qeth_send_control_data(card, iob, NULL, NULL);
1932         if (rc)
1933                 return rc;
1934
1935         channel->state = CH_STATE_UP;
1936         return 0;
1937 }
1938
1939 static int qeth_idx_activate_write_channel(struct qeth_card *card)
1940 {
1941         struct qeth_channel *channel = &card->write;
1942         struct qeth_cmd_buffer *iob;
1943         int rc;
1944
1945         QETH_CARD_TEXT(card, 2, "idxwrite");
1946
1947         iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT);
1948         if (!iob)
1949                 return -ENOMEM;
1950
1951         memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE);
1952         qeth_idx_setup_activate_cmd(card, iob);
1953         iob->callback = qeth_idx_activate_write_channel_cb;
1954
1955         rc = qeth_send_control_data(card, iob, NULL, NULL);
1956         if (rc)
1957                 return rc;
1958
1959         channel->state = CH_STATE_UP;
1960         return 0;
1961 }
1962
1963 static int qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
1964                 unsigned long data)
1965 {
1966         struct qeth_cmd_buffer *iob;
1967
1968         QETH_CARD_TEXT(card, 2, "cmenblcb");
1969
1970         iob = (struct qeth_cmd_buffer *) data;
1971         memcpy(&card->token.cm_filter_r,
1972                QETH_CM_ENABLE_RESP_FILTER_TOKEN(iob->data),
1973                QETH_MPC_TOKEN_LENGTH);
1974         return 0;
1975 }
1976
1977 static int qeth_cm_enable(struct qeth_card *card)
1978 {
1979         struct qeth_cmd_buffer *iob;
1980
1981         QETH_CARD_TEXT(card, 2, "cmenable");
1982
1983         iob = qeth_mpc_alloc_cmd(card, CM_ENABLE, CM_ENABLE_SIZE);
1984         if (!iob)
1985                 return -ENOMEM;
1986
1987         memcpy(QETH_CM_ENABLE_ISSUER_RM_TOKEN(iob->data),
1988                &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
1989         memcpy(QETH_CM_ENABLE_FILTER_TOKEN(iob->data),
1990                &card->token.cm_filter_w, QETH_MPC_TOKEN_LENGTH);
1991
1992         return qeth_send_control_data(card, iob, qeth_cm_enable_cb, NULL);
1993 }
1994
1995 static int qeth_cm_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
1996                 unsigned long data)
1997 {
1998         struct qeth_cmd_buffer *iob;
1999
2000         QETH_CARD_TEXT(card, 2, "cmsetpcb");
2001
2002         iob = (struct qeth_cmd_buffer *) data;
2003         memcpy(&card->token.cm_connection_r,
2004                QETH_CM_SETUP_RESP_DEST_ADDR(iob->data),
2005                QETH_MPC_TOKEN_LENGTH);
2006         return 0;
2007 }
2008
2009 static int qeth_cm_setup(struct qeth_card *card)
2010 {
2011         struct qeth_cmd_buffer *iob;
2012
2013         QETH_CARD_TEXT(card, 2, "cmsetup");
2014
2015         iob = qeth_mpc_alloc_cmd(card, CM_SETUP, CM_SETUP_SIZE);
2016         if (!iob)
2017                 return -ENOMEM;
2018
2019         memcpy(QETH_CM_SETUP_DEST_ADDR(iob->data),
2020                &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
2021         memcpy(QETH_CM_SETUP_CONNECTION_TOKEN(iob->data),
2022                &card->token.cm_connection_w, QETH_MPC_TOKEN_LENGTH);
2023         memcpy(QETH_CM_SETUP_FILTER_TOKEN(iob->data),
2024                &card->token.cm_filter_r, QETH_MPC_TOKEN_LENGTH);
2025         return qeth_send_control_data(card, iob, qeth_cm_setup_cb, NULL);
2026 }
2027
2028 static int qeth_update_max_mtu(struct qeth_card *card, unsigned int max_mtu)
2029 {
2030         struct net_device *dev = card->dev;
2031         unsigned int new_mtu;
2032
2033         if (!max_mtu) {
2034                 /* IQD needs accurate max MTU to set up its RX buffers: */
2035                 if (IS_IQD(card))
2036                         return -EINVAL;
2037                 /* tolerate quirky HW: */
2038                 max_mtu = ETH_MAX_MTU;
2039         }
2040
2041         rtnl_lock();
2042         if (IS_IQD(card)) {
2043                 /* move any device with default MTU to new max MTU: */
2044                 new_mtu = (dev->mtu == dev->max_mtu) ? max_mtu : dev->mtu;
2045
2046                 /* adjust RX buffer size to new max MTU: */
2047                 card->qdio.in_buf_size = max_mtu + 2 * PAGE_SIZE;
2048                 if (dev->max_mtu && dev->max_mtu != max_mtu)
2049                         qeth_free_qdio_queues(card);
2050         } else {
2051                 if (dev->mtu)
2052                         new_mtu = dev->mtu;
2053                 /* default MTUs for first setup: */
2054                 else if (IS_LAYER2(card))
2055                         new_mtu = ETH_DATA_LEN;
2056                 else
2057                         new_mtu = ETH_DATA_LEN - 8; /* allow for LLC + SNAP */
2058         }
2059
2060         dev->max_mtu = max_mtu;
2061         dev->mtu = min(new_mtu, max_mtu);
2062         rtnl_unlock();
2063         return 0;
2064 }
2065
2066 static int qeth_get_mtu_outof_framesize(int framesize)
2067 {
2068         switch (framesize) {
2069         case 0x4000:
2070                 return 8192;
2071         case 0x6000:
2072                 return 16384;
2073         case 0xa000:
2074                 return 32768;
2075         case 0xffff:
2076                 return 57344;
2077         default:
2078                 return 0;
2079         }
2080 }
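
/*
 * Editor's worked example, not part of the driver (assumes a 4 KiB
 * PAGE_SIZE): an IQD device whose ULP ENABLE reply carries framesize
 * 0xffff gets an MTU of 57344 from qeth_get_mtu_outof_framesize(), so
 * qeth_update_max_mtu() sizes the RX buffers as
 *
 *         in_buf_size = 57344 + 2 * 4096 = 65536 bytes
 *
 * and moves a netdev still sitting at its previous default MTU up to the
 * new 57344-byte maximum, while an explicitly configured MTU is kept.
 */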
2081
2082 static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
2083                 unsigned long data)
2084 {
2085         __u16 mtu, framesize;
2086         __u16 len;
2087         __u8 link_type;
2088         struct qeth_cmd_buffer *iob;
2089
2090         QETH_CARD_TEXT(card, 2, "ulpenacb");
2091
2092         iob = (struct qeth_cmd_buffer *) data;
2093         memcpy(&card->token.ulp_filter_r,
2094                QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data),
2095                QETH_MPC_TOKEN_LENGTH);
2096         if (IS_IQD(card)) {
2097                 memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2);
2098                 mtu = qeth_get_mtu_outof_framesize(framesize);
2099         } else {
2100                 mtu = *(__u16 *)QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data);
2101         }
2102         *(u16 *)reply->param = mtu;
2103
2104         memcpy(&len, QETH_ULP_ENABLE_RESP_DIFINFO_LEN(iob->data), 2);
2105         if (len >= QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE) {
2106                 memcpy(&link_type,
2107                        QETH_ULP_ENABLE_RESP_LINK_TYPE(iob->data), 1);
2108                 card->info.link_type = link_type;
2109         } else
2110                 card->info.link_type = 0;
2111         QETH_CARD_TEXT_(card, 2, "link%d", card->info.link_type);
2112         return 0;
2113 }
2114
2115 static u8 qeth_mpc_select_prot_type(struct qeth_card *card)
2116 {
2117         if (IS_OSN(card))
2118                 return QETH_PROT_OSN2;
2119         return IS_LAYER2(card) ? QETH_PROT_LAYER2 : QETH_PROT_TCPIP;
2120 }
2121
2122 static int qeth_ulp_enable(struct qeth_card *card)
2123 {
2124         u8 prot_type = qeth_mpc_select_prot_type(card);
2125         struct qeth_cmd_buffer *iob;
2126         u16 max_mtu;
2127         int rc;
2128
2129         QETH_CARD_TEXT(card, 2, "ulpenabl");
2130
2131         iob = qeth_mpc_alloc_cmd(card, ULP_ENABLE, ULP_ENABLE_SIZE);
2132         if (!iob)
2133                 return -ENOMEM;
2134
2135         *(QETH_ULP_ENABLE_LINKNUM(iob->data)) = (u8) card->dev->dev_port;
2136         memcpy(QETH_ULP_ENABLE_PROT_TYPE(iob->data), &prot_type, 1);
2137         memcpy(QETH_ULP_ENABLE_DEST_ADDR(iob->data),
2138                &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
2139         memcpy(QETH_ULP_ENABLE_FILTER_TOKEN(iob->data),
2140                &card->token.ulp_filter_w, QETH_MPC_TOKEN_LENGTH);
2141         rc = qeth_send_control_data(card, iob, qeth_ulp_enable_cb, &max_mtu);
2142         if (rc)
2143                 return rc;
2144         return qeth_update_max_mtu(card, max_mtu);
2145 }
2146
2147 static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
2148                 unsigned long data)
2149 {
2150         struct qeth_cmd_buffer *iob;
2151
2152         QETH_CARD_TEXT(card, 2, "ulpstpcb");
2153
2154         iob = (struct qeth_cmd_buffer *) data;
2155         memcpy(&card->token.ulp_connection_r,
2156                QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
2157                QETH_MPC_TOKEN_LENGTH);
2158         if (!strncmp("00S", QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
2159                      3)) {
2160                 QETH_CARD_TEXT(card, 2, "olmlimit");
2161                 dev_err(&card->gdev->dev, "A connection could not be "
2162                         "established because of an OLM limit\n");
2163                 return -EMLINK;
2164         }
2165         return 0;
2166 }
2167
2168 static int qeth_ulp_setup(struct qeth_card *card)
2169 {
2170         __u16 temp;
2171         struct qeth_cmd_buffer *iob;
2172         struct ccw_dev_id dev_id;
2173
2174         QETH_CARD_TEXT(card, 2, "ulpsetup");
2175
2176         iob = qeth_mpc_alloc_cmd(card, ULP_SETUP, ULP_SETUP_SIZE);
2177         if (!iob)
2178                 return -ENOMEM;
2179
2180         memcpy(QETH_ULP_SETUP_DEST_ADDR(iob->data),
2181                &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
2182         memcpy(QETH_ULP_SETUP_CONNECTION_TOKEN(iob->data),
2183                &card->token.ulp_connection_w, QETH_MPC_TOKEN_LENGTH);
2184         memcpy(QETH_ULP_SETUP_FILTER_TOKEN(iob->data),
2185                &card->token.ulp_filter_r, QETH_MPC_TOKEN_LENGTH);
2186
2187         ccw_device_get_id(CARD_DDEV(card), &dev_id);
2188         memcpy(QETH_ULP_SETUP_CUA(iob->data), &dev_id.devno, 2);
2189         temp = (card->info.cula << 8) + card->info.unit_addr2;
2190         memcpy(QETH_ULP_SETUP_REAL_DEVADDR(iob->data), &temp, 2);
2191         return qeth_send_control_data(card, iob, qeth_ulp_setup_cb, NULL);
2192 }
2193
2194 static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *q, int bidx)
2195 {
2196         struct qeth_qdio_out_buffer *newbuf;
2197
2198         newbuf = kmem_cache_zalloc(qeth_qdio_outbuf_cache, GFP_ATOMIC);
2199         if (!newbuf)
2200                 return -ENOMEM;
2201
2202         newbuf->buffer = q->qdio_bufs[bidx];
2203         skb_queue_head_init(&newbuf->skb_list);
2204         lockdep_set_class(&newbuf->skb_list.lock, &qdio_out_skb_queue_key);
2205         newbuf->q = q;
2206         newbuf->next_pending = q->bufs[bidx];
2207         atomic_set(&newbuf->state, QETH_QDIO_BUF_EMPTY);
2208         q->bufs[bidx] = newbuf;
2209         return 0;
2210 }
2211
2212 static void qeth_free_output_queue(struct qeth_qdio_out_q *q)
2213 {
2214         if (!q)
2215                 return;
2216
2217         qeth_drain_output_queue(q, true);
2218         qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
2219         kfree(q);
2220 }
2221
2222 static struct qeth_qdio_out_q *qeth_alloc_output_queue(void)
2223 {
2224         struct qeth_qdio_out_q *q = kzalloc(sizeof(*q), GFP_KERNEL);
2225
2226         if (!q)
2227                 return NULL;
2228
2229         if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) {
2230                 kfree(q);
2231                 return NULL;
2232         }
2233         return q;
2234 }
2235
2236 static int qeth_alloc_qdio_queues(struct qeth_card *card)
2237 {
2238         int i, j;
2239
2240         QETH_CARD_TEXT(card, 2, "allcqdbf");
2241
2242         if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED,
2243                 QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED)
2244                 return 0;
2245
2246         QETH_CARD_TEXT(card, 2, "inq");
2247         card->qdio.in_q = qeth_alloc_qdio_queue();
2248         if (!card->qdio.in_q)
2249                 goto out_nomem;
2250
2251         /* inbound buffer pool */
2252         if (qeth_alloc_buffer_pool(card))
2253                 goto out_freeinq;
2254
2255         /* outbound */
2256         for (i = 0; i < card->qdio.no_out_queues; ++i) {
2257                 card->qdio.out_qs[i] = qeth_alloc_output_queue();
2258                 if (!card->qdio.out_qs[i])
2259                         goto out_freeoutq;
2260                 QETH_CARD_TEXT_(card, 2, "outq %i", i);
2261                 QETH_CARD_HEX(card, 2, &card->qdio.out_qs[i], sizeof(void *));
2262                 card->qdio.out_qs[i]->card = card;
2263                 card->qdio.out_qs[i]->queue_no = i;
2264                 /* give outbound qeth_qdio_buffers their qdio_buffers */
2265                 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
2266                         WARN_ON(card->qdio.out_qs[i]->bufs[j] != NULL);
2267                         if (qeth_init_qdio_out_buf(card->qdio.out_qs[i], j))
2268                                 goto out_freeoutqbufs;
2269                 }
2270         }
2271
2272         /* completion */
2273         if (qeth_alloc_cq(card))
2274                 goto out_freeoutq;
2275
2276         return 0;
2277
2278 out_freeoutqbufs:
2279         while (j > 0) {
2280                 --j;
2281                 kmem_cache_free(qeth_qdio_outbuf_cache,
2282                                 card->qdio.out_qs[i]->bufs[j]);
2283                 card->qdio.out_qs[i]->bufs[j] = NULL;
2284         }
2285 out_freeoutq:
2286         while (i > 0) {
2287                 qeth_free_output_queue(card->qdio.out_qs[--i]);
2288                 card->qdio.out_qs[i] = NULL;
2289         }
2290         qeth_free_buffer_pool(card);
2291 out_freeinq:
2292         qeth_free_qdio_queue(card->qdio.in_q);
2293         card->qdio.in_q = NULL;
2294 out_nomem:
2295         atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
2296         return -ENOMEM;
2297 }
2298
2299 static void qeth_free_qdio_queues(struct qeth_card *card)
2300 {
2301         int i, j;
2302
2303         if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) ==
2304                 QETH_QDIO_UNINITIALIZED)
2305                 return;
2306
2307         qeth_free_cq(card);
2308         cancel_delayed_work_sync(&card->buffer_reclaim_work);
2309         for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
2310                 if (card->qdio.in_q->bufs[j].rx_skb)
2311                         dev_kfree_skb_any(card->qdio.in_q->bufs[j].rx_skb);
2312         }
2313         qeth_free_qdio_queue(card->qdio.in_q);
2314         card->qdio.in_q = NULL;
2315         /* inbound buffer pool */
2316         qeth_free_buffer_pool(card);
2317         /* free outbound qdio_qs */
2318         for (i = 0; i < card->qdio.no_out_queues; i++) {
2319                 qeth_free_output_queue(card->qdio.out_qs[i]);
2320                 card->qdio.out_qs[i] = NULL;
2321         }
2322 }
2323
2324 static void qeth_create_qib_param_field(struct qeth_card *card,
2325                 char *param_field)
2326 {
2327
2328         param_field[0] = _ascebc['P'];
2329         param_field[1] = _ascebc['C'];
2330         param_field[2] = _ascebc['I'];
2331         param_field[3] = _ascebc['T'];
2332         *((unsigned int *) (&param_field[4])) = QETH_PCI_THRESHOLD_A(card);
2333         *((unsigned int *) (&param_field[8])) = QETH_PCI_THRESHOLD_B(card);
2334         *((unsigned int *) (&param_field[12])) = QETH_PCI_TIMER_VALUE(card);
2335 }
2336
2337 static void qeth_create_qib_param_field_blkt(struct qeth_card *card,
2338                 char *param_field)
2339 {
2340         param_field[16] = _ascebc['B'];
2341         param_field[17] = _ascebc['L'];
2342         param_field[18] = _ascebc['K'];
2343         param_field[19] = _ascebc['T'];
2344         *((unsigned int *) (&param_field[20])) = card->info.blkt.time_total;
2345         *((unsigned int *) (&param_field[24])) = card->info.blkt.inter_packet;
2346         *((unsigned int *) (&param_field[28])) =
2347                 card->info.blkt.inter_packet_jumbo;
2348 }
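
/*
 * Editor's note, derived from the two helpers above (not part of the
 * driver): together they lay out the first 32 bytes of the QIB parameter
 * field as
 *
 *         bytes  0- 3: 'PCIT' (EBCDIC)
 *         bytes  4- 7: QETH_PCI_THRESHOLD_A(card)
 *         bytes  8-11: QETH_PCI_THRESHOLD_B(card)
 *         bytes 12-15: QETH_PCI_TIMER_VALUE(card)
 *         bytes 16-19: 'BLKT' (EBCDIC)
 *         bytes 20-23: blkt.time_total
 *         bytes 24-27: blkt.inter_packet
 *         bytes 28-31: blkt.inter_packet_jumbo
 */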
2349
2350 static int qeth_qdio_activate(struct qeth_card *card)
2351 {
2352         QETH_CARD_TEXT(card, 3, "qdioact");
2353         return qdio_activate(CARD_DDEV(card));
2354 }
2355
2356 static int qeth_dm_act(struct qeth_card *card)
2357 {
2358         struct qeth_cmd_buffer *iob;
2359
2360         QETH_CARD_TEXT(card, 2, "dmact");
2361
2362         iob = qeth_mpc_alloc_cmd(card, DM_ACT, DM_ACT_SIZE);
2363         if (!iob)
2364                 return -ENOMEM;
2365
2366         memcpy(QETH_DM_ACT_DEST_ADDR(iob->data),
2367                &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
2368         memcpy(QETH_DM_ACT_CONNECTION_TOKEN(iob->data),
2369                &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
2370         return qeth_send_control_data(card, iob, NULL, NULL);
2371 }
2372
2373 static int qeth_mpc_initialize(struct qeth_card *card)
2374 {
2375         int rc;
2376
2377         QETH_CARD_TEXT(card, 2, "mpcinit");
2378
2379         rc = qeth_issue_next_read(card);
2380         if (rc) {
2381                 QETH_CARD_TEXT_(card, 2, "1err%d", rc);
2382                 return rc;
2383         }
2384         rc = qeth_cm_enable(card);
2385         if (rc) {
2386                 QETH_CARD_TEXT_(card, 2, "2err%d", rc);
2387                 goto out_qdio;
2388         }
2389         rc = qeth_cm_setup(card);
2390         if (rc) {
2391                 QETH_CARD_TEXT_(card, 2, "3err%d", rc);
2392                 goto out_qdio;
2393         }
2394         rc = qeth_ulp_enable(card);
2395         if (rc) {
2396                 QETH_CARD_TEXT_(card, 2, "4err%d", rc);
2397                 goto out_qdio;
2398         }
2399         rc = qeth_ulp_setup(card);
2400         if (rc) {
2401                 QETH_CARD_TEXT_(card, 2, "5err%d", rc);
2402                 goto out_qdio;
2403         }
2404         rc = qeth_alloc_qdio_queues(card);
2405         if (rc) {
2406                 QETH_CARD_TEXT_(card, 2, "5err%d", rc);
2407                 goto out_qdio;
2408         }
2409         rc = qeth_qdio_establish(card);
2410         if (rc) {
2411                 QETH_CARD_TEXT_(card, 2, "6err%d", rc);
2412                 qeth_free_qdio_queues(card);
2413                 goto out_qdio;
2414         }
2415         rc = qeth_qdio_activate(card);
2416         if (rc) {
2417                 QETH_CARD_TEXT_(card, 2, "7err%d", rc);
2418                 goto out_qdio;
2419         }
2420         rc = qeth_dm_act(card);
2421         if (rc) {
2422                 QETH_CARD_TEXT_(card, 2, "8err%d", rc);
2423                 goto out_qdio;
2424         }
2425
2426         return 0;
2427 out_qdio:
2428         qeth_qdio_clear_card(card, !IS_IQD(card));
2429         qdio_free(CARD_DDEV(card));
2430         return rc;
2431 }
2432
2433 void qeth_print_status_message(struct qeth_card *card)
2434 {
2435         switch (card->info.type) {
2436         case QETH_CARD_TYPE_OSD:
2437         case QETH_CARD_TYPE_OSM:
2438         case QETH_CARD_TYPE_OSX:
2439                 /* VM uses a non-zero first character to indicate a
2440                  * HiperSockets-like reporting of the level; OSA sets
2441                  * the first character to zero.
2442                  */
2443                 if (!card->info.mcl_level[0]) {
2444                         sprintf(card->info.mcl_level, "%02x%02x",
2445                                 card->info.mcl_level[2],
2446                                 card->info.mcl_level[3]);
2447                         break;
2448                 }
2449                 /* fallthrough */
2450         case QETH_CARD_TYPE_IQD:
2451                 if (IS_VM_NIC(card) || (card->info.mcl_level[0] & 0x80)) {
2452                         card->info.mcl_level[0] = (char) _ebcasc[(__u8)
2453                                 card->info.mcl_level[0]];
2454                         card->info.mcl_level[1] = (char) _ebcasc[(__u8)
2455                                 card->info.mcl_level[1]];
2456                         card->info.mcl_level[2] = (char) _ebcasc[(__u8)
2457                                 card->info.mcl_level[2]];
2458                         card->info.mcl_level[3] = (char) _ebcasc[(__u8)
2459                                 card->info.mcl_level[3]];
2460                         card->info.mcl_level[QETH_MCL_LENGTH] = 0;
2461                 }
2462                 break;
2463         default:
2464                 memset(&card->info.mcl_level[0], 0, QETH_MCL_LENGTH + 1);
2465         }
2466         dev_info(&card->gdev->dev,
2467                  "Device is a%s card%s%s%s\nwith link type %s.\n",
2468                  qeth_get_cardname(card),
2469                  (card->info.mcl_level[0]) ? " (level: " : "",
2470                  (card->info.mcl_level[0]) ? card->info.mcl_level : "",
2471                  (card->info.mcl_level[0]) ? ")" : "",
2472                  qeth_get_cardname_short(card));
2473 }
2474 EXPORT_SYMBOL_GPL(qeth_print_status_message);
2475
2476 static void qeth_initialize_working_pool_list(struct qeth_card *card)
2477 {
2478         struct qeth_buffer_pool_entry *entry;
2479
2480         QETH_CARD_TEXT(card, 5, "inwrklst");
2481
2482         list_for_each_entry(entry,
2483                             &card->qdio.init_pool.entry_list, init_list) {
2484                 qeth_put_buffer_pool_entry(card, entry);
2485         }
2486 }
2487
2488 static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry(
2489                                         struct qeth_card *card)
2490 {
2491         struct list_head *plh;
2492         struct qeth_buffer_pool_entry *entry;
2493         int i, free;
2494         struct page *page;
2495
2496         if (list_empty(&card->qdio.in_buf_pool.entry_list))
2497                 return NULL;
2498
2499         list_for_each(plh, &card->qdio.in_buf_pool.entry_list) {
2500                 entry = list_entry(plh, struct qeth_buffer_pool_entry, list);
2501                 free = 1;
2502                 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2503                         if (page_count(virt_to_page(entry->elements[i])) > 1) {
2504                                 free = 0;
2505                                 break;
2506                         }
2507                 }
2508                 if (free) {
2509                         list_del_init(&entry->list);
2510                         return entry;
2511                 }
2512         }
2513
2514         /* no free buffer in pool so take first one and swap pages */
2515         entry = list_entry(card->qdio.in_buf_pool.entry_list.next,
2516                         struct qeth_buffer_pool_entry, list);
2517         for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2518                 if (page_count(virt_to_page(entry->elements[i])) > 1) {
2519                         page = alloc_page(GFP_ATOMIC);
2520                         if (!page) {
2521                                 return NULL;
2522                         } else {
2523                                 free_page((unsigned long)entry->elements[i]);
2524                                 entry->elements[i] = page_address(page);
2525                                 QETH_CARD_STAT_INC(card, rx_sg_alloc_page);
2526                         }
2527                 }
2528         }
2529         list_del_init(&entry->list);
2530         return entry;
2531 }
2532
2533 static int qeth_init_input_buffer(struct qeth_card *card,
2534                 struct qeth_qdio_buffer *buf)
2535 {
2536         struct qeth_buffer_pool_entry *pool_entry;
2537         int i;
2538
2539         if ((card->options.cq == QETH_CQ_ENABLED) && (!buf->rx_skb)) {
2540                 buf->rx_skb = netdev_alloc_skb(card->dev,
2541                                                QETH_RX_PULL_LEN + ETH_HLEN);
2542                 if (!buf->rx_skb)
2543                         return 1;
2544         }
2545
2546         pool_entry = qeth_find_free_buffer_pool_entry(card);
2547         if (!pool_entry)
2548                 return 1;
2549
2550         /*
2551          * since the buffer is accessed only from the input_tasklet
2552          * there shouldn't be a need to synchronize; also, since we use
2553          * the QETH_IN_BUF_REQUEUE_THRESHOLD we should never run out of
2554          * buffers
2555          */
2556
2557         buf->pool_entry = pool_entry;
2558         for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2559                 buf->buffer->element[i].length = PAGE_SIZE;
2560                 buf->buffer->element[i].addr =  pool_entry->elements[i];
2561                 if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1)
2562                         buf->buffer->element[i].eflags = SBAL_EFLAGS_LAST_ENTRY;
2563                 else
2564                         buf->buffer->element[i].eflags = 0;
2565                 buf->buffer->element[i].sflags = 0;
2566         }
2567         return 0;
2568 }
2569
2570 int qeth_init_qdio_queues(struct qeth_card *card)
2571 {
2572         unsigned int i;
2573         int rc;
2574
2575         QETH_CARD_TEXT(card, 2, "initqdqs");
2576
2577         /* inbound queue */
2578         qdio_reset_buffers(card->qdio.in_q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
2579         memset(&card->rx, 0, sizeof(struct qeth_rx));
2580         qeth_initialize_working_pool_list(card);
2581         /* give only as many buffers to hardware as we have buffer pool entries */
2582         for (i = 0; i < card->qdio.in_buf_pool.buf_count - 1; ++i)
2583                 qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]);
2584         card->qdio.in_q->next_buf_to_init =
2585                 card->qdio.in_buf_pool.buf_count - 1;
2586         rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0,
2587                      card->qdio.in_buf_pool.buf_count - 1);
2588         if (rc) {
2589                 QETH_CARD_TEXT_(card, 2, "1err%d", rc);
2590                 return rc;
2591         }
2592
2593         /* completion */
2594         rc = qeth_cq_init(card);
2595         if (rc) {
2596                 return rc;
2597         }
2598
2599         /* outbound queue */
2600         for (i = 0; i < card->qdio.no_out_queues; ++i) {
2601                 struct qeth_qdio_out_q *queue = card->qdio.out_qs[i];
2602
2603                 qdio_reset_buffers(queue->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
2604                 queue->max_elements = QETH_MAX_BUFFER_ELEMENTS(card);
2605                 queue->next_buf_to_fill = 0;
2606                 queue->do_pack = 0;
2607                 atomic_set(&queue->used_buffers, 0);
2608                 atomic_set(&queue->set_pci_flags_count, 0);
2609                 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
2610         }
2611         return 0;
2612 }
2613 EXPORT_SYMBOL_GPL(qeth_init_qdio_queues);
2614
2615 static void qeth_ipa_finalize_cmd(struct qeth_card *card,
2616                                   struct qeth_cmd_buffer *iob)
2617 {
2618         qeth_mpc_finalize_cmd(card, iob);
2619
2620         /* override with IPA-specific values: */
2621         __ipa_cmd(iob)->hdr.seqno = card->seqno.ipa;
2622         iob->reply->seqno = card->seqno.ipa++;
2623 }
2624
2625 void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
2626                           u16 cmd_length)
2627 {
2628         u8 prot_type = qeth_mpc_select_prot_type(card);
2629         u16 total_length = iob->length;
2630
2631         qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, total_length,
2632                        iob->data);
2633         iob->finalize = qeth_ipa_finalize_cmd;
2634
2635         memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
2636         memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &total_length, 2);
2637         memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data), &prot_type, 1);
2638         memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &cmd_length, 2);
2639         memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &cmd_length, 2);
2640         memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
2641                &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
2642         memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &cmd_length, 2);
2643 }
2644 EXPORT_SYMBOL_GPL(qeth_prepare_ipa_cmd);
2645
2646 struct qeth_cmd_buffer *qeth_ipa_alloc_cmd(struct qeth_card *card,
2647                                            enum qeth_ipa_cmds cmd_code,
2648                                            enum qeth_prot_versions prot,
2649                                            unsigned int data_length)
2650 {
2651         enum qeth_link_types link_type = card->info.link_type;
2652         struct qeth_cmd_buffer *iob;
2653         struct qeth_ipacmd_hdr *hdr;
2654
2655         data_length += offsetof(struct qeth_ipa_cmd, data);
2656         iob = qeth_alloc_cmd(&card->write, IPA_PDU_HEADER_SIZE + data_length, 1,
2657                              QETH_IPA_TIMEOUT);
2658         if (!iob)
2659                 return NULL;
2660
2661         qeth_prepare_ipa_cmd(card, iob, data_length);
2662
2663         hdr = &__ipa_cmd(iob)->hdr;
2664         hdr->command = cmd_code;
2665         hdr->initiator = IPA_CMD_INITIATOR_HOST;
2666         /* hdr->seqno is set by qeth_send_control_data() */
2667         hdr->adapter_type = (link_type == QETH_LINK_TYPE_HSTR) ? 2 : 1;
2668         hdr->rel_adapter_no = (u8) card->dev->dev_port;
2669         hdr->prim_version_no = IS_LAYER2(card) ? 2 : 1;
2670         hdr->param_count = 1;
2671         hdr->prot_version = prot;
2672         return iob;
2673 }
2674 EXPORT_SYMBOL_GPL(qeth_ipa_alloc_cmd);
2675
2676 static int qeth_send_ipa_cmd_cb(struct qeth_card *card,
2677                                 struct qeth_reply *reply, unsigned long data)
2678 {
2679         struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
2680
2681         return (cmd->hdr.return_code) ? -EIO : 0;
2682 }
2683
2684 /**
2685  * qeth_send_ipa_cmd() - send an IPA command
2686  *
2687  * See qeth_send_control_data() for explanation of the arguments.
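 * qeth_send_startlan() further below is a minimal caller.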
2688  */
2689
2690 int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
2691                 int (*reply_cb)(struct qeth_card *, struct qeth_reply*,
2692                         unsigned long),
2693                 void *reply_param)
2694 {
2695         int rc;
2696
2697         QETH_CARD_TEXT(card, 4, "sendipa");
2698
2699         if (card->read_or_write_problem) {
2700                 qeth_put_cmd(iob);
2701                 return -EIO;
2702         }
2703
2704         if (reply_cb == NULL)
2705                 reply_cb = qeth_send_ipa_cmd_cb;
2706         rc = qeth_send_control_data(card, iob, reply_cb, reply_param);
2707         if (rc == -ETIME) {
2708                 qeth_clear_ipacmd_list(card);
2709                 qeth_schedule_recovery(card);
2710         }
2711         return rc;
2712 }
2713 EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd);
2714
2715 static int qeth_send_startlan_cb(struct qeth_card *card,
2716                                  struct qeth_reply *reply, unsigned long data)
2717 {
2718         struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
2719
2720         if (cmd->hdr.return_code == IPA_RC_LAN_OFFLINE)
2721                 return -ENETDOWN;
2722
2723         return (cmd->hdr.return_code) ? -EIO : 0;
2724 }
2725
2726 static int qeth_send_startlan(struct qeth_card *card)
2727 {
2728         struct qeth_cmd_buffer *iob;
2729
2730         QETH_CARD_TEXT(card, 2, "strtlan");
2731
2732         iob = qeth_ipa_alloc_cmd(card, IPA_CMD_STARTLAN, QETH_PROT_NONE, 0);
2733         if (!iob)
2734                 return -ENOMEM;
2735         return qeth_send_ipa_cmd(card, iob, qeth_send_startlan_cb, NULL);
2736 }
2737
2738 static int qeth_setadpparms_inspect_rc(struct qeth_ipa_cmd *cmd)
2739 {
2740         if (!cmd->hdr.return_code)
2741                 cmd->hdr.return_code =
2742                         cmd->data.setadapterparms.hdr.return_code;
2743         return cmd->hdr.return_code;
2744 }
2745
2746 static int qeth_query_setadapterparms_cb(struct qeth_card *card,
2747                 struct qeth_reply *reply, unsigned long data)
2748 {
2749         struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
2750
2751         QETH_CARD_TEXT(card, 3, "quyadpcb");
2752         if (qeth_setadpparms_inspect_rc(cmd))
2753                 return -EIO;
2754
2755         if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f) {
2756                 card->info.link_type =
2757                       cmd->data.setadapterparms.data.query_cmds_supp.lan_type;
2758                 QETH_CARD_TEXT_(card, 2, "lnk %d", card->info.link_type);
2759         }
2760         card->options.adp.supported_funcs =
2761                 cmd->data.setadapterparms.data.query_cmds_supp.supported_cmds;
2762         return 0;
2763 }
2764
2765 static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card,
2766                                                     enum qeth_ipa_setadp_cmd adp_cmd,
2767                                                     unsigned int data_length)
2768 {
2769         struct qeth_ipacmd_setadpparms_hdr *hdr;
2770         struct qeth_cmd_buffer *iob;
2771
2772         iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETADAPTERPARMS, QETH_PROT_IPV4,
2773                                  data_length +
2774                                  offsetof(struct qeth_ipacmd_setadpparms,
2775                                           data));
2776         if (!iob)
2777                 return NULL;
2778
2779         hdr = &__ipa_cmd(iob)->data.setadapterparms.hdr;
2780         hdr->cmdlength = sizeof(*hdr) + data_length;
2781         hdr->command_code = adp_cmd;
2782         hdr->used_total = 1;
2783         hdr->seq_no = 1;
2784         return iob;
2785 }
2786
2787 static int qeth_query_setadapterparms(struct qeth_card *card)
2788 {
2789         int rc;
2790         struct qeth_cmd_buffer *iob;
2791
2792         QETH_CARD_TEXT(card, 3, "queryadp");
2793         iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED,
2794                                    SETADP_DATA_SIZEOF(query_cmds_supp));
2795         if (!iob)
2796                 return -ENOMEM;
2797         rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL);
2798         return rc;
2799 }
2800
2801 static int qeth_query_ipassists_cb(struct qeth_card *card,
2802                 struct qeth_reply *reply, unsigned long data)
2803 {
2804         struct qeth_ipa_cmd *cmd;
2805
2806         QETH_CARD_TEXT(card, 2, "qipasscb");
2807
2808         cmd = (struct qeth_ipa_cmd *) data;
2809
2810         switch (cmd->hdr.return_code) {
2811         case IPA_RC_SUCCESS:
2812                 break;
2813         case IPA_RC_NOTSUPP:
2814         case IPA_RC_L2_UNSUPPORTED_CMD:
2815                 QETH_CARD_TEXT(card, 2, "ipaunsup");
2816                 card->options.ipa4.supported_funcs |= IPA_SETADAPTERPARMS;
2817                 card->options.ipa6.supported_funcs |= IPA_SETADAPTERPARMS;
2818                 return -EOPNOTSUPP;
2819         default:
2820                 QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Unhandled rc=%#x\n",
2821                                  CARD_DEVID(card), cmd->hdr.return_code);
2822                 return -EIO;
2823         }
2824
2825         if (cmd->hdr.prot_version == QETH_PROT_IPV4) {
2826                 card->options.ipa4.supported_funcs = cmd->hdr.ipa_supported;
2827                 card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
2828         } else if (cmd->hdr.prot_version == QETH_PROT_IPV6) {
2829                 card->options.ipa6.supported_funcs = cmd->hdr.ipa_supported;
2830                 card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
2831         } else
2832                 QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Flawed LIC detected\n",
2833                                  CARD_DEVID(card));
2834         return 0;
2835 }
2836
2837 static int qeth_query_ipassists(struct qeth_card *card,
2838                                 enum qeth_prot_versions prot)
2839 {
2840         int rc;
2841         struct qeth_cmd_buffer *iob;
2842
2843         QETH_CARD_TEXT_(card, 2, "qipassi%i", prot);
2844         iob = qeth_ipa_alloc_cmd(card, IPA_CMD_QIPASSIST, prot, 0);
2845         if (!iob)
2846                 return -ENOMEM;
2847         rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL);
2848         return rc;
2849 }
2850
2851 static int qeth_query_switch_attributes_cb(struct qeth_card *card,
2852                                 struct qeth_reply *reply, unsigned long data)
2853 {
2854         struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
2855         struct qeth_query_switch_attributes *attrs;
2856         struct qeth_switch_info *sw_info;
2857
2858         QETH_CARD_TEXT(card, 2, "qswiatcb");
2859         if (qeth_setadpparms_inspect_rc(cmd))
2860                 return -EIO;
2861
2862         sw_info = (struct qeth_switch_info *)reply->param;
2863         attrs = &cmd->data.setadapterparms.data.query_switch_attributes;
2864         sw_info->capabilities = attrs->capabilities;
2865         sw_info->settings = attrs->settings;
2866         QETH_CARD_TEXT_(card, 2, "%04x%04x", sw_info->capabilities,
2867                         sw_info->settings);
2868         return 0;
2869 }
2870
2871 int qeth_query_switch_attributes(struct qeth_card *card,
2872                                  struct qeth_switch_info *sw_info)
2873 {
2874         struct qeth_cmd_buffer *iob;
2875
2876         QETH_CARD_TEXT(card, 2, "qswiattr");
2877         if (!qeth_adp_supported(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES))
2878                 return -EOPNOTSUPP;
2879         if (!netif_carrier_ok(card->dev))
2880                 return -ENOMEDIUM;
2881         iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES, 0);
2882         if (!iob)
2883                 return -ENOMEM;
2884         return qeth_send_ipa_cmd(card, iob,
2885                                 qeth_query_switch_attributes_cb, sw_info);
2886 }
2887
2888 struct qeth_cmd_buffer *qeth_get_diag_cmd(struct qeth_card *card,
2889                                           enum qeth_diags_cmds sub_cmd,
2890                                           unsigned int data_length)
2891 {
2892         struct qeth_ipacmd_diagass *cmd;
2893         struct qeth_cmd_buffer *iob;
2894
2895         iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SET_DIAG_ASS, QETH_PROT_NONE,
2896                                  DIAG_HDR_LEN + data_length);
2897         if (!iob)
2898                 return NULL;
2899
2900         cmd = &__ipa_cmd(iob)->data.diagass;
2901         cmd->subcmd_len = DIAG_SUB_HDR_LEN + data_length;
2902         cmd->subcmd = sub_cmd;
2903         return iob;
2904 }
2905 EXPORT_SYMBOL_GPL(qeth_get_diag_cmd);
2906
2907 static int qeth_query_setdiagass_cb(struct qeth_card *card,
2908                 struct qeth_reply *reply, unsigned long data)
2909 {
2910         struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
2911         u16 rc = cmd->hdr.return_code;
2912
2913         if (rc) {
2914                 QETH_CARD_TEXT_(card, 2, "diagq:%x", rc);
2915                 return -EIO;
2916         }
2917
2918         card->info.diagass_support = cmd->data.diagass.ext;
2919         return 0;
2920 }
2921
2922 static int qeth_query_setdiagass(struct qeth_card *card)
2923 {
2924         struct qeth_cmd_buffer *iob;
2925
2926         QETH_CARD_TEXT(card, 2, "qdiagass");
2927         iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_QUERY, 0);
2928         if (!iob)
2929                 return -ENOMEM;
2930         return qeth_send_ipa_cmd(card, iob, qeth_query_setdiagass_cb, NULL);
2931 }
2932
2933 static void qeth_get_trap_id(struct qeth_card *card, struct qeth_trap_id *tid)
2934 {
2935         unsigned long info = get_zeroed_page(GFP_KERNEL);
2936         struct sysinfo_2_2_2 *info222 = (struct sysinfo_2_2_2 *)info;
2937         struct sysinfo_3_2_2 *info322 = (struct sysinfo_3_2_2 *)info;
2938         struct ccw_dev_id ccwid;
2939         int level;
2940
2941         tid->chpid = card->info.chpid;
2942         ccw_device_get_id(CARD_RDEV(card), &ccwid);
2943         tid->ssid = ccwid.ssid;
2944         tid->devno = ccwid.devno;
2945         if (!info)
2946                 return;
2947         level = stsi(NULL, 0, 0, 0);
2948         if ((level >= 2) && (stsi(info222, 2, 2, 2) == 0))
2949                 tid->lparnr = info222->lpar_number;
2950         if ((level >= 3) && (stsi(info322, 3, 2, 2) == 0)) {
2951                 EBCASC(info322->vm[0].name, sizeof(info322->vm[0].name));
2952                 memcpy(tid->vmname, info322->vm[0].name, sizeof(tid->vmname));
2953         }
2954         free_page(info);
2955         return;
2956 }
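/*
 * Summary of the helper above: the trap id is built from the CHPID and the
 * ssid/devno of the read subchannel, plus - when the machine provides the
 * corresponding SYSIB levels - the LPAR number (STSI 2.2.2) and the first
 * VM guest name (STSI 3.2.2, converted from EBCDIC to ASCII). If the
 * scratch page cannot be allocated, only the subchannel data is filled in.
 */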
2957
2958 static int qeth_hw_trap_cb(struct qeth_card *card,
2959                 struct qeth_reply *reply, unsigned long data)
2960 {
2961         struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
2962         u16 rc = cmd->hdr.return_code;
2963
2964         if (rc) {
2965                 QETH_CARD_TEXT_(card, 2, "trapc:%x", rc);
2966                 return -EIO;
2967         }
2968         return 0;
2969 }
2970
2971 int qeth_hw_trap(struct qeth_card *card, enum qeth_diags_trap_action action)
2972 {
2973         struct qeth_cmd_buffer *iob;
2974         struct qeth_ipa_cmd *cmd;
2975
2976         QETH_CARD_TEXT(card, 2, "diagtrap");
2977         iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_TRAP, 64);
2978         if (!iob)
2979                 return -ENOMEM;
2980         cmd = __ipa_cmd(iob);
2981         cmd->data.diagass.type = 1;
2982         cmd->data.diagass.action = action;
2983         switch (action) {
2984         case QETH_DIAGS_TRAP_ARM:
2985                 cmd->data.diagass.options = 0x0003;
2986                 cmd->data.diagass.ext = 0x00010000 +
2987                         sizeof(struct qeth_trap_id);
2988                 qeth_get_trap_id(card,
2989                         (struct qeth_trap_id *)cmd->data.diagass.cdata);
2990                 break;
2991         case QETH_DIAGS_TRAP_DISARM:
2992                 cmd->data.diagass.options = 0x0001;
2993                 break;
2994         case QETH_DIAGS_TRAP_CAPTURE:
2995                 break;
2996         }
2997         return qeth_send_ipa_cmd(card, iob, qeth_hw_trap_cb, NULL);
2998 }
2999 EXPORT_SYMBOL_GPL(qeth_hw_trap);
3000
3001 static int qeth_check_qdio_errors(struct qeth_card *card,
3002                                   struct qdio_buffer *buf,
3003                                   unsigned int qdio_error,
3004                                   const char *dbftext)
3005 {
3006         if (qdio_error) {
3007                 QETH_CARD_TEXT(card, 2, dbftext);
3008                 QETH_CARD_TEXT_(card, 2, " F15=%02X",
3009                                buf->element[15].sflags);
3010                 QETH_CARD_TEXT_(card, 2, " F14=%02X",
3011                                buf->element[14].sflags);
3012                 QETH_CARD_TEXT_(card, 2, " qerr=%X", qdio_error);
3013                 if (buf->element[15].sflags == 0x12) {
3014                         QETH_CARD_STAT_INC(card, rx_dropped);
3015                         return 0;
3016                 }
3017                 return 1;
3018         }
3019         return 0;
3020 }
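/*
 * Return semantics of the helper above: 0 means "no error" (including the
 * special sflags value 0x12 in element 15, which is only accounted as an
 * rx_dropped event), 1 means a real QDIO error that the caller has to act
 * on. The precise meaning of the 0x12 flag value is adapter-specific and
 * not spelled out here.
 */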
3021
3022 static void qeth_queue_input_buffer(struct qeth_card *card, int index)
3023 {
3024         struct qeth_qdio_q *queue = card->qdio.in_q;
3025         struct list_head *lh;
3026         int count;
3027         int i;
3028         int rc;
3029         int newcount = 0;
3030
3031         count = (index < queue->next_buf_to_init) ?
3032                 card->qdio.in_buf_pool.buf_count -
3033                 (queue->next_buf_to_init - index) :
3034                 card->qdio.in_buf_pool.buf_count -
3035                 (queue->next_buf_to_init + QDIO_MAX_BUFFERS_PER_Q - index);
3036         /* only requeue at a certain threshold to avoid SIGAs */
3037         if (count >= QETH_IN_BUF_REQUEUE_THRESHOLD(card)) {
3038                 for (i = queue->next_buf_to_init;
3039                      i < queue->next_buf_to_init + count; ++i) {
3040                         if (qeth_init_input_buffer(card,
3041                                 &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q])) {
3042                                 break;
3043                         } else {
3044                                 newcount++;
3045                         }
3046                 }
3047
3048                 if (newcount < count) {
3049                         /* we are short on memory, so switch back to
3050                            traditional skb allocation and drop packets */
3051                         atomic_set(&card->force_alloc_skb, 3);
3052                         count = newcount;
3053                 } else {
3054                         atomic_add_unless(&card->force_alloc_skb, -1, 0);
3055                 }
3056
3057                 if (!count) {
3058                         i = 0;
3059                         list_for_each(lh, &card->qdio.in_buf_pool.entry_list)
3060                                 i++;
3061                         if (i == card->qdio.in_buf_pool.buf_count) {
3062                                 QETH_CARD_TEXT(card, 2, "qsarbw");
3063                                 card->reclaim_index = index;
3064                                 schedule_delayed_work(
3065                                         &card->buffer_reclaim_work,
3066                                         QETH_RECLAIM_WORK_TIME);
3067                         }
3068                         return;
3069                 }
3070
3071                 /*
3072                  * According to the previous code, requeueing all 128
3073                  * buffers at once should be avoided in order to benefit
3074                  * from PCI avoidance. This function therefore keeps at
3075                  * least one buffer (the buffer at 'index') un-requeued;
3076                  * it will be the first buffer to be requeued next time.
3077                  */
3078                 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0,
3079                              queue->next_buf_to_init, count);
3080                 if (rc) {
3081                         QETH_CARD_TEXT(card, 2, "qinberr");
3082                 }
3083                 queue->next_buf_to_init = (queue->next_buf_to_init + count) %
3084                                           QDIO_MAX_BUFFERS_PER_Q;
3085         }
3086 }
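/*
 * Worked example for the 'count' computation above (numbers are
 * illustrative only): with a pool of buf_count = 64 buffers,
 * next_buf_to_init = 20 and a completion at index = 4, the ring distance
 * from index to next_buf_to_init is 20 - 4 = 16, so count = 64 - 16 = 48
 * buffers could be refilled. With index = 100 (index has wrapped past
 * next_buf_to_init), the distance is 20 + 128 - 100 = 48 and
 * count = 64 - 48 = 16. Refilling only happens once count reaches
 * QETH_IN_BUF_REQUEUE_THRESHOLD(card), to batch SIGAs.
 */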
3087
3088 static void qeth_buffer_reclaim_work(struct work_struct *work)
3089 {
3090         struct qeth_card *card = container_of(work, struct qeth_card,
3091                 buffer_reclaim_work.work);
3092
3093         QETH_CARD_TEXT_(card, 2, "brw:%x", card->reclaim_index);
3094         qeth_queue_input_buffer(card, card->reclaim_index);
3095 }
3096
3097 static void qeth_handle_send_error(struct qeth_card *card,
3098                 struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err)
3099 {
3100         int sbalf15 = buffer->buffer->element[15].sflags;
3101
3102         QETH_CARD_TEXT(card, 6, "hdsnderr");
3103         qeth_check_qdio_errors(card, buffer->buffer, qdio_err, "qouterr");
3104
3105         if (!qdio_err)
3106                 return;
3107
3108         if ((sbalf15 >= 15) && (sbalf15 <= 31))
3109                 return;
3110
3111         QETH_CARD_TEXT(card, 1, "lnkfail");
3112         QETH_CARD_TEXT_(card, 1, "%04x %02x",
3113                        (u16)qdio_err, (u8)sbalf15);
3114 }
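/*
 * Note on the helper above: sbalf15 values 15..31 are returned without
 * logging a link failure, i.e. they are treated as conditions that do not
 * indicate a broken link. Any other non-zero qdio_err is logged as
 * "lnkfail" together with the raw error code and flag value.
 */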
3115
3116 /**
3117  * qeth_prep_flush_pack_buffer - Prepares flushing of a packing buffer.
3118  * @queue: queue to check for packing buffer
3119  *
3120  * Returns number of buffers that were prepared for flush.
3121  */
3122 static int qeth_prep_flush_pack_buffer(struct qeth_qdio_out_q *queue)
3123 {
3124         struct qeth_qdio_out_buffer *buffer;
3125
3126         buffer = queue->bufs[queue->next_buf_to_fill];
3127         if ((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) &&
3128             (buffer->next_element_to_fill > 0)) {
3129                 /* it's a packing buffer */
3130                 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
3131                 queue->next_buf_to_fill =
3132                         (queue->next_buf_to_fill + 1) % QDIO_MAX_BUFFERS_PER_Q;
3133                 return 1;
3134         }
3135         return 0;
3136 }
3137
3138 /*
3139  * Switches the queue to packing state if the number of used buffers
3140  * reaches a certain limit.
3141  */
3142 static void qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue)
3143 {
3144         if (!queue->do_pack) {
3145                 if (atomic_read(&queue->used_buffers)
3146                     >= QETH_HIGH_WATERMARK_PACK) {
3147                         /* switch non-PACKING -> PACKING */
3148                         QETH_CARD_TEXT(queue->card, 6, "np->pack");
3149                         QETH_TXQ_STAT_INC(queue, packing_mode_switch);
3150                         queue->do_pack = 1;
3151                 }
3152         }
3153 }
3154
3155 /*
3156  * Switches from packing to non-packing mode. If there is a packing
3157  * buffer on the queue, that buffer is prepared for flushing and 1 is
3158  * returned to inform the caller. If no buffer has to be flushed,
3159  * zero is returned.
3160  */
3161 static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
3162 {
3163         if (queue->do_pack) {
3164                 if (atomic_read(&queue->used_buffers)
3165                     <= QETH_LOW_WATERMARK_PACK) {
3166                         /* switch PACKING -> non-PACKING */
3167                         QETH_CARD_TEXT(queue->card, 6, "pack->np");
3168                         QETH_TXQ_STAT_INC(queue, packing_mode_switch);
3169                         queue->do_pack = 0;
3170                         return qeth_prep_flush_pack_buffer(queue);
3171                 }
3172         }
3173         return 0;
3174 }
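/*
 * The two helpers above implement a simple hysteresis: packing is switched
 * on once the number of used buffers climbs to QETH_HIGH_WATERMARK_PACK,
 * and switched off again only after it has dropped to
 * QETH_LOW_WATERMARK_PACK. Using two thresholds avoids toggling the mode
 * on every small fluctuation around a single watermark.
 */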
3175
3176 static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
3177                                int count)
3178 {
3179         struct qeth_qdio_out_buffer *buf;
3180         int rc;
3181         int i;
3182         unsigned int qdio_flags;
3183
3184         for (i = index; i < index + count; ++i) {
3185                 int bidx = i % QDIO_MAX_BUFFERS_PER_Q;
3186                 buf = queue->bufs[bidx];
3187                 buf->buffer->element[buf->next_element_to_fill - 1].eflags |=
3188                                 SBAL_EFLAGS_LAST_ENTRY;
3189
3190                 if (queue->bufstates)
3191                         queue->bufstates[bidx].user = buf;
3192
3193                 if (IS_IQD(queue->card))
3194                         continue;
3195
3196                 if (!queue->do_pack) {
3197                         if ((atomic_read(&queue->used_buffers) >=
3198                                 (QETH_HIGH_WATERMARK_PACK -
3199                                  QETH_WATERMARK_PACK_FUZZ)) &&
3200                             !atomic_read(&queue->set_pci_flags_count)) {
3201                                 /* it's likely that we'll go to packing
3202                                  * mode soon */
3203                                 atomic_inc(&queue->set_pci_flags_count);
3204                                 buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
3205                         }
3206                 } else {
3207                         if (!atomic_read(&queue->set_pci_flags_count)) {
3208                                 /*
3209                                  * there is no outstanding PCI request any more,
3210                                  * so request one to make sure the adapter will
3211                                  * wake us at some time in the future. Then we
3212                                  * can flush packed buffers that might still be
3213                                  * hanging around, which can happen if the stack
3214                                  * requested no further sends
3215                                  */
3216                                 atomic_inc(&queue->set_pci_flags_count);
3217                                 buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
3218                         }
3219                 }
3220         }
3221
3222         QETH_TXQ_STAT_ADD(queue, bufs, count);
3223         qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
3224         if (atomic_read(&queue->set_pci_flags_count))
3225                 qdio_flags |= QDIO_FLAG_PCI_OUT;
3226         rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags,
3227                      queue->queue_no, index, count);
3228         if (rc) {
3229                 QETH_TXQ_STAT_ADD(queue, tx_errors, count);
3230                 /* ignore temporary SIGA errors without busy condition */
3231                 if (rc == -ENOBUFS)
3232                         return;
3233                 QETH_CARD_TEXT(queue->card, 2, "flushbuf");
3234                 QETH_CARD_TEXT_(queue->card, 2, " q%d", queue->queue_no);
3235                 QETH_CARD_TEXT_(queue->card, 2, " idx%d", index);
3236                 QETH_CARD_TEXT_(queue->card, 2, " c%d", count);
3237                 QETH_CARD_TEXT_(queue->card, 2, " err%d", rc);
3238
3239                 /* this must not happen under normal circumstances. If it
3240                  * does, something is seriously wrong -> recover */
3241                 qeth_schedule_recovery(queue->card);
3242                 return;
3243         }
3244 }
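/*
 * Flush summary: every buffer in the flushed range is terminated with
 * SBAL_EFLAGS_LAST_ENTRY, and (for non-IQD devices) a PCI request flag is
 * set on a buffer whenever the queue is about to switch into packing mode,
 * or when packing is active and no PCI request is outstanding. The
 * interrupt requested by that flag is what later gives the driver a chance
 * to flush packed buffers that no further transmit would flush. A
 * do_QDIO() failure other than the tolerated -ENOBUFS case is treated as
 * fatal and schedules recovery.
 */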
3245
3246 static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
3247 {
3248         int index;
3249         int flush_cnt = 0;
3250         int q_was_packing = 0;
3251
3252         /*
3253          * check if we have to switch to non-packing mode or if
3254          * we have to get a pci flag out on the queue
3255          */
3256         if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) ||
3257             !atomic_read(&queue->set_pci_flags_count)) {
3258                 if (atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH) ==
3259                                 QETH_OUT_Q_UNLOCKED) {
3260                         /*
3261                          * If we get in here, there was no action in
3262                          * do_send_packet. So, we check if there is a
3263                          * packing buffer to be flushed here.
3264                          */
3265                         index = queue->next_buf_to_fill;
3266                         q_was_packing = queue->do_pack;
3267                         /* queue->do_pack may change */
3268                         barrier();
3269                         flush_cnt += qeth_switch_to_nonpacking_if_needed(queue);
3270                         if (!flush_cnt &&
3271                             !atomic_read(&queue->set_pci_flags_count))
3272                                 flush_cnt += qeth_prep_flush_pack_buffer(queue);
3273                         if (q_was_packing)
3274                                 QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_cnt);
3275                         if (flush_cnt)
3276                                 qeth_flush_buffers(queue, index, flush_cnt);
3277                         atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
3278                 }
3279         }
3280 }
3281
3282 static void qeth_qdio_start_poll(struct ccw_device *ccwdev, int queue,
3283                                  unsigned long card_ptr)
3284 {
3285         struct qeth_card *card = (struct qeth_card *)card_ptr;
3286
3287         if (card->dev->flags & IFF_UP)
3288                 napi_schedule(&card->napi);
3289 }
3290
3291 int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq)
3292 {
3293         int rc;
3294
3295         if (card->options.cq == QETH_CQ_NOTAVAILABLE) {
3296                 rc = -1;
3297                 goto out;
3298         } else {
3299                 if (card->options.cq == cq) {
3300                         rc = 0;
3301                         goto out;
3302                 }
3303
3304                 if (card->state != CARD_STATE_DOWN) {
3305                         rc = -1;
3306                         goto out;
3307                 }
3308
3309                 qeth_free_qdio_queues(card);
3310                 card->options.cq = cq;
3311                 rc = 0;
3312         }
3313 out:
3314         return rc;
3315
3316 }
3317 EXPORT_SYMBOL_GPL(qeth_configure_cq);
3318
3319 static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err,
3320                                  unsigned int queue, int first_element,
3321                                  int count)
3322 {
3323         struct qeth_qdio_q *cq = card->qdio.c_q;
3324         int i;
3325         int rc;
3326
3327         if (!qeth_is_cq(card, queue))
3328                 return;
3329
3330         QETH_CARD_TEXT_(card, 5, "qcqhe%d", first_element);
3331         QETH_CARD_TEXT_(card, 5, "qcqhc%d", count);
3332         QETH_CARD_TEXT_(card, 5, "qcqherr%d", qdio_err);
3333
3334         if (qdio_err) {
3335                 netif_tx_stop_all_queues(card->dev);
3336                 qeth_schedule_recovery(card);
3337                 return;
3338         }
3339
3340         for (i = first_element; i < first_element + count; ++i) {
3341                 int bidx = i % QDIO_MAX_BUFFERS_PER_Q;
3342                 struct qdio_buffer *buffer = cq->qdio_bufs[bidx];
3343                 int e = 0;
3344
3345                 while ((e < QDIO_MAX_ELEMENTS_PER_BUFFER) &&
3346                        buffer->element[e].addr) {
3347                         unsigned long phys_aob_addr;
3348
3349                         phys_aob_addr = (unsigned long) buffer->element[e].addr;
3350                         qeth_qdio_handle_aob(card, phys_aob_addr);
3351                         ++e;
3352                 }
3353                 qeth_scrub_qdio_buffer(buffer, QDIO_MAX_ELEMENTS_PER_BUFFER);
3354         }
3355         rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, queue,
3356                     card->qdio.c_q->next_buf_to_init,
3357                     count);
3358         if (rc) {
3359                 dev_warn(&card->gdev->dev,
3360                         "QDIO reported an error, rc=%i\n", rc);
3361                 QETH_CARD_TEXT(card, 2, "qcqherr");
3362         }
3363         card->qdio.c_q->next_buf_to_init = (card->qdio.c_q->next_buf_to_init
3364                                    + count) % QDIO_MAX_BUFFERS_PER_Q;
3365 }
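/*
 * Completion-queue handling in a nutshell: when the CQ is enabled, each
 * inbound CQ buffer element carries the address of an AOB (asynchronous
 * operation block) describing a TX buffer whose completion was signalled
 * out of band. qeth_qdio_handle_aob() processes each of them, the CQ
 * buffer is scrubbed, and the whole range is handed back to the device
 * via do_QDIO(). Any QDIO error on the CQ is escalated to a full recovery.
 */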
3366
3367 static void qeth_qdio_input_handler(struct ccw_device *ccwdev,
3368                                     unsigned int qdio_err, int queue,
3369                                     int first_elem, int count,
3370                                     unsigned long card_ptr)
3371 {
3372         struct qeth_card *card = (struct qeth_card *)card_ptr;
3373
3374         QETH_CARD_TEXT_(card, 2, "qihq%d", queue);
3375         QETH_CARD_TEXT_(card, 2, "qiec%d", qdio_err);
3376
3377         if (qeth_is_cq(card, queue))
3378                 qeth_qdio_cq_handler(card, qdio_err, queue, first_elem, count);
3379         else if (qdio_err)
3380                 qeth_schedule_recovery(card);
3381 }
3382
3383 static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
3384                                      unsigned int qdio_error, int __queue,
3385                                      int first_element, int count,
3386                                      unsigned long card_ptr)
3387 {
3388         struct qeth_card *card        = (struct qeth_card *) card_ptr;
3389         struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue];
3390         struct qeth_qdio_out_buffer *buffer;
3391         struct net_device *dev = card->dev;
3392         struct netdev_queue *txq;
3393         int i;
3394
3395         QETH_CARD_TEXT(card, 6, "qdouhdl");
3396         if (qdio_error & QDIO_ERROR_FATAL) {
3397                 QETH_CARD_TEXT(card, 2, "achkcond");
3398                 netif_tx_stop_all_queues(dev);
3399                 qeth_schedule_recovery(card);
3400                 return;
3401         }
3402
3403         for (i = first_element; i < (first_element + count); ++i) {
3404                 int bidx = i % QDIO_MAX_BUFFERS_PER_Q;
3405                 buffer = queue->bufs[bidx];
3406                 qeth_handle_send_error(card, buffer, qdio_error);
3407
3408                 if (queue->bufstates &&
3409                     (queue->bufstates[bidx].flags &
3410                      QDIO_OUTBUF_STATE_FLAG_PENDING) != 0) {
3411                         WARN_ON_ONCE(card->options.cq != QETH_CQ_ENABLED);
3412
3413                         if (atomic_cmpxchg(&buffer->state,
3414                                            QETH_QDIO_BUF_PRIMED,
3415                                            QETH_QDIO_BUF_PENDING) ==
3416                                 QETH_QDIO_BUF_PRIMED) {
3417                                 qeth_notify_skbs(queue, buffer,
3418                                                  TX_NOTIFY_PENDING);
3419                         }
3420                         QETH_CARD_TEXT_(queue->card, 5, "pel%d", bidx);
3421
3422                         /* prepare the queue slot for re-use: */
3423                         qeth_scrub_qdio_buffer(buffer->buffer,
3424                                                queue->max_elements);
3425                         if (qeth_init_qdio_out_buf(queue, bidx)) {
3426                                 QETH_CARD_TEXT(card, 2, "outofbuf");
3427                                 qeth_schedule_recovery(card);
3428                         }
3429                 } else {
3430                         if (card->options.cq == QETH_CQ_ENABLED) {
3431                                 enum iucv_tx_notify n;
3432
3433                                 n = qeth_compute_cq_notification(
3434                                         buffer->buffer->element[15].sflags, 0);
3435                                 qeth_notify_skbs(queue, buffer, n);
3436                         }
3437
3438                         qeth_clear_output_buffer(queue, buffer);
3439                 }
3440                 qeth_cleanup_handled_pending(queue, bidx, 0);
3441         }
3442         atomic_sub(count, &queue->used_buffers);
3443         /* check if we need to do something on this outbound queue */
3444         if (!IS_IQD(card))
3445                 qeth_check_outbound_queue(queue);
3446
3447         if (IS_IQD(card))
3448                 __queue = qeth_iqd_translate_txq(dev, __queue);
3449         txq = netdev_get_tx_queue(dev, __queue);
3450         /* xmit may have observed the full-condition, but not yet stopped the
3451          * txq; in that case the code below won't trigger. So before returning,
3452          * xmit will re-check the txq's fill level and wake it up if needed.
3453          */
3454         if (netif_tx_queue_stopped(txq) && !qeth_out_queue_is_full(queue))
3455                 netif_tx_wake_queue(txq);
3456 }
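/*
 * TX completion flow of the handler above: buffers that the device flags
 * as PENDING (only possible with the completion queue enabled) have their
 * skbs notified with TX_NOTIFY_PENDING - these notifications are consumed
 * by the af_iucv transport - and the queue slot is immediately re-armed
 * with a fresh buffer so it can be reused. Regularly completed buffers are
 * notified (if the CQ is enabled) and cleared. Afterwards the used_buffers
 * count is reduced and a stopped txq is woken up again once it is no
 * longer full.
 */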
3457
3458 /*
3459  * Note: Function assumes that we have 4 outbound queues.
3460  */
3461 int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb)
3462 {
3463         struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
3464         u8 tos;
3465
3466         switch (card->qdio.do_prio_queueing) {
3467         case QETH_PRIO_Q_ING_TOS:
3468         case QETH_PRIO_Q_ING_PREC:
3469                 switch (qeth_get_ip_version(skb)) {
3470                 case 4:
3471                         tos = ipv4_get_dsfield(ip_hdr(skb));
3472                         break;
3473                 case 6:
3474                         tos = ipv6_get_dsfield(ipv6_hdr(skb));
3475                         break;
3476                 default:
3477                         return card->qdio.default_out_queue;
3478                 }
3479                 if (card->qdio.do_prio_queueing == QETH_PRIO_Q_ING_PREC)
3480                         return ~tos >> 6 & 3;
3481                 if (tos & IPTOS_MINCOST)
3482                         return 3;
3483                 if (tos & IPTOS_RELIABILITY)
3484                         return 2;
3485                 if (tos & IPTOS_THROUGHPUT)
3486                         return 1;
3487                 if (tos & IPTOS_LOWDELAY)
3488                         return 0;
3489                 break;
3490         case QETH_PRIO_Q_ING_SKB:
3491                 if (skb->priority > 5)
3492                         return 0;
3493                 return ~skb->priority >> 1 & 3;
3494         case QETH_PRIO_Q_ING_VLAN:
3495                 if (veth->h_vlan_proto == htons(ETH_P_8021Q))
3496                         return ~ntohs(veth->h_vlan_TCI) >>
3497                                (VLAN_PRIO_SHIFT + 1) & 3;
3498                 break;
3499         default:
3500                 break;
3501         }
3502         return card->qdio.default_out_queue;
3503 }
3504 EXPORT_SYMBOL_GPL(qeth_get_priority_queue);
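/*
 * Illustration of the precedence mapping above (example values, not taken
 * from this file): with QETH_PRIO_Q_ING_PREC the two top bits of the DS
 * field select the queue via ~tos >> 6 & 3, i.e. IP precedence 0-1 maps to
 * queue 3, 2-3 to queue 2, 4-5 to queue 1 and 6-7 to queue 0 - higher
 * precedence means a lower (higher-priority) queue number. An EF-marked
 * packet (DS field 0xb8), for instance, lands on queue 1. The SKB and VLAN
 * variants apply the same "invert and take the top bits" idea to
 * skb->priority and the VLAN PCP bits respectively.
 */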
3505
3506 /**
3507  * qeth_get_elements_for_frags() -      find number of SBALEs for skb frags.
3508  * @skb:                                SKB address
3509  *
3510  * Returns the number of pages, and thus QDIO buffer elements, needed to cover
3511  * fragmented part of the SKB. Returns zero for linear SKB.
3512  */
3513 static int qeth_get_elements_for_frags(struct sk_buff *skb)
3514 {
3515         int cnt, elements = 0;
3516
3517         for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
3518                 skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt];
3519
3520                 elements += qeth_get_elements_for_range(
3521                         (addr_t)skb_frag_address(frag),
3522                         (addr_t)skb_frag_address(frag) + skb_frag_size(frag));
3523         }
3524         return elements;
3525 }
3526
3527 /**
3528  * qeth_count_elements() -      Counts the number of QDIO buffer elements needed
3529  *                              to transmit an skb.
3530  * @skb:                        the skb to operate on.
3531  * @data_offset:                skip this part of the skb's linear data
3532  *
3533  * Returns the number of pages, and thus QDIO buffer elements, needed to map the
3534  * skb's data (both its linear part and paged fragments).
3535  */
3536 unsigned int qeth_count_elements(struct sk_buff *skb, unsigned int data_offset)
3537 {
3538         unsigned int elements = qeth_get_elements_for_frags(skb);
3539         addr_t end = (addr_t)skb->data + skb_headlen(skb);
3540         addr_t start = (addr_t)skb->data + data_offset;
3541
3542         if (start != end)
3543                 elements += qeth_get_elements_for_range(start, end);
3544         return elements;
3545 }
3546 EXPORT_SYMBOL_GPL(qeth_count_elements);
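/*
 * Element-count intuition (illustrative numbers): a QDIO buffer element
 * can address at most one page, so each memory range contributes one
 * element per page it touches. A 6000-byte linear part that starts 1000
 * bytes before a 4 KB page boundary therefore needs 3 elements
 * (1000 + 4096 + 904 bytes), while a page-aligned 4096-byte page frag
 * needs exactly 1. qeth_count_elements() simply sums this up for the
 * (possibly offset) linear part and every page fragment of the skb.
 */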
3547
3548 #define QETH_HDR_CACHE_OBJ_SIZE         (sizeof(struct qeth_hdr_tso) + \
3549                                          MAX_TCP_HEADER)
3550
3551 /**
3552  * qeth_add_hw_header() - add a HW header to an skb.
3553  * @skb: skb that the HW header should be added to.
3554  * @hdr: double pointer to a qeth_hdr. When returning with >= 0,
3555  *       it contains a valid pointer to a qeth_hdr.
3556  * @hdr_len: length of the HW header.
3557  * @proto_len: length of protocol headers that need to be in same page as the
3558  *             HW header.
3559  *
3560  * Returns the pushed length. If the header can't be pushed on
3561  * (eg. because it would cross a page boundary), it is allocated from
3562  * the cache instead and 0 is returned.
3563  * The number of needed buffer elements is returned in @elements.
3564  * A negative return value indicates an error while creating the header.
3565  */
3566 static int qeth_add_hw_header(struct qeth_qdio_out_q *queue,
3567                               struct sk_buff *skb, struct qeth_hdr **hdr,
3568                               unsigned int hdr_len, unsigned int proto_len,
3569                               unsigned int *elements)
3570 {
3571         const unsigned int contiguous = proto_len ? proto_len : 1;
3572         const unsigned int max_elements = queue->max_elements;
3573         unsigned int __elements;
3574         addr_t start, end;
3575         bool push_ok;
3576         int rc;
3577
3578 check_layout:
3579         start = (addr_t)skb->data - hdr_len;
3580         end = (addr_t)skb->data;
3581
3582         if (qeth_get_elements_for_range(start, end + contiguous) == 1) {
3583                 /* Push HW header into same page as first protocol header. */
3584                 push_ok = true;
3585                 /* ... but TSO always needs a separate element for headers: */
3586                 if (skb_is_gso(skb))
3587                         __elements = 1 + qeth_count_elements(skb, proto_len);
3588                 else
3589                         __elements = qeth_count_elements(skb, 0);
3590         } else if (!proto_len && PAGE_ALIGNED(skb->data)) {
3591                 /* Push HW header into preceding page, flush with skb->data. */
3592                 push_ok = true;
3593                 __elements = 1 + qeth_count_elements(skb, 0);
3594         } else {
3595                 /* Use header cache, copy protocol headers up. */
3596                 push_ok = false;
3597                 __elements = 1 + qeth_count_elements(skb, proto_len);
3598         }
3599
3600         /* Compress skb to fit into one IO buffer: */
3601         if (__elements > max_elements) {
3602                 if (!skb_is_nonlinear(skb)) {
3603                         /* Drop it, no easy way of shrinking it further. */
3604                         QETH_DBF_MESSAGE(2, "Dropped an oversized skb (Max Elements=%u / Actual=%u / Length=%u).\n",
3605                                          max_elements, __elements, skb->len);
3606                         return -E2BIG;
3607                 }
3608
3609                 rc = skb_linearize(skb);
3610                 if (rc) {
3611                         QETH_TXQ_STAT_INC(queue, skbs_linearized_fail);
3612                         return rc;
3613                 }
3614
3615                 QETH_TXQ_STAT_INC(queue, skbs_linearized);
3616                 /* Linearization changed the layout, re-evaluate: */
3617                 goto check_layout;
3618         }
3619
3620         *elements = __elements;
3621         /* Add the header: */
3622         if (push_ok) {
3623                 *hdr = skb_push(skb, hdr_len);
3624                 return hdr_len;
3625         }
3626         /* fall back */
3627         if (hdr_len + proto_len > QETH_HDR_CACHE_OBJ_SIZE)
3628                 return -E2BIG;
3629         *hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC);
3630         if (!*hdr)
3631                 return -ENOMEM;
3632         /* Copy protocol headers behind HW header: */
3633         skb_copy_from_linear_data(skb, ((char *)*hdr) + hdr_len, proto_len);
3634         return 0;
3635 }
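/*
 * Layout decision above, in short: if the HW header fits into the same
 * page as the first 'proto_len' bytes of payload, it is simply pushed in
 * front of skb->data (push_ok); if the data is page-aligned and no
 * proto_len is required, the header may be pushed into the preceding page;
 * otherwise a header is taken from qeth_core_header_cache, the protocol
 * headers are copied behind it, and the header occupies a buffer element
 * of its own. If the resulting element count still exceeds the queue
 * limit, a nonlinear skb is linearized once and the layout re-evaluated;
 * a linear skb that is still too big is dropped with -E2BIG.
 */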
3636
3637 static void __qeth_fill_buffer(struct sk_buff *skb,
3638                                struct qeth_qdio_out_buffer *buf,
3639                                bool is_first_elem, unsigned int offset)
3640 {
3641         struct qdio_buffer *buffer = buf->buffer;
3642         int element = buf->next_element_to_fill;
3643         int length = skb_headlen(skb) - offset;
3644         char *data = skb->data + offset;
3645         unsigned int elem_length, cnt;
3646
3647         /* map linear part into buffer element(s) */
3648         while (length > 0) {
3649                 elem_length = min_t(unsigned int, length,
3650                                     PAGE_SIZE - offset_in_page(data));
3651
3652                 buffer->element[element].addr = data;
3653                 buffer->element[element].length = elem_length;
3654                 length -= elem_length;
3655                 if (is_first_elem) {
3656                         is_first_elem = false;
3657                         if (length || skb_is_nonlinear(skb))
3658                                 /* skb needs additional elements */
3659                                 buffer->element[element].eflags =
3660                                         SBAL_EFLAGS_FIRST_FRAG;
3661                         else
3662                                 buffer->element[element].eflags = 0;
3663                 } else {
3664                         buffer->element[element].eflags =
3665                                 SBAL_EFLAGS_MIDDLE_FRAG;
3666                 }
3667
3668                 data += elem_length;
3669                 element++;
3670         }
3671
3672         /* map page frags into buffer element(s) */
3673         for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
3674                 skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt];
3675
3676                 data = skb_frag_address(frag);
3677                 length = skb_frag_size(frag);
3678                 while (length > 0) {
3679                         elem_length = min_t(unsigned int, length,
3680                                             PAGE_SIZE - offset_in_page(data));
3681
3682                         buffer->element[element].addr = data;
3683                         buffer->element[element].length = elem_length;
3684                         buffer->element[element].eflags =
3685                                 SBAL_EFLAGS_MIDDLE_FRAG;
3686
3687                         length -= elem_length;
3688                         data += elem_length;
3689                         element++;
3690                 }
3691         }
3692
3693         if (buffer->element[element - 1].eflags)
3694                 buffer->element[element - 1].eflags = SBAL_EFLAGS_LAST_FRAG;
3695         buf->next_element_to_fill = element;
3696 }
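/*
 * The eflags chaining above marks multi-element skbs for the device: the
 * first element of a multi-element skb gets SBAL_EFLAGS_FIRST_FRAG, all
 * following elements are tagged as MIDDLE_FRAG, and the final element is
 * rewritten to LAST_FRAG. A single-element skb keeps eflags 0 (apart from
 * the LAST_ENTRY bit that qeth_flush_buffers() sets on the buffer's final
 * element).
 */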
3697
3698 /**
3699  * qeth_fill_buffer() - map skb into an output buffer
3700  * @queue:      QDIO queue to submit the buffer on
3701  * @buf:        buffer to transport the skb
3702  * @skb:        skb to map into the buffer
3703  * @hdr:        qeth_hdr for this skb. Either at skb->data, or allocated
3704  *              from qeth_core_header_cache.
3705  * @offset:     when mapping the skb, start at skb->data + offset
3706  * @hd_len:     if > 0, build a dedicated header element of this size
3707  * @flush:      Prepare the buffer to be flushed, regardless of its fill level.
3708  */
3709 static int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
3710                             struct qeth_qdio_out_buffer *buf,
3711                             struct sk_buff *skb, struct qeth_hdr *hdr,
3712                             unsigned int offset, unsigned int hd_len,
3713                             bool flush)
3714 {
3715         struct qdio_buffer *buffer = buf->buffer;
3716         bool is_first_elem = true;
3717
3718         __skb_queue_tail(&buf->skb_list, skb);
3719
3720         /* build dedicated header element */
3721         if (hd_len) {
3722                 int element = buf->next_element_to_fill;
3723                 is_first_elem = false;
3724
3725                 buffer->element[element].addr = hdr;
3726                 buffer->element[element].length = hd_len;
3727                 buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG;
3728                 /* remember to free cache-allocated qeth_hdr: */
3729                 buf->is_header[element] = ((void *)hdr != skb->data);
3730                 buf->next_element_to_fill++;
3731         }
3732
3733         __qeth_fill_buffer(skb, buf, is_first_elem, offset);
3734
3735         if (!queue->do_pack) {
3736                 QETH_CARD_TEXT(queue->card, 6, "fillbfnp");
3737         } else {
3738                 QETH_CARD_TEXT(queue->card, 6, "fillbfpa");
3739
3740                 QETH_TXQ_STAT_INC(queue, skbs_pack);
3741                 /* If the buffer still has free elements, keep using it. */
3742                 if (!flush &&
3743                     buf->next_element_to_fill < queue->max_elements)
3744                         return 0;
3745         }
3746
3747         /* flush out the buffer */
3748         atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
3749         queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
3750                                   QDIO_MAX_BUFFERS_PER_Q;
3751         return 1;
3752 }
3753
3754 static int qeth_do_send_packet_fast(struct qeth_qdio_out_q *queue,
3755                                     struct sk_buff *skb, struct qeth_hdr *hdr,
3756                                     unsigned int offset, unsigned int hd_len)
3757 {
3758         int index = queue->next_buf_to_fill;
3759         struct qeth_qdio_out_buffer *buffer = queue->bufs[index];
3760         struct netdev_queue *txq;
3761         bool stopped = false;
3762
3763         /* Just a sanity check, the wake/stop logic should ensure that we always
3764          * get a free buffer.
3765          */
3766         if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
3767                 return -EBUSY;
3768
3769         txq = netdev_get_tx_queue(queue->card->dev, skb_get_queue_mapping(skb));
3770
3771         if (atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) {
3772                 /* If a TX completion happens right _here_ and misses to wake
3773                  * the txq, then our re-check below will catch the race.
3774                  */
3775                 QETH_TXQ_STAT_INC(queue, stopped);
3776                 netif_tx_stop_queue(txq);
3777                 stopped = true;
3778         }
3779
3780         qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len, stopped);
3781         qeth_flush_buffers(queue, index, 1);
3782
3783         if (stopped && !qeth_out_queue_is_full(queue))
3784                 netif_tx_start_queue(txq);
3785         return 0;
3786 }
3787
3788 int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
3789                         struct sk_buff *skb, struct qeth_hdr *hdr,
3790                         unsigned int offset, unsigned int hd_len,
3791                         int elements_needed)
3792 {
3793         struct qeth_qdio_out_buffer *buffer;
3794         struct netdev_queue *txq;
3795         bool stopped = false;
3796         int start_index;
3797         int flush_count = 0;
3798         int do_pack = 0;
3799         int tmp;
3800         int rc = 0;
3801
3802         /* spin until we get the queue ... */
3803         while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED,
3804                               QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED);
3805         start_index = queue->next_buf_to_fill;
3806         buffer = queue->bufs[queue->next_buf_to_fill];
3807
3808         /* Just a sanity check, the wake/stop logic should ensure that we always
3809          * get a free buffer.
3810          */
3811         if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) {
3812                 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
3813                 return -EBUSY;
3814         }
3815
3816         txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb));
3817
3818         /* check if we need to switch packing state of this queue */
3819         qeth_switch_to_packing_if_needed(queue);
3820         if (queue->do_pack) {
3821                 do_pack = 1;
3822                 /* does packet fit in current buffer? */
3823                 if (buffer->next_element_to_fill + elements_needed >
3824                     queue->max_elements) {
3825                         /* ... no -> set state PRIMED */
3826                         atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
3827                         flush_count++;
3828                         queue->next_buf_to_fill =
3829                                 (queue->next_buf_to_fill + 1) %
3830                                 QDIO_MAX_BUFFERS_PER_Q;
3831                         buffer = queue->bufs[queue->next_buf_to_fill];
3832
3833                         /* We stepped forward, so sanity-check again: */
3834                         if (atomic_read(&buffer->state) !=
3835                             QETH_QDIO_BUF_EMPTY) {
3836                                 qeth_flush_buffers(queue, start_index,
3837                                                            flush_count);
3838                                 atomic_set(&queue->state,
3839                                                 QETH_OUT_Q_UNLOCKED);
3840                                 rc = -EBUSY;
3841                                 goto out;
3842                         }
3843                 }
3844         }
3845
3846         if (buffer->next_element_to_fill == 0 &&
3847             atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) {
3848                 /* If a TX completion happens right _here_ and misses to wake
3849                  * the txq, then our re-check below will catch the race.
3850                  */
3851                 QETH_TXQ_STAT_INC(queue, stopped);
3852                 netif_tx_stop_queue(txq);
3853                 stopped = true;
3854         }
3855
3856         flush_count += qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len,
3857                                         stopped);
3858         if (flush_count)
3859                 qeth_flush_buffers(queue, start_index, flush_count);
3860         else if (!atomic_read(&queue->set_pci_flags_count))
3861                 atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH);
3862         /*
3863          * queue->state will go from LOCKED -> UNLOCKED or from
3864          * LOCKED_FLUSH -> LOCKED if output_handler wanted to 'notify' us
3865          * (switch packing state or flush buffer to get another pci flag out).
3866          * In that case we will enter this loop
3867          */
3868         while (atomic_dec_return(&queue->state)) {
3869                 start_index = queue->next_buf_to_fill;
3870                 /* check if we can go back to non-packing state */
3871                 tmp = qeth_switch_to_nonpacking_if_needed(queue);
3872                 /*
3873                  * check if we need to flush a packing buffer to get a pci
3874                  * flag out on the queue
3875                  */
3876                 if (!tmp && !atomic_read(&queue->set_pci_flags_count))
3877                         tmp = qeth_prep_flush_pack_buffer(queue);
3878                 if (tmp) {
3879                         qeth_flush_buffers(queue, start_index, tmp);
3880                         flush_count += tmp;
3881                 }
3882         }
3883 out:
3884         /* at this point the queue is UNLOCKED again */
3885         if (do_pack)
3886                 QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_count);
3887
3888         if (stopped && !qeth_out_queue_is_full(queue))
3889                 netif_tx_start_queue(txq);
3890         return rc;
3891 }
3892 EXPORT_SYMBOL_GPL(qeth_do_send_packet);
3893
3894 static void qeth_fill_tso_ext(struct qeth_hdr_tso *hdr,
3895                               unsigned int payload_len, struct sk_buff *skb,
3896                               unsigned int proto_len)
3897 {
3898         struct qeth_hdr_ext_tso *ext = &hdr->ext;
3899
3900         ext->hdr_tot_len = sizeof(*ext);
3901         ext->imb_hdr_no = 1;
3902         ext->hdr_type = 1;
3903         ext->hdr_version = 1;
3904         ext->hdr_len = 28;
3905         ext->payload_len = payload_len;
3906         ext->mss = skb_shinfo(skb)->gso_size;
3907         ext->dg_hdr_len = proto_len;
3908 }
3909
3910 int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
3911               struct qeth_qdio_out_q *queue, int ipv,
3912               void (*fill_header)(struct qeth_qdio_out_q *queue,
3913                                   struct qeth_hdr *hdr, struct sk_buff *skb,
3914                                   int ipv, unsigned int data_len))
3915 {
3916         unsigned int proto_len, hw_hdr_len;
3917         unsigned int frame_len = skb->len;
3918         bool is_tso = skb_is_gso(skb);
3919         unsigned int data_offset = 0;
3920         struct qeth_hdr *hdr = NULL;
3921         unsigned int hd_len = 0;
3922         unsigned int elements;
3923         int push_len, rc;
3924         bool is_sg;
3925
3926         if (is_tso) {
3927                 hw_hdr_len = sizeof(struct qeth_hdr_tso);
3928                 proto_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
3929         } else {
3930                 hw_hdr_len = sizeof(struct qeth_hdr);
3931                 proto_len = (IS_IQD(card) && IS_LAYER2(card)) ? ETH_HLEN : 0;
3932         }
3933
3934         rc = skb_cow_head(skb, hw_hdr_len);
3935         if (rc)
3936                 return rc;
3937
3938         push_len = qeth_add_hw_header(queue, skb, &hdr, hw_hdr_len, proto_len,
3939                                       &elements);
3940         if (push_len < 0)
3941                 return push_len;
3942         if (is_tso || !push_len) {
3943                 /* HW header needs its own buffer element. */
3944                 hd_len = hw_hdr_len + proto_len;
3945                 data_offset = push_len + proto_len;
3946         }
3947         memset(hdr, 0, hw_hdr_len);
3948         fill_header(queue, hdr, skb, ipv, frame_len);
3949         if (is_tso)
3950                 qeth_fill_tso_ext((struct qeth_hdr_tso *) hdr,
3951                                   frame_len - proto_len, skb, proto_len);
3952
3953         is_sg = skb_is_nonlinear(skb);
3954         if (IS_IQD(card)) {
3955                 rc = qeth_do_send_packet_fast(queue, skb, hdr, data_offset,
3956                                               hd_len);
3957         } else {
3958                 /* TODO: drop skb_orphan() once TX completion is fast enough */
3959                 skb_orphan(skb);
3960                 rc = qeth_do_send_packet(card, queue, skb, hdr, data_offset,
3961                                          hd_len, elements);
3962         }
3963
3964         if (!rc) {
3965                 QETH_TXQ_STAT_ADD(queue, buf_elements, elements);
3966                 if (is_sg)
3967                         QETH_TXQ_STAT_INC(queue, skbs_sg);
3968                 if (is_tso) {
3969                         QETH_TXQ_STAT_INC(queue, skbs_tso);
3970                         QETH_TXQ_STAT_ADD(queue, tso_bytes, frame_len);
3971                 }
3972         } else {
3973                 if (!push_len)
3974                         kmem_cache_free(qeth_core_header_cache, hdr);
3975         }
3976         return rc;
3977 }
3978 EXPORT_SYMBOL_GPL(qeth_xmit);
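/*
 * Putting qeth_xmit() together: qeth_add_hw_header() either pushes the HW
 * header in front of the payload (push_len > 0, header and data share the
 * first element) or allocates it from the header cache (push_len == 0).
 * For TSO, and whenever the header came from the cache, a dedicated header
 * element of hd_len = hw_hdr_len + proto_len bytes is built and the mapped
 * payload starts at data_offset = push_len + proto_len. IQD devices go
 * through the lock-free "fast" path with one skb per buffer, while other
 * devices use qeth_do_send_packet(), which may pack several skbs into one
 * buffer; a cache-allocated header is freed again if the send fails.
 */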
3979
3980 static int qeth_setadp_promisc_mode_cb(struct qeth_card *card,
3981                 struct qeth_reply *reply, unsigned long data)
3982 {
3983         struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3984         struct qeth_ipacmd_setadpparms *setparms;
3985
3986         QETH_CARD_TEXT(card, 4, "prmadpcb");
3987
3988         setparms = &(cmd->data.setadapterparms);
3989         if (qeth_setadpparms_inspect_rc(cmd)) {
3990                 QETH_CARD_TEXT_(card, 4, "prmrc%x", cmd->hdr.return_code);
3991                 setparms->data.mode = SET_PROMISC_MODE_OFF;
3992         }
3993         card->info.promisc_mode = setparms->data.mode;
3994         return (cmd->hdr.return_code) ? -EIO : 0;
3995 }
3996
3997 void qeth_setadp_promisc_mode(struct qeth_card *card)
3998 {
3999         enum qeth_ipa_promisc_modes mode;
4000         struct net_device *dev = card->dev;
4001         struct qeth_cmd_buffer *iob;
4002         struct qeth_ipa_cmd *cmd;
4003
4004         QETH_CARD_TEXT(card, 4, "setprom");
4005
4006         if (((dev->flags & IFF_PROMISC) &&
4007              (card->info.promisc_mode == SET_PROMISC_MODE_ON)) ||
4008             (!(dev->flags & IFF_PROMISC) &&
4009              (card->info.promisc_mode == SET_PROMISC_MODE_OFF)))
4010                 return;
4011         mode = SET_PROMISC_MODE_OFF;
4012         if (dev->flags & IFF_PROMISC)
4013                 mode = SET_PROMISC_MODE_ON;
4014         QETH_CARD_TEXT_(card, 4, "mode:%x", mode);
4015
4016         iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE,
4017                                    SETADP_DATA_SIZEOF(mode));
4018         if (!iob)
4019                 return;
4020         cmd = __ipa_cmd(iob);
4021         cmd->data.setadapterparms.data.mode = mode;
4022         qeth_send_ipa_cmd(card, iob, qeth_setadp_promisc_mode_cb, NULL);
4023 }
4024 EXPORT_SYMBOL_GPL(qeth_setadp_promisc_mode);
4025
4026 static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
4027                 struct qeth_reply *reply, unsigned long data)
4028 {
4029         struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
4030         struct qeth_ipacmd_setadpparms *adp_cmd;
4031
4032         QETH_CARD_TEXT(card, 4, "chgmaccb");
4033         if (qeth_setadpparms_inspect_rc(cmd))
4034                 return -EIO;
4035
4036         adp_cmd = &cmd->data.setadapterparms;
4037         if (!is_valid_ether_addr(adp_cmd->data.change_addr.addr))
4038                 return -EADDRNOTAVAIL;
4039
4040         if (IS_LAYER2(card) && IS_OSD(card) && !IS_VM_NIC(card) &&
4041             !(adp_cmd->hdr.flags & QETH_SETADP_FLAGS_VIRTUAL_MAC))
4042                 return -EADDRNOTAVAIL;
4043
4044         ether_addr_copy(card->dev->dev_addr, adp_cmd->data.change_addr.addr);
4045         return 0;
4046 }
4047
4048 int qeth_setadpparms_change_macaddr(struct qeth_card *card)
4049 {
4050         int rc;
4051         struct qeth_cmd_buffer *iob;
4052         struct qeth_ipa_cmd *cmd;
4053
4054         QETH_CARD_TEXT(card, 4, "chgmac");
4055
4056         iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS,
4057                                    SETADP_DATA_SIZEOF(change_addr));
4058         if (!iob)
4059                 return -ENOMEM;
4060         cmd = __ipa_cmd(iob);
4061         cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC;
4062         cmd->data.setadapterparms.data.change_addr.addr_size = ETH_ALEN;
4063         ether_addr_copy(cmd->data.setadapterparms.data.change_addr.addr,
4064                         card->dev->dev_addr);
4065         rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_change_macaddr_cb,
4066                                NULL);
4067         return rc;
4068 }
4069 EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr);
4070
4071 static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
4072                 struct qeth_reply *reply, unsigned long data)
4073 {
4074         struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
4075         struct qeth_set_access_ctrl *access_ctrl_req;
4076         int fallback = *(int *)reply->param;
4077
4078         QETH_CARD_TEXT(card, 4, "setaccb");
4079         if (cmd->hdr.return_code)
4080                 return -EIO;
4081         qeth_setadpparms_inspect_rc(cmd);
4082
4083         access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
4084         QETH_CARD_TEXT_(card, 2, "rc=%d",
4085                         cmd->data.setadapterparms.hdr.return_code);
4086         if (cmd->data.setadapterparms.hdr.return_code !=
4087                                                 SET_ACCESS_CTRL_RC_SUCCESS)
4088                 QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%#x) on device %x: %#x\n",
4089                                  access_ctrl_req->subcmd_code, CARD_DEVID(card),
4090                                  cmd->data.setadapterparms.hdr.return_code);
4091         switch (cmd->data.setadapterparms.hdr.return_code) {
4092         case SET_ACCESS_CTRL_RC_SUCCESS:
4093                 if (card->options.isolation == ISOLATION_MODE_NONE) {
4094                         dev_info(&card->gdev->dev,
4095                             "QDIO data connection isolation is deactivated\n");
4096                 } else {
4097                         dev_info(&card->gdev->dev,
4098                             "QDIO data connection isolation is activated\n");
4099                 }
4100                 break;
4101         case SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED:
4102                 QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already deactivated\n",
4103                                  CARD_DEVID(card));
4104                 if (fallback)
4105                         card->options.isolation = card->options.prev_isolation;
4106                 break;
4107         case SET_ACCESS_CTRL_RC_ALREADY_ISOLATED:
4108                 QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already activated\n",
4109                                  CARD_DEVID(card));
4110                 if (fallback)
4111                         card->options.isolation = card->options.prev_isolation;
4112                 break;
4113         case SET_ACCESS_CTRL_RC_NOT_SUPPORTED:
4114                 dev_err(&card->gdev->dev, "Adapter does not "
4115                         "support QDIO data connection isolation\n");
4116                 break;
4117         case SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER:
4118                 dev_err(&card->gdev->dev,
4119                         "Adapter is dedicated. "
4120                         "QDIO data connection isolation not supported\n");
4121                 if (fallback)
4122                         card->options.isolation = card->options.prev_isolation;
4123                 break;
4124         case SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF:
4125                 dev_err(&card->gdev->dev,
4126                         "TSO does not permit QDIO data connection isolation\n");
4127                 if (fallback)
4128                         card->options.isolation = card->options.prev_isolation;
4129                 break;
4130         case SET_ACCESS_CTRL_RC_REFLREL_UNSUPPORTED:
4131                 dev_err(&card->gdev->dev, "The adjacent switch port does not "
4132                         "support reflective relay mode\n");
4133                 if (fallback)
4134                         card->options.isolation = card->options.prev_isolation;
4135                 break;
4136         case SET_ACCESS_CTRL_RC_REFLREL_FAILED:
4137                 dev_err(&card->gdev->dev, "The reflective relay mode cannot be "
4138                                         "enabled at the adjacent switch port\n");
4139                 if (fallback)
4140                         card->options.isolation = card->options.prev_isolation;
4141                 break;
4142         case SET_ACCESS_CTRL_RC_REFLREL_DEACT_FAILED:
4143                 dev_warn(&card->gdev->dev, "Turning off reflective relay mode "
4144                                         "at the adjacent switch failed\n");
4145                 break;
4146         default:
4147                 /* this should never happen */
4148                 if (fallback)
4149                         card->options.isolation = card->options.prev_isolation;
4150                 break;
4151         }
4152         return (cmd->hdr.return_code) ? -EIO : 0;
4153 }
4154
4155 static int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
4156                 enum qeth_ipa_isolation_modes isolation, int fallback)
4157 {
4158         int rc;
4159         struct qeth_cmd_buffer *iob;
4160         struct qeth_ipa_cmd *cmd;
4161         struct qeth_set_access_ctrl *access_ctrl_req;
4162
4163         QETH_CARD_TEXT(card, 4, "setacctl");
4164
4165         iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL,
4166                                    SETADP_DATA_SIZEOF(set_access_ctrl));
4167         if (!iob)
4168                 return -ENOMEM;
4169         cmd = __ipa_cmd(iob);
4170         access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
4171         access_ctrl_req->subcmd_code = isolation;
4172
4173         rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_set_access_ctrl_cb,
4174                                &fallback);
4175         QETH_CARD_TEXT_(card, 2, "rc=%d", rc);
4176         return rc;
4177 }
4178
4179 int qeth_set_access_ctrl_online(struct qeth_card *card, int fallback)
4180 {
4181         int rc = 0;
4182
4183         QETH_CARD_TEXT(card, 4, "setactlo");
4184
4185         if ((IS_OSD(card) || IS_OSX(card)) &&
4186             qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) {
4187                 rc = qeth_setadpparms_set_access_ctrl(card,
4188                         card->options.isolation, fallback);
4189                 if (rc) {
4190                         QETH_DBF_MESSAGE(3, "IPA(SET_ACCESS_CTRL) returned %d on device %x: sending failed\n",
4191                                          rc, CARD_DEVID(card));
4192                         rc = -EOPNOTSUPP;
4193                 }
4194         } else if (card->options.isolation != ISOLATION_MODE_NONE) {
4195                 card->options.isolation = ISOLATION_MODE_NONE;
4196
4197                 dev_err(&card->gdev->dev, "Adapter does not "
4198                         "support QDIO data connection isolation\n");
4199                 rc = -EOPNOTSUPP;
4200         }
4201         return rc;
4202 }
4203 EXPORT_SYMBOL_GPL(qeth_set_access_ctrl_online);
4204
4205 void qeth_tx_timeout(struct net_device *dev)
4206 {
4207         struct qeth_card *card;
4208
4209         card = dev->ml_priv;
4210         QETH_CARD_TEXT(card, 4, "txtimeo");
4211         qeth_schedule_recovery(card);
4212 }
4213 EXPORT_SYMBOL_GPL(qeth_tx_timeout);
4214
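     /*
      * qeth devices have no real PHY or MDIO bus; emulate a minimal set of
      * MII registers with values derived from the link type, the MAC
      * address and the RX error count so that generic MII ioctls keep
      * working.
      */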
4215 static int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum)
4216 {
4217         struct qeth_card *card = dev->ml_priv;
4218         int rc = 0;
4219
4220         switch (regnum) {
4221         case MII_BMCR: /* Basic mode control register */
4222                 rc = BMCR_FULLDPLX;
4223                 if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH) &&
4224                     (card->info.link_type != QETH_LINK_TYPE_OSN) &&
4225                     (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH) &&
4226                     (card->info.link_type != QETH_LINK_TYPE_25GBIT_ETH))
4227                         rc |= BMCR_SPEED100;
4228                 break;
4229         case MII_BMSR: /* Basic mode status register */
4230                 rc = BMSR_ERCAP | BMSR_ANEGCOMPLETE | BMSR_LSTATUS |
4231                      BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | BMSR_100FULL |
4232                      BMSR_100BASE4;
4233                 break;
4234         case MII_PHYSID1: /* PHYS ID 1 */
4235                 rc = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 8) |
4236                      dev->dev_addr[2];
4237                 rc = (rc >> 5) & 0xFFFF;
4238                 break;
4239         case MII_PHYSID2: /* PHYS ID 2 */
4240                 rc = (dev->dev_addr[2] << 10) & 0xFFFF;
4241                 break;
4242         case MII_ADVERTISE: /* Advertisement control reg */
4243                 rc = ADVERTISE_ALL;
4244                 break;
4245         case MII_LPA: /* Link partner ability reg */
4246                 rc = LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL |
4247                      LPA_100BASE4 | LPA_LPACK;
4248                 break;
4249         case MII_EXPANSION: /* Expansion register */
4250                 break;
4251         case MII_DCOUNTER: /* disconnect counter */
4252                 break;
4253         case MII_FCSCOUNTER: /* false carrier counter */
4254                 break;
4255         case MII_NWAYTEST: /* N-way auto-neg test register */
4256                 break;
4257         case MII_RERRCOUNTER: /* rx error counter */
4258                 rc = card->stats.rx_errors;
4259                 break;
4260         case MII_SREVISION: /* silicon revision */
4261                 break;
4262         case MII_RESV1: /* reserved 1 */
4263                 break;
4264         case MII_LBRERROR: /* loopback, rx, bypass error */
4265                 break;
4266         case MII_PHYADDR: /* physical address */
4267                 break;
4268         case MII_RESV2: /* reserved 2 */
4269                 break;
4270         case MII_TPISTATUS: /* TPI status for 10mbps */
4271                 break;
4272         case MII_NCONFIG: /* network interface config */
4273                 break;
4274         default:
4275                 break;
4276         }
4277         return rc;
4278 }
4279
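     /*
      * Callback for IPA_SETADP_SET_SNMP_CONTROL: append each reply fragment
      * to the user buffer described by the qeth_arp_query_info in
      * reply->param. Returning 1 keeps the command pending while
      * seq_no < used_total, i.e. while more fragments are expected.
      */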
4280 static int qeth_snmp_command_cb(struct qeth_card *card,
4281                 struct qeth_reply *reply, unsigned long sdata)
4282 {
4283         struct qeth_ipa_cmd *cmd;
4284         struct qeth_arp_query_info *qinfo;
4285         unsigned char *data;
4286         void *snmp_data;
4287         __u16 data_len;
4288
4289         QETH_CARD_TEXT(card, 3, "snpcmdcb");
4290
4291         cmd = (struct qeth_ipa_cmd *) sdata;
4292         data = (unsigned char *)((char *)cmd - reply->offset);
4293         qinfo = (struct qeth_arp_query_info *) reply->param;
4294
4295         if (cmd->hdr.return_code) {
4296                 QETH_CARD_TEXT_(card, 4, "scer1%x", cmd->hdr.return_code);
4297                 return -EIO;
4298         }
4299         if (cmd->data.setadapterparms.hdr.return_code) {
4300                 cmd->hdr.return_code =
4301                         cmd->data.setadapterparms.hdr.return_code;
4302                 QETH_CARD_TEXT_(card, 4, "scer2%x", cmd->hdr.return_code);
4303                 return -EIO;
4304         }
4305         data_len = *((__u16 *)QETH_IPA_PDU_LEN_PDU1(data));
4306         if (cmd->data.setadapterparms.hdr.seq_no == 1) {
4307                 snmp_data = &cmd->data.setadapterparms.data.snmp;
4308                 data_len -= offsetof(struct qeth_ipa_cmd,
4309                                      data.setadapterparms.data.snmp);
4310         } else {
4311                 snmp_data = &cmd->data.setadapterparms.data.snmp.request;
4312                 data_len -= offsetof(struct qeth_ipa_cmd,
4313                                      data.setadapterparms.data.snmp.request);
4314         }
4315
4316         /* check if there is enough room in userspace */
4317         if ((qinfo->udata_len - qinfo->udata_offset) < data_len) {
4318                 QETH_CARD_TEXT_(card, 4, "scer3%i", -ENOSPC);
4319                 return -ENOSPC;
4320         }
4321         QETH_CARD_TEXT_(card, 4, "snore%i",
4322                         cmd->data.setadapterparms.hdr.used_total);
4323         QETH_CARD_TEXT_(card, 4, "sseqn%i",
4324                         cmd->data.setadapterparms.hdr.seq_no);
4325         /*copy entries to user buffer*/
4326         memcpy(qinfo->udata + qinfo->udata_offset, snmp_data, data_len);
4327         qinfo->udata_offset += data_len;
4328
4329         if (cmd->data.setadapterparms.hdr.seq_no <
4330             cmd->data.setadapterparms.hdr.used_total)
4331                 return 1;
4332         return 0;
4333 }
4334
4335 static int qeth_snmp_command(struct qeth_card *card, char __user *udata)
4336 {
4337         struct qeth_snmp_ureq __user *ureq;
4338         struct qeth_cmd_buffer *iob;
4339         unsigned int req_len;
4340         struct qeth_arp_query_info qinfo = {0, };
4341         int rc = 0;
4342
4343         QETH_CARD_TEXT(card, 3, "snmpcmd");
4344
4345         if (IS_VM_NIC(card))
4346                 return -EOPNOTSUPP;
4347
4348         if ((!qeth_adp_supported(card, IPA_SETADP_SET_SNMP_CONTROL)) &&
4349             IS_LAYER3(card))
4350                 return -EOPNOTSUPP;
4351
4352         ureq = (struct qeth_snmp_ureq __user *) udata;
4353         if (get_user(qinfo.udata_len, &ureq->hdr.data_len) ||
4354             get_user(req_len, &ureq->hdr.req_len))
4355                 return -EFAULT;
4356
4357         iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL, req_len);
4358         if (!iob)
4359                 return -ENOMEM;
4360
4361         if (copy_from_user(&__ipa_cmd(iob)->data.setadapterparms.data.snmp,
4362                            &ureq->cmd, req_len)) {
4363                 qeth_put_cmd(iob);
4364                 return -EFAULT;
4365         }
4366
4367         qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL);
4368         if (!qinfo.udata) {
4369                 qeth_put_cmd(iob);
4370                 return -ENOMEM;
4371         }
4372         qinfo.udata_offset = sizeof(struct qeth_snmp_ureq_hdr);
4373
4374         rc = qeth_send_ipa_cmd(card, iob, qeth_snmp_command_cb, &qinfo);
4375         if (rc)
4376                 QETH_DBF_MESSAGE(2, "SNMP command failed on device %x: (%#x)\n",
4377                                  CARD_DEVID(card), rc);
4378         else {
4379                 if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
4380                         rc = -EFAULT;
4381         }
4382
4383         kfree(qinfo.udata);
4384         return rc;
4385 }
4386
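     /*
      * Callback for IPA_SETADP_QUERY_OAT: copy each chunk of response data
      * into the buffer tracked by qeth_qoat_priv, returning -ENOSPC once the
      * caller-supplied buffer would overflow. A return value of 1 keeps the
      * command pending until all fragments have arrived.
      */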
4387 static int qeth_setadpparms_query_oat_cb(struct qeth_card *card,
4388                 struct qeth_reply *reply, unsigned long data)
4389 {
4390         struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
4391         struct qeth_qoat_priv *priv;
4392         char *resdata;
4393         int resdatalen;
4394
4395         QETH_CARD_TEXT(card, 3, "qoatcb");
4396         if (qeth_setadpparms_inspect_rc(cmd))
4397                 return -EIO;
4398
4399         priv = (struct qeth_qoat_priv *)reply->param;
4400         resdatalen = cmd->data.setadapterparms.hdr.cmdlength;
4401         resdata = (char *)data + 28;
4402
4403         if (resdatalen > (priv->buffer_len - priv->response_len))
4404                 return -ENOSPC;
4405
4406         memcpy((priv->buffer + priv->response_len), resdata,
4407                 resdatalen);
4408         priv->response_len += resdatalen;
4409
4410         if (cmd->data.setadapterparms.hdr.seq_no <
4411             cmd->data.setadapterparms.hdr.used_total)
4412                 return 1;
4413         return 0;
4414 }
4415
4416 static int qeth_query_oat_command(struct qeth_card *card, char __user *udata)
4417 {
4418         int rc = 0;
4419         struct qeth_cmd_buffer *iob;
4420         struct qeth_ipa_cmd *cmd;
4421         struct qeth_query_oat *oat_req;
4422         struct qeth_query_oat_data oat_data;
4423         struct qeth_qoat_priv priv;
4424         void __user *tmp;
4425
4426         QETH_CARD_TEXT(card, 3, "qoatcmd");
4427
4428         if (!qeth_adp_supported(card, IPA_SETADP_QUERY_OAT)) {
4429                 rc = -EOPNOTSUPP;
4430                 goto out;
4431         }
4432
4433         if (copy_from_user(&oat_data, udata,
4434             sizeof(struct qeth_query_oat_data))) {
4435                 rc = -EFAULT;
4436                 goto out;
4437         }
4438
4439         priv.buffer_len = oat_data.buffer_len;
4440         priv.response_len = 0;
4441         priv.buffer = vzalloc(oat_data.buffer_len);
4442         if (!priv.buffer) {
4443                 rc = -ENOMEM;
4444                 goto out;
4445         }
4446
4447         iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT,
4448                                    SETADP_DATA_SIZEOF(query_oat));
4449         if (!iob) {
4450                 rc = -ENOMEM;
4451                 goto out_free;
4452         }
4453         cmd = __ipa_cmd(iob);
4454         oat_req = &cmd->data.setadapterparms.data.query_oat;
4455         oat_req->subcmd_code = oat_data.command;
4456
4457         rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_query_oat_cb,
4458                                &priv);
4459         if (!rc) {
4460                 if (is_compat_task())
4461                         tmp = compat_ptr(oat_data.ptr);
4462                 else
4463                         tmp = (void __user *)(unsigned long)oat_data.ptr;
4464
4465                 if (copy_to_user(tmp, priv.buffer,
4466                     priv.response_len)) {
4467                         rc = -EFAULT;
4468                         goto out_free;
4469                 }
4470
4471                 oat_data.response_len = priv.response_len;
4472
4473                 if (copy_to_user(udata, &oat_data,
4474                     sizeof(struct qeth_query_oat_data)))
4475                         rc = -EFAULT;
4476         }
4477
4478 out_free:
4479         vfree(priv.buffer);
4480 out:
4481         return rc;
4482 }
4483
4484 static int qeth_query_card_info_cb(struct qeth_card *card,
4485                                    struct qeth_reply *reply, unsigned long data)
4486 {
4487         struct carrier_info *carrier_info = (struct carrier_info *)reply->param;
4488         struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
4489         struct qeth_query_card_info *card_info;
4490
4491         QETH_CARD_TEXT(card, 2, "qcrdincb");
4492         if (qeth_setadpparms_inspect_rc(cmd))
4493                 return -EIO;
4494
4495         card_info = &cmd->data.setadapterparms.data.card_info;
4496         carrier_info->card_type = card_info->card_type;
4497         carrier_info->port_mode = card_info->port_mode;
4498         carrier_info->port_speed = card_info->port_speed;
4499         return 0;
4500 }
4501
4502 int qeth_query_card_info(struct qeth_card *card,
4503                          struct carrier_info *carrier_info)
4504 {
4505         struct qeth_cmd_buffer *iob;
4506
4507         QETH_CARD_TEXT(card, 2, "qcrdinfo");
4508         if (!qeth_adp_supported(card, IPA_SETADP_QUERY_CARD_INFO))
4509                 return -EOPNOTSUPP;
4510         iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_CARD_INFO, 0);
4511         if (!iob)
4512                 return -ENOMEM;
4513         return qeth_send_ipa_cmd(card, iob, qeth_query_card_info_cb,
4514                                         (void *)carrier_info);
4515 }
4516
4517 /**
4518  * qeth_vm_request_mac() - Request a hypervisor-managed MAC address
4519  * @card: pointer to a qeth_card
4520  *
4521  * Return:
4522  *      0, if a MAC address has been set for the card's netdevice
4523  *      a return code, for various error conditions
4524  */
4525 int qeth_vm_request_mac(struct qeth_card *card)
4526 {
4527         struct diag26c_mac_resp *response;
4528         struct diag26c_mac_req *request;
4529         struct ccw_dev_id id;
4530         int rc;
4531
4532         QETH_CARD_TEXT(card, 2, "vmreqmac");
4533
4534         request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
4535         response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
4536         if (!request || !response) {
4537                 rc = -ENOMEM;
4538                 goto out;
4539         }
4540
4541         ccw_device_get_id(CARD_DDEV(card), &id);
4542         request->resp_buf_len = sizeof(*response);
4543         request->resp_version = DIAG26C_VERSION2;
4544         request->op_code = DIAG26C_GET_MAC;
4545         request->devno = id.devno;
4546
4547         QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
4548         rc = diag26c(request, response, DIAG26C_MAC_SERVICES);
4549         QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
4550         if (rc)
4551                 goto out;
4552         QETH_DBF_HEX(CTRL, 2, response, sizeof(*response));
4553
4554         if (request->resp_buf_len < sizeof(*response) ||
4555             response->version != request->resp_version) {
4556                 rc = -EIO;
4557                 QETH_CARD_TEXT(card, 2, "badresp");
4558                 QETH_CARD_HEX(card, 2, &request->resp_buf_len,
4559                               sizeof(request->resp_buf_len));
4560         } else if (!is_valid_ether_addr(response->mac)) {
4561                 rc = -EINVAL;
4562                 QETH_CARD_TEXT(card, 2, "badmac");
4563                 QETH_CARD_HEX(card, 2, response->mac, ETH_ALEN);
4564         } else {
4565                 ether_addr_copy(card->dev->dev_addr, response->mac);
4566         }
4567
4568 out:
4569         kfree(response);
4570         kfree(request);
4571         return rc;
4572 }
4573 EXPORT_SYMBOL_GPL(qeth_vm_request_mac);
4574
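     /*
      * Probe the device's QDIO capabilities: temporarily set the data device
      * online if necessary, read the configuration data and the SSQD block,
      * and mark card->options.cq as unavailable when format-2 completion
      * queues are not supported.
      */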
4575 static void qeth_determine_capabilities(struct qeth_card *card)
4576 {
4577         int rc;
4578         struct ccw_device *ddev;
4579         int ddev_offline = 0;
4580
4581         QETH_CARD_TEXT(card, 2, "detcapab");
4582         ddev = CARD_DDEV(card);
4583         if (!ddev->online) {
4584                 ddev_offline = 1;
4585                 rc = ccw_device_set_online(ddev);
4586                 if (rc) {
4587                         QETH_CARD_TEXT_(card, 2, "3err%d", rc);
4588                         goto out;
4589                 }
4590         }
4591
4592         rc = qeth_read_conf_data(card);
4593         if (rc) {
4594                 QETH_DBF_MESSAGE(2, "qeth_read_conf_data on device %x returned %i\n",
4595                                  CARD_DEVID(card), rc);
4596                 QETH_CARD_TEXT_(card, 2, "5err%d", rc);
4597                 goto out_offline;
4598         }
4599
4600         rc = qdio_get_ssqd_desc(ddev, &card->ssqd);
4601         if (rc)
4602                 QETH_CARD_TEXT_(card, 2, "6err%d", rc);
4603
4604         QETH_CARD_TEXT_(card, 2, "qfmt%d", card->ssqd.qfmt);
4605         QETH_CARD_TEXT_(card, 2, "ac1:%02x", card->ssqd.qdioac1);
4606         QETH_CARD_TEXT_(card, 2, "ac2:%04x", card->ssqd.qdioac2);
4607         QETH_CARD_TEXT_(card, 2, "ac3:%04x", card->ssqd.qdioac3);
4608         QETH_CARD_TEXT_(card, 2, "icnt%d", card->ssqd.icnt);
4609         if (card->ssqd.qfmt == QDIO_IQDIO_QFMT &&
4610             (card->ssqd.qdioac1 & CHSC_AC1_INITIATE_INPUTQ) &&
4611             (card->ssqd.qdioac3 & CHSC_AC3_FORMAT2_CQ_AVAILABLE)) {
4612                 dev_info(&card->gdev->dev,
4613                         "Completion Queueing supported\n");
4614         } else {
4615                 card->options.cq = QETH_CQ_NOTAVAILABLE;
4616         }
4617
4618
4619 out_offline:
4620         if (ddev_offline == 1)
4621                 ccw_device_set_offline(ddev);
4622 out:
4623         return;
4624 }
4625
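     /*
      * When completion queueing is enabled, the last input queue acts as the
      * completion queue (CQ): append its buffers to the input SBAL array and
      * clear its queue_start_poll entry so it is not polled like a data
      * queue.
      */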
4626 static void qeth_qdio_establish_cq(struct qeth_card *card,
4627                                    struct qdio_buffer **in_sbal_ptrs,
4628                                    void (**queue_start_poll)
4629                                         (struct ccw_device *, int,
4630                                          unsigned long))
4631 {
4632         int i;
4633
4634         if (card->options.cq == QETH_CQ_ENABLED) {
4635                 int offset = QDIO_MAX_BUFFERS_PER_Q *
4636                              (card->qdio.no_in_queues - 1);
4637                 for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) {
4638                         in_sbal_ptrs[offset + i] = (struct qdio_buffer *)
4639                                 virt_to_phys(card->qdio.c_q->bufs[i].buffer);
4640                 }
4641
4642                 queue_start_poll[card->qdio.no_in_queues - 1] = NULL;
4643         }
4644 }
4645
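     /*
      * Assemble the qdio_initialize descriptor (QIB parameters, input and
      * output SBAL pointer arrays, handlers) and establish the QDIO queues,
      * moving card->qdio.state from ALLOCATED to ESTABLISHED on success.
      */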
4646 static int qeth_qdio_establish(struct qeth_card *card)
4647 {
4648         struct qdio_initialize init_data;
4649         char *qib_param_field;
4650         struct qdio_buffer **in_sbal_ptrs;
4651         void (**queue_start_poll) (struct ccw_device *, int, unsigned long);
4652         struct qdio_buffer **out_sbal_ptrs;
4653         int i, j, k;
4654         int rc = 0;
4655
4656         QETH_CARD_TEXT(card, 2, "qdioest");
4657
4658         qib_param_field = kzalloc(QDIO_MAX_BUFFERS_PER_Q,
4659                                   GFP_KERNEL);
4660         if (!qib_param_field) {
4661                 rc = -ENOMEM;
4662                 goto out_free_nothing;
4663         }
4664
4665         qeth_create_qib_param_field(card, qib_param_field);
4666         qeth_create_qib_param_field_blkt(card, qib_param_field);
4667
4668         in_sbal_ptrs = kcalloc(card->qdio.no_in_queues * QDIO_MAX_BUFFERS_PER_Q,
4669                                sizeof(void *),
4670                                GFP_KERNEL);
4671         if (!in_sbal_ptrs) {
4672                 rc = -ENOMEM;
4673                 goto out_free_qib_param;
4674         }
4675         for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) {
4676                 in_sbal_ptrs[i] = (struct qdio_buffer *)
4677                         virt_to_phys(card->qdio.in_q->bufs[i].buffer);
4678         }
4679
4680         queue_start_poll = kcalloc(card->qdio.no_in_queues, sizeof(void *),
4681                                    GFP_KERNEL);
4682         if (!queue_start_poll) {
4683                 rc = -ENOMEM;
4684                 goto out_free_in_sbals;
4685         }
4686         for (i = 0; i < card->qdio.no_in_queues; ++i)
4687                 queue_start_poll[i] = qeth_qdio_start_poll;
4688
4689         qeth_qdio_establish_cq(card, in_sbal_ptrs, queue_start_poll);
4690
4691         out_sbal_ptrs =
4692                 kcalloc(card->qdio.no_out_queues * QDIO_MAX_BUFFERS_PER_Q,
4693                         sizeof(void *),
4694                         GFP_KERNEL);
4695         if (!out_sbal_ptrs) {
4696                 rc = -ENOMEM;
4697                 goto out_free_queue_start_poll;
4698         }
4699         for (i = 0, k = 0; i < card->qdio.no_out_queues; ++i)
4700                 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j, ++k) {
4701                         out_sbal_ptrs[k] = (struct qdio_buffer *)virt_to_phys(
4702                                 card->qdio.out_qs[i]->bufs[j]->buffer);
4703                 }
4704
4705         memset(&init_data, 0, sizeof(struct qdio_initialize));
4706         init_data.cdev                   = CARD_DDEV(card);
4707         init_data.q_format               = IS_IQD(card) ? QDIO_IQDIO_QFMT :
4708                                                           QDIO_QETH_QFMT;
4709         init_data.qib_param_field_format = 0;
4710         init_data.qib_param_field        = qib_param_field;
4711         init_data.no_input_qs            = card->qdio.no_in_queues;
4712         init_data.no_output_qs           = card->qdio.no_out_queues;
4713         init_data.input_handler          = qeth_qdio_input_handler;
4714         init_data.output_handler         = qeth_qdio_output_handler;
4715         init_data.queue_start_poll_array = queue_start_poll;
4716         init_data.int_parm               = (unsigned long) card;
4717         init_data.input_sbal_addr_array  = in_sbal_ptrs;
4718         init_data.output_sbal_addr_array = out_sbal_ptrs;
4719         init_data.output_sbal_state_array = card->qdio.out_bufstates;
4720         init_data.scan_threshold         = IS_IQD(card) ? 1 : 32;
4721
4722         if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED,
4723                 QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) {
4724                 rc = qdio_allocate(&init_data);
4725                 if (rc) {
4726                         atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
4727                         goto out;
4728                 }
4729                 rc = qdio_establish(&init_data);
4730                 if (rc) {
4731                         atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
4732                         qdio_free(CARD_DDEV(card));
4733                 }
4734         }
4735
4736         switch (card->options.cq) {
4737         case QETH_CQ_ENABLED:
4738                 dev_info(&card->gdev->dev, "Completion Queue support enabled\n");
4739                 break;
4740         case QETH_CQ_DISABLED:
4741                 dev_info(&card->gdev->dev, "Completion Queue support disabled\n");
4742                 break;
4743         default:
4744                 break;
4745         }
4746 out:
4747         kfree(out_sbal_ptrs);
4748 out_free_queue_start_poll:
4749         kfree(queue_start_poll);
4750 out_free_in_sbals:
4751         kfree(in_sbal_ptrs);
4752 out_free_qib_param:
4753         kfree(qib_param_field);
4754 out_free_nothing:
4755         return rc;
4756 }
4757
4758 static void qeth_core_free_card(struct qeth_card *card)
4759 {
4760         QETH_CARD_TEXT(card, 2, "freecrd");
4761         qeth_clean_channel(&card->read);
4762         qeth_clean_channel(&card->write);
4763         qeth_clean_channel(&card->data);
4764         qeth_put_cmd(card->read_cmd);
4765         destroy_workqueue(card->event_wq);
4766         qeth_free_qdio_queues(card);
4767         unregister_service_level(&card->qeth_service_level);
4768         dev_set_drvdata(&card->gdev->dev, NULL);
4769         kfree(card);
4770 }
4771
4772 void qeth_trace_features(struct qeth_card *card)
4773 {
4774         QETH_CARD_TEXT(card, 2, "features");
4775         QETH_CARD_HEX(card, 2, &card->options.ipa4, sizeof(card->options.ipa4));
4776         QETH_CARD_HEX(card, 2, &card->options.ipa6, sizeof(card->options.ipa6));
4777         QETH_CARD_HEX(card, 2, &card->options.adp, sizeof(card->options.adp));
4778         QETH_CARD_HEX(card, 2, &card->info.diagass_support,
4779                       sizeof(card->info.diagass_support));
4780 }
4781 EXPORT_SYMBOL_GPL(qeth_trace_features);
4782
4783 static struct ccw_device_id qeth_ids[] = {
4784         {CCW_DEVICE_DEVTYPE(0x1731, 0x01, 0x1732, 0x01),
4785                                         .driver_info = QETH_CARD_TYPE_OSD},
4786         {CCW_DEVICE_DEVTYPE(0x1731, 0x05, 0x1732, 0x05),
4787                                         .driver_info = QETH_CARD_TYPE_IQD},
4788         {CCW_DEVICE_DEVTYPE(0x1731, 0x06, 0x1732, 0x06),
4789                                         .driver_info = QETH_CARD_TYPE_OSN},
4790         {CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x03),
4791                                         .driver_info = QETH_CARD_TYPE_OSM},
4792         {CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x02),
4793                                         .driver_info = QETH_CARD_TYPE_OSX},
4794         {},
4795 };
4796 MODULE_DEVICE_TABLE(ccw, qeth_ids);
4797
4798 static struct ccw_driver qeth_ccw_driver = {
4799         .driver = {
4800                 .owner = THIS_MODULE,
4801                 .name = "qeth",
4802         },
4803         .ids = qeth_ids,
4804         .probe = ccwgroup_probe_ccwdev,
4805         .remove = ccwgroup_remove_ccwdev,
4806 };
4807
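     /*
      * Core bring-up sequence shared by both disciplines: clear any stale
      * QDIO state, cycle the read/write/data subchannels offline and online
      * again, perform IDX activation on the read and write channels,
      * initialize the MPC connection, issue STARTLAN and re-query the IPA
      * assists and adapter parameters. Transient failures are retried up to
      * three times.
      */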
4808 int qeth_core_hardsetup_card(struct qeth_card *card, bool *carrier_ok)
4809 {
4810         int retries = 3;
4811         int rc;
4812
4813         QETH_CARD_TEXT(card, 2, "hrdsetup");
4814         atomic_set(&card->force_alloc_skb, 0);
4815         rc = qeth_update_from_chp_desc(card);
4816         if (rc)
4817                 return rc;
4818 retry:
4819         if (retries < 3)
4820                 QETH_DBF_MESSAGE(2, "Retrying to do IDX activates on device %x.\n",
4821                                  CARD_DEVID(card));
4822         rc = qeth_qdio_clear_card(card, !IS_IQD(card));
4823         ccw_device_set_offline(CARD_DDEV(card));
4824         ccw_device_set_offline(CARD_WDEV(card));
4825         ccw_device_set_offline(CARD_RDEV(card));
4826         qdio_free(CARD_DDEV(card));
4827         rc = ccw_device_set_online(CARD_RDEV(card));
4828         if (rc)
4829                 goto retriable;
4830         rc = ccw_device_set_online(CARD_WDEV(card));
4831         if (rc)
4832                 goto retriable;
4833         rc = ccw_device_set_online(CARD_DDEV(card));
4834         if (rc)
4835                 goto retriable;
4836 retriable:
4837         if (rc == -ERESTARTSYS) {
4838                 QETH_CARD_TEXT(card, 2, "break1");
4839                 return rc;
4840         } else if (rc) {
4841                 QETH_CARD_TEXT_(card, 2, "1err%d", rc);
4842                 if (--retries < 0)
4843                         goto out;
4844                 else
4845                         goto retry;
4846         }
4847         qeth_determine_capabilities(card);
4848         qeth_init_tokens(card);
4849         qeth_init_func_level(card);
4850
4851         rc = qeth_idx_activate_read_channel(card);
4852         if (rc == -EINTR) {
4853                 QETH_CARD_TEXT(card, 2, "break2");
4854                 return rc;
4855         } else if (rc) {
4856                 QETH_CARD_TEXT_(card, 2, "3err%d", rc);
4857                 if (--retries < 0)
4858                         goto out;
4859                 else
4860                         goto retry;
4861         }
4862
4863         rc = qeth_idx_activate_write_channel(card);
4864         if (rc == -EINTR) {
4865                 QETH_CARD_TEXT(card, 2, "break3");
4866                 return rc;
4867         } else if (rc) {
4868                 QETH_CARD_TEXT_(card, 2, "4err%d", rc);
4869                 if (--retries < 0)
4870                         goto out;
4871                 else
4872                         goto retry;
4873         }
4874         card->read_or_write_problem = 0;
4875         rc = qeth_mpc_initialize(card);
4876         if (rc) {
4877                 QETH_CARD_TEXT_(card, 2, "5err%d", rc);
4878                 goto out;
4879         }
4880
4881         rc = qeth_send_startlan(card);
4882         if (rc) {
4883                 QETH_CARD_TEXT_(card, 2, "6err%d", rc);
4884                 if (rc == -ENETDOWN) {
4885                         dev_warn(&card->gdev->dev, "The LAN is offline\n");
4886                         *carrier_ok = false;
4887                 } else {
4888                         goto out;
4889                 }
4890         } else {
4891                 *carrier_ok = true;
4892         }
4893
4894         card->options.ipa4.supported_funcs = 0;
4895         card->options.ipa6.supported_funcs = 0;
4896         card->options.adp.supported_funcs = 0;
4897         card->options.sbp.supported_funcs = 0;
4898         card->info.diagass_support = 0;
4899         rc = qeth_query_ipassists(card, QETH_PROT_IPV4);
4900         if (rc == -ENOMEM)
4901                 goto out;
4902         if (qeth_is_supported(card, IPA_IPV6)) {
4903                 rc = qeth_query_ipassists(card, QETH_PROT_IPV6);
4904                 if (rc == -ENOMEM)
4905                         goto out;
4906         }
4907         if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) {
4908                 rc = qeth_query_setadapterparms(card);
4909                 if (rc < 0) {
4910                         QETH_CARD_TEXT_(card, 2, "7err%d", rc);
4911                         goto out;
4912                 }
4913         }
4914         if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) {
4915                 rc = qeth_query_setdiagass(card);
4916                 if (rc < 0) {
4917                         QETH_CARD_TEXT_(card, 2, "8err%d", rc);
4918                         goto out;
4919                 }
4920         }
4921         return 0;
4922 out:
4923         dev_warn(&card->gdev->dev, "The qeth device driver failed to recover "
4924                 "an error on the device\n");
4925         QETH_DBF_MESSAGE(2, "Initialization for device %x failed in hardsetup! rc=%d\n",
4926                          CARD_DEVID(card), rc);
4927         return rc;
4928 }
4929 EXPORT_SYMBOL_GPL(qeth_core_hardsetup_card);
4930
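     /*
      * Add received data to an skb: fill the linear part first (up to its
      * tailroom), then attach the remainder of the SBAL element's page as a
      * page fragment via skb_add_rx_frag().
      */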
4931 static void qeth_create_skb_frag(struct qdio_buffer_element *element,
4932                                  struct sk_buff *skb, int offset, int data_len)
4933 {
4934         struct page *page = virt_to_page(element->addr);
4935         unsigned int next_frag;
4936
4937         /* first fill the linear space */
4938         if (!skb->len) {
4939                 unsigned int linear = min(data_len, skb_tailroom(skb));
4940
4941                 skb_put_data(skb, element->addr + offset, linear);
4942                 data_len -= linear;
4943                 if (!data_len)
4944                         return;
4945                 offset += linear;
4946                 /* fall through to add page frag for remaining data */
4947         }
4948
4949         next_frag = skb_shinfo(skb)->nr_frags;
4950         get_page(page);
4951         skb_add_rx_frag(skb, next_frag, page, offset, data_len, data_len);
4952 }
4953
4954 static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale)
4955 {
4956         return (sbale->eflags & SBAL_EFLAGS_LAST_ENTRY);
4957 }
4958
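     /*
      * Extract the next packet from an inbound QDIO buffer: locate the
      * qeth_hdr (which must not cross an SBAL element boundary), derive the
      * packet length and headroom from the header type, and either copy the
      * data into a linear skb or build a scatter-gather skb when rx_sg is in
      * use. *__element and *__offset are advanced so the caller can continue
      * with the following packet.
      */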
4959 struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
4960                 struct qeth_qdio_buffer *qethbuffer,
4961                 struct qdio_buffer_element **__element, int *__offset,
4962                 struct qeth_hdr **hdr)
4963 {
4964         struct qdio_buffer_element *element = *__element;
4965         struct qdio_buffer *buffer = qethbuffer->buffer;
4966         int offset = *__offset;
4967         struct sk_buff *skb;
4968         int skb_len = 0;
4969         void *data_ptr;
4970         int data_len;
4971         int headroom = 0;
4972         int use_rx_sg = 0;
4973
4974         /* qeth_hdr must not cross element boundaries */
4975         while (element->length < offset + sizeof(struct qeth_hdr)) {
4976                 if (qeth_is_last_sbale(element))
4977                         return NULL;
4978                 element++;
4979                 offset = 0;
4980         }
4981         *hdr = element->addr + offset;
4982
4983         offset += sizeof(struct qeth_hdr);
4984         switch ((*hdr)->hdr.l2.id) {
4985         case QETH_HEADER_TYPE_LAYER2:
4986                 skb_len = (*hdr)->hdr.l2.pkt_length;
4987                 break;
4988         case QETH_HEADER_TYPE_LAYER3:
4989                 skb_len = (*hdr)->hdr.l3.length;
4990                 headroom = ETH_HLEN;
4991                 break;
4992         case QETH_HEADER_TYPE_OSN:
4993                 skb_len = (*hdr)->hdr.osn.pdu_length;
4994                 headroom = sizeof(struct qeth_hdr);
4995                 break;
4996         default:
4997                 break;
4998         }
4999
5000         if (!skb_len)
5001                 return NULL;
5002
5003         if (((skb_len >= card->options.rx_sg_cb) &&
5004              !IS_OSN(card) &&
5005              (!atomic_read(&card->force_alloc_skb))) ||
5006             (card->options.cq == QETH_CQ_ENABLED))
5007                 use_rx_sg = 1;
5008
5009         if (use_rx_sg && qethbuffer->rx_skb) {
5010                 /* QETH_CQ_ENABLED only: */
5011                 skb = qethbuffer->rx_skb;
5012                 qethbuffer->rx_skb = NULL;
5013         } else {
5014                 unsigned int linear = (use_rx_sg) ? QETH_RX_PULL_LEN : skb_len;
5015
5016                 skb = napi_alloc_skb(&card->napi, linear + headroom);
5017         }
5018         if (!skb)
5019                 goto no_mem;
5020         if (headroom)
5021                 skb_reserve(skb, headroom);
5022
5023         data_ptr = element->addr + offset;
5024         while (skb_len) {
5025                 data_len = min(skb_len, (int)(element->length - offset));
5026                 if (data_len) {
5027                         if (use_rx_sg)
5028                                 qeth_create_skb_frag(element, skb, offset,
5029                                                      data_len);
5030                         else
5031                                 skb_put_data(skb, data_ptr, data_len);
5032                 }
5033                 skb_len -= data_len;
5034                 if (skb_len) {
5035                         if (qeth_is_last_sbale(element)) {
5036                                 QETH_CARD_TEXT(card, 4, "unexeob");
5037                                 QETH_CARD_HEX(card, 2, buffer, sizeof(void *));
5038                                 dev_kfree_skb_any(skb);
5039                                 QETH_CARD_STAT_INC(card, rx_errors);
5040                                 return NULL;
5041                         }
5042                         element++;
5043                         offset = 0;
5044                         data_ptr = element->addr;
5045                 } else {
5046                         offset += data_len;
5047                 }
5048         }
5049         *__element = element;
5050         *__offset = offset;
5051         if (use_rx_sg) {
5052                 QETH_CARD_STAT_INC(card, rx_sg_skbs);
5053                 QETH_CARD_STAT_ADD(card, rx_sg_frags,
5054                                    skb_shinfo(skb)->nr_frags);
5055         }
5056         return skb;
5057 no_mem:
5058         if (net_ratelimit())
5059                 QETH_CARD_TEXT(card, 2, "noskbmem");
5061         QETH_CARD_STAT_INC(card, rx_dropped);
5062         return NULL;
5063 }
5064 EXPORT_SYMBOL_GPL(qeth_core_get_next_skb);
5065
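     /*
      * NAPI poll routine: fetch completed input buffers via
      * qdio_get_next_buffers(), let the discipline's process_rx_buffer()
      * turn them into skbs, and recycle each fully processed buffer. When
      * the budget is not exhausted, complete NAPI and re-enable the QDIO
      * interrupt; if qdio_start_irq() reports pending work, reschedule
      * immediately.
      */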
5066 int qeth_poll(struct napi_struct *napi, int budget)
5067 {
5068         struct qeth_card *card = container_of(napi, struct qeth_card, napi);
5069         int work_done = 0;
5070         struct qeth_qdio_buffer *buffer;
5071         int done;
5072         int new_budget = budget;
5073
5074         while (1) {
5075                 if (!card->rx.b_count) {
5076                         card->rx.qdio_err = 0;
5077                         card->rx.b_count = qdio_get_next_buffers(
5078                                 card->data.ccwdev, 0, &card->rx.b_index,
5079                                 &card->rx.qdio_err);
5080                         if (card->rx.b_count <= 0) {
5081                                 card->rx.b_count = 0;
5082                                 break;
5083                         }
5084                         card->rx.b_element =
5085                                 &card->qdio.in_q->bufs[card->rx.b_index]
5086                                 .buffer->element[0];
5087                         card->rx.e_offset = 0;
5088                 }
5089
5090                 while (card->rx.b_count) {
5091                         buffer = &card->qdio.in_q->bufs[card->rx.b_index];
5092                         if (!(card->rx.qdio_err &&
5093                             qeth_check_qdio_errors(card, buffer->buffer,
5094                             card->rx.qdio_err, "qinerr")))
5095                                 work_done +=
5096                                         card->discipline->process_rx_buffer(
5097                                                 card, new_budget, &done);
5098                         else
5099                                 done = 1;
5100
5101                         if (done) {
5102                                 QETH_CARD_STAT_INC(card, rx_bufs);
5103                                 qeth_put_buffer_pool_entry(card,
5104                                         buffer->pool_entry);
5105                                 qeth_queue_input_buffer(card, card->rx.b_index);
5106                                 card->rx.b_count--;
5107                                 if (card->rx.b_count) {
5108                                         card->rx.b_index =
5109                                                 (card->rx.b_index + 1) %
5110                                                 QDIO_MAX_BUFFERS_PER_Q;
5111                                         card->rx.b_element =
5112                                                 &card->qdio.in_q
5113                                                 ->bufs[card->rx.b_index]
5114                                                 .buffer->element[0];
5115                                         card->rx.e_offset = 0;
5116                                 }
5117                         }
5118
5119                         if (work_done >= budget)
5120                                 goto out;
5121                         else
5122                                 new_budget = budget - work_done;
5123                 }
5124         }
5125
5126         napi_complete_done(napi, work_done);
5127         if (qdio_start_irq(card->data.ccwdev, 0))
5128                 napi_schedule(&card->napi);
5129 out:
5130         return work_done;
5131 }
5132 EXPORT_SYMBOL_GPL(qeth_poll);
5133
5134 static int qeth_setassparms_inspect_rc(struct qeth_ipa_cmd *cmd)
5135 {
5136         if (!cmd->hdr.return_code)
5137                 cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
5138         return cmd->hdr.return_code;
5139 }
5140
5141 static int qeth_setassparms_get_caps_cb(struct qeth_card *card,
5142                                         struct qeth_reply *reply,
5143                                         unsigned long data)
5144 {
5145         struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
5146         struct qeth_ipa_caps *caps = reply->param;
5147
5148         if (qeth_setassparms_inspect_rc(cmd))
5149                 return -EIO;
5150
5151         caps->supported = cmd->data.setassparms.data.caps.supported;
5152         caps->enabled = cmd->data.setassparms.data.caps.enabled;
5153         return 0;
5154 }
5155
5156 int qeth_setassparms_cb(struct qeth_card *card,
5157                         struct qeth_reply *reply, unsigned long data)
5158 {
5159         struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
5160
5161         QETH_CARD_TEXT(card, 4, "defadpcb");
5162
5163         if (cmd->hdr.return_code)
5164                 return -EIO;
5165
5166         cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
5167         if (cmd->hdr.prot_version == QETH_PROT_IPV4)
5168                 card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
5169         if (cmd->hdr.prot_version == QETH_PROT_IPV6)
5170                 card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
5171         return 0;
5172 }
5173 EXPORT_SYMBOL_GPL(qeth_setassparms_cb);
5174
5175 struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card,
5176                                                  enum qeth_ipa_funcs ipa_func,
5177                                                  u16 cmd_code,
5178                                                  unsigned int data_length,
5179                                                  enum qeth_prot_versions prot)
5180 {
5181         struct qeth_ipacmd_setassparms *setassparms;
5182         struct qeth_ipacmd_setassparms_hdr *hdr;
5183         struct qeth_cmd_buffer *iob;
5184
5185         QETH_CARD_TEXT(card, 4, "getasscm");
5186         iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETASSPARMS, prot,
5187                                  data_length +
5188                                  offsetof(struct qeth_ipacmd_setassparms,
5189                                           data));
5190         if (!iob)
5191                 return NULL;
5192
5193         setassparms = &__ipa_cmd(iob)->data.setassparms;
5194         setassparms->assist_no = ipa_func;
5195
5196         hdr = &setassparms->hdr;
5197         hdr->length = sizeof(*hdr) + data_length;
5198         hdr->command_code = cmd_code;
5199         return iob;
5200 }
5201 EXPORT_SYMBOL_GPL(qeth_get_setassparms_cmd);
5202
5203 int qeth_send_simple_setassparms_prot(struct qeth_card *card,
5204                                       enum qeth_ipa_funcs ipa_func,
5205                                       u16 cmd_code, u32 *data,
5206                                       enum qeth_prot_versions prot)
5207 {
5208         unsigned int length = data ? SETASS_DATA_SIZEOF(flags_32bit) : 0;
5209         struct qeth_cmd_buffer *iob;
5210
5211         QETH_CARD_TEXT_(card, 4, "simassp%i", prot);
5212         iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code, length, prot);
5213         if (!iob)
5214                 return -ENOMEM;
5215
5216         if (data)
5217                 __ipa_cmd(iob)->data.setassparms.data.flags_32bit = *data;
5218         return qeth_send_ipa_cmd(card, iob, qeth_setassparms_cb, NULL);
5219 }
5220 EXPORT_SYMBOL_GPL(qeth_send_simple_setassparms_prot);
5221
5222 static void qeth_unregister_dbf_views(void)
5223 {
5224         int x;
5225         for (x = 0; x < QETH_DBF_INFOS; x++) {
5226                 debug_unregister(qeth_dbf[x].id);
5227                 qeth_dbf[x].id = NULL;
5228         }
5229 }
5230
5231 void qeth_dbf_longtext(debug_info_t *id, int level, char *fmt, ...)
5232 {
5233         char dbf_txt_buf[32];
5234         va_list args;
5235
5236         if (!debug_level_enabled(id, level))
5237                 return;
5238         va_start(args, fmt);
5239         vsnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args);
5240         va_end(args);
5241         debug_text_event(id, level, dbf_txt_buf);
5242 }
5243 EXPORT_SYMBOL_GPL(qeth_dbf_longtext);
5244
5245 static int qeth_register_dbf_views(void)
5246 {
5247         int ret;
5248         int x;
5249
5250         for (x = 0; x < QETH_DBF_INFOS; x++) {
5251                 /* register the areas */
5252                 qeth_dbf[x].id = debug_register(qeth_dbf[x].name,
5253                                                 qeth_dbf[x].pages,
5254                                                 qeth_dbf[x].areas,
5255                                                 qeth_dbf[x].len);
5256                 if (qeth_dbf[x].id == NULL) {
5257                         qeth_unregister_dbf_views();
5258                         return -ENOMEM;
5259                 }
5260
5261                 /* register a view */
5262                 ret = debug_register_view(qeth_dbf[x].id, qeth_dbf[x].view);
5263                 if (ret) {
5264                         qeth_unregister_dbf_views();
5265                         return ret;
5266                 }
5267
5268                 /* set a passing level */
5269                 debug_set_level(qeth_dbf[x].id, qeth_dbf[x].level);
5270         }
5271
5272         return 0;
5273 }
5274
5275 static DEFINE_MUTEX(qeth_mod_mutex);    /* for synchronized module loading */
5276
5277 int qeth_core_load_discipline(struct qeth_card *card,
5278                 enum qeth_discipline_id discipline)
5279 {
5280         mutex_lock(&qeth_mod_mutex);
5281         switch (discipline) {
5282         case QETH_DISCIPLINE_LAYER3:
5283                 card->discipline = try_then_request_module(
5284                         symbol_get(qeth_l3_discipline), "qeth_l3");
5285                 break;
5286         case QETH_DISCIPLINE_LAYER2:
5287                 card->discipline = try_then_request_module(
5288                         symbol_get(qeth_l2_discipline), "qeth_l2");
5289                 break;
5290         default:
5291                 break;
5292         }
5293         mutex_unlock(&qeth_mod_mutex);
5294
5295         if (!card->discipline) {
5296                 dev_err(&card->gdev->dev, "There is no kernel module to "
5297                         "support discipline %d\n", discipline);
5298                 return -EINVAL;
5299         }
5300
5301         card->options.layer = discipline;
5302         return 0;
5303 }
5304
5305 void qeth_core_free_discipline(struct qeth_card *card)
5306 {
5307         if (IS_LAYER2(card))
5308                 symbol_put(qeth_l2_discipline);
5309         else
5310                 symbol_put(qeth_l3_discipline);
5311         card->options.layer = QETH_DISCIPLINE_UNDETERMINED;
5312         card->discipline = NULL;
5313 }
5314
5315 const struct device_type qeth_generic_devtype = {
5316         .name = "qeth_generic",
5317         .groups = qeth_generic_attr_groups,
5318 };
5319 EXPORT_SYMBOL_GPL(qeth_generic_devtype);
5320
5321 static const struct device_type qeth_osn_devtype = {
5322         .name = "qeth_osn",
5323         .groups = qeth_osn_attr_groups,
5324 };
5325
5326 #define DBF_NAME_LEN    20
5327
5328 struct qeth_dbf_entry {
5329         char dbf_name[DBF_NAME_LEN];
5330         debug_info_t *dbf_info;
5331         struct list_head dbf_list;
5332 };
5333
5334 static LIST_HEAD(qeth_dbf_list);
5335 static DEFINE_MUTEX(qeth_dbf_list_mutex);
5336
5337 static debug_info_t *qeth_get_dbf_entry(char *name)
5338 {
5339         struct qeth_dbf_entry *entry;
5340         debug_info_t *rc = NULL;
5341
5342         mutex_lock(&qeth_dbf_list_mutex);
5343         list_for_each_entry(entry, &qeth_dbf_list, dbf_list) {
5344                 if (strcmp(entry->dbf_name, name) == 0) {
5345                         rc = entry->dbf_info;
5346                         break;
5347                 }
5348         }
5349         mutex_unlock(&qeth_dbf_list_mutex);
5350         return rc;
5351 }
5352
5353 static int qeth_add_dbf_entry(struct qeth_card *card, char *name)
5354 {
5355         struct qeth_dbf_entry *new_entry;
5356
5357         card->debug = debug_register(name, 2, 1, 8);
5358         if (!card->debug) {
5359                 QETH_DBF_TEXT_(SETUP, 2, "%s", "qcdbf");
5360                 goto err;
5361         }
5362         if (debug_register_view(card->debug, &debug_hex_ascii_view))
5363                 goto err_dbg;
5364         new_entry = kzalloc(sizeof(struct qeth_dbf_entry), GFP_KERNEL);
5365         if (!new_entry)
5366                 goto err_dbg;
5367         strncpy(new_entry->dbf_name, name, DBF_NAME_LEN);
5368         new_entry->dbf_info = card->debug;
5369         mutex_lock(&qeth_dbf_list_mutex);
5370         list_add(&new_entry->dbf_list, &qeth_dbf_list);
5371         mutex_unlock(&qeth_dbf_list_mutex);
5372
5373         return 0;
5374
5375 err_dbg:
5376         debug_unregister(card->debug);
5377 err:
5378         return -ENOMEM;
5379 }
5380
5381 static void qeth_clear_dbf_list(void)
5382 {
5383         struct qeth_dbf_entry *entry, *tmp;
5384
5385         mutex_lock(&qeth_dbf_list_mutex);
5386         list_for_each_entry_safe(entry, tmp, &qeth_dbf_list, dbf_list) {
5387                 list_del(&entry->dbf_list);
5388                 debug_unregister(entry->dbf_info);
5389                 kfree(entry);
5390         }
5391         mutex_unlock(&qeth_dbf_list_mutex);
5392 }
5393
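     /*
      * Allocate the net_device matching the card type: IQD (HiperSockets)
      * gets a multi-queue "hsi%d" device, OSN a raw "osn%d" device, OSM a
      * single-queue Ethernet device, and everything else a multi-queue
      * Ethernet device. MTU limits are left at 0 until the device first
      * goes online.
      */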
5394 static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
5395 {
5396         struct net_device *dev;
5397
5398         switch (card->info.type) {
5399         case QETH_CARD_TYPE_IQD:
5400                 dev = alloc_netdev_mqs(0, "hsi%d", NET_NAME_UNKNOWN,
5401                                        ether_setup, QETH_MAX_QUEUES, 1);
5402                 break;
5403         case QETH_CARD_TYPE_OSM:
5404                 dev = alloc_etherdev(0);
5405                 break;
5406         case QETH_CARD_TYPE_OSN:
5407                 dev = alloc_netdev(0, "osn%d", NET_NAME_UNKNOWN, ether_setup);
5408                 break;
5409         default:
5410                 dev = alloc_etherdev_mqs(0, QETH_MAX_QUEUES, 1);
5411         }
5412
5413         if (!dev)
5414                 return NULL;
5415
5416         dev->ml_priv = card;
5417         dev->watchdog_timeo = QETH_TX_TIMEOUT;
5418         dev->min_mtu = IS_OSN(card) ? 64 : 576;
5419         /* initialized when device first goes online: */
5420         dev->max_mtu = 0;
5421         dev->mtu = 0;
5422         SET_NETDEV_DEV(dev, &card->gdev->dev);
5423         netif_carrier_off(dev);
5424
5425         if (IS_OSN(card)) {
5426                 dev->ethtool_ops = &qeth_osn_ethtool_ops;
5427         } else {
5428                 dev->ethtool_ops = &qeth_ethtool_ops;
5429                 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
5430                 dev->hw_features |= NETIF_F_SG;
5431                 dev->vlan_features |= NETIF_F_SG;
5432                 if (IS_IQD(card)) {
5433                         dev->features |= NETIF_F_SG;
5434                         if (netif_set_real_num_tx_queues(dev,
5435                                                          QETH_IQD_MIN_TXQ)) {
5436                                 free_netdev(dev);
5437                                 return NULL;
5438                         }
5439                 }
5440         }
5441
5442         return dev;
5443 }
5444
5445 struct net_device *qeth_clone_netdev(struct net_device *orig)
5446 {
5447         struct net_device *clone = qeth_alloc_netdev(orig->ml_priv);
5448
5449         if (!clone)
5450                 return NULL;
5451
5452         clone->dev_port = orig->dev_port;
5453         return clone;
5454 }
5455
5456 static int qeth_core_probe_device(struct ccwgroup_device *gdev)
5457 {
5458         struct qeth_card *card;
5459         struct device *dev;
5460         int rc;
5461         enum qeth_discipline_id enforced_disc;
5462         char dbf_name[DBF_NAME_LEN];
5463
5464         QETH_DBF_TEXT(SETUP, 2, "probedev");
5465
5466         dev = &gdev->dev;
5467         if (!get_device(dev))
5468                 return -ENODEV;
5469
5470         QETH_DBF_TEXT_(SETUP, 2, "%s", dev_name(&gdev->dev));
5471
5472         card = qeth_alloc_card(gdev);
5473         if (!card) {
5474                 QETH_DBF_TEXT_(SETUP, 2, "1err%d", -ENOMEM);
5475                 rc = -ENOMEM;
5476                 goto err_dev;
5477         }
5478
5479         snprintf(dbf_name, sizeof(dbf_name), "qeth_card_%s",
5480                 dev_name(&gdev->dev));
5481         card->debug = qeth_get_dbf_entry(dbf_name);
5482         if (!card->debug) {
5483                 rc = qeth_add_dbf_entry(card, dbf_name);
5484                 if (rc)
5485                         goto err_card;
5486         }
5487
5488         qeth_setup_card(card);
5489         card->dev = qeth_alloc_netdev(card);
5490         if (!card->dev) {
5491                 rc = -ENOMEM;
5492                 goto err_card;
5493         }
5494
5495         card->qdio.no_out_queues = card->dev->num_tx_queues;
5496         rc = qeth_update_from_chp_desc(card);
5497         if (rc)
5498                 goto err_chp_desc;
5499         qeth_determine_capabilities(card);
5500         qeth_set_blkt_defaults(card);
5501
5502         enforced_disc = qeth_enforce_discipline(card);
5503         switch (enforced_disc) {
5504         case QETH_DISCIPLINE_UNDETERMINED:
5505                 gdev->dev.type = &qeth_generic_devtype;
5506                 break;
5507         default:
5508                 card->info.layer_enforced = true;
5509                 rc = qeth_core_load_discipline(card, enforced_disc);
5510                 if (rc)
5511                         goto err_load;
5512
5513                 gdev->dev.type = IS_OSN(card) ? &qeth_osn_devtype :
5514                                                 card->discipline->devtype;
5515                 rc = card->discipline->setup(card->gdev);
5516                 if (rc)
5517                         goto err_disc;
5518                 break;
5519         }
5520
5521         return 0;
5522
5523 err_disc:
5524         qeth_core_free_discipline(card);
5525 err_load:
5526 err_chp_desc:
5527         free_netdev(card->dev);
5528 err_card:
5529         qeth_core_free_card(card);
5530 err_dev:
5531         put_device(dev);
5532         return rc;
5533 }
5534
5535 static void qeth_core_remove_device(struct ccwgroup_device *gdev)
5536 {
5537         struct qeth_card *card = dev_get_drvdata(&gdev->dev);
5538
5539         QETH_CARD_TEXT(card, 2, "removedv");
5540
5541         if (card->discipline) {
5542                 card->discipline->remove(gdev);
5543                 qeth_core_free_discipline(card);
5544         }
5545
5546         free_netdev(card->dev);
5547         qeth_core_free_card(card);
5548         put_device(&gdev->dev);
5549 }
5550
5551 static int qeth_core_set_online(struct ccwgroup_device *gdev)
5552 {
5553         struct qeth_card *card = dev_get_drvdata(&gdev->dev);
5554         int rc = 0;
5555         enum qeth_discipline_id def_discipline;
5556
5557         if (!card->discipline) {
5558                 def_discipline = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
5559                                                 QETH_DISCIPLINE_LAYER2;
5560                 rc = qeth_core_load_discipline(card, def_discipline);
5561                 if (rc)
5562                         goto err;
5563                 rc = card->discipline->setup(card->gdev);
5564                 if (rc) {
5565                         qeth_core_free_discipline(card);
5566                         goto err;
5567                 }
5568         }
5569         rc = card->discipline->set_online(gdev);
5570 err:
5571         return rc;
5572 }
5573
5574 static int qeth_core_set_offline(struct ccwgroup_device *gdev)
5575 {
5576         struct qeth_card *card = dev_get_drvdata(&gdev->dev);
5577         return card->discipline->set_offline(gdev);
5578 }
5579
5580 static void qeth_core_shutdown(struct ccwgroup_device *gdev)
5581 {
5582         struct qeth_card *card = dev_get_drvdata(&gdev->dev);
5583         qeth_set_allowed_threads(card, 0, 1);
5584         if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap)
5585                 qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
5586         qeth_qdio_clear_card(card, 0);
5587         qeth_drain_output_queues(card);
5588         qdio_free(CARD_DDEV(card));
5589 }
5590
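/* Hibernation freeze: stop worker threads, wait until running threads have
 * finished, and take the device offline unless it already is.
 */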
5591 static int qeth_suspend(struct ccwgroup_device *gdev)
5592 {
5593         struct qeth_card *card = dev_get_drvdata(&gdev->dev);
5594
5595         qeth_set_allowed_threads(card, 0, 1);
5596         wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
5597         if (gdev->state == CCWGROUP_OFFLINE)
5598                 return 0;
5599
5600         card->discipline->set_offline(gdev);
5601         return 0;
5602 }
5603
5604 static int qeth_resume(struct ccwgroup_device *gdev)
5605 {
5606         struct qeth_card *card = dev_get_drvdata(&gdev->dev);
5607         int rc;
5608
5609         rc = card->discipline->set_online(gdev);
5610
5611         qeth_set_allowed_threads(card, 0xffffffff, 0);
5612         if (rc)
5613                 dev_warn(&card->gdev->dev, "The qeth device driver failed to recover an error on the device\n");
5614         return rc;
5615 }
5616
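/* Driver-level 'group' sysfs attribute: creates a qeth ccwgroup device from
 * the three ccw bus IDs written by the user, e.g. (bus IDs are placeholders):
 *
 *   echo 0.0.a000,0.0.a001,0.0.a002 > /sys/bus/ccwgroup/drivers/qeth/group
 */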
5617 static ssize_t group_store(struct device_driver *ddrv, const char *buf,
5618                            size_t count)
5619 {
5620         int err;
5621
5622         err = ccwgroup_create_dev(qeth_core_root_dev, to_ccwgroupdrv(ddrv), 3,
5623                                   buf);
5624
5625         return err ? err : count;
5626 }
5627 static DRIVER_ATTR_WO(group);
5628
5629 static struct attribute *qeth_drv_attrs[] = {
5630         &driver_attr_group.attr,
5631         NULL,
5632 };
5633 static struct attribute_group qeth_drv_attr_group = {
5634         .attrs = qeth_drv_attrs,
5635 };
5636 static const struct attribute_group *qeth_drv_attr_groups[] = {
5637         &qeth_drv_attr_group,
5638         NULL,
5639 };
5640
5641 static struct ccwgroup_driver qeth_core_ccwgroup_driver = {
5642         .driver = {
5643                 .groups = qeth_drv_attr_groups,
5644                 .owner = THIS_MODULE,
5645                 .name = "qeth",
5646         },
5647         .ccw_driver = &qeth_ccw_driver,
5648         .setup = qeth_core_probe_device,
5649         .remove = qeth_core_remove_device,
5650         .set_online = qeth_core_set_online,
5651         .set_offline = qeth_core_set_offline,
5652         .shutdown = qeth_core_shutdown,
5653         .prepare = NULL,
5654         .complete = NULL,
5655         .freeze = qeth_suspend,
5656         .thaw = qeth_resume,
5657         .restore = qeth_resume,
5658 };
5659
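/**
 * qeth_get_card_by_busid() - look up a qeth card by its ccwgroup bus ID
 * @bus_id:     bus ID of the ccwgroup device
 *
 * Returns the card bound to @bus_id, or NULL if no matching ccwgroup device
 * exists. The reference on the ccwgroup device is dropped before returning.
 */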
5660 struct qeth_card *qeth_get_card_by_busid(char *bus_id)
5661 {
5662         struct ccwgroup_device *gdev;
5663         struct qeth_card *card;
5664
5665         gdev = get_ccwgroupdev_by_busid(&qeth_core_ccwgroup_driver, bus_id);
5666         if (!gdev)
5667                 return NULL;
5668
5669         card = dev_get_drvdata(&gdev->dev);
5670         put_device(&gdev->dev);
5671         return card;
5672 }
5673 EXPORT_SYMBOL_GPL(qeth_get_card_by_busid);
5674
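/* Common ioctl handler: serves the qeth-private ioctls (SNMP control, card
 * type query, OAT query) and the basic MII ioctls, and forwards anything else
 * to the discipline's do_ioctl() callback if one is provided.
 */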
5675 int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
5676 {
5677         struct qeth_card *card = dev->ml_priv;
5678         struct mii_ioctl_data *mii_data;
5679         int rc = 0;
5680
5681         if (!card)
5682                 return -ENODEV;
5683
5684         switch (cmd) {
5685         case SIOC_QETH_ADP_SET_SNMP_CONTROL:
5686                 rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data);
5687                 break;
5688         case SIOC_QETH_GET_CARD_TYPE:
5689                 if ((IS_OSD(card) || IS_OSM(card) || IS_OSX(card)) &&
5690                     !IS_VM_NIC(card))
5691                         return 1;
5692                 return 0;
5693         case SIOCGMIIPHY:
5694                 mii_data = if_mii(rq);
5695                 mii_data->phy_id = 0;
5696                 break;
5697         case SIOCGMIIREG:
5698                 mii_data = if_mii(rq);
5699                 if (mii_data->phy_id != 0)
5700                         rc = -EINVAL;
5701                 else
5702                         mii_data->val_out = qeth_mdio_read(dev,
5703                                 mii_data->phy_id, mii_data->reg_num);
5704                 break;
5705         case SIOC_QETH_QUERY_OAT:
5706                 rc = qeth_query_oat_command(card, rq->ifr_ifru.ifru_data);
5707                 break;
5708         default:
5709                 if (card->discipline->do_ioctl)
5710                         rc = card->discipline->do_ioctl(dev, rq, cmd);
5711                 else
5712                         rc = -EOPNOTSUPP;
5713         }
5714         if (rc)
5715                 QETH_CARD_TEXT_(card, 2, "ioce%x", rc);
5716         return rc;
5717 }
5718 EXPORT_SYMBOL_GPL(qeth_do_ioctl);
5719
5720 static int qeth_start_csum_cb(struct qeth_card *card, struct qeth_reply *reply,
5721                               unsigned long data)
5722 {
5723         struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
5724         u32 *features = reply->param;
5725
5726         if (qeth_setassparms_inspect_rc(cmd))
5727                 return -EIO;
5728
5729         *features = cmd->data.setassparms.data.flags_32bit;
5730         return 0;
5731 }
5732
5733 static int qeth_set_csum_off(struct qeth_card *card, enum qeth_ipa_funcs cstype,
5734                              enum qeth_prot_versions prot)
5735 {
5736         return qeth_send_simple_setassparms_prot(card, cstype, IPA_CMD_ASS_STOP,
5737                                                  NULL, prot);
5738 }
5739
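/* Enable a checksum offload assist in two IPA steps: ASS_START reports the
 * checksum types the card supports, which must cover TCP and UDP (plus the
 * IP header for outbound IPv4 on layer3 devices); ASS_ENABLE then activates
 * the required types and the returned caps are verified.  Any failure rolls
 * the assist back via qeth_set_csum_off().
 */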
5740 static int qeth_set_csum_on(struct qeth_card *card, enum qeth_ipa_funcs cstype,
5741                             enum qeth_prot_versions prot)
5742 {
5743         u32 required_features = QETH_IPA_CHECKSUM_UDP | QETH_IPA_CHECKSUM_TCP;
5744         struct qeth_cmd_buffer *iob;
5745         struct qeth_ipa_caps caps;
5746         u32 features;
5747         int rc;
5748
5749         /* some L3 HW requires combined L3+L4 csum offload: */
5750         if (IS_LAYER3(card) && prot == QETH_PROT_IPV4 &&
5751             cstype == IPA_OUTBOUND_CHECKSUM)
5752                 required_features |= QETH_IPA_CHECKSUM_IP_HDR;
5753
5754         iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_START, 0,
5755                                        prot);
5756         if (!iob)
5757                 return -ENOMEM;
5758
5759         rc = qeth_send_ipa_cmd(card, iob, qeth_start_csum_cb, &features);
5760         if (rc)
5761                 return rc;
5762
5763         if ((required_features & features) != required_features) {
5764                 qeth_set_csum_off(card, cstype, prot);
5765                 return -EOPNOTSUPP;
5766         }
5767
5768         iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_ENABLE,
5769                                        SETASS_DATA_SIZEOF(flags_32bit),
5770                                        prot);
5771         if (!iob) {
5772                 qeth_set_csum_off(card, cstype, prot);
5773                 return -ENOMEM;
5774         }
5775
5776         if (features & QETH_IPA_CHECKSUM_LP2LP)
5777                 required_features |= QETH_IPA_CHECKSUM_LP2LP;
5778         __ipa_cmd(iob)->data.setassparms.data.flags_32bit = required_features;
5779         rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps);
5780         if (rc) {
5781                 qeth_set_csum_off(card, cstype, prot);
5782                 return rc;
5783         }
5784
5785         if (!qeth_ipa_caps_supported(&caps, required_features) ||
5786             !qeth_ipa_caps_enabled(&caps, required_features)) {
5787                 qeth_set_csum_off(card, cstype, prot);
5788                 return -EOPNOTSUPP;
5789         }
5790
5791         dev_info(&card->gdev->dev, "HW Checksumming (%sbound IPv%d) enabled\n",
5792                  cstype == IPA_INBOUND_CHECKSUM ? "in" : "out", prot);
5793         if (!qeth_ipa_caps_enabled(&caps, QETH_IPA_CHECKSUM_LP2LP) &&
5794             cstype == IPA_OUTBOUND_CHECKSUM)
5795                 dev_warn(&card->gdev->dev,
5796                          "Hardware checksumming is performed only if %s and its peer use different OSA Express 3 ports\n",
5797                          QETH_CARD_IFNAME(card));
5798         return 0;
5799 }
5800
5801 static int qeth_set_ipa_csum(struct qeth_card *card, bool on, int cstype,
5802                              enum qeth_prot_versions prot)
5803 {
5804         return on ? qeth_set_csum_on(card, cstype, prot) :
5805                     qeth_set_csum_off(card, cstype, prot);
5806 }
5807
5808 static int qeth_start_tso_cb(struct qeth_card *card, struct qeth_reply *reply,
5809                              unsigned long data)
5810 {
5811         struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
5812         struct qeth_tso_start_data *tso_data = reply->param;
5813
5814         if (qeth_setassparms_inspect_rc(cmd))
5815                 return -EIO;
5816
5817         tso_data->mss = cmd->data.setassparms.data.tso.mss;
5818         tso_data->supported = cmd->data.setassparms.data.tso.supported;
5819         return 0;
5820 }
5821
5822 static int qeth_set_tso_off(struct qeth_card *card,
5823                             enum qeth_prot_versions prot)
5824 {
5825         return qeth_send_simple_setassparms_prot(card, IPA_OUTBOUND_TSO,
5826                                                  IPA_CMD_ASS_STOP, NULL, prot);
5827 }
5828
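/* Enable TSO in two IPA steps: ASS_START must report a non-zero MSS and TCP
 * large send support; ASS_ENABLE then activates TCP large send and the
 * returned caps are verified.  Any failure switches TSO off again.
 */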
5829 static int qeth_set_tso_on(struct qeth_card *card,
5830                            enum qeth_prot_versions prot)
5831 {
5832         struct qeth_tso_start_data tso_data;
5833         struct qeth_cmd_buffer *iob;
5834         struct qeth_ipa_caps caps;
5835         int rc;
5836
5837         iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
5838                                        IPA_CMD_ASS_START, 0, prot);
5839         if (!iob)
5840                 return -ENOMEM;
5841
5842         rc = qeth_send_ipa_cmd(card, iob, qeth_start_tso_cb, &tso_data);
5843         if (rc)
5844                 return rc;
5845
5846         if (!tso_data.mss || !(tso_data.supported & QETH_IPA_LARGE_SEND_TCP)) {
5847                 qeth_set_tso_off(card, prot);
5848                 return -EOPNOTSUPP;
5849         }
5850
5851         iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
5852                                        IPA_CMD_ASS_ENABLE,
5853                                        SETASS_DATA_SIZEOF(caps), prot);
5854         if (!iob) {
5855                 qeth_set_tso_off(card, prot);
5856                 return -ENOMEM;
5857         }
5858
5859         /* enable TSO capability */
5860         __ipa_cmd(iob)->data.setassparms.data.caps.enabled =
5861                 QETH_IPA_LARGE_SEND_TCP;
5862         rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps);
5863         if (rc) {
5864                 qeth_set_tso_off(card, prot);
5865                 return rc;
5866         }
5867
5868         if (!qeth_ipa_caps_supported(&caps, QETH_IPA_LARGE_SEND_TCP) ||
5869             !qeth_ipa_caps_enabled(&caps, QETH_IPA_LARGE_SEND_TCP)) {
5870                 qeth_set_tso_off(card, prot);
5871                 return -EOPNOTSUPP;
5872         }
5873
5874         dev_info(&card->gdev->dev, "TSOv%u enabled (MSS: %u)\n", prot,
5875                  tso_data.mss);
5876         return 0;
5877 }
5878
5879 static int qeth_set_ipa_tso(struct qeth_card *card, bool on,
5880                             enum qeth_prot_versions prot)
5881 {
5882         return on ? qeth_set_tso_on(card, prot) : qeth_set_tso_off(card, prot);
5883 }
5884
5885 static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on)
5886 {
5887         int rc_ipv4 = (on) ? -EOPNOTSUPP : 0;
5888         int rc_ipv6;
5889
5890         if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM))
5891                 rc_ipv4 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
5892                                             QETH_PROT_IPV4);
5893         if (!qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
5894                 /* none or only the IPv4 Assist is available, so rc_ipv4 is the result */
5895                 return rc_ipv4;
5896
5897         rc_ipv6 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
5898                                     QETH_PROT_IPV6);
5899
5900         if (on)
5901                 /* enable: success if any Assist is active */
5902                 return (rc_ipv6) ? rc_ipv4 : 0;
5903
5904         /* disable: failure if any Assist is still active */
5905         return (rc_ipv6) ? rc_ipv6 : rc_ipv4;
5906 }
5907
5908 /**
5909  * qeth_enable_hw_features() - (Re-)Enable HW functions for device features
5910  * @dev:        a net_device
5911  */
5912 void qeth_enable_hw_features(struct net_device *dev)
5913 {
5914         struct qeth_card *card = dev->ml_priv;
5915         netdev_features_t features;
5916
5917         features = dev->features;
5918         /* Force-off any features that might need an IPA sequence;
5919          * netdev_update_features() will re-enable them.
5920          */
5921         dev->features &= ~dev->hw_features;
5922         /* toggle VLAN filter, so that VIDs are re-programmed: */
5923         if (IS_LAYER2(card) && IS_VM_NIC(card)) {
5924                 dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
5925                 dev->wanted_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
5926         }
5927         netdev_update_features(dev);
5928         if (features != dev->features)
5929                 dev_warn(&card->gdev->dev,
5930                          "Device recovery failed to restore all offload features\n");
5931 }
5932 EXPORT_SYMBOL_GPL(qeth_enable_hw_features);
5933
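/* ndo_set_features handler: run the IPA sequence for every offload bit that
 * is being toggled.  Bits that could not be changed are removed from
 * 'changed'; on partial failure dev->features is updated to reflect the
 * toggles that did succeed and -EIO is returned.
 */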
5934 int qeth_set_features(struct net_device *dev, netdev_features_t features)
5935 {
5936         struct qeth_card *card = dev->ml_priv;
5937         netdev_features_t changed = dev->features ^ features;
5938         int rc = 0;
5939
5940         QETH_CARD_TEXT(card, 2, "setfeat");
5941         QETH_CARD_HEX(card, 2, &features, sizeof(features));
5942
5943         if ((changed & NETIF_F_IP_CSUM)) {
5944                 rc = qeth_set_ipa_csum(card, features & NETIF_F_IP_CSUM,
5945                                        IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV4);
5946                 if (rc)
5947                         changed ^= NETIF_F_IP_CSUM;
5948         }
5949         if (changed & NETIF_F_IPV6_CSUM) {
5950                 rc = qeth_set_ipa_csum(card, features & NETIF_F_IPV6_CSUM,
5951                                        IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV6);
5952                 if (rc)
5953                         changed ^= NETIF_F_IPV6_CSUM;
5954         }
5955         if (changed & NETIF_F_RXCSUM) {
5956                 rc = qeth_set_ipa_rx_csum(card, features & NETIF_F_RXCSUM);
5957                 if (rc)
5958                         changed ^= NETIF_F_RXCSUM;
5959         }
5960         if (changed & NETIF_F_TSO) {
5961                 rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO,
5962                                       QETH_PROT_IPV4);
5963                 if (rc)
5964                         changed ^= NETIF_F_TSO;
5965         }
5966         if (changed & NETIF_F_TSO6) {
5967                 rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO6,
5968                                       QETH_PROT_IPV6);
5969                 if (rc)
5970                         changed ^= NETIF_F_TSO6;
5971         }
5972
5973         /* everything changed successfully? */
5974         if ((dev->features ^ features) == changed)
5975                 return 0;
5976         /* something went wrong. save changed features and return error */
5977         dev->features ^= changed;
5978         return -EIO;
5979 }
5980 EXPORT_SYMBOL_GPL(qeth_set_features);
5981
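/* ndo_fix_features handler: clear any offload bits for which the card lacks
 * the corresponding IPA assist.
 */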
5982 netdev_features_t qeth_fix_features(struct net_device *dev,
5983                                     netdev_features_t features)
5984 {
5985         struct qeth_card *card = dev->ml_priv;
5986
5987         QETH_CARD_TEXT(card, 2, "fixfeat");
5988         if (!qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM))
5989                 features &= ~NETIF_F_IP_CSUM;
5990         if (!qeth_is_supported6(card, IPA_OUTBOUND_CHECKSUM_V6))
5991                 features &= ~NETIF_F_IPV6_CSUM;
5992         if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM) &&
5993             !qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
5994                 features &= ~NETIF_F_RXCSUM;
5995         if (!qeth_is_supported(card, IPA_OUTBOUND_TSO))
5996                 features &= ~NETIF_F_TSO;
5997         if (!qeth_is_supported6(card, IPA_OUTBOUND_TSO))
5998                 features &= ~NETIF_F_TSO6;
5999
6000         QETH_CARD_HEX(card, 2, &features, sizeof(features));
6001         return features;
6002 }
6003 EXPORT_SYMBOL_GPL(qeth_fix_features);
6004
6005 netdev_features_t qeth_features_check(struct sk_buff *skb,
6006                                       struct net_device *dev,
6007                                       netdev_features_t features)
6008 {
6009         /* GSO segmentation builds skbs with
6010          *      a (small) linear part for the headers, and
6011          *      page frags for the data.
6012          * Compared to a linear skb, the header-only part consumes an
6013          * additional buffer element. This reduces buffer utilization, and
6014          * hurts throughput. So compress small segments into one element.
6015          */
6016         if (netif_needs_gso(skb, features)) {
6017                 /* match skb_segment(): */
6018                 unsigned int doffset = skb->data - skb_mac_header(skb);
6019                 unsigned int hsize = skb_shinfo(skb)->gso_size;
6020                 unsigned int hroom = skb_headroom(skb);
6021
6022                 /* linearize only if resulting skb allocations are order-0: */
6023                 if (SKB_DATA_ALIGN(hroom + doffset + hsize) <= SKB_MAX_HEAD(0))
6024                         features &= ~NETIF_F_SG;
6025         }
6026
6027         return vlan_features_check(skb, features);
6028 }
6029 EXPORT_SYMBOL_GPL(qeth_features_check);
6030
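/* RX counters are kept per card, TX counters are summed up over all output
 * queues.
 */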
6031 void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
6032 {
6033         struct qeth_card *card = dev->ml_priv;
6034         struct qeth_qdio_out_q *queue;
6035         unsigned int i;
6036
6037         QETH_CARD_TEXT(card, 5, "getstat");
6038
6039         stats->rx_packets = card->stats.rx_packets;
6040         stats->rx_bytes = card->stats.rx_bytes;
6041         stats->rx_errors = card->stats.rx_errors;
6042         stats->rx_dropped = card->stats.rx_dropped;
6043         stats->multicast = card->stats.rx_multicast;
6044
6045         for (i = 0; i < card->qdio.no_out_queues; i++) {
6046                 queue = card->qdio.out_qs[i];
6047
6048                 stats->tx_packets += queue->stats.tx_packets;
6049                 stats->tx_bytes += queue->stats.tx_bytes;
6050                 stats->tx_errors += queue->stats.tx_errors;
6051                 stats->tx_dropped += queue->stats.tx_dropped;
6052         }
6053 }
6054 EXPORT_SYMBOL_GPL(qeth_get_stats64);
6055
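/* On IQD devices, non-unicast traffic gets its own TX queue; unicast is
 * mapped to the first unicast queue.
 */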
6056 u16 qeth_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
6057                           u8 cast_type, struct net_device *sb_dev)
6058 {
6059         if (cast_type != RTN_UNICAST)
6060                 return QETH_IQD_MCAST_TXQ;
6061         return QETH_IQD_MIN_UCAST_TXQ;
6062 }
6063 EXPORT_SYMBOL_GPL(qeth_iqd_select_queue);
6064
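/* ndo_open handler: switch the QDIO data device to polled (NAPI) completion
 * handling, start the TX queues and schedule NAPI once to pick up anything
 * that is already pending.
 */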
6065 int qeth_open(struct net_device *dev)
6066 {
6067         struct qeth_card *card = dev->ml_priv;
6068
6069         QETH_CARD_TEXT(card, 4, "qethopen");
6070
6071         if (qdio_stop_irq(CARD_DDEV(card), 0) < 0)
6072                 return -EIO;
6073
6074         card->data.state = CH_STATE_UP;
6075         netif_tx_start_all_queues(dev);
6076
6077         napi_enable(&card->napi);
6078         local_bh_disable();
6079         napi_schedule(&card->napi);
6080         /* kick-start the NAPI softirq: */
6081         local_bh_enable();
6082         return 0;
6083 }
6084 EXPORT_SYMBOL_GPL(qeth_open);
6085
6086 int qeth_stop(struct net_device *dev)
6087 {
6088         struct qeth_card *card = dev->ml_priv;
6089
6090         QETH_CARD_TEXT(card, 4, "qethstop");
6091         netif_tx_disable(dev);
6092         napi_disable(&card->napi);
6093         return 0;
6094 }
6095 EXPORT_SYMBOL_GPL(qeth_stop);
6096
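/* Module init: register the debug views, the root device, the two slab caches
 * and the ccw/ccwgroup drivers; on failure unwind in reverse order.
 */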
6097 static int __init qeth_core_init(void)
6098 {
6099         int rc;
6100
6101         pr_info("loading core functions\n");
6102
6103         rc = qeth_register_dbf_views();
6104         if (rc)
6105                 goto dbf_err;
6106         qeth_core_root_dev = root_device_register("qeth");
6107         rc = PTR_ERR_OR_ZERO(qeth_core_root_dev);
6108         if (rc)
6109                 goto register_err;
6110         qeth_core_header_cache =
6111                 kmem_cache_create("qeth_hdr", QETH_HDR_CACHE_OBJ_SIZE,
6112                                   roundup_pow_of_two(QETH_HDR_CACHE_OBJ_SIZE),
6113                                   0, NULL);
6114         if (!qeth_core_header_cache) {
6115                 rc = -ENOMEM;
6116                 goto slab_err;
6117         }
6118         qeth_qdio_outbuf_cache = kmem_cache_create("qeth_buf",
6119                         sizeof(struct qeth_qdio_out_buffer), 0, 0, NULL);
6120         if (!qeth_qdio_outbuf_cache) {
6121                 rc = -ENOMEM;
6122                 goto cqslab_err;
6123         }
6124         rc = ccw_driver_register(&qeth_ccw_driver);
6125         if (rc)
6126                 goto ccw_err;
6127         rc = ccwgroup_driver_register(&qeth_core_ccwgroup_driver);
6128         if (rc)
6129                 goto ccwgroup_err;
6130
6131         return 0;
6132
6133 ccwgroup_err:
6134         ccw_driver_unregister(&qeth_ccw_driver);
6135 ccw_err:
6136         kmem_cache_destroy(qeth_qdio_outbuf_cache);
6137 cqslab_err:
6138         kmem_cache_destroy(qeth_core_header_cache);
6139 slab_err:
6140         root_device_unregister(qeth_core_root_dev);
6141 register_err:
6142         qeth_unregister_dbf_views();
6143 dbf_err:
6144         pr_err("Initializing the qeth device driver failed\n");
6145         return rc;
6146 }
6147
6148 static void __exit qeth_core_exit(void)
6149 {
6150         qeth_clear_dbf_list();
6151         ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver);
6152         ccw_driver_unregister(&qeth_ccw_driver);
6153         kmem_cache_destroy(qeth_qdio_outbuf_cache);
6154         kmem_cache_destroy(qeth_core_header_cache);
6155         root_device_unregister(qeth_core_root_dev);
6156         qeth_unregister_dbf_views();
6157         pr_info("core functions removed\n");
6158 }
6159
6160 module_init(qeth_core_init);
6161 module_exit(qeth_core_exit);
6162 MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>");
6163 MODULE_DESCRIPTION("qeth core functions");
6164 MODULE_LICENSE("GPL");