2018-03-06, 21:09:27 +0530, Atul Gupta wrote:
[snip]
> +static int chtls_set_tcb_field(struct sock *sk, u16 word, u64 mask, u64 val)
> +{
> +        struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
> +        struct sk_buff *skb;
> +        struct cpl_set_tcb_field *req;
> +        struct ulptx_idata *sc;
> +        unsigned int wrlen = roundup(sizeof(*req) + sizeof(*sc), 16);
> +        unsigned int credits_needed = DIV_ROUND_UP(wrlen, 16);
> +
> +        skb = alloc_skb(wrlen, GFP_ATOMIC);

I haven't followed the whole call chain, but does this really need to be
an atomic allocation? Should this skb be charged to the socket's memory
accounting?

> +        if (!skb)
> +                return -ENOMEM;
> +
> +        __set_tcb_field(sk, skb, word, mask, val, 0, 1);
> +        set_queue(skb, (csk->txq_idx << 1) | CPL_PRIORITY_DATA, sk);
> +        csk->wr_credits -= credits_needed;
> +        csk->wr_unacked += credits_needed;
> +        enqueue_wr(csk, skb);
> +        cxgb4_ofld_send(csk->egress_dev, skb);

Should the return value be checked?

> +        return 0;
> +}

[snip]

> +static void *chtls_alloc_mem(unsigned long size)
> +{
> +        void *p = kmalloc(size, GFP_KERNEL);
> +
> +        if (!p)
> +                p = vmalloc(size);
> +        if (p)
> +                memset(p, 0, size);
> +        return p;
> +}

I think you can replace this with kvzalloc() (quick sketch further down).

> +static void chtls_free_mem(void *addr)
> +{
> +        unsigned long p = (unsigned long)addr;
> +
> +        if (p >= VMALLOC_START && p < VMALLOC_END)
> +                vfree(addr);
> +        else
> +                kfree(addr);
> +}

Use kvfree().

> +/* TLS Key bitmap processing */
> +int chtls_init_kmap(struct chtls_dev *cdev, struct cxgb4_lld_info *lldi)
> +{
> +        unsigned int num_key_ctx, bsize;
> +
> +        num_key_ctx = (lldi->vr->key.size / TLS_KEY_CONTEXT_SZ);
> +        bsize = BITS_TO_LONGS(num_key_ctx);
> +
> +        cdev->kmap.size = num_key_ctx;
> +        cdev->kmap.available = bsize;
> +        cdev->kmap.addr = chtls_alloc_mem(sizeof(*cdev->kmap.addr) *
> +                                          bsize);
> +        if (!cdev->kmap.addr)
> +                return -1;

The return value of this function is never checked.

> +
> +        cdev->kmap.start = lldi->vr->key.start;
> +        spin_lock_init(&cdev->kmap.lock);
> +        return 0;
> +}
> +
> +void chtls_free_kmap(struct chtls_dev *cdev)
> +{
> +        if (cdev->kmap.addr)
> +                chtls_free_mem(cdev->kmap.addr);
> +}

I think this wrapper function is not necessary.

> +static int get_new_keyid(struct chtls_sock *csk, u32 optname)
> +{
> +        struct chtls_dev *cdev = csk->cdev;
> +        struct chtls_hws *hws = &csk->tlshws;
> +        struct net_device *dev = csk->egress_dev;
> +        struct adapter *adap = netdev2adap(dev);
> +        int keyid;
> +
> +        spin_lock_bh(&cdev->kmap.lock);
> +        keyid = find_first_zero_bit(cdev->kmap.addr, cdev->kmap.size);
> +        if (keyid < cdev->kmap.size) {
> +                __set_bit(keyid, cdev->kmap.addr);
> +                if (optname == TLS_RX)

TLS_RX only gets defined in patch 11, so the only reason you're not
breaking the build is because all these new files aren't getting
compiled until patch 12.

> +                        hws->rxkey = keyid;
> +                else
> +                        hws->txkey = keyid;
> +                atomic_inc(&adap->chcr_stats.tls_key);
> +        } else {
> +                keyid = -1;
> +        }
> +        spin_unlock_bh(&cdev->kmap.lock);
> +        return keyid;
> +}
> +

[snip]

> +int chtls_setkey(struct chtls_sock *csk, u32 keylen, u32 optname)
> +{
...
> +        skb = alloc_skb(len, GFP_KERNEL);
> +        if (!skb)
> +                return -ENOMEM;

This return code is also ignored by its caller. Please review the whole
patchset for that problem.

Same question as before: does this need to be accounted?
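
Coming back to the kvzalloc()/kvfree() comments above, this is roughly
what I have in mind -- untested sketch, and it assumes nothing actually
relies on the explicit VMALLOC_START/VMALLOC_END check:

static void *chtls_alloc_mem(unsigned long size)
{
        /* kvzalloc() tries kmalloc() first, then falls back to vzalloc() */
        return kvzalloc(size, GFP_KERNEL);
}

static void chtls_free_mem(void *addr)
{
        /* kvfree() handles both kmalloc()ed and vmalloc()ed pointers */
        kvfree(addr);
}

At that point the wrappers don't do much anymore, so you could probably
drop them and call kvzalloc()/kvfree() (they should come in via
<linux/mm.h>) directly at the call sites.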
> +        /* ulptx command */
> +        kwr->req.cmd = cpu_to_be32(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
> +                                   T5_ULP_MEMIO_ORDER_V(1) |
> +                                   T5_ULP_MEMIO_IMM_V(1));
> +        kwr->req.len16 = cpu_to_be32((csk->tid << 8) |
> +                                     DIV_ROUND_UP(len - sizeof(kwr->wr), 16));
> +        kwr->req.dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN_V(klen >> 5));
> +        kwr->req.lock_addr = cpu_to_be32(ULP_MEMIO_ADDR_V(keyid_to_addr
> +                                        (cdev->kmap.start, keyid)));

Breaking this line in that way makes it really hard to read for humans.
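One way to avoid that split would be to pull the address computation
into a local variable first, along these lines (untested; the variable
name/type and where it gets declared are just for illustration):

        /* compute the key address separately so the line stays readable */
        u32 kaddr = keyid_to_addr(cdev->kmap.start, keyid);

        kwr->req.lock_addr = cpu_to_be32(ULP_MEMIO_ADDR_V(kaddr));

--
Sabrina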