1409 "tcp_input_listener: listen half-open "
1410 "queue (max=%d) full (%d pending) on %s",
1411 tcps->tcps_conn_req_max_q0,
1412 listener->tcp_conn_req_cnt_q0,
1413 tcp_display(listener, NULL,
1414 DISP_PORT_ONLY));
1415 }
1416 goto error2;
1417 }
1418 }
1419
1420 /*
1421 * Enforce the limit set on the number of connections per listener.
1422 * Note that tlc_cnt starts with 1. So need to add 1 to tlc_max
1423 * for comparison.
1424 */
1425 if (listener->tcp_listen_cnt != NULL) {
1426 tcp_listen_cnt_t *tlc = listener->tcp_listen_cnt;
1427 int64_t now;
1428
1429 if (atomic_add_32_nv(&tlc->tlc_cnt, 1) > tlc->tlc_max + 1) {
1430 mutex_exit(&listener->tcp_eager_lock);
1431 now = ddi_get_lbolt64();
1432 atomic_add_32(&tlc->tlc_cnt, -1);
1433 TCP_STAT(tcps, tcp_listen_cnt_drop);
1434 tlc->tlc_drop++;
1435 if (now - tlc->tlc_report_time >
1436 MSEC_TO_TICK(TCP_TLC_REPORT_INTERVAL)) {
1437 zcmn_err(lconnp->conn_zoneid, CE_WARN,
1438 "Listener (port %d) connection max (%u) "
1439 "reached: %u attempts dropped total\n",
1440 ntohs(listener->tcp_connp->conn_lport),
1441 tlc->tlc_max, tlc->tlc_drop);
1442 tlc->tlc_report_time = now;
1443 }
1444 goto error2;
1445 }
1446 tlc_set = B_TRUE;
1447 }
1448
1449 mutex_exit(&listener->tcp_eager_lock);
1450
1451 /*
1452 * IP sets ira_sqp to either the senders conn_sqp (for loopback)
1854 * will retransmit at which time the SYN can be
1855 * treated as a new connection or dealt with
1856 * a TH_RST if a connection already exists.
1857 */
1858 CONN_DEC_REF(econnp);
1859 freemsg(mp);
1860 } else {
1861 SQUEUE_ENTER_ONE(econnp->conn_sqp, mp, tcp_input_data,
1862 econnp, ira, SQ_FILL, SQTAG_TCP_CONN_REQ_1);
1863 }
1864 } else {
1865 /* Nobody wants this packet */
1866 freemsg(mp);
1867 }
1868 return;
1869 error3:
1870 CONN_DEC_REF(econnp);
1871 error2:
1872 freemsg(mp);
1873 if (tlc_set)
1874 atomic_add_32(&listener->tcp_listen_cnt->tlc_cnt, -1);
1875 }
1876
1877 /*
1878 * In an ideal case of vertical partition in NUMA architecture, it's
1879 * beneficial to have the listener and all the incoming connections
1880 * tied to the same squeue. The other constraint is that incoming
1881 * connections should be tied to the squeue attached to the interrupted
1882 * CPU for obvious locality reasons, so this leaves the listener to
1883 * be tied to the same squeue. Our only problem is that when the listener
1884 * is binding, the CPU that will get interrupted by the NIC whose
1885 * IP address the listener is binding to is not even known. So
1886 * the code below allows us to change that binding at the time the
1887 * CPU is interrupted by virtue of the incoming connection's squeue.
1888 *
1889 * This is useful only in the case of a listener bound to a specific IP
1890 * address. For other kinds of listeners, they get bound the
1891 * very first time and there is no attempt to rebind them.
1892 */
1893 void
1894 tcp_input_listener_unbound(void *arg, mblk_t *mp, void *arg2,
|
1409 "tcp_input_listener: listen half-open "
1410 "queue (max=%d) full (%d pending) on %s",
1411 tcps->tcps_conn_req_max_q0,
1412 listener->tcp_conn_req_cnt_q0,
1413 tcp_display(listener, NULL,
1414 DISP_PORT_ONLY));
1415 }
1416 goto error2;
1417 }
1418 }
1419
1420 /*
1421 * Enforce the limit set on the number of connections per listener.
1422 * Note that tlc_cnt starts with 1. So need to add 1 to tlc_max
1423 * for comparison.
1424 */
1425 if (listener->tcp_listen_cnt != NULL) {
1426 tcp_listen_cnt_t *tlc = listener->tcp_listen_cnt;
1427 int64_t now;
1428
1429 if (atomic_inc_32_nv(&tlc->tlc_cnt) > tlc->tlc_max + 1) {
1430 mutex_exit(&listener->tcp_eager_lock);
1431 now = ddi_get_lbolt64();
1432 atomic_dec_32(&tlc->tlc_cnt);
1433 TCP_STAT(tcps, tcp_listen_cnt_drop);
1434 tlc->tlc_drop++;
1435 if (now - tlc->tlc_report_time >
1436 MSEC_TO_TICK(TCP_TLC_REPORT_INTERVAL)) {
1437 zcmn_err(lconnp->conn_zoneid, CE_WARN,
1438 "Listener (port %d) connection max (%u) "
1439 "reached: %u attempts dropped total\n",
1440 ntohs(listener->tcp_connp->conn_lport),
1441 tlc->tlc_max, tlc->tlc_drop);
1442 tlc->tlc_report_time = now;
1443 }
1444 goto error2;
1445 }
1446 tlc_set = B_TRUE;
1447 }
1448
1449 mutex_exit(&listener->tcp_eager_lock);
1450
1451 /*
1452 * IP sets ira_sqp to either the senders conn_sqp (for loopback)
1854 * will retransmit at which time the SYN can be
1855 * treated as a new connection or dealt with
1856 * a TH_RST if a connection already exists.
1857 */
1858 CONN_DEC_REF(econnp);
1859 freemsg(mp);
1860 } else {
1861 SQUEUE_ENTER_ONE(econnp->conn_sqp, mp, tcp_input_data,
1862 econnp, ira, SQ_FILL, SQTAG_TCP_CONN_REQ_1);
1863 }
1864 } else {
1865 /* Nobody wants this packet */
1866 freemsg(mp);
1867 }
1868 return;
1869 error3:
1870 CONN_DEC_REF(econnp);
1871 error2:
1872 freemsg(mp);
1873 if (tlc_set)
1874 atomic_dec_32(&listener->tcp_listen_cnt->tlc_cnt);
1875 }
1876
1877 /*
1878 * In an ideal case of vertical partition in NUMA architecture, it's
1879 * beneficial to have the listener and all the incoming connections
1880 * tied to the same squeue. The other constraint is that incoming
1881 * connections should be tied to the squeue attached to the interrupted
1882 * CPU for obvious locality reasons, so this leaves the listener to
1883 * be tied to the same squeue. Our only problem is that when the listener
1884 * is binding, the CPU that will get interrupted by the NIC whose
1885 * IP address the listener is binding to is not even known. So
1886 * the code below allows us to change that binding at the time the
1887 * CPU is interrupted by virtue of the incoming connection's squeue.
1888 *
1889 * This is useful only in the case of a listener bound to a specific IP
1890 * address. For other kinds of listeners, they get bound the
1891 * very first time and there is no attempt to rebind them.
1892 */
1893 void
1894 tcp_input_listener_unbound(void *arg, mblk_t *mp, void *arg2,
|