commit d9fc151172

Merge tag 'net-6.2-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from Paolo Abeni:
 "Including fixes from rxrpc.

  The rxrpc changes are noticeably large: addressing a recent regression
  required completing the threaded refactor.

  Current release - regressions:

   - rxrpc:
      - only disconnect calls in the I/O thread
      - move client call connection to the I/O thread
      - fix incoming call setup race

   - eth: mlx5:
      - restore pkt rate policing support
      - fix memory leak on updating vport counters

  Previous releases - regressions:

   - gro: take care of DODGY packets

   - ipv6: deduct extension header length in rawv6_push_pending_frames

   - tipc: fix unexpected link reset due to discovery messages

  Previous releases - always broken:

   - sched: disallow noqueue for qdisc classes

   - eth: ice: fix potential memory leak in ice_gnss_tty_write()

   - eth: ixgbe: fix pci device refcount leak

   - eth: mlx5:
      - fix command stats access after free
      - fix macsec possible null dereference when updating MAC security
        entity (SecY)"

* tag 'net-6.2-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (64 commits)
  r8152: add vendor/device ID pair for Microsoft Devkit
  net: stmmac: add aux timestamps fifo clearance wait
  bnxt: make sure we return pages to the pool
  net: hns3: fix wrong use of rss size during VF rss config
  ipv6: raw: Deduct extension header length in rawv6_push_pending_frames
  net: lan966x: check for ptp to be enabled in lan966x_ptp_deinit()
  net: sched: disallow noqueue for qdisc classes
  iavf/iavf_main: actually log ->src mask when talking about it
  igc: Fix PPS delta between two synchronized end-points
  ixgbe: fix pci device refcount leak
  octeontx2-pf: Fix resource leakage in VF driver unbind
  selftests/net: l2_tos_ttl_inherit.sh: Ensure environment cleanup on failure.
  selftests/net: l2_tos_ttl_inherit.sh: Run tests in their own netns.
  selftests/net: l2_tos_ttl_inherit.sh: Set IPv6 addresses with "nodad".
  net/mlx5e: Fix macsec possible null dereference when updating MAC security entity (SecY)
  net/mlx5e: Fix macsec ssci attribute handling in offload path
  net/mlx5: E-switch, Coverity: overlapping copy
  net/mlx5e: Don't support encap rules with gbp option
  net/mlx5: Fix ptp max frequency adjustment range
  net/mlx5e: Fix memory leak on updating vport counters
  ...
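For context on the rxrpc/afs hunks further down: the refactor replaces the opaque three-letter abort "why" strings with a typed enum rxrpc_abort_reason that tracepoints can decode symbolically. The sketch below is illustrative only (not part of the patch); it reuses only identifiers that appear in this diff (rxrpc_kernel_abort_call(), RX_USER_ABORT, afs_abort_interrupted, RXRPC_TRACE_ONLY_DEFINE_ENUMS) and, like any kernel snippet, builds only inside a kernel tree.

```c
/* Illustrative sketch, not part of this series: how a kernel-side rxrpc user
 * (kAFS in this diff) aborts a call after the change.  The last argument is
 * now an enum rxrpc_abort_reason rather than a string, so trace output can
 * render it with __print_symbolic().
 */
#define RXRPC_TRACE_ONLY_DEFINE_ENUMS	/* pull in only the enums, as fs/afs does */
#include <trace/events/rxrpc.h>
#include <net/af_rxrpc.h>

static void example_abort_interrupted(struct socket *sock, struct rxrpc_call *rxcall)
{
	/* Old form: rxrpc_kernel_abort_call(sock, rxcall, RX_USER_ABORT, -EINTR, "KWI");
	 * RX_USER_ABORT comes from the caller's RX protocol definitions.
	 */
	rxrpc_kernel_abort_call(sock, rxcall, RX_USER_ABORT, -EINTR,
				afs_abort_interrupted);
}
```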
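The "gro: take care of DODGY packets" fix mentioned above lands in the dev_gro_receive() hunk near the end of this diff. A compacted sketch of that guard follows, for context only; the enclosing function and file are not reproduced, and the helper name here is hypothetical.

```c
/* Minimal sketch of the GRO guard added by this pull: GSO packets coming from
 * untrusted sources (SKB_GSO_DODGY) are flushed instead of coalesced, because
 * their gso_size/gso_segs cannot be trusted.
 */
#include <linux/skbuff.h>
#include <net/gro.h>

static void gro_guard_dodgy_gso(struct sk_buff *skb)
{
	NAPI_GRO_CB(skb)->count = 1;
	if (unlikely(skb_is_gso(skb))) {
		NAPI_GRO_CB(skb)->count = skb_shinfo(skb)->gso_segs;
		/* Only support TCP and non DODGY users. */
		if (!skb_is_gso_tcp(skb) ||
		    (skb_shinfo(skb)->gso_type & SKB_GSO_DODGY))
			NAPI_GRO_CB(skb)->flush = 1;
	}
}
```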
@@ -880,8 +880,8 @@ The kernel interface functions are as follows:

notify_end_rx can be NULL or it can be used to specify a function to be
called when the call changes state to end the Tx phase. This function is
called with the call-state spinlock held to prevent any reply or final ACK
from being delivered first.
called with a spinlock held to prevent the last DATA packet from being
transmitted until the function returns.

(#) Receive data from a call::
@@ -993,7 +993,7 @@ static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp,
DMA_ATTR_WEAK_ORDERING);
skb = build_skb(page_address(page), PAGE_SIZE);
if (!skb) {
__free_page(page);
page_pool_recycle_direct(rxr->page_pool, page);
return NULL;
}
skb_mark_for_recycle(skb);

@@ -1031,7 +1031,7 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
if (!skb) {
__free_page(page);
page_pool_recycle_direct(rxr->page_pool, page);
return NULL;
}

@@ -3130,7 +3130,7 @@ static int hclgevf_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
hclgevf_update_rss_size(handle, new_tqps_num);
hclge_comm_get_rss_tc_info(cur_rss_size, hdev->hw_tc_map,
hclge_comm_get_rss_tc_info(kinfo->rss_size, hdev->hw_tc_map,
tc_offset, tc_valid, tc_size);
ret = hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset,
tc_valid, tc_size);

@@ -3850,7 +3850,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
field_flags |= IAVF_CLOUD_FIELD_IIP;
} else {
dev_err(&adapter->pdev->dev, "Bad ip src mask 0x%08x\n",
be32_to_cpu(match.mask->dst));
be32_to_cpu(match.mask->src));
return -EINVAL;
}
}
@@ -363,6 +363,7 @@ ice_gnss_tty_write(struct tty_struct *tty, const unsigned char *buf, int count)
/* Send the data out to a hardware port */
write_buf = kzalloc(sizeof(*write_buf), GFP_KERNEL);
if (!write_buf) {
kfree(cmd_buf);
err = -ENOMEM;
goto exit;
}

@@ -460,6 +461,9 @@ static struct tty_driver *ice_gnss_create_tty_driver(struct ice_pf *pf)
for (i = 0; i < ICE_GNSS_TTY_MINOR_DEVICES; i++) {
pf->gnss_tty_port[i] = kzalloc(sizeof(*pf->gnss_tty_port[i]),
GFP_KERNEL);
if (!pf->gnss_tty_port[i])
goto err_out;

pf->gnss_serial[i] = NULL;

tty_port_init(pf->gnss_tty_port[i]);

@@ -469,21 +473,23 @@ static struct tty_driver *ice_gnss_create_tty_driver(struct ice_pf *pf)
err = tty_register_driver(tty_driver);
if (err) {
dev_err(dev, "Failed to register TTY driver err=%d\n", err);

for (i = 0; i < ICE_GNSS_TTY_MINOR_DEVICES; i++) {
tty_port_destroy(pf->gnss_tty_port[i]);
kfree(pf->gnss_tty_port[i]);
}
kfree(ttydrv_name);
tty_driver_kref_put(pf->ice_gnss_tty_driver);

return NULL;
goto err_out;
}

for (i = 0; i < ICE_GNSS_TTY_MINOR_DEVICES; i++)
dev_info(dev, "%s%d registered\n", ttydrv_name, i);

return tty_driver;

err_out:
while (i--) {
tty_port_destroy(pf->gnss_tty_port[i]);
kfree(pf->gnss_tty_port[i]);
}
kfree(ttydrv_name);
tty_driver_kref_put(pf->ice_gnss_tty_driver);

return NULL;
}

/**
@@ -475,7 +475,9 @@
#define IGC_TSAUXC_EN_TT0	BIT(0)  /* Enable target time 0. */
#define IGC_TSAUXC_EN_TT1	BIT(1)  /* Enable target time 1. */
#define IGC_TSAUXC_EN_CLK0	BIT(2)  /* Enable Configurable Frequency Clock 0. */
#define IGC_TSAUXC_ST0		BIT(4)  /* Start Clock 0 Toggle on Target Time 0. */
#define IGC_TSAUXC_EN_CLK1	BIT(5)  /* Enable Configurable Frequency Clock 1. */
#define IGC_TSAUXC_ST1		BIT(7)  /* Start Clock 1 Toggle on Target Time 1. */
#define IGC_TSAUXC_EN_TS0	BIT(8)  /* Enable hardware timestamp 0. */
#define IGC_TSAUXC_AUTT0	BIT(9)  /* Auxiliary Timestamp Taken. */
#define IGC_TSAUXC_EN_TS1	BIT(10) /* Enable hardware timestamp 0. */

@@ -322,7 +322,7 @@ static int igc_ptp_feature_enable_i225(struct ptp_clock_info *ptp,
ts = ns_to_timespec64(ns);
if (rq->perout.index == 1) {
if (use_freq) {
tsauxc_mask = IGC_TSAUXC_EN_CLK1;
tsauxc_mask = IGC_TSAUXC_EN_CLK1 | IGC_TSAUXC_ST1;
tsim_mask = 0;
} else {
tsauxc_mask = IGC_TSAUXC_EN_TT1;

@@ -333,7 +333,7 @@ static int igc_ptp_feature_enable_i225(struct ptp_clock_info *ptp,
freqout = IGC_FREQOUT1;
} else {
if (use_freq) {
tsauxc_mask = IGC_TSAUXC_EN_CLK0;
tsauxc_mask = IGC_TSAUXC_EN_CLK0 | IGC_TSAUXC_ST0;
tsim_mask = 0;
} else {
tsauxc_mask = IGC_TSAUXC_EN_TT0;

@@ -347,10 +347,12 @@ static int igc_ptp_feature_enable_i225(struct ptp_clock_info *ptp,
tsauxc = rd32(IGC_TSAUXC);
tsim = rd32(IGC_TSIM);
if (rq->perout.index == 1) {
tsauxc &= ~(IGC_TSAUXC_EN_TT1 | IGC_TSAUXC_EN_CLK1);
tsauxc &= ~(IGC_TSAUXC_EN_TT1 | IGC_TSAUXC_EN_CLK1 |
IGC_TSAUXC_ST1);
tsim &= ~IGC_TSICR_TT1;
} else {
tsauxc &= ~(IGC_TSAUXC_EN_TT0 | IGC_TSAUXC_EN_CLK0);
tsauxc &= ~(IGC_TSAUXC_EN_TT0 | IGC_TSAUXC_EN_CLK0 |
IGC_TSAUXC_ST0);
tsim &= ~IGC_TSICR_TT0;
}
if (on) {
@@ -855,9 +855,11 @@ static struct pci_dev *ixgbe_get_first_secondary_devfn(unsigned int devfn)
rp_pdev = pci_get_domain_bus_and_slot(0, 0, devfn);
if (rp_pdev && rp_pdev->subordinate) {
bus = rp_pdev->subordinate->number;
pci_dev_put(rp_pdev);
return pci_get_domain_bus_and_slot(0, bus, 0);
}

pci_dev_put(rp_pdev);
return NULL;
}

@@ -874,6 +876,7 @@ static bool ixgbe_x550em_a_has_mii(struct ixgbe_hw *hw)
struct ixgbe_adapter *adapter = hw->back;
struct pci_dev *pdev = adapter->pdev;
struct pci_dev *func0_pdev;
bool has_mii = false;

/* For the C3000 family of SoCs (x550em_a) the internal ixgbe devices
 * are always downstream of root ports @ 0000:00:16.0 & 0000:00:17.0

@@ -884,15 +887,16 @@ static bool ixgbe_x550em_a_has_mii(struct ixgbe_hw *hw)
func0_pdev = ixgbe_get_first_secondary_devfn(PCI_DEVFN(0x16, 0));
if (func0_pdev) {
if (func0_pdev == pdev)
return true;
else
return false;
has_mii = true;
goto out;
}
func0_pdev = ixgbe_get_first_secondary_devfn(PCI_DEVFN(0x17, 0));
if (func0_pdev == pdev)
return true;
has_mii = true;

return false;
out:
pci_dev_put(func0_pdev);
return has_mii;
}

/**
@@ -774,9 +774,9 @@ int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable)
cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
if (enable)
cfg |= CMR_EN | DATA_PKT_RX_EN | DATA_PKT_TX_EN;
cfg |= DATA_PKT_RX_EN | DATA_PKT_TX_EN;
else
cfg &= ~(CMR_EN | DATA_PKT_RX_EN | DATA_PKT_TX_EN);
cfg &= ~(DATA_PKT_RX_EN | DATA_PKT_TX_EN);
cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg);
return 0;
}

@@ -26,7 +26,6 @@
#define CMR_P2X_SEL_SHIFT	59ULL
#define CMR_P2X_SEL_NIX0	1ULL
#define CMR_P2X_SEL_NIX1	2ULL
#define CMR_EN			BIT_ULL(55)
#define DATA_PKT_TX_EN		BIT_ULL(53)
#define DATA_PKT_RX_EN		BIT_ULL(54)
#define CGX_LMAC_TYPE_SHIFT	40

@@ -758,6 +758,8 @@ static void otx2vf_remove(struct pci_dev *pdev)
if (vf->otx2_wq)
destroy_workqueue(vf->otx2_wq);
otx2_ptp_destroy(vf);
otx2_mcam_flow_del(vf);
otx2_shutdown_tc(vf);
otx2vf_disable_mbox_intr(vf);
otx2_detach_resources(&vf->mbox);
if (test_bit(CN10K_LMTST, &vf->hw.cap_flag))
@@ -2176,15 +2176,9 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
return -EINVAL;
}

cmd->stats = kvcalloc(MLX5_CMD_OP_MAX, sizeof(*cmd->stats), GFP_KERNEL);
if (!cmd->stats)
return -ENOMEM;

cmd->pool = dma_pool_create("mlx5_cmd", mlx5_core_dma_dev(dev), size, align, 0);
if (!cmd->pool) {
err = -ENOMEM;
goto dma_pool_err;
}
if (!cmd->pool)
return -ENOMEM;

err = alloc_cmd_page(dev, cmd);
if (err)

@@ -2268,8 +2262,6 @@ err_free_page:

err_free_pool:
dma_pool_destroy(cmd->pool);
dma_pool_err:
kvfree(cmd->stats);
return err;
}

@@ -2282,7 +2274,6 @@ void mlx5_cmd_cleanup(struct mlx5_core_dev *dev)
destroy_msg_cache(dev);
free_cmd_page(dev, cmd);
dma_pool_destroy(cmd->pool);
kvfree(cmd->stats);
}

void mlx5_cmd_set_state(struct mlx5_core_dev *dev,
@@ -34,12 +34,6 @@ static int police_act_validate(const struct flow_action_entry *act,
return -EOPNOTSUPP;
}

if (act->police.rate_pkt_ps) {
NL_SET_ERR_MSG_MOD(extack,
"QoS offload not support packets per second");
return -EOPNOTSUPP;
}

return 0;
}

@@ -127,6 +127,7 @@ mlx5e_post_meter_add_rule(struct mlx5e_priv *priv,
attr->counter = act_counter;

attr->flags |= MLX5_ATTR_FLAG_NO_IN_PORT;
attr->inner_match_level = MLX5_MATCH_NONE;
attr->outer_match_level = MLX5_MATCH_NONE;
attr->chain = 0;
attr->prio = 0;

@@ -88,6 +88,8 @@ static int mlx5e_gen_ip_tunnel_header_vxlan(char buf[],
struct udphdr *udp = (struct udphdr *)(buf);
struct vxlanhdr *vxh;

if (tun_key->tun_flags & TUNNEL_VXLAN_OPT)
return -EOPNOTSUPP;
vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));
*ip_proto = IPPROTO_UDP;
@@ -62,6 +62,7 @@ struct mlx5e_macsec_sa {
u32 enc_key_id;
u32 next_pn;
sci_t sci;
ssci_t ssci;
salt_t salt;

struct rhash_head hash;

@@ -358,7 +359,6 @@ static int mlx5e_macsec_init_sa(struct macsec_context *ctx,
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5_macsec_obj_attrs obj_attrs;
union mlx5e_macsec_rule *macsec_rule;
struct macsec_key *key;
int err;

obj_attrs.next_pn = sa->next_pn;

@@ -368,13 +368,9 @@ static int mlx5e_macsec_init_sa(struct macsec_context *ctx,
obj_attrs.aso_pdn = macsec->aso.pdn;
obj_attrs.epn_state = sa->epn_state;

key = (is_tx) ? &ctx->sa.tx_sa->key : &ctx->sa.rx_sa->key;

if (sa->epn_state.epn_enabled) {
obj_attrs.ssci = (is_tx) ? cpu_to_be32((__force u32)ctx->sa.tx_sa->ssci) :
cpu_to_be32((__force u32)ctx->sa.rx_sa->ssci);

memcpy(&obj_attrs.salt, &key->salt, sizeof(key->salt));
obj_attrs.ssci = cpu_to_be32((__force u32)sa->ssci);
memcpy(&obj_attrs.salt, &sa->salt, sizeof(sa->salt));
}

obj_attrs.replay_window = ctx->secy->replay_window;

@@ -499,10 +495,11 @@ mlx5e_macsec_get_macsec_device_context(const struct mlx5e_macsec *macsec,
}

static void update_macsec_epn(struct mlx5e_macsec_sa *sa, const struct macsec_key *key,
const pn_t *next_pn_halves)
const pn_t *next_pn_halves, ssci_t ssci)
{
struct mlx5e_macsec_epn_state *epn_state = &sa->epn_state;

sa->ssci = ssci;
sa->salt = key->salt;
epn_state->epn_enabled = 1;
epn_state->epn_msb = next_pn_halves->upper;

@@ -550,7 +547,8 @@ static int mlx5e_macsec_add_txsa(struct macsec_context *ctx)
tx_sa->assoc_num = assoc_num;

if (secy->xpn)
update_macsec_epn(tx_sa, &ctx_tx_sa->key, &ctx_tx_sa->next_pn_halves);
update_macsec_epn(tx_sa, &ctx_tx_sa->key, &ctx_tx_sa->next_pn_halves,
ctx_tx_sa->ssci);

err = mlx5_create_encryption_key(mdev, ctx->sa.key, secy->key_len,
MLX5_ACCEL_OBJ_MACSEC_KEY,

@@ -945,7 +943,8 @@ static int mlx5e_macsec_add_rxsa(struct macsec_context *ctx)
rx_sa->fs_id = rx_sc->sc_xarray_element->fs_id;

if (ctx->secy->xpn)
update_macsec_epn(rx_sa, &ctx_rx_sa->key, &ctx_rx_sa->next_pn_halves);
update_macsec_epn(rx_sa, &ctx_rx_sa->key, &ctx_rx_sa->next_pn_halves,
ctx_rx_sa->ssci);

err = mlx5_create_encryption_key(mdev, ctx->sa.key, ctx->secy->key_len,
MLX5_ACCEL_OBJ_MACSEC_KEY,
@@ -4084,6 +4084,9 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
struct mlx5e_vlan_table *vlan;
struct mlx5e_params *params;

if (!netif_device_present(netdev))
return features;

vlan = mlx5e_fs_get_vlan(priv->fs);
mutex_lock(&priv->state_lock);
params = &priv->channels.params;

@@ -191,7 +191,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport_rep)
if (err) {
netdev_warn(priv->netdev, "vport %d error %d reading stats\n",
rep->vport, err);
return;
goto out;
}

#define MLX5_GET_CTR(p, x) \

@@ -241,6 +241,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport_rep)
rep_stats->tx_vport_rdma_multicast_bytes =
MLX5_GET_CTR(out, received_ib_multicast.octets);

out:
kvfree(out);
}

@@ -2419,7 +2419,7 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
priv = mlx5i_epriv(netdev);
tstamp = &priv->tstamp;
stats = rq->stats;
stats = &priv->channel_stats[rq->ix]->rq;

flags_rqpn = be32_to_cpu(cqe->flags_rqpn);
g = (flags_rqpn >> 28) & 3;
@@ -1301,7 +1301,6 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,

if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
mlx5e_mod_hdr_dealloc(&parse_attr->mod_hdr_acts);
if (err)
return err;
}

@@ -1359,8 +1358,10 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
}
mutex_unlock(&tc->t_lock);

if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
mlx5e_mod_hdr_dealloc(&attr->parse_attr->mod_hdr_acts);
mlx5e_detach_mod_hdr(priv, flow);
}

if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
mlx5_fc_destroy(priv->mdev, attr->counter);

@@ -143,7 +143,7 @@ mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
if (mlx5_esw_indir_table_decap_vport(attr))
vport = mlx5_esw_indir_table_decap_vport(attr);

if (attr && !attr->chain && esw_attr->int_port)
if (!attr->chain && esw_attr && esw_attr->int_port)
metadata =
mlx5e_tc_int_port_get_metadata_for_match(esw_attr->int_port);
else

@@ -4143,8 +4143,6 @@ int mlx5_devlink_port_fn_migratable_set(struct devlink_port *port, bool enable,
}

hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
memcpy(hca_caps, MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability),
MLX5_UN_SZ_BYTES(hca_cap_union));
MLX5_SET(cmd_hca_cap_2, hca_caps, migratable, 1);

err = mlx5_vport_set_other_func_cap(esw->dev, hca_caps, vport->vport,

@@ -4236,8 +4234,6 @@ int mlx5_devlink_port_fn_roce_set(struct devlink_port *port, bool enable,
}

hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
memcpy(hca_caps, MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability),
MLX5_UN_SZ_BYTES(hca_cap_union));
MLX5_SET(cmd_hca_cap, hca_caps, roce, enable);

err = mlx5_vport_set_other_func_cap(esw->dev, hca_caps, vport_num,
@@ -90,9 +90,21 @@ static void mlx5i_get_ringparam(struct net_device *dev,
static int mlx5i_set_channels(struct net_device *dev,
struct ethtool_channels *ch)
{
struct mlx5e_priv *priv = mlx5i_epriv(dev);
struct mlx5i_priv *ipriv = netdev_priv(dev);
struct mlx5e_priv *epriv = mlx5i_epriv(dev);

return mlx5e_ethtool_set_channels(priv, ch);
/* rtnl lock protects from race between this ethtool op and sub
 * interface ndo_init/uninit.
 */
ASSERT_RTNL();
if (ipriv->num_sub_interfaces > 0) {
mlx5_core_warn(epriv->mdev,
"can't change number of channels for interfaces with sub interfaces (%u)\n",
ipriv->num_sub_interfaces);
return -EINVAL;
}

return mlx5e_ethtool_set_channels(epriv, ch);
}

static void mlx5i_get_channels(struct net_device *dev,

@@ -160,6 +160,44 @@ void mlx5i_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
stats->tx_dropped = sstats->tx_queue_dropped;
}

struct net_device *mlx5i_parent_get(struct net_device *netdev)
{
struct mlx5e_priv *priv = mlx5i_epriv(netdev);
struct mlx5i_priv *ipriv, *parent_ipriv;
struct net_device *parent_dev;
int parent_ifindex;

ipriv = priv->ppriv;

parent_ifindex = netdev->netdev_ops->ndo_get_iflink(netdev);
parent_dev = dev_get_by_index(dev_net(netdev), parent_ifindex);
if (!parent_dev)
return NULL;

parent_ipriv = netdev_priv(parent_dev);

ASSERT_RTNL();
parent_ipriv->num_sub_interfaces++;

ipriv->parent_dev = parent_dev;

return parent_dev;
}

void mlx5i_parent_put(struct net_device *netdev)
{
struct mlx5e_priv *priv = mlx5i_epriv(netdev);
struct mlx5i_priv *ipriv, *parent_ipriv;

ipriv = priv->ppriv;
parent_ipriv = netdev_priv(ipriv->parent_dev);

ASSERT_RTNL();
parent_ipriv->num_sub_interfaces--;

dev_put(ipriv->parent_dev);
}

int mlx5i_init_underlay_qp(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;

@@ -54,9 +54,11 @@ struct mlx5i_priv {
struct rdma_netdev rn; /* keep this first */
u32 qpn;
bool sub_interface;
u32 num_sub_interfaces;
u32 qkey;
u16 pkey_index;
struct mlx5i_pkey_qpn_ht *qpn_htbl;
struct net_device *parent_dev;
char *mlx5e_priv[];
};

@@ -117,5 +119,9 @@ void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5_av *av, u32 dqpn, u32 dqkey, bool xmit_more);
void mlx5i_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);

/* Reference management for child to parent interfaces. */
struct net_device *mlx5i_parent_get(struct net_device *netdev);
void mlx5i_parent_put(struct net_device *netdev);

#endif /* CONFIG_MLX5_CORE_IPOIB */
#endif /* __MLX5E_IPOB_H__ */
@@ -158,21 +158,28 @@ static int mlx5i_pkey_dev_init(struct net_device *dev)
struct mlx5e_priv *priv = mlx5i_epriv(dev);
struct mlx5i_priv *ipriv, *parent_ipriv;
struct net_device *parent_dev;
int parent_ifindex;

ipriv = priv->ppriv;

/* Get QPN to netdevice hash table from parent */
parent_ifindex = dev->netdev_ops->ndo_get_iflink(dev);
parent_dev = dev_get_by_index(dev_net(dev), parent_ifindex);
/* Link to parent */
parent_dev = mlx5i_parent_get(dev);
if (!parent_dev) {
mlx5_core_warn(priv->mdev, "failed to get parent device\n");
return -EINVAL;
}

if (dev->num_rx_queues < parent_dev->real_num_rx_queues) {
mlx5_core_warn(priv->mdev,
"failed to create child device with rx queues [%d] less than parent's [%d]\n",
dev->num_rx_queues,
parent_dev->real_num_rx_queues);
mlx5i_parent_put(dev);
return -EINVAL;
}

/* Get QPN to netdevice hash table from parent */
parent_ipriv = netdev_priv(parent_dev);
ipriv->qpn_htbl = parent_ipriv->qpn_htbl;
dev_put(parent_dev);

return mlx5i_dev_init(dev);
}

@@ -184,6 +191,7 @@ static int mlx5i_pkey_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)

static void mlx5i_pkey_dev_cleanup(struct net_device *netdev)
{
mlx5i_parent_put(netdev);
return mlx5i_dev_cleanup(netdev);
}

@@ -681,7 +681,7 @@ static int mlx5_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
static const struct ptp_clock_info mlx5_ptp_clock_info = {
.owner = THIS_MODULE,
.name = "mlx5_ptp",
.max_adj = 100000000,
.max_adj = 50000000,
.n_alarm = 0,
.n_ext_ts = 0,
.n_per_out = 0,
@@ -3,7 +3,12 @@

#include "dr_types.h"

#if defined(CONFIG_FRAME_WARN) && (CONFIG_FRAME_WARN < 2048)
/* don't try to optimize STE allocation if the stack is too constaraining */
#define DR_RULE_MAX_STES_OPTIMIZED 0
#else
#define DR_RULE_MAX_STES_OPTIMIZED 5
#endif
#define DR_RULE_MAX_STE_CHAIN_OPTIMIZED (DR_RULE_MAX_STES_OPTIMIZED + DR_ACTION_MAX_STES)

static int dr_rule_append_to_miss_list(struct mlx5dr_domain *dmn,

@@ -1218,10 +1223,7 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule,

mlx5dr_domain_nic_unlock(nic_dmn);

if (unlikely(!hw_ste_arr_is_opt))
kfree(hw_ste_arr);

return 0;
goto out;

free_rule:
dr_rule_clean_rule_members(rule, nic_rule);

@@ -1238,6 +1240,7 @@ remove_from_nic_tbl:
free_hw_ste:
mlx5dr_domain_nic_unlock(nic_dmn);

out:
if (unlikely(!hw_ste_arr_is_opt))
kfree(hw_ste_arr);

@@ -2951,7 +2951,7 @@ struct mlxsw_sp_nexthop_group_info {
gateway:1, /* routes using the group use a gateway */
is_resilient:1;
struct list_head list; /* member in nh_res_grp_list */
struct mlxsw_sp_nexthop nexthops[0];
struct mlxsw_sp_nexthop nexthops[];
#define nh_rif	nexthops[0].rif
};
@@ -1073,6 +1073,9 @@ void lan966x_ptp_deinit(struct lan966x *lan966x)
struct lan966x_port *port;
int i;

if (!lan966x->ptp)
return;

for (i = 0; i < lan966x->num_phys_ports; i++) {
port = lan966x->ports[i];
if (!port)

@@ -95,10 +95,7 @@ lan966x_vcap_is2_get_port_keysets(struct net_device *dev, int lookup,
bool found = false;
u32 val;

/* Check if the port keyset selection is enabled */
val = lan_rd(lan966x, ANA_VCAP_S2_CFG(port->chip_port));
if (!ANA_VCAP_S2_CFG_ENA_GET(val))
return -ENOENT;

/* Collect all keysets for the port in a list */
if (l3_proto == ETH_P_ALL)

@@ -1996,10 +1996,7 @@ static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii)

/* 8168F family. */
{ 0x7c8, 0x488, RTL_GIGA_MAC_VER_38 },
/* It seems this chip version never made it to
 * the wild. Let's disable detection.
 * { 0x7cf, 0x481, RTL_GIGA_MAC_VER_36 },
 */
{ 0x7cf, 0x481, RTL_GIGA_MAC_VER_36 },
{ 0x7cf, 0x480, RTL_GIGA_MAC_VER_35 },

/* 8168E family. */
@@ -90,7 +90,6 @@ struct mediatek_dwmac_plat_data {
struct mediatek_dwmac_variant {
int (*dwmac_set_phy_interface)(struct mediatek_dwmac_plat_data *plat);
int (*dwmac_set_delay)(struct mediatek_dwmac_plat_data *plat);
void (*dwmac_fix_mac_speed)(void *priv, unsigned int speed);

/* clock ids to be requested */
const char * const *clk_list;

@@ -443,32 +442,9 @@ static int mt8195_set_delay(struct mediatek_dwmac_plat_data *plat)
return 0;
}

static void mt8195_fix_mac_speed(void *priv, unsigned int speed)
{
struct mediatek_dwmac_plat_data *priv_plat = priv;

if ((phy_interface_mode_is_rgmii(priv_plat->phy_mode))) {
/* prefer 2ns fixed delay which is controlled by TXC_PHASE_CTRL,
 * when link speed is 1Gbps with RGMII interface,
 * Fall back to delay macro circuit for 10/100Mbps link speed.
 */
if (speed == SPEED_1000)
regmap_update_bits(priv_plat->peri_regmap,
MT8195_PERI_ETH_CTRL0,
MT8195_RGMII_TXC_PHASE_CTRL |
MT8195_DLY_GTXC_ENABLE |
MT8195_DLY_GTXC_INV |
MT8195_DLY_GTXC_STAGES,
MT8195_RGMII_TXC_PHASE_CTRL);
else
mt8195_set_delay(priv_plat);
}
}

static const struct mediatek_dwmac_variant mt8195_gmac_variant = {
.dwmac_set_phy_interface = mt8195_set_interface,
.dwmac_set_delay = mt8195_set_delay,
.dwmac_fix_mac_speed = mt8195_fix_mac_speed,
.clk_list = mt8195_dwmac_clk_l,
.num_clks = ARRAY_SIZE(mt8195_dwmac_clk_l),
.dma_bit_mask = 35,

@@ -619,8 +595,6 @@ static int mediatek_dwmac_common_data(struct platform_device *pdev,
plat->bsp_priv = priv_plat;
plat->init = mediatek_dwmac_init;
plat->clks_config = mediatek_dwmac_clks_config;
if (priv_plat->variant->dwmac_fix_mac_speed)
plat->fix_mac_speed = priv_plat->variant->dwmac_fix_mac_speed;

plat->safety_feat_cfg = devm_kzalloc(&pdev->dev,
sizeof(*plat->safety_feat_cfg),

@@ -210,7 +210,10 @@ static int stmmac_enable(struct ptp_clock_info *ptp,
}
writel(acr_value, ptpaddr + PTP_ACR);
mutex_unlock(&priv->aux_ts_lock);
ret = 0;
/* wait for auxts fifo clear to finish */
ret = readl_poll_timeout(ptpaddr + PTP_ACR, acr_value,
!(acr_value & PTP_ACR_ATSFC),
10, 10000);
break;

default:
@@ -357,7 +357,7 @@ static const struct ipa_mem ipa_mem_local_data[] = {
static const struct ipa_mem_data ipa_mem_data = {
.local_count = ARRAY_SIZE(ipa_mem_local_data),
.local = ipa_mem_local_data,
.imem_addr = 0x146a9000,
.imem_addr = 0x146a8000,
.imem_size = 0x00002000,
.smem_id = 497,
.smem_size = 0x00009000,

@@ -1007,6 +1007,12 @@ static const struct usb_device_id products[] = {
USB_CDC_SUBCLASS_ETHERNET,
USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&wwan_info,
}, {
/* Cinterion PLS62-W modem by GEMALTO/THALES */
USB_DEVICE_AND_INTERFACE_INFO(0x1e2d, 0x005b, USB_CLASS_COMM,
USB_CDC_SUBCLASS_ETHERNET,
USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&wwan_info,
}, {
/* Cinterion PLS83/PLS63 modem by GEMALTO/THALES */
USB_DEVICE_AND_INTERFACE_INFO(0x1e2d, 0x0069, USB_CLASS_COMM,

@@ -9836,6 +9836,7 @@ static const struct usb_device_id rtl8152_table[] = {
REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07ab),
REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07c6),
REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0927),
REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0c5e),
REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101),
REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x304f),
REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3054),
@@ -79,7 +79,8 @@ void brcmf_of_probe(struct device *dev, enum brcmf_bus_type bus_type,
/* Apple ARM64 platforms have their own idea of board type, passed in
 * via the device tree. They also have an antenna SKU parameter
 */
if (!of_property_read_string(np, "brcm,board-type", &prop))
err = of_property_read_string(np, "brcm,board-type", &prop);
if (!err)
settings->board_type = prop;

if (!of_property_read_string(np, "apple,antenna-sku", &prop))

@@ -87,7 +88,7 @@ void brcmf_of_probe(struct device *dev, enum brcmf_bus_type bus_type,

/* Set board-type to the first string of the machine compatible prop */
root = of_find_node_by_path("/");
if (root && !settings->board_type) {
if (root && err) {
char *board_type;
const char *tmp;
@@ -153,10 +153,17 @@ static int pn533_usb_send_ack(struct pn533 *dev, gfp_t flags)
return usb_submit_urb(phy->ack_urb, flags);
}

struct pn533_out_arg {
struct pn533_usb_phy *phy;
struct completion done;
};

static int pn533_usb_send_frame(struct pn533 *dev,
struct sk_buff *out)
{
struct pn533_usb_phy *phy = dev->phy;
struct pn533_out_arg arg;
void *cntx;
int rc;

if (phy->priv == NULL)

@@ -168,10 +175,17 @@ static int pn533_usb_send_frame(struct pn533 *dev,
print_hex_dump_debug("PN533 TX: ", DUMP_PREFIX_NONE, 16, 1,
out->data, out->len, false);

init_completion(&arg.done);
cntx = phy->out_urb->context;
phy->out_urb->context = &arg;

rc = usb_submit_urb(phy->out_urb, GFP_KERNEL);
if (rc)
return rc;

wait_for_completion(&arg.done);
phy->out_urb->context = cntx;

if (dev->protocol_type == PN533_PROTO_REQ_RESP) {
/* request for response for sent packet directly */
rc = pn533_submit_urb_for_response(phy, GFP_KERNEL);

@@ -408,7 +422,31 @@ static int pn533_acr122_poweron_rdr(struct pn533_usb_phy *phy)
return arg.rc;
}

static void pn533_send_complete(struct urb *urb)
static void pn533_out_complete(struct urb *urb)
{
struct pn533_out_arg *arg = urb->context;
struct pn533_usb_phy *phy = arg->phy;

switch (urb->status) {
case 0:
break; /* success */
case -ECONNRESET:
case -ENOENT:
dev_dbg(&phy->udev->dev,
"The urb has been stopped (status %d)\n",
urb->status);
break;
case -ESHUTDOWN:
default:
nfc_err(&phy->udev->dev,
"Urb failure (status %d)\n",
urb->status);
}

complete(&arg->done);
}

static void pn533_ack_complete(struct urb *urb)
{
struct pn533_usb_phy *phy = urb->context;

@@ -496,10 +534,10 @@ static int pn533_usb_probe(struct usb_interface *interface,

usb_fill_bulk_urb(phy->out_urb, phy->udev,
usb_sndbulkpipe(phy->udev, out_endpoint),
NULL, 0, pn533_send_complete, phy);
NULL, 0, pn533_out_complete, phy);
usb_fill_bulk_urb(phy->ack_urb, phy->udev,
usb_sndbulkpipe(phy->udev, out_endpoint),
NULL, 0, pn533_send_complete, phy);
NULL, 0, pn533_ack_complete, phy);

switch (id->driver_info) {
case PN533_DEVICE_STD:
@@ -13,6 +13,8 @@
#include "internal.h"
#include "afs_cm.h"
#include "protocol_yfs.h"
#define RXRPC_TRACE_ONLY_DEFINE_ENUMS
#include <trace/events/rxrpc.h>

static int afs_deliver_cb_init_call_back_state(struct afs_call *);
static int afs_deliver_cb_init_call_back_state3(struct afs_call *);

@@ -191,7 +193,7 @@ static void afs_cm_destructor(struct afs_call *call)
 * Abort a service call from within an action function.
 */
static void afs_abort_service_call(struct afs_call *call, u32 abort_code, int error,
const char *why)
enum rxrpc_abort_reason why)
{
rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
abort_code, error, why);

@@ -469,7 +471,7 @@ static void SRXAFSCB_ProbeUuid(struct work_struct *work)
if (memcmp(r, &call->net->uuid, sizeof(call->net->uuid)) == 0)
afs_send_empty_reply(call);
else
afs_abort_service_call(call, 1, 1, "K-1");
afs_abort_service_call(call, 1, 1, afs_abort_probeuuid_negative);

afs_put_call(call);
_leave("");
@@ -13,6 +13,8 @@
#include "internal.h"
#include "afs_cm.h"
#include "protocol_yfs.h"
#define RXRPC_TRACE_ONLY_DEFINE_ENUMS
#include <trace/events/rxrpc.h>

struct workqueue_struct *afs_async_calls;

@@ -397,7 +399,8 @@ void afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call, gfp_t gfp)
error_do_abort:
if (ret != -ECONNABORTED) {
rxrpc_kernel_abort_call(call->net->socket, rxcall,
RX_USER_ABORT, ret, "KSD");
RX_USER_ABORT, ret,
afs_abort_send_data_error);
} else {
len = 0;
iov_iter_kvec(&msg.msg_iter, ITER_DEST, NULL, 0, 0);

@@ -527,7 +530,8 @@ static void afs_deliver_to_call(struct afs_call *call)
case -ENOTSUPP:
abort_code = RXGEN_OPCODE;
rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
abort_code, ret, "KIV");
abort_code, ret,
afs_abort_op_not_supported);
goto local_abort;
case -EIO:
pr_err("kAFS: Call %u in bad state %u\n",

@@ -542,12 +546,14 @@ static void afs_deliver_to_call(struct afs_call *call)
if (state != AFS_CALL_CL_AWAIT_REPLY)
abort_code = RXGEN_SS_UNMARSHAL;
rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
abort_code, ret, "KUM");
abort_code, ret,
afs_abort_unmarshal_error);
goto local_abort;
default:
abort_code = RX_CALL_DEAD;
rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
abort_code, ret, "KER");
abort_code, ret,
afs_abort_general_error);
goto local_abort;
}
}

@@ -619,7 +625,8 @@ long afs_wait_for_call_to_complete(struct afs_call *call,
/* Kill off the call if it's still live. */
_debug("call interrupted");
if (rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
RX_USER_ABORT, -EINTR, "KWI"))
RX_USER_ABORT, -EINTR,
afs_abort_interrupted))
afs_set_call_complete(call, -EINTR, 0);
}
}

@@ -836,7 +843,8 @@ void afs_send_empty_reply(struct afs_call *call)
case -ENOMEM:
_debug("oom");
rxrpc_kernel_abort_call(net->socket, call->rxcall,
RXGEN_SS_MARSHAL, -ENOMEM, "KOO");
RXGEN_SS_MARSHAL, -ENOMEM,
afs_abort_oom);
fallthrough;
default:
_leave(" [error]");

@@ -878,7 +886,8 @@ void afs_send_simple_reply(struct afs_call *call, const void *buf, size_t len)
if (n == -ENOMEM) {
_debug("oom");
rxrpc_kernel_abort_call(net->socket, call->rxcall,
RXGEN_SS_MARSHAL, -ENOMEM, "KOO");
RXGEN_SS_MARSHAL, -ENOMEM,
afs_abort_oom);
}
_leave(" [error]");
}

@@ -900,6 +909,7 @@ int afs_extract_data(struct afs_call *call, bool want_more)
ret = rxrpc_kernel_recv_data(net->socket, call->rxcall, iter,
&call->iov_len, want_more, &remote_abort,
&call->service_id);
trace_afs_receive_data(call, call->iter, want_more, ret);
if (ret == 0 || ret == -EAGAIN)
return ret;
@@ -315,7 +315,7 @@ struct mlx5_cmd {
struct mlx5_cmd_debug dbg;
struct cmd_msg_cache cache[MLX5_NUM_COMMAND_CACHES];
int checksum_disabled;
struct mlx5_cmd_stats *stats;
struct mlx5_cmd_stats stats[MLX5_CMD_OP_MAX];
};

struct mlx5_cmd_mailbox {

@@ -15,6 +15,7 @@ struct key;
struct sock;
struct socket;
struct rxrpc_call;
enum rxrpc_abort_reason;

enum rxrpc_interruptibility {
RXRPC_INTERRUPTIBLE,	/* Call is interruptible */

@@ -55,7 +56,7 @@ int rxrpc_kernel_send_data(struct socket *, struct rxrpc_call *,
int rxrpc_kernel_recv_data(struct socket *, struct rxrpc_call *,
struct iov_iter *, size_t *, bool, u32 *, u16 *);
bool rxrpc_kernel_abort_call(struct socket *, struct rxrpc_call *,
u32, int, const char *);
u32, int, enum rxrpc_abort_reason);
void rxrpc_kernel_end_call(struct socket *, struct rxrpc_call *);
void rxrpc_kernel_get_peer(struct socket *, struct rxrpc_call *,
struct sockaddr_rxrpc *);
@@ -16,7 +16,107 @@
/*
 * Declare tracing information enums and their string mappings for display.
 */
#define rxrpc_abort_reasons \
	/* AFS errors */ \
	EM(afs_abort_general_error,		"afs-error") \
	EM(afs_abort_interrupted,		"afs-intr") \
	EM(afs_abort_oom,			"afs-oom") \
	EM(afs_abort_op_not_supported,		"afs-op-notsupp") \
	EM(afs_abort_probeuuid_negative,	"afs-probeuuid-neg") \
	EM(afs_abort_send_data_error,		"afs-send-data") \
	EM(afs_abort_unmarshal_error,		"afs-unmarshal") \
	/* rxperf errors */ \
	EM(rxperf_abort_general_error,		"rxperf-error") \
	EM(rxperf_abort_oom,			"rxperf-oom") \
	EM(rxperf_abort_op_not_supported,	"rxperf-op-notsupp") \
	EM(rxperf_abort_unmarshal_error,	"rxperf-unmarshal") \
	/* RxKAD security errors */ \
	EM(rxkad_abort_1_short_check,		"rxkad1-short-check") \
	EM(rxkad_abort_1_short_data,		"rxkad1-short-data") \
	EM(rxkad_abort_1_short_encdata,		"rxkad1-short-encdata") \
	EM(rxkad_abort_1_short_header,		"rxkad1-short-hdr") \
	EM(rxkad_abort_2_short_check,		"rxkad2-short-check") \
	EM(rxkad_abort_2_short_data,		"rxkad2-short-data") \
	EM(rxkad_abort_2_short_header,		"rxkad2-short-hdr") \
	EM(rxkad_abort_2_short_len,		"rxkad2-short-len") \
	EM(rxkad_abort_bad_checksum,		"rxkad2-bad-cksum") \
	EM(rxkad_abort_chall_key_expired,	"rxkad-chall-key-exp") \
	EM(rxkad_abort_chall_level,		"rxkad-chall-level") \
	EM(rxkad_abort_chall_no_key,		"rxkad-chall-nokey") \
	EM(rxkad_abort_chall_short,		"rxkad-chall-short") \
	EM(rxkad_abort_chall_version,		"rxkad-chall-version") \
	EM(rxkad_abort_resp_bad_callid,		"rxkad-resp-bad-callid") \
	EM(rxkad_abort_resp_bad_checksum,	"rxkad-resp-bad-cksum") \
	EM(rxkad_abort_resp_bad_param,		"rxkad-resp-bad-param") \
	EM(rxkad_abort_resp_call_ctr,		"rxkad-resp-call-ctr") \
	EM(rxkad_abort_resp_call_state,		"rxkad-resp-call-state") \
	EM(rxkad_abort_resp_key_expired,	"rxkad-resp-key-exp") \
	EM(rxkad_abort_resp_key_rejected,	"rxkad-resp-key-rej") \
	EM(rxkad_abort_resp_level,		"rxkad-resp-level") \
	EM(rxkad_abort_resp_nokey,		"rxkad-resp-nokey") \
	EM(rxkad_abort_resp_ooseq,		"rxkad-resp-ooseq") \
	EM(rxkad_abort_resp_short,		"rxkad-resp-short") \
	EM(rxkad_abort_resp_short_tkt,		"rxkad-resp-short-tkt") \
	EM(rxkad_abort_resp_tkt_aname,		"rxkad-resp-tk-aname") \
	EM(rxkad_abort_resp_tkt_expired,	"rxkad-resp-tk-exp") \
	EM(rxkad_abort_resp_tkt_future,		"rxkad-resp-tk-future") \
	EM(rxkad_abort_resp_tkt_inst,		"rxkad-resp-tk-inst") \
	EM(rxkad_abort_resp_tkt_len,		"rxkad-resp-tk-len") \
	EM(rxkad_abort_resp_tkt_realm,		"rxkad-resp-tk-realm") \
	EM(rxkad_abort_resp_tkt_short,		"rxkad-resp-tk-short") \
	EM(rxkad_abort_resp_tkt_sinst,		"rxkad-resp-tk-sinst") \
	EM(rxkad_abort_resp_tkt_sname,		"rxkad-resp-tk-sname") \
	EM(rxkad_abort_resp_unknown_tkt,	"rxkad-resp-unknown-tkt") \
	EM(rxkad_abort_resp_version,		"rxkad-resp-version") \
	/* rxrpc errors */ \
	EM(rxrpc_abort_call_improper_term,	"call-improper-term") \
	EM(rxrpc_abort_call_reset,		"call-reset") \
	EM(rxrpc_abort_call_sendmsg,		"call-sendmsg") \
	EM(rxrpc_abort_call_sock_release,	"call-sock-rel") \
	EM(rxrpc_abort_call_sock_release_tba,	"call-sock-rel-tba") \
	EM(rxrpc_abort_call_timeout,		"call-timeout") \
	EM(rxrpc_abort_no_service_key,		"no-serv-key") \
	EM(rxrpc_abort_nomem,			"nomem") \
	EM(rxrpc_abort_service_not_offered,	"serv-not-offered") \
	EM(rxrpc_abort_shut_down,		"shut-down") \
	EM(rxrpc_abort_unsupported_security,	"unsup-sec") \
	EM(rxrpc_badmsg_bad_abort,		"bad-abort") \
	EM(rxrpc_badmsg_bad_jumbo,		"bad-jumbo") \
	EM(rxrpc_badmsg_short_ack,		"short-ack") \
	EM(rxrpc_badmsg_short_ack_info,		"short-ack-info") \
	EM(rxrpc_badmsg_short_hdr,		"short-hdr") \
	EM(rxrpc_badmsg_unsupported_packet,	"unsup-pkt") \
	EM(rxrpc_badmsg_zero_call,		"zero-call") \
	EM(rxrpc_badmsg_zero_seq,		"zero-seq") \
	EM(rxrpc_badmsg_zero_service,		"zero-service") \
	EM(rxrpc_eproto_ackr_outside_window,	"ackr-out-win") \
	EM(rxrpc_eproto_ackr_sack_overflow,	"ackr-sack-over") \
	EM(rxrpc_eproto_ackr_short_sack,	"ackr-short-sack") \
	EM(rxrpc_eproto_ackr_zero,		"ackr-zero") \
	EM(rxrpc_eproto_bad_upgrade,		"bad-upgrade") \
	EM(rxrpc_eproto_data_after_last,	"data-after-last") \
	EM(rxrpc_eproto_different_last,		"diff-last") \
	EM(rxrpc_eproto_early_reply,		"early-reply") \
	EM(rxrpc_eproto_improper_term,		"improper-term") \
	EM(rxrpc_eproto_no_client_call,		"no-cl-call") \
	EM(rxrpc_eproto_no_client_conn,		"no-cl-conn") \
	EM(rxrpc_eproto_no_service_call,	"no-sv-call") \
	EM(rxrpc_eproto_reupgrade,		"re-upgrade") \
	EM(rxrpc_eproto_rxnull_challenge,	"rxnull-chall") \
	EM(rxrpc_eproto_rxnull_response,	"rxnull-resp") \
	EM(rxrpc_eproto_tx_rot_last,		"tx-rot-last") \
	EM(rxrpc_eproto_unexpected_ack,		"unex-ack") \
	EM(rxrpc_eproto_unexpected_ackall,	"unex-ackall") \
	EM(rxrpc_eproto_unexpected_implicit_end, "unex-impl-end") \
	EM(rxrpc_eproto_unexpected_reply,	"unex-reply") \
	EM(rxrpc_eproto_wrong_security,		"wrong-sec") \
	EM(rxrpc_recvmsg_excess_data,		"recvmsg-excess") \
	EM(rxrpc_recvmsg_short_data,		"recvmsg-short") \
	E_(rxrpc_sendmsg_late_send,		"sendmsg-late")

#define rxrpc_call_poke_traces \
	EM(rxrpc_call_poke_abort,	"Abort") \
	EM(rxrpc_call_poke_complete,	"Compl") \
	EM(rxrpc_call_poke_error,	"Error") \
	EM(rxrpc_call_poke_idle,	"Idle") \
	EM(rxrpc_call_poke_start,	"Start") \

@@ -26,6 +126,7 @@
#define rxrpc_skb_traces \
	EM(rxrpc_skb_eaten_by_unshare,		"ETN unshare ") \
	EM(rxrpc_skb_eaten_by_unshare_nomem,	"ETN unshar-nm") \
	EM(rxrpc_skb_get_conn_secured,		"GET conn-secd") \
	EM(rxrpc_skb_get_conn_work,		"GET conn-work") \
	EM(rxrpc_skb_get_local_work,		"GET locl-work") \
	EM(rxrpc_skb_get_reject_work,		"GET rej-work ") \

@@ -35,6 +136,7 @@
	EM(rxrpc_skb_new_error_report,		"NEW error-rpt") \
	EM(rxrpc_skb_new_jumbo_subpacket,	"NEW jumbo-sub") \
	EM(rxrpc_skb_new_unshared,		"NEW unshared ") \
	EM(rxrpc_skb_put_conn_secured,		"PUT conn-secd") \
	EM(rxrpc_skb_put_conn_work,		"PUT conn-work") \
	EM(rxrpc_skb_put_error_report,		"PUT error-rep") \
	EM(rxrpc_skb_put_input,			"PUT input    ") \

@@ -76,7 +178,6 @@
#define rxrpc_peer_traces \
	EM(rxrpc_peer_free,			"FREE        ") \
	EM(rxrpc_peer_get_accept,		"GET accept  ") \
	EM(rxrpc_peer_get_activate_call,	"GET act-call") \
	EM(rxrpc_peer_get_bundle,		"GET bundle  ") \
	EM(rxrpc_peer_get_client_conn,		"GET cln-conn") \
	EM(rxrpc_peer_get_input,		"GET input   ") \

@@ -89,7 +190,6 @@
	EM(rxrpc_peer_put_bundle,		"PUT bundle  ") \
	EM(rxrpc_peer_put_call,			"PUT call    ") \
	EM(rxrpc_peer_put_conn,			"PUT conn    ") \
	EM(rxrpc_peer_put_discard_tmp,		"PUT disc-tmp") \
	EM(rxrpc_peer_put_input,		"PUT input   ") \
	EM(rxrpc_peer_put_input_error,		"PUT inpt-err") \
	E_(rxrpc_peer_put_keepalive,		"PUT keepaliv")

@@ -99,6 +199,7 @@
	EM(rxrpc_bundle_get_client_call,	"GET clt-call") \
	EM(rxrpc_bundle_get_client_conn,	"GET clt-conn") \
	EM(rxrpc_bundle_get_service_conn,	"GET svc-conn") \
	EM(rxrpc_bundle_put_call,		"PUT call    ") \
	EM(rxrpc_bundle_put_conn,		"PUT conn    ") \
	EM(rxrpc_bundle_put_discard,		"PUT discard ") \
	E_(rxrpc_bundle_new,			"NEW         ")

@@ -109,14 +210,14 @@
	EM(rxrpc_conn_get_call_input,		"GET inp-call") \
	EM(rxrpc_conn_get_conn_input,		"GET inp-conn") \
	EM(rxrpc_conn_get_idle,			"GET idle    ") \
	EM(rxrpc_conn_get_poke,			"GET poke    ") \
	EM(rxrpc_conn_get_poke_abort,		"GET pk-abort") \
	EM(rxrpc_conn_get_poke_timer,		"GET poke    ") \
	EM(rxrpc_conn_get_service_conn,		"GET svc-conn") \
	EM(rxrpc_conn_new_client,		"NEW client  ") \
	EM(rxrpc_conn_new_service,		"NEW service ") \
	EM(rxrpc_conn_put_call,			"PUT call    ") \
	EM(rxrpc_conn_put_call_input,		"PUT inp-call") \
	EM(rxrpc_conn_put_conn_input,		"PUT inp-conn") \
	EM(rxrpc_conn_put_discard,		"PUT discard ") \
	EM(rxrpc_conn_put_discard_idle,		"PUT disc-idl") \
	EM(rxrpc_conn_put_local_dead,		"PUT loc-dead") \
	EM(rxrpc_conn_put_noreuse,		"PUT noreuse ") \

@@ -124,10 +225,10 @@
	EM(rxrpc_conn_put_service_reaped,	"PUT svc-reap") \
	EM(rxrpc_conn_put_unbundle,		"PUT unbundle") \
	EM(rxrpc_conn_put_unidle,		"PUT unidle  ") \
	EM(rxrpc_conn_put_work,			"PUT work    ") \
	EM(rxrpc_conn_queue_challenge,		"QUE chall   ") \
	EM(rxrpc_conn_queue_retry_work,		"QUE retry-wk") \
	EM(rxrpc_conn_queue_rx_work,		"QUE rx-work ") \
	EM(rxrpc_conn_queue_timer,		"QUE timer   ") \
	EM(rxrpc_conn_see_new_service_conn,	"SEE new-svc ") \
	EM(rxrpc_conn_see_reap_service,		"SEE reap-svc") \
	E_(rxrpc_conn_see_work,			"SEE work    ")

@@ -138,16 +239,16 @@
	EM(rxrpc_client_chan_activate,		"ChActv") \
	EM(rxrpc_client_chan_disconnect,	"ChDisc") \
	EM(rxrpc_client_chan_pass,		"ChPass") \
	EM(rxrpc_client_chan_wait_failed,	"ChWtFl") \
	EM(rxrpc_client_cleanup,		"Clean ") \
	EM(rxrpc_client_discard,		"Discar") \
	EM(rxrpc_client_duplicate,		"Duplic") \
	EM(rxrpc_client_exposed,		"Expose") \
	EM(rxrpc_client_replace,		"Replac") \
	EM(rxrpc_client_queue_new_call,		"Q-Call") \
	EM(rxrpc_client_to_active,		"->Actv") \
	E_(rxrpc_client_to_idle,		"->Idle")

#define rxrpc_call_traces \
	EM(rxrpc_call_get_io_thread,		"GET iothread") \
	EM(rxrpc_call_get_input,		"GET input   ") \
	EM(rxrpc_call_get_kernel_service,	"GET krnl-srv") \
	EM(rxrpc_call_get_notify_socket,	"GET notify  ") \

@@ -160,6 +261,7 @@
	EM(rxrpc_call_new_prealloc_service,	"NEW prealloc") \
	EM(rxrpc_call_put_discard_prealloc,	"PUT disc-pre") \
	EM(rxrpc_call_put_discard_error,	"PUT disc-err") \
	EM(rxrpc_call_put_io_thread,		"PUT iothread") \
	EM(rxrpc_call_put_input,		"PUT input   ") \
	EM(rxrpc_call_put_kernel,		"PUT kernel  ") \
	EM(rxrpc_call_put_poke,			"PUT poke    ") \

@@ -169,10 +271,12 @@
	EM(rxrpc_call_put_sendmsg,		"PUT sendmsg ") \
	EM(rxrpc_call_put_unnotify,		"PUT unnotify") \
	EM(rxrpc_call_put_userid_exists,	"PUT u-exists") \
	EM(rxrpc_call_put_userid,		"PUT user-id ") \
	EM(rxrpc_call_see_accept,		"SEE accept  ") \
	EM(rxrpc_call_see_activate_client,	"SEE act-clnt") \
	EM(rxrpc_call_see_connect_failed,	"SEE con-fail") \
	EM(rxrpc_call_see_connected,		"SEE connect ") \
	EM(rxrpc_call_see_disconnected,		"SEE disconn ") \
	EM(rxrpc_call_see_distribute_error,	"SEE dist-err") \
	EM(rxrpc_call_see_input,		"SEE input   ") \
	EM(rxrpc_call_see_release,		"SEE release ") \

@@ -376,6 +480,7 @@
#define EM(a, b) a,
#define E_(a, b) a

enum rxrpc_abort_reason		{ rxrpc_abort_reasons } __mode(byte);
enum rxrpc_bundle_trace		{ rxrpc_bundle_traces } __mode(byte);
enum rxrpc_call_poke_trace	{ rxrpc_call_poke_traces } __mode(byte);
enum rxrpc_call_trace		{ rxrpc_call_traces } __mode(byte);

@@ -404,9 +509,13 @@ enum rxrpc_txqueue_trace { rxrpc_txqueue_traces } __mode(byte);
 */
#undef EM
#undef E_

#ifndef RXRPC_TRACE_ONLY_DEFINE_ENUMS

#define EM(a, b) TRACE_DEFINE_ENUM(a);
#define E_(a, b) TRACE_DEFINE_ENUM(a);

rxrpc_abort_reasons;
rxrpc_bundle_traces;
rxrpc_call_poke_traces;
rxrpc_call_traces;

@@ -657,14 +766,14 @@ TRACE_EVENT(rxrpc_rx_done,
);

TRACE_EVENT(rxrpc_abort,
	    TP_PROTO(unsigned int call_nr, const char *why, u32 cid, u32 call_id,
		     rxrpc_seq_t seq, int abort_code, int error),
	    TP_PROTO(unsigned int call_nr, enum rxrpc_abort_reason why,
		     u32 cid, u32 call_id, rxrpc_seq_t seq, int abort_code, int error),

	    TP_ARGS(call_nr, why, cid, call_id, seq, abort_code, error),

	    TP_STRUCT__entry(
		    __field(unsigned int,		call_nr		)
		    __array(char,			why, 4		)
		    __field(enum rxrpc_abort_reason,	why		)
		    __field(u32,			cid		)
		    __field(u32,			call_id		)
		    __field(rxrpc_seq_t,		seq		)

@@ -673,8 +782,8 @@ TRACE_EVENT(rxrpc_abort,
			     ),

	    TP_fast_assign(
		    memcpy(__entry->why, why, 4);
		    __entry->call_nr = call_nr;
		    __entry->why = why;
		    __entry->cid = cid;
		    __entry->call_id = call_id;
		    __entry->abort_code = abort_code;

@@ -685,7 +794,8 @@ TRACE_EVENT(rxrpc_abort,
	    TP_printk("c=%08x %08x:%08x s=%u a=%d e=%d %s",
		      __entry->call_nr,
		      __entry->cid, __entry->call_id, __entry->seq,
		      __entry->abort_code, __entry->error, __entry->why)
		      __entry->abort_code, __entry->error,
		      __print_symbolic(__entry->why, rxrpc_abort_reasons))
	    );

TRACE_EVENT(rxrpc_call_complete,

@@ -1521,30 +1631,6 @@ TRACE_EVENT(rxrpc_improper_term,
		      __entry->abort_code)
	    );

TRACE_EVENT(rxrpc_rx_eproto,
	    TP_PROTO(struct rxrpc_call *call, rxrpc_serial_t serial,
		     const char *why),

	    TP_ARGS(call, serial, why),

	    TP_STRUCT__entry(
		    __field(unsigned int,	call		)
		    __field(rxrpc_serial_t,	serial		)
		    __field(const char *,	why		)
			     ),

	    TP_fast_assign(
		    __entry->call = call ? call->debug_id : 0;
		    __entry->serial = serial;
		    __entry->why = why;
			   ),

	    TP_printk("c=%08x EPROTO %08x %s",
		      __entry->call,
		      __entry->serial,
		      __entry->why)
	    );

TRACE_EVENT(rxrpc_connect_call,
	    TP_PROTO(struct rxrpc_call *call),

@@ -1842,6 +1928,8 @@ TRACE_EVENT(rxrpc_call_poked,

#undef EM
#undef E_

#endif /* RXRPC_TRACE_ONLY_DEFINE_ENUMS */
#endif /* _TRACE_RXRPC_H */

/* This part must be outside protection */
|
||||
NAPI_GRO_CB(skb)->count = 1;
|
||||
if (unlikely(skb_is_gso(skb))) {
|
||||
NAPI_GRO_CB(skb)->count = skb_shinfo(skb)->gso_segs;
|
||||
/* Only support TCP at the moment. */
|
||||
if (!skb_is_gso_tcp(skb))
|
||||
/* Only support TCP and non DODGY users. */
|
||||
if (!skb_is_gso_tcp(skb) ||
|
||||
(skb_shinfo(skb)->gso_type & SKB_GSO_DODGY))
|
||||
NAPI_GRO_CB(skb)->flush = 1;
|
||||
}
|
||||
|
||||
|
@ -505,6 +505,7 @@ csum_copy_err:
|
||||
static int rawv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
|
||||
struct raw6_sock *rp)
|
||||
{
|
||||
struct ipv6_txoptions *opt;
|
||||
struct sk_buff *skb;
|
||||
int err = 0;
|
||||
int offset;
|
||||
@ -522,6 +523,9 @@ static int rawv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
|
||||
|
||||
offset = rp->offset;
|
||||
total_len = inet_sk(sk)->cork.base.length;
|
||||
opt = inet6_sk(sk)->cork.opt;
|
||||
total_len -= opt ? opt->opt_flen : 0;
|
||||
|
||||
if (offset >= total_len - 1) {
|
||||
err = -EINVAL;
|
||||
ip6_flush_pending_frames(sk);
|
||||
|
@ -10,6 +10,7 @@ rxrpc-y := \
|
||||
call_accept.o \
|
||||
call_event.o \
|
||||
call_object.o \
|
||||
call_state.o \
|
||||
conn_client.o \
|
||||
conn_event.o \
|
||||
conn_object.o \
|
||||
|
@ -155,10 +155,10 @@ static int rxrpc_bind(struct socket *sock, struct sockaddr *saddr, int len)
|
||||
|
||||
if (service_id) {
|
||||
write_lock(&local->services_lock);
|
||||
if (rcu_access_pointer(local->service))
|
||||
if (local->service)
|
||||
goto service_in_use;
|
||||
rx->local = local;
|
||||
rcu_assign_pointer(local->service, rx);
|
||||
local->service = rx;
|
||||
write_unlock(&local->services_lock);
|
||||
|
||||
rx->sk.sk_state = RXRPC_SERVER_BOUND;
|
||||
@ -328,7 +328,6 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
|
||||
mutex_unlock(&call->user_mutex);
|
||||
}
|
||||
|
||||
rxrpc_put_peer(cp.peer, rxrpc_peer_put_discard_tmp);
|
||||
_leave(" = %p", call);
|
||||
return call;
|
||||
}
|
||||
@@ -374,13 +373,17 @@ EXPORT_SYMBOL(rxrpc_kernel_end_call);
* @sock: The socket the call is on
* @call: The call to check
*
* Allow a kernel service to find out whether a call is still alive -
* ie. whether it has completed.
* Allow a kernel service to find out whether a call is still alive - whether
* it has completed successfully and all received data has been consumed.
*/
bool rxrpc_kernel_check_life(const struct socket *sock,
const struct rxrpc_call *call)
{
return call->state != RXRPC_CALL_COMPLETE;
if (!rxrpc_call_is_complete(call))
return true;
if (call->completion != RXRPC_CALL_SUCCEEDED)
return false;
return !skb_queue_empty(&call->recvmsg_queue);
}
EXPORT_SYMBOL(rxrpc_kernel_check_life);

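With this change a call counts as "alive" until it has both completed successfully and had all of its received data consumed; a failed call is dead immediately. A userspace sketch mirroring that tri-state decision follows; the struct is a toy stand-in, not struct rxrpc_call.

#include <stdbool.h>
#include <stdio.h>

enum call_state      { CALL_IN_PROGRESS, CALL_COMPLETE };
enum call_completion { CALL_SUCCEEDED, CALL_FAILED };

/* Simplified stand-in for struct rxrpc_call; not the kernel layout. */
struct call {
	enum call_state      state;
	enum call_completion completion;
	unsigned int         recvmsg_queue_len;	/* skbs not yet consumed */
};

/*
 * Mirror of the new rxrpc_kernel_check_life() logic: a call stays "alive"
 * while it is still running, and also while it finished successfully but
 * still has received data queued for the caller to consume.
 */
static bool call_is_alive(const struct call *call)
{
	if (call->state != CALL_COMPLETE)
		return true;
	if (call->completion != CALL_SUCCEEDED)
		return false;
	return call->recvmsg_queue_len != 0;
}

int main(void)
{
	struct call done_with_data = { CALL_COMPLETE, CALL_SUCCEEDED, 3 };
	struct call failed         = { CALL_COMPLETE, CALL_FAILED, 3 };

	printf("%d %d\n", call_is_alive(&done_with_data), call_is_alive(&failed));
	return 0;
}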
@ -872,9 +875,9 @@ static int rxrpc_release_sock(struct sock *sk)
|
||||
|
||||
sk->sk_state = RXRPC_CLOSE;
|
||||
|
||||
if (rx->local && rcu_access_pointer(rx->local->service) == rx) {
|
||||
if (rx->local && rx->local->service == rx) {
|
||||
write_lock(&rx->local->services_lock);
|
||||
rcu_assign_pointer(rx->local->service, NULL);
|
||||
rx->local->service = NULL;
|
||||
write_unlock(&rx->local->services_lock);
|
||||
}
|
||||
|
||||
@ -957,16 +960,9 @@ static const struct net_proto_family rxrpc_family_ops = {
|
||||
static int __init af_rxrpc_init(void)
|
||||
{
|
||||
int ret = -1;
|
||||
unsigned int tmp;
|
||||
|
||||
BUILD_BUG_ON(sizeof(struct rxrpc_skb_priv) > sizeof_field(struct sk_buff, cb));
|
||||
|
||||
get_random_bytes(&tmp, sizeof(tmp));
|
||||
tmp &= 0x3fffffff;
|
||||
if (tmp == 0)
|
||||
tmp = 1;
|
||||
idr_set_cursor(&rxrpc_client_conn_ids, tmp);
|
||||
|
||||
ret = -ENOMEM;
|
||||
rxrpc_call_jar = kmem_cache_create(
|
||||
"rxrpc_call_jar", sizeof(struct rxrpc_call), 0,
|
||||
@ -1062,7 +1058,6 @@ static void __exit af_rxrpc_exit(void)
|
||||
* are released.
|
||||
*/
|
||||
rcu_barrier();
|
||||
rxrpc_destroy_client_conn_ids();
|
||||
|
||||
destroy_workqueue(rxrpc_workqueue);
|
||||
rxrpc_exit_security();
|
||||
|
@ -38,6 +38,7 @@ struct rxrpc_txbuf;
|
||||
enum rxrpc_skb_mark {
|
||||
RXRPC_SKB_MARK_PACKET, /* Received packet */
|
||||
RXRPC_SKB_MARK_ERROR, /* Error notification */
|
||||
RXRPC_SKB_MARK_SERVICE_CONN_SECURED, /* Service connection response has been verified */
|
||||
RXRPC_SKB_MARK_REJECT_BUSY, /* Reject with BUSY */
|
||||
RXRPC_SKB_MARK_REJECT_ABORT, /* Reject with ABORT (code in skb->priority) */
|
||||
};
|
||||
@ -75,13 +76,7 @@ struct rxrpc_net {
|
||||
|
||||
bool live;
|
||||
|
||||
bool kill_all_client_conns;
|
||||
atomic_t nr_client_conns;
|
||||
spinlock_t client_conn_cache_lock; /* Lock for ->*_client_conns */
|
||||
struct mutex client_conn_discard_lock; /* Prevent multiple discarders */
|
||||
struct list_head idle_client_conns;
|
||||
struct work_struct client_conn_reaper;
|
||||
struct timer_list client_conn_reap_timer;
|
||||
|
||||
struct hlist_head local_endpoints;
|
||||
struct mutex local_mutex; /* Lock for ->local_endpoints */
|
||||
@ -202,6 +197,7 @@ struct rxrpc_host_header {
|
||||
* - max 48 bytes (struct sk_buff::cb)
|
||||
*/
|
||||
struct rxrpc_skb_priv {
|
||||
struct rxrpc_connection *conn; /* Connection referred to (poke packet) */
|
||||
u16 offset; /* Offset of data */
|
||||
u16 len; /* Length of data */
|
||||
u8 flags;
|
||||
@ -262,13 +258,11 @@ struct rxrpc_security {
|
||||
|
||||
/* respond to a challenge */
|
||||
int (*respond_to_challenge)(struct rxrpc_connection *,
|
||||
struct sk_buff *,
|
||||
u32 *);
|
||||
struct sk_buff *);
|
||||
|
||||
/* verify a response */
|
||||
int (*verify_response)(struct rxrpc_connection *,
|
||||
struct sk_buff *,
|
||||
u32 *);
|
||||
struct sk_buff *);
|
||||
|
||||
/* clear connection security */
|
||||
void (*clear)(struct rxrpc_connection *);
|
||||
@ -283,22 +277,34 @@ struct rxrpc_local {
|
||||
struct rcu_head rcu;
|
||||
atomic_t active_users; /* Number of users of the local endpoint */
|
||||
refcount_t ref; /* Number of references to the structure */
|
||||
struct rxrpc_net *rxnet; /* The network ns in which this resides */
|
||||
struct net *net; /* The network namespace */
|
||||
struct rxrpc_net *rxnet; /* Our bits in the network namespace */
|
||||
struct hlist_node link;
|
||||
struct socket *socket; /* my UDP socket */
|
||||
struct task_struct *io_thread;
|
||||
struct completion io_thread_ready; /* Indication that the I/O thread started */
|
||||
struct rxrpc_sock __rcu *service; /* Service(s) listening on this endpoint */
|
||||
struct rxrpc_sock *service; /* Service(s) listening on this endpoint */
|
||||
struct rw_semaphore defrag_sem; /* control re-enablement of IP DF bit */
|
||||
struct sk_buff_head rx_queue; /* Received packets */
|
||||
struct list_head conn_attend_q; /* Conns requiring immediate attention */
|
||||
struct list_head call_attend_q; /* Calls requiring immediate attention */
|
||||
|
||||
struct rb_root client_bundles; /* Client connection bundles by socket params */
|
||||
spinlock_t client_bundles_lock; /* Lock for client_bundles */
|
||||
bool kill_all_client_conns;
|
||||
struct list_head idle_client_conns;
|
||||
struct timer_list client_conn_reap_timer;
|
||||
unsigned long client_conn_flags;
|
||||
#define RXRPC_CLIENT_CONN_REAP_TIMER 0 /* The client conn reap timer expired */
|
||||
|
||||
spinlock_t lock; /* access lock */
|
||||
rwlock_t services_lock; /* lock for services list */
|
||||
int debug_id; /* debug ID for printks */
|
||||
bool dead;
|
||||
bool service_closed; /* Service socket closed */
|
||||
struct idr conn_ids; /* List of connection IDs */
|
||||
struct list_head new_client_calls; /* Newly created client calls need connection */
|
||||
spinlock_t client_call_lock; /* Lock for ->new_client_calls */
|
||||
struct sockaddr_rxrpc srx; /* local address */
|
||||
};
|
||||
|
||||
@ -356,7 +362,6 @@ struct rxrpc_conn_proto {
|
||||
|
||||
struct rxrpc_conn_parameters {
|
||||
struct rxrpc_local *local; /* Representation of local endpoint */
|
||||
struct rxrpc_peer *peer; /* Remote endpoint */
|
||||
struct key *key; /* Security details */
|
||||
bool exclusive; /* T if conn is exclusive */
|
||||
bool upgrade; /* T if service ID can be upgraded */
|
||||
@ -364,11 +369,22 @@ struct rxrpc_conn_parameters {
|
||||
u32 security_level; /* Security level selected */
|
||||
};
|
||||
|
||||
/*
|
||||
* Call completion condition (state == RXRPC_CALL_COMPLETE).
|
||||
*/
|
||||
enum rxrpc_call_completion {
|
||||
RXRPC_CALL_SUCCEEDED, /* - Normal termination */
|
||||
RXRPC_CALL_REMOTELY_ABORTED, /* - call aborted by peer */
|
||||
RXRPC_CALL_LOCALLY_ABORTED, /* - call aborted locally on error or close */
|
||||
RXRPC_CALL_LOCAL_ERROR, /* - call failed due to local error */
|
||||
RXRPC_CALL_NETWORK_ERROR, /* - call terminated by network error */
|
||||
NR__RXRPC_CALL_COMPLETIONS
|
||||
};
|
||||
|
||||
/*
|
||||
* Bits in the connection flags.
|
||||
*/
|
||||
enum rxrpc_conn_flag {
|
||||
RXRPC_CONN_HAS_IDR, /* Has a client conn ID assigned */
|
||||
RXRPC_CONN_IN_SERVICE_CONNS, /* Conn is in peer->service_conns */
|
||||
RXRPC_CONN_DONT_REUSE, /* Don't reuse this connection */
|
||||
RXRPC_CONN_PROBING_FOR_UPGRADE, /* Probing for service upgrade */
|
||||
@ -388,6 +404,7 @@ enum rxrpc_conn_flag {
|
||||
*/
|
||||
enum rxrpc_conn_event {
|
||||
RXRPC_CONN_EV_CHALLENGE, /* Send challenge packet */
|
||||
RXRPC_CONN_EV_ABORT_CALLS, /* Abort attached calls */
|
||||
};
|
||||
|
||||
/*
|
||||
@ -395,13 +412,13 @@ enum rxrpc_conn_event {
|
||||
*/
|
||||
enum rxrpc_conn_proto_state {
|
||||
RXRPC_CONN_UNUSED, /* Connection not yet attempted */
|
||||
RXRPC_CONN_CLIENT_UNSECURED, /* Client connection needs security init */
|
||||
RXRPC_CONN_CLIENT, /* Client connection */
|
||||
RXRPC_CONN_SERVICE_PREALLOC, /* Service connection preallocation */
|
||||
RXRPC_CONN_SERVICE_UNSECURED, /* Service unsecured connection */
|
||||
RXRPC_CONN_SERVICE_CHALLENGING, /* Service challenging for security */
|
||||
RXRPC_CONN_SERVICE, /* Service secured connection */
|
||||
RXRPC_CONN_REMOTELY_ABORTED, /* Conn aborted by peer */
|
||||
RXRPC_CONN_LOCALLY_ABORTED, /* Conn aborted locally */
|
||||
RXRPC_CONN_ABORTED, /* Conn aborted */
|
||||
RXRPC_CONN__NR_STATES
|
||||
};
|
||||
|
||||
@ -412,17 +429,16 @@ struct rxrpc_bundle {
|
||||
struct rxrpc_local *local; /* Representation of local endpoint */
|
||||
struct rxrpc_peer *peer; /* Remote endpoint */
|
||||
struct key *key; /* Security details */
|
||||
const struct rxrpc_security *security; /* applied security module */
|
||||
refcount_t ref;
|
||||
atomic_t active; /* Number of active users */
|
||||
unsigned int debug_id;
|
||||
u32 security_level; /* Security level selected */
|
||||
u16 service_id; /* Service ID for this connection */
|
||||
bool try_upgrade; /* True if the bundle is attempting upgrade */
|
||||
bool alloc_conn; /* True if someone's getting a conn */
|
||||
bool exclusive; /* T if conn is exclusive */
|
||||
bool upgrade; /* T if service ID can be upgraded */
|
||||
short alloc_error; /* Error from last conn allocation */
|
||||
spinlock_t channel_lock;
|
||||
unsigned short alloc_error; /* Error from last conn allocation */
|
||||
struct rb_node local_node; /* Node in local->client_conns */
|
||||
struct list_head waiting_calls; /* Calls waiting for channels */
|
||||
unsigned long avail_chans; /* Mask of available channels */
|
||||
@ -440,6 +456,7 @@ struct rxrpc_connection {
|
||||
struct rxrpc_peer *peer; /* Remote endpoint */
|
||||
struct rxrpc_net *rxnet; /* Network namespace to which call belongs */
|
||||
struct key *key; /* Security details */
|
||||
struct list_head attend_link; /* Link in local->conn_attend_q */
|
||||
|
||||
refcount_t ref;
|
||||
atomic_t active; /* Active count for service conns */
|
||||
@ -449,7 +466,7 @@ struct rxrpc_connection {
|
||||
unsigned char act_chans; /* Mask of active channels */
|
||||
struct rxrpc_channel {
|
||||
unsigned long final_ack_at; /* Time at which to issue final ACK */
|
||||
struct rxrpc_call __rcu *call; /* Active call */
|
||||
struct rxrpc_call *call; /* Active call */
|
||||
unsigned int call_debug_id; /* call->debug_id */
|
||||
u32 call_id; /* ID of current call */
|
||||
u32 call_counter; /* Call ID counter */
|
||||
@ -470,6 +487,7 @@ struct rxrpc_connection {
|
||||
struct list_head link; /* link in master connection list */
|
||||
struct sk_buff_head rx_queue; /* received conn-level packets */
|
||||
|
||||
struct mutex security_lock; /* Lock for security management */
|
||||
const struct rxrpc_security *security; /* applied security module */
|
||||
union {
|
||||
struct {
|
||||
@ -483,7 +501,8 @@ struct rxrpc_connection {
|
||||
unsigned long idle_timestamp; /* Time at which last became idle */
|
||||
spinlock_t state_lock; /* state-change lock */
|
||||
enum rxrpc_conn_proto_state state; /* current state of connection */
|
||||
u32 abort_code; /* Abort code of connection abort */
|
||||
enum rxrpc_call_completion completion; /* Completion condition */
|
||||
s32 abort_code; /* Abort code of connection abort */
|
||||
int debug_id; /* debug ID for printks */
|
||||
atomic_t serial; /* packet serial number counter */
|
||||
unsigned int hi_serial; /* highest serial number received */
|
||||
@ -527,7 +546,8 @@ enum rxrpc_call_flag {
|
||||
RXRPC_CALL_KERNEL, /* The call was made by the kernel */
|
||||
RXRPC_CALL_UPGRADE, /* Service upgrade was requested for the call */
|
||||
RXRPC_CALL_EXCLUSIVE, /* The call uses a once-only connection */
|
||||
RXRPC_CALL_RX_IS_IDLE, /* Reception is idle - send an ACK */
|
||||
RXRPC_CALL_RX_IS_IDLE, /* recvmsg() is idle - send an ACK */
|
||||
RXRPC_CALL_RECVMSG_READ_ALL, /* recvmsg() read all of the received data */
|
||||
};
|
||||
|
||||
/*
|
||||
@ -557,18 +577,6 @@ enum rxrpc_call_state {
|
||||
NR__RXRPC_CALL_STATES
|
||||
};
|
||||
|
||||
/*
|
||||
* Call completion condition (state == RXRPC_CALL_COMPLETE).
|
||||
*/
|
||||
enum rxrpc_call_completion {
|
||||
RXRPC_CALL_SUCCEEDED, /* - Normal termination */
|
||||
RXRPC_CALL_REMOTELY_ABORTED, /* - call aborted by peer */
|
||||
RXRPC_CALL_LOCALLY_ABORTED, /* - call aborted locally on error or close */
|
||||
RXRPC_CALL_LOCAL_ERROR, /* - call failed due to local error */
|
||||
RXRPC_CALL_NETWORK_ERROR, /* - call terminated by network error */
|
||||
NR__RXRPC_CALL_COMPLETIONS
|
||||
};
|
||||
|
||||
/*
|
||||
* Call Tx congestion management modes.
|
||||
*/
|
||||
@ -587,6 +595,7 @@ enum rxrpc_congest_mode {
|
||||
struct rxrpc_call {
|
||||
struct rcu_head rcu;
|
||||
struct rxrpc_connection *conn; /* connection carrying call */
|
||||
struct rxrpc_bundle *bundle; /* Connection bundle to use */
|
||||
struct rxrpc_peer *peer; /* Peer record for remote address */
|
||||
struct rxrpc_local *local; /* Representation of local endpoint */
|
||||
struct rxrpc_sock __rcu *socket; /* socket responsible */
|
||||
@ -609,7 +618,7 @@ struct rxrpc_call {
|
||||
struct work_struct destroyer; /* In-process-context destroyer */
|
||||
rxrpc_notify_rx_t notify_rx; /* kernel service Rx notification function */
|
||||
struct list_head link; /* link in master call list */
|
||||
struct list_head chan_wait_link; /* Link in conn->bundle->waiting_calls */
|
||||
struct list_head wait_link; /* Link in local->new_client_calls */
|
||||
struct hlist_node error_link; /* link in error distribution list */
|
||||
struct list_head accept_link; /* Link in rx->acceptq */
|
||||
struct list_head recvmsg_link; /* Link in rx->recvmsg_q */
|
||||
@ -623,10 +632,13 @@ struct rxrpc_call {
|
||||
unsigned long flags;
|
||||
unsigned long events;
|
||||
spinlock_t notify_lock; /* Kernel notification lock */
|
||||
rwlock_t state_lock; /* lock for state transition */
|
||||
u32 abort_code; /* Local/remote abort code */
|
||||
unsigned int send_abort_why; /* Why the abort [enum rxrpc_abort_reason] */
|
||||
s32 send_abort; /* Abort code to be sent */
|
||||
short send_abort_err; /* Error to be associated with the abort */
|
||||
rxrpc_seq_t send_abort_seq; /* DATA packet that incurred the abort (or 0) */
|
||||
s32 abort_code; /* Local/remote abort code */
|
||||
int error; /* Local error incurred */
|
||||
enum rxrpc_call_state state; /* current state of call */
|
||||
enum rxrpc_call_state _state; /* Current state of call (needs barrier) */
|
||||
enum rxrpc_call_completion completion; /* Call completion condition */
|
||||
refcount_t ref;
|
||||
u8 security_ix; /* Security type */
|
||||
@ -812,9 +824,11 @@ extern struct workqueue_struct *rxrpc_workqueue;
|
||||
*/
|
||||
int rxrpc_service_prealloc(struct rxrpc_sock *, gfp_t);
|
||||
void rxrpc_discard_prealloc(struct rxrpc_sock *);
|
||||
int rxrpc_new_incoming_call(struct rxrpc_local *, struct rxrpc_peer *,
|
||||
struct rxrpc_connection *, struct sockaddr_rxrpc *,
|
||||
struct sk_buff *);
|
||||
bool rxrpc_new_incoming_call(struct rxrpc_local *local,
|
||||
struct rxrpc_peer *peer,
|
||||
struct rxrpc_connection *conn,
|
||||
struct sockaddr_rxrpc *peer_srx,
|
||||
struct sk_buff *skb);
|
||||
void rxrpc_accept_incoming_calls(struct rxrpc_local *);
|
||||
int rxrpc_user_charge_accept(struct rxrpc_sock *, unsigned long);
|
||||
|
||||
@ -834,7 +848,7 @@ void rxrpc_reduce_call_timer(struct rxrpc_call *call,
|
||||
unsigned long now,
|
||||
enum rxrpc_timer_trace why);
|
||||
|
||||
void rxrpc_input_call_event(struct rxrpc_call *call, struct sk_buff *skb);
|
||||
bool rxrpc_input_call_event(struct rxrpc_call *call, struct sk_buff *skb);
|
||||
|
||||
/*
|
||||
* call_object.c
|
||||
@ -851,6 +865,7 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *,
|
||||
struct sockaddr_rxrpc *,
|
||||
struct rxrpc_call_params *, gfp_t,
|
||||
unsigned int);
|
||||
void rxrpc_start_call_timer(struct rxrpc_call *call);
|
||||
void rxrpc_incoming_call(struct rxrpc_sock *, struct rxrpc_call *,
|
||||
struct sk_buff *);
|
||||
void rxrpc_release_call(struct rxrpc_sock *, struct rxrpc_call *);
|
||||
@ -872,33 +887,89 @@ static inline bool rxrpc_is_client_call(const struct rxrpc_call *call)
|
||||
return !rxrpc_is_service_call(call);
|
||||
}
|
||||
|
||||
/*
* call_state.c
*/
bool rxrpc_set_call_completion(struct rxrpc_call *call,
enum rxrpc_call_completion compl,
u32 abort_code,
int error);
bool rxrpc_call_completed(struct rxrpc_call *call);
bool rxrpc_abort_call(struct rxrpc_call *call, rxrpc_seq_t seq,
u32 abort_code, int error, enum rxrpc_abort_reason why);
void rxrpc_prefail_call(struct rxrpc_call *call, enum rxrpc_call_completion compl,
int error);

static inline void rxrpc_set_call_state(struct rxrpc_call *call,
enum rxrpc_call_state state)
{
/* Order write of completion info before write of ->state. */
smp_store_release(&call->_state, state);
wake_up(&call->waitq);
}

static inline enum rxrpc_call_state __rxrpc_call_state(const struct rxrpc_call *call)
{
return call->_state; /* Only inside I/O thread */
}

static inline bool __rxrpc_call_is_complete(const struct rxrpc_call *call)
{
return __rxrpc_call_state(call) == RXRPC_CALL_COMPLETE;
}

static inline enum rxrpc_call_state rxrpc_call_state(const struct rxrpc_call *call)
{
/* Order read ->state before read of completion info. */
return smp_load_acquire(&call->_state);
}

static inline bool rxrpc_call_is_complete(const struct rxrpc_call *call)
{
return rxrpc_call_state(call) == RXRPC_CALL_COMPLETE;
}

static inline bool rxrpc_call_has_failed(const struct rxrpc_call *call)
{
return rxrpc_call_is_complete(call) && call->completion != RXRPC_CALL_SUCCEEDED;
}

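These helpers replace the old state_lock-protected ->state: the writer fills in the completion info first and then publishes RXRPC_CALL_COMPLETE with smp_store_release(), while lockless readers use smp_load_acquire(), so once a reader sees the complete state the completion info is guaranteed visible. A userspace sketch of the same release/acquire pairing using C11 atomics and pthreads; the struct and values are illustrative only.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

enum { CALL_IN_PROGRESS, CALL_COMPLETE };

/* Toy stand-in for struct rxrpc_call; not the kernel layout. */
struct call {
	int abort_code;		/* completion info, written before the state */
	int error;
	_Atomic int state;
};

static struct call call = { .state = CALL_IN_PROGRESS };

/* Writer side of rxrpc_set_call_state(): info first, then release the state. */
static void *completer(void *arg)
{
	call.abort_code = 17;
	call.error = -104;
	atomic_store_explicit(&call.state, CALL_COMPLETE, memory_order_release);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, completer, NULL);

	/* Reader side of rxrpc_call_state(): acquire pairs with the release above. */
	while (atomic_load_explicit(&call.state, memory_order_acquire) != CALL_COMPLETE)
		;
	printf("abort=%d error=%d\n", call.abort_code, call.error);
	return pthread_join(t, NULL);
}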
/*
|
||||
* conn_client.c
|
||||
*/
|
||||
extern unsigned int rxrpc_reap_client_connections;
|
||||
extern unsigned long rxrpc_conn_idle_client_expiry;
|
||||
extern unsigned long rxrpc_conn_idle_client_fast_expiry;
|
||||
extern struct idr rxrpc_client_conn_ids;
|
||||
|
||||
void rxrpc_destroy_client_conn_ids(void);
|
||||
void rxrpc_purge_client_connections(struct rxrpc_local *local);
|
||||
struct rxrpc_bundle *rxrpc_get_bundle(struct rxrpc_bundle *, enum rxrpc_bundle_trace);
|
||||
void rxrpc_put_bundle(struct rxrpc_bundle *, enum rxrpc_bundle_trace);
|
||||
int rxrpc_connect_call(struct rxrpc_sock *, struct rxrpc_call *,
|
||||
struct rxrpc_conn_parameters *, struct sockaddr_rxrpc *,
|
||||
gfp_t);
|
||||
int rxrpc_look_up_bundle(struct rxrpc_call *call, gfp_t gfp);
|
||||
void rxrpc_connect_client_calls(struct rxrpc_local *local);
|
||||
void rxrpc_expose_client_call(struct rxrpc_call *);
|
||||
void rxrpc_disconnect_client_call(struct rxrpc_bundle *, struct rxrpc_call *);
|
||||
void rxrpc_deactivate_bundle(struct rxrpc_bundle *bundle);
|
||||
void rxrpc_put_client_conn(struct rxrpc_connection *, enum rxrpc_conn_trace);
|
||||
void rxrpc_discard_expired_client_conns(struct work_struct *);
|
||||
void rxrpc_destroy_all_client_connections(struct rxrpc_net *);
|
||||
void rxrpc_discard_expired_client_conns(struct rxrpc_local *local);
|
||||
void rxrpc_clean_up_local_conns(struct rxrpc_local *);
|
||||
|
||||
/*
|
||||
* conn_event.c
|
||||
*/
|
||||
void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn, struct sk_buff *skb,
|
||||
unsigned int channel);
|
||||
int rxrpc_abort_conn(struct rxrpc_connection *conn, struct sk_buff *skb,
|
||||
s32 abort_code, int err, enum rxrpc_abort_reason why);
|
||||
void rxrpc_process_connection(struct work_struct *);
|
||||
void rxrpc_process_delayed_final_acks(struct rxrpc_connection *, bool);
|
||||
int rxrpc_input_conn_packet(struct rxrpc_connection *conn, struct sk_buff *skb);
|
||||
bool rxrpc_input_conn_packet(struct rxrpc_connection *conn, struct sk_buff *skb);
|
||||
void rxrpc_input_conn_event(struct rxrpc_connection *conn, struct sk_buff *skb);
|
||||
|
||||
static inline bool rxrpc_is_conn_aborted(const struct rxrpc_connection *conn)
|
||||
{
|
||||
/* Order reading the abort info after the state check. */
|
||||
return smp_load_acquire(&conn->state) == RXRPC_CONN_ABORTED;
|
||||
}
|
||||
|
||||
/*
|
||||
* conn_object.c
|
||||
@ -906,6 +977,7 @@ int rxrpc_input_conn_packet(struct rxrpc_connection *conn, struct sk_buff *skb);
|
||||
extern unsigned int rxrpc_connection_expiry;
|
||||
extern unsigned int rxrpc_closed_conn_expiry;
|
||||
|
||||
void rxrpc_poke_conn(struct rxrpc_connection *conn, enum rxrpc_conn_trace why);
|
||||
struct rxrpc_connection *rxrpc_alloc_connection(struct rxrpc_net *, gfp_t);
|
||||
struct rxrpc_connection *rxrpc_find_client_connection_rcu(struct rxrpc_local *,
|
||||
struct sockaddr_rxrpc *,
|
||||
@ -961,12 +1033,19 @@ void rxrpc_implicit_end_call(struct rxrpc_call *, struct sk_buff *);
|
||||
*/
|
||||
int rxrpc_encap_rcv(struct sock *, struct sk_buff *);
|
||||
void rxrpc_error_report(struct sock *);
|
||||
bool rxrpc_direct_abort(struct sk_buff *skb, enum rxrpc_abort_reason why,
|
||||
s32 abort_code, int err);
|
||||
int rxrpc_io_thread(void *data);
|
||||
static inline void rxrpc_wake_up_io_thread(struct rxrpc_local *local)
|
||||
{
|
||||
wake_up_process(local->io_thread);
|
||||
}
|
||||
|
||||
static inline bool rxrpc_protocol_error(struct sk_buff *skb, enum rxrpc_abort_reason why)
|
||||
{
|
||||
return rxrpc_direct_abort(skb, why, RX_PROTOCOL_ERROR, -EPROTO);
|
||||
}
|
||||
|
||||
/*
|
||||
* insecure.c
|
||||
*/
|
||||
@ -1048,6 +1127,7 @@ static inline struct rxrpc_net *rxrpc_net(struct net *net)
|
||||
int rxrpc_send_ack_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb);
|
||||
int rxrpc_send_abort_packet(struct rxrpc_call *);
|
||||
int rxrpc_send_data_packet(struct rxrpc_call *, struct rxrpc_txbuf *);
|
||||
void rxrpc_send_conn_abort(struct rxrpc_connection *conn);
|
||||
void rxrpc_reject_packet(struct rxrpc_local *local, struct sk_buff *skb);
|
||||
void rxrpc_send_keepalive(struct rxrpc_peer *);
|
||||
void rxrpc_transmit_one(struct rxrpc_call *call, struct rxrpc_txbuf *txb);
|
||||
@ -1063,12 +1143,11 @@ void rxrpc_peer_keepalive_worker(struct work_struct *);
|
||||
*/
|
||||
struct rxrpc_peer *rxrpc_lookup_peer_rcu(struct rxrpc_local *,
|
||||
const struct sockaddr_rxrpc *);
|
||||
struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_sock *, struct rxrpc_local *,
|
||||
struct sockaddr_rxrpc *, gfp_t);
|
||||
struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *local,
|
||||
struct sockaddr_rxrpc *srx, gfp_t gfp);
|
||||
struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *, gfp_t,
|
||||
enum rxrpc_peer_trace);
|
||||
void rxrpc_new_incoming_peer(struct rxrpc_sock *, struct rxrpc_local *,
|
||||
struct rxrpc_peer *);
|
||||
void rxrpc_new_incoming_peer(struct rxrpc_local *local, struct rxrpc_peer *peer);
|
||||
void rxrpc_destroy_all_peers(struct rxrpc_net *);
|
||||
struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *, enum rxrpc_peer_trace);
|
||||
struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *, enum rxrpc_peer_trace);
|
||||
@ -1086,33 +1165,22 @@ extern const struct seq_operations rxrpc_local_seq_ops;
|
||||
* recvmsg.c
|
||||
*/
|
||||
void rxrpc_notify_socket(struct rxrpc_call *);
|
||||
bool __rxrpc_set_call_completion(struct rxrpc_call *, enum rxrpc_call_completion, u32, int);
|
||||
bool rxrpc_set_call_completion(struct rxrpc_call *, enum rxrpc_call_completion, u32, int);
|
||||
bool __rxrpc_call_completed(struct rxrpc_call *);
|
||||
bool rxrpc_call_completed(struct rxrpc_call *);
|
||||
bool __rxrpc_abort_call(const char *, struct rxrpc_call *, rxrpc_seq_t, u32, int);
|
||||
bool rxrpc_abort_call(const char *, struct rxrpc_call *, rxrpc_seq_t, u32, int);
|
||||
int rxrpc_recvmsg(struct socket *, struct msghdr *, size_t, int);
|
||||
|
||||
/*
|
||||
* Abort a call due to a protocol error.
|
||||
*/
|
||||
static inline bool __rxrpc_abort_eproto(struct rxrpc_call *call,
|
||||
struct sk_buff *skb,
|
||||
const char *eproto_why,
|
||||
const char *why,
|
||||
u32 abort_code)
|
||||
static inline int rxrpc_abort_eproto(struct rxrpc_call *call,
|
||||
struct sk_buff *skb,
|
||||
s32 abort_code,
|
||||
enum rxrpc_abort_reason why)
|
||||
{
|
||||
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
|
||||
|
||||
trace_rxrpc_rx_eproto(call, sp->hdr.serial, eproto_why);
|
||||
return rxrpc_abort_call(why, call, sp->hdr.seq, abort_code, -EPROTO);
|
||||
rxrpc_abort_call(call, sp->hdr.seq, abort_code, -EPROTO, why);
|
||||
return -EPROTO;
|
||||
}
|
||||
|
||||
#define rxrpc_abort_eproto(call, skb, eproto_why, abort_why, abort_code) \
|
||||
__rxrpc_abort_eproto((call), (skb), tracepoint_string(eproto_why), \
|
||||
(abort_why), (abort_code))
|
||||
|
||||
/*
|
||||
* rtt.c
|
||||
*/
|
||||
@ -1144,6 +1212,8 @@ struct key *rxrpc_look_up_server_security(struct rxrpc_connection *,
|
||||
/*
|
||||
* sendmsg.c
|
||||
*/
|
||||
bool rxrpc_propose_abort(struct rxrpc_call *call, s32 abort_code, int error,
|
||||
enum rxrpc_abort_reason why);
|
||||
int rxrpc_do_sendmsg(struct rxrpc_sock *, struct msghdr *, size_t);
|
||||
|
||||
/*
|
||||
|
@ -99,7 +99,7 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
|
||||
if (!call)
|
||||
return -ENOMEM;
|
||||
call->flags |= (1 << RXRPC_CALL_IS_SERVICE);
|
||||
call->state = RXRPC_CALL_SERVER_PREALLOC;
|
||||
rxrpc_set_call_state(call, RXRPC_CALL_SERVER_PREALLOC);
|
||||
__set_bit(RXRPC_CALL_EV_INITIAL_PING, &call->events);
|
||||
|
||||
trace_rxrpc_call(call->debug_id, refcount_read(&call->ref),
|
||||
@ -280,7 +280,7 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
|
||||
(peer_tail + 1) &
|
||||
(RXRPC_BACKLOG_MAX - 1));
|
||||
|
||||
rxrpc_new_incoming_peer(rx, local, peer);
|
||||
rxrpc_new_incoming_peer(local, peer);
|
||||
}
|
||||
|
||||
/* Now allocate and set up the connection */
|
||||
@ -326,11 +326,11 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
|
||||
* If we want to report an error, we mark the skb with the packet type and
|
||||
* abort code and return false.
|
||||
*/
|
||||
int rxrpc_new_incoming_call(struct rxrpc_local *local,
|
||||
struct rxrpc_peer *peer,
|
||||
struct rxrpc_connection *conn,
|
||||
struct sockaddr_rxrpc *peer_srx,
|
||||
struct sk_buff *skb)
|
||||
bool rxrpc_new_incoming_call(struct rxrpc_local *local,
|
||||
struct rxrpc_peer *peer,
|
||||
struct rxrpc_connection *conn,
|
||||
struct sockaddr_rxrpc *peer_srx,
|
||||
struct sk_buff *skb)
|
||||
{
|
||||
const struct rxrpc_security *sec = NULL;
|
||||
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
|
||||
@ -339,18 +339,17 @@ int rxrpc_new_incoming_call(struct rxrpc_local *local,
|
||||
|
||||
_enter("");
|
||||
|
||||
/* Don't set up a call for anything other than the first DATA packet. */
|
||||
if (sp->hdr.seq != 1 ||
|
||||
sp->hdr.type != RXRPC_PACKET_TYPE_DATA)
|
||||
return 0; /* Just discard */
|
||||
/* Don't set up a call for anything other than a DATA packet. */
|
||||
if (sp->hdr.type != RXRPC_PACKET_TYPE_DATA)
|
||||
return rxrpc_protocol_error(skb, rxrpc_eproto_no_service_call);
|
||||
|
||||
rcu_read_lock();
|
||||
read_lock(&local->services_lock);
|
||||
|
||||
/* Weed out packets to services we're not offering. Packets that would
|
||||
* begin a call are explicitly rejected and the rest are just
|
||||
* discarded.
|
||||
*/
|
||||
rx = rcu_dereference(local->service);
|
||||
rx = local->service;
|
||||
if (!rx || (sp->hdr.serviceId != rx->srx.srx_service &&
|
||||
sp->hdr.serviceId != rx->second_service)
|
||||
) {
|
||||
@ -363,16 +362,14 @@ int rxrpc_new_incoming_call(struct rxrpc_local *local,
|
||||
if (!conn) {
|
||||
sec = rxrpc_get_incoming_security(rx, skb);
|
||||
if (!sec)
|
||||
goto reject;
|
||||
goto unsupported_security;
|
||||
}
|
||||
|
||||
spin_lock(&rx->incoming_lock);
|
||||
if (rx->sk.sk_state == RXRPC_SERVER_LISTEN_DISABLED ||
|
||||
rx->sk.sk_state == RXRPC_CLOSE) {
|
||||
trace_rxrpc_abort(0, "CLS", sp->hdr.cid, sp->hdr.callNumber,
|
||||
sp->hdr.seq, RX_INVALID_OPERATION, ESHUTDOWN);
|
||||
skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
|
||||
skb->priority = RX_INVALID_OPERATION;
|
||||
rxrpc_direct_abort(skb, rxrpc_abort_shut_down,
|
||||
RX_INVALID_OPERATION, -ESHUTDOWN);
|
||||
goto no_call;
|
||||
}
|
||||
|
||||
@ -402,7 +399,7 @@ int rxrpc_new_incoming_call(struct rxrpc_local *local,
|
||||
spin_unlock(&conn->state_lock);
|
||||
|
||||
spin_unlock(&rx->incoming_lock);
|
||||
rcu_read_unlock();
|
||||
read_unlock(&local->services_lock);
|
||||
|
||||
if (hlist_unhashed(&call->error_link)) {
|
||||
spin_lock(&call->peer->lock);
|
||||
@ -413,22 +410,24 @@ int rxrpc_new_incoming_call(struct rxrpc_local *local,
|
||||
_leave(" = %p{%d}", call, call->debug_id);
|
||||
rxrpc_input_call_event(call, skb);
|
||||
rxrpc_put_call(call, rxrpc_call_put_input);
|
||||
return 0;
|
||||
return true;
|
||||
|
||||
unsupported_service:
|
||||
trace_rxrpc_abort(0, "INV", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
|
||||
RX_INVALID_OPERATION, EOPNOTSUPP);
|
||||
skb->priority = RX_INVALID_OPERATION;
|
||||
goto reject;
|
||||
read_unlock(&local->services_lock);
|
||||
return rxrpc_direct_abort(skb, rxrpc_abort_service_not_offered,
|
||||
RX_INVALID_OPERATION, -EOPNOTSUPP);
|
||||
unsupported_security:
|
||||
read_unlock(&local->services_lock);
|
||||
return rxrpc_direct_abort(skb, rxrpc_abort_service_not_offered,
|
||||
RX_INVALID_OPERATION, -EKEYREJECTED);
|
||||
no_call:
|
||||
spin_unlock(&rx->incoming_lock);
|
||||
reject:
|
||||
rcu_read_unlock();
|
||||
read_unlock(&local->services_lock);
|
||||
_leave(" = f [%u]", skb->mark);
|
||||
return -EPROTO;
|
||||
return false;
|
||||
discard:
|
||||
rcu_read_unlock();
|
||||
return 0;
|
||||
read_unlock(&local->services_lock);
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -251,6 +251,41 @@ out:
|
||||
_leave("");
|
||||
}
|
||||
|
||||
/*
|
||||
* Start transmitting the reply to a service. This cancels the need to ACK the
|
||||
* request if we haven't yet done so.
|
||||
*/
|
||||
static void rxrpc_begin_service_reply(struct rxrpc_call *call)
|
||||
{
|
||||
unsigned long now = jiffies;
|
||||
|
||||
rxrpc_set_call_state(call, RXRPC_CALL_SERVER_SEND_REPLY);
|
||||
WRITE_ONCE(call->delay_ack_at, now + MAX_JIFFY_OFFSET);
|
||||
if (call->ackr_reason == RXRPC_ACK_DELAY)
|
||||
call->ackr_reason = 0;
|
||||
trace_rxrpc_timer(call, rxrpc_timer_init_for_send_reply, now);
|
||||
}
|
||||
|
||||
/*
|
||||
* Close the transmission phase. After this point there is no more data to be
|
||||
* transmitted in the call.
|
||||
*/
|
||||
static void rxrpc_close_tx_phase(struct rxrpc_call *call)
|
||||
{
|
||||
_debug("________awaiting reply/ACK__________");
|
||||
|
||||
switch (__rxrpc_call_state(call)) {
|
||||
case RXRPC_CALL_CLIENT_SEND_REQUEST:
|
||||
rxrpc_set_call_state(call, RXRPC_CALL_CLIENT_AWAIT_REPLY);
|
||||
break;
|
||||
case RXRPC_CALL_SERVER_SEND_REPLY:
|
||||
rxrpc_set_call_state(call, RXRPC_CALL_SERVER_AWAIT_ACK);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
static bool rxrpc_tx_window_has_space(struct rxrpc_call *call)
|
||||
{
|
||||
unsigned int winsize = min_t(unsigned int, call->tx_winsize,
|
||||
@ -270,9 +305,11 @@ static void rxrpc_decant_prepared_tx(struct rxrpc_call *call)
|
||||
{
|
||||
struct rxrpc_txbuf *txb;
|
||||
|
||||
if (rxrpc_is_client_call(call) &&
|
||||
!test_bit(RXRPC_CALL_EXPOSED, &call->flags))
|
||||
if (!test_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
|
||||
if (list_empty(&call->tx_sendmsg))
|
||||
return;
|
||||
rxrpc_expose_client_call(call);
|
||||
}
|
||||
|
||||
while ((txb = list_first_entry_or_null(&call->tx_sendmsg,
|
||||
struct rxrpc_txbuf, call_link))) {
|
||||
@ -283,6 +320,9 @@ static void rxrpc_decant_prepared_tx(struct rxrpc_call *call)
|
||||
call->tx_top = txb->seq;
|
||||
list_add_tail(&txb->call_link, &call->tx_buffer);
|
||||
|
||||
if (txb->wire.flags & RXRPC_LAST_PACKET)
|
||||
rxrpc_close_tx_phase(call);
|
||||
|
||||
rxrpc_transmit_one(call, txb);
|
||||
|
||||
if (!rxrpc_tx_window_has_space(call))
|
||||
@ -292,16 +332,15 @@ static void rxrpc_decant_prepared_tx(struct rxrpc_call *call)
|
||||
|
||||
static void rxrpc_transmit_some_data(struct rxrpc_call *call)
|
||||
{
|
||||
switch (call->state) {
|
||||
switch (__rxrpc_call_state(call)) {
|
||||
case RXRPC_CALL_SERVER_ACK_REQUEST:
|
||||
if (list_empty(&call->tx_sendmsg))
|
||||
return;
|
||||
rxrpc_begin_service_reply(call);
|
||||
fallthrough;
|
||||
|
||||
case RXRPC_CALL_SERVER_SEND_REPLY:
|
||||
case RXRPC_CALL_SERVER_AWAIT_ACK:
|
||||
case RXRPC_CALL_CLIENT_SEND_REQUEST:
|
||||
case RXRPC_CALL_CLIENT_AWAIT_REPLY:
|
||||
if (!rxrpc_tx_window_has_space(call))
|
||||
return;
|
||||
if (list_empty(&call->tx_sendmsg)) {
|
||||
@ -331,21 +370,31 @@ static void rxrpc_send_initial_ping(struct rxrpc_call *call)
|
||||
/*
|
||||
* Handle retransmission and deferred ACK/abort generation.
|
||||
*/
|
||||
void rxrpc_input_call_event(struct rxrpc_call *call, struct sk_buff *skb)
|
||||
bool rxrpc_input_call_event(struct rxrpc_call *call, struct sk_buff *skb)
|
||||
{
|
||||
unsigned long now, next, t;
|
||||
rxrpc_serial_t ackr_serial;
|
||||
bool resend = false, expired = false;
|
||||
s32 abort_code;
|
||||
|
||||
rxrpc_see_call(call, rxrpc_call_see_input);
|
||||
|
||||
//printk("\n--------------------\n");
|
||||
_enter("{%d,%s,%lx}",
|
||||
call->debug_id, rxrpc_call_states[call->state], call->events);
|
||||
call->debug_id, rxrpc_call_states[__rxrpc_call_state(call)],
|
||||
call->events);
|
||||
|
||||
if (call->state == RXRPC_CALL_COMPLETE)
|
||||
if (__rxrpc_call_is_complete(call))
|
||||
goto out;
|
||||
|
||||
/* Handle abort request locklessly, vs rxrpc_propose_abort(). */
|
||||
abort_code = smp_load_acquire(&call->send_abort);
|
||||
if (abort_code) {
|
||||
rxrpc_abort_call(call, 0, call->send_abort, call->send_abort_err,
|
||||
call->send_abort_why);
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (skb && skb->mark == RXRPC_SKB_MARK_ERROR)
|
||||
goto out;
|
||||
|
||||
@ -358,7 +407,7 @@ void rxrpc_input_call_event(struct rxrpc_call *call, struct sk_buff *skb)
|
||||
}
|
||||
|
||||
t = READ_ONCE(call->expect_req_by);
|
||||
if (call->state == RXRPC_CALL_SERVER_RECV_REQUEST &&
|
||||
if (__rxrpc_call_state(call) == RXRPC_CALL_SERVER_RECV_REQUEST &&
|
||||
time_after_eq(now, t)) {
|
||||
trace_rxrpc_timer(call, rxrpc_timer_exp_idle, now);
|
||||
expired = true;
|
||||
@ -429,11 +478,12 @@ void rxrpc_input_call_event(struct rxrpc_call *call, struct sk_buff *skb)
|
||||
if (test_bit(RXRPC_CALL_RX_HEARD, &call->flags) &&
|
||||
(int)call->conn->hi_serial - (int)call->rx_serial > 0) {
|
||||
trace_rxrpc_call_reset(call);
|
||||
rxrpc_abort_call("EXP", call, 0, RX_CALL_DEAD, -ECONNRESET);
|
||||
rxrpc_abort_call(call, 0, RX_CALL_DEAD, -ECONNRESET,
|
||||
rxrpc_abort_call_reset);
|
||||
} else {
|
||||
rxrpc_abort_call("EXP", call, 0, RX_CALL_TIMEOUT, -ETIME);
|
||||
rxrpc_abort_call(call, 0, RX_CALL_TIMEOUT, -ETIME,
|
||||
rxrpc_abort_call_timeout);
|
||||
}
|
||||
rxrpc_send_abort_packet(call);
|
||||
goto out;
|
||||
}
|
||||
|
||||
@ -441,7 +491,7 @@ void rxrpc_input_call_event(struct rxrpc_call *call, struct sk_buff *skb)
|
||||
rxrpc_send_ACK(call, RXRPC_ACK_PING, 0,
|
||||
rxrpc_propose_ack_ping_for_lost_ack);
|
||||
|
||||
if (resend && call->state != RXRPC_CALL_CLIENT_RECV_REPLY)
|
||||
if (resend && __rxrpc_call_state(call) != RXRPC_CALL_CLIENT_RECV_REPLY)
|
||||
rxrpc_resend(call, NULL);
|
||||
|
||||
if (test_and_clear_bit(RXRPC_CALL_RX_IS_IDLE, &call->flags))
|
||||
@ -453,7 +503,7 @@ void rxrpc_input_call_event(struct rxrpc_call *call, struct sk_buff *skb)
|
||||
rxrpc_propose_ack_input_data);
|
||||
|
||||
/* Make sure the timer is restarted */
|
||||
if (call->state != RXRPC_CALL_COMPLETE) {
|
||||
if (!__rxrpc_call_is_complete(call)) {
|
||||
next = call->expect_rx_by;
|
||||
|
||||
#define set(T) { t = READ_ONCE(T); if (time_before(t, next)) next = t; }
|
||||
@ -474,9 +524,15 @@ void rxrpc_input_call_event(struct rxrpc_call *call, struct sk_buff *skb)
|
||||
}
|
||||
|
||||
out:
|
||||
if (call->state == RXRPC_CALL_COMPLETE)
|
||||
if (__rxrpc_call_is_complete(call)) {
|
||||
del_timer_sync(&call->timer);
|
||||
if (!test_bit(RXRPC_CALL_DISCONNECTED, &call->flags))
|
||||
rxrpc_disconnect_call(call);
|
||||
if (call->security)
|
||||
call->security->free_call_crypto(call);
|
||||
}
|
||||
if (call->acks_hard_ack != call->tx_bottom)
|
||||
rxrpc_shrink_call_tx_buffer(call);
|
||||
_leave("");
|
||||
return true;
|
||||
}
|
||||
|
@ -50,7 +50,7 @@ void rxrpc_poke_call(struct rxrpc_call *call, enum rxrpc_call_poke_trace what)
|
||||
struct rxrpc_local *local = call->local;
|
||||
bool busy;
|
||||
|
||||
if (call->state < RXRPC_CALL_COMPLETE) {
|
||||
if (!test_bit(RXRPC_CALL_DISCONNECTED, &call->flags)) {
|
||||
spin_lock_bh(&local->lock);
|
||||
busy = !list_empty(&call->attend_link);
|
||||
trace_rxrpc_poke_call(call, busy, what);
|
||||
@ -69,7 +69,7 @@ static void rxrpc_call_timer_expired(struct timer_list *t)
|
||||
|
||||
_enter("%d", call->debug_id);
|
||||
|
||||
if (call->state < RXRPC_CALL_COMPLETE) {
|
||||
if (!__rxrpc_call_is_complete(call)) {
|
||||
trace_rxrpc_timer_expired(call, jiffies);
|
||||
rxrpc_poke_call(call, rxrpc_call_poke_timer);
|
||||
}
|
||||
@ -150,7 +150,7 @@ struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp,
|
||||
timer_setup(&call->timer, rxrpc_call_timer_expired, 0);
|
||||
INIT_WORK(&call->destroyer, rxrpc_destroy_call);
|
||||
INIT_LIST_HEAD(&call->link);
|
||||
INIT_LIST_HEAD(&call->chan_wait_link);
|
||||
INIT_LIST_HEAD(&call->wait_link);
|
||||
INIT_LIST_HEAD(&call->accept_link);
|
||||
INIT_LIST_HEAD(&call->recvmsg_link);
|
||||
INIT_LIST_HEAD(&call->sock_link);
|
||||
@ -162,7 +162,6 @@ struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp,
|
||||
init_waitqueue_head(&call->waitq);
|
||||
spin_lock_init(&call->notify_lock);
|
||||
spin_lock_init(&call->tx_lock);
|
||||
rwlock_init(&call->state_lock);
|
||||
refcount_set(&call->ref, 1);
|
||||
call->debug_id = debug_id;
|
||||
call->tx_total_len = -1;
|
||||
@ -211,7 +210,6 @@ static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx,
|
||||
now = ktime_get_real();
|
||||
call->acks_latest_ts = now;
|
||||
call->cong_tstamp = now;
|
||||
call->state = RXRPC_CALL_CLIENT_AWAIT_CONN;
|
||||
call->dest_srx = *srx;
|
||||
call->interruptibility = p->interruptibility;
|
||||
call->tx_total_len = p->tx_total_len;
|
||||
@ -227,11 +225,13 @@ static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx,
|
||||
|
||||
ret = rxrpc_init_client_call_security(call);
|
||||
if (ret < 0) {
|
||||
__rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR, 0, ret);
|
||||
rxrpc_prefail_call(call, RXRPC_CALL_LOCAL_ERROR, ret);
|
||||
rxrpc_put_call(call, rxrpc_call_put_discard_error);
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
|
||||
rxrpc_set_call_state(call, RXRPC_CALL_CLIENT_AWAIT_CONN);
|
||||
|
||||
trace_rxrpc_call(call->debug_id, refcount_read(&call->ref),
|
||||
p->user_call_ID, rxrpc_call_new_client);
|
||||
|
||||
@ -242,7 +242,7 @@ static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx,
|
||||
/*
|
||||
* Initiate the call ack/resend/expiry timer.
|
||||
*/
|
||||
static void rxrpc_start_call_timer(struct rxrpc_call *call)
|
||||
void rxrpc_start_call_timer(struct rxrpc_call *call)
|
||||
{
|
||||
unsigned long now = jiffies;
|
||||
unsigned long j = now + MAX_JIFFY_OFFSET;
|
||||
@ -286,6 +286,39 @@ static void rxrpc_put_call_slot(struct rxrpc_call *call)
|
||||
up(limiter);
|
||||
}
|
||||
|
||||
/*
|
||||
* Start the process of connecting a call. We obtain a peer and a connection
|
||||
* bundle, but the actual association of a call with a connection is offloaded
|
||||
* to the I/O thread to simplify locking.
|
||||
*/
|
||||
static int rxrpc_connect_call(struct rxrpc_call *call, gfp_t gfp)
|
||||
{
|
||||
struct rxrpc_local *local = call->local;
|
||||
int ret = 0;
|
||||
|
||||
_enter("{%d,%lx},", call->debug_id, call->user_call_ID);
|
||||
|
||||
call->peer = rxrpc_lookup_peer(local, &call->dest_srx, gfp);
|
||||
if (!call->peer)
|
||||
goto error;
|
||||
|
||||
ret = rxrpc_look_up_bundle(call, gfp);
|
||||
if (ret < 0)
|
||||
goto error;
|
||||
|
||||
trace_rxrpc_client(NULL, -1, rxrpc_client_queue_new_call);
|
||||
rxrpc_get_call(call, rxrpc_call_get_io_thread);
|
||||
spin_lock(&local->client_call_lock);
|
||||
list_add_tail(&call->wait_link, &local->new_client_calls);
|
||||
spin_unlock(&local->client_call_lock);
|
||||
rxrpc_wake_up_io_thread(local);
|
||||
return 0;
|
||||
|
||||
error:
|
||||
__set_bit(RXRPC_CALL_DISCONNECTED, &call->flags);
|
||||
return ret;
|
||||
}
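rxrpc_connect_call() above no longer binds the call to a connection itself: the sendmsg path only looks up the peer and bundle, queues the call on local->new_client_calls under client_call_lock, and wakes the I/O thread, which performs the actual channel assignment later. A tiny userspace sketch of that hand-off pattern (mutex-protected list plus a wakeup); all names here are illustrative, not rxrpc's.

#include <pthread.h>
#include <stdio.h>

/* One pending "call" queued for the I/O thread; simplified stand-in. */
struct pending_call {
	int id;
	struct pending_call *next;
};

static pthread_mutex_t client_call_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  io_thread_wake   = PTHREAD_COND_INITIALIZER;
static struct pending_call *new_client_calls;

/* Producer side: what a sendmsg-style caller would do. */
static void queue_new_call(struct pending_call *call)
{
	pthread_mutex_lock(&client_call_lock);
	call->next = new_client_calls;
	new_client_calls = call;
	pthread_mutex_unlock(&client_call_lock);
	pthread_cond_signal(&io_thread_wake);	/* "wake up the I/O thread" */
}

/* Consumer side: the I/O thread drains the list and connects each call. */
static void *io_thread(void *arg)
{
	pthread_mutex_lock(&client_call_lock);
	while (!new_client_calls)
		pthread_cond_wait(&io_thread_wake, &client_call_lock);
	while (new_client_calls) {
		struct pending_call *call = new_client_calls;

		new_client_calls = call->next;
		printf("connecting call %d\n", call->id);
	}
	pthread_mutex_unlock(&client_call_lock);
	return NULL;
}

int main(void)
{
	struct pending_call call = { .id = 1 };
	pthread_t t;

	pthread_create(&t, NULL, io_thread, NULL);
	queue_new_call(&call);
	return pthread_join(t, NULL);
}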
|
||||
|
||||
/*
|
||||
* Set up a call for the given parameters.
|
||||
* - Called with the socket lock held, which it must release.
|
||||
@ -365,14 +398,10 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
|
||||
/* Set up or get a connection record and set the protocol parameters,
|
||||
* including channel number and call ID.
|
||||
*/
|
||||
ret = rxrpc_connect_call(rx, call, cp, srx, gfp);
|
||||
ret = rxrpc_connect_call(call, gfp);
|
||||
if (ret < 0)
|
||||
goto error_attached_to_socket;
|
||||
|
||||
rxrpc_see_call(call, rxrpc_call_see_connected);
|
||||
|
||||
rxrpc_start_call_timer(call);
|
||||
|
||||
_leave(" = %p [new]", call);
|
||||
return call;
|
||||
|
||||
@ -384,27 +413,23 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
|
||||
error_dup_user_ID:
|
||||
write_unlock(&rx->call_lock);
|
||||
release_sock(&rx->sk);
|
||||
__rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
|
||||
RX_CALL_DEAD, -EEXIST);
|
||||
rxrpc_prefail_call(call, RXRPC_CALL_LOCAL_ERROR, -EEXIST);
|
||||
trace_rxrpc_call(call->debug_id, refcount_read(&call->ref), 0,
|
||||
rxrpc_call_see_userid_exists);
|
||||
rxrpc_release_call(rx, call);
|
||||
mutex_unlock(&call->user_mutex);
|
||||
rxrpc_put_call(call, rxrpc_call_put_userid_exists);
|
||||
_leave(" = -EEXIST");
|
||||
return ERR_PTR(-EEXIST);
|
||||
|
||||
/* We got an error, but the call is attached to the socket and is in
|
||||
* need of release. However, we might now race with recvmsg() when
|
||||
* completing the call queues it. Return 0 from sys_sendmsg() and
|
||||
* need of release. However, we might now race with recvmsg() when it
|
||||
* completion notifies the socket. Return 0 from sys_sendmsg() and
|
||||
* leave the error to recvmsg() to deal with.
|
||||
*/
|
||||
error_attached_to_socket:
|
||||
trace_rxrpc_call(call->debug_id, refcount_read(&call->ref), ret,
|
||||
rxrpc_call_see_connect_failed);
|
||||
set_bit(RXRPC_CALL_DISCONNECTED, &call->flags);
|
||||
__rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
|
||||
RX_CALL_DEAD, ret);
|
||||
rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR, 0, ret);
|
||||
_leave(" = c=%08x [err]", call->debug_id);
|
||||
return call;
|
||||
}
|
||||
@ -427,32 +452,32 @@ void rxrpc_incoming_call(struct rxrpc_sock *rx,
|
||||
call->call_id = sp->hdr.callNumber;
|
||||
call->dest_srx.srx_service = sp->hdr.serviceId;
|
||||
call->cid = sp->hdr.cid;
|
||||
call->state = RXRPC_CALL_SERVER_SECURING;
|
||||
call->cong_tstamp = skb->tstamp;
|
||||
|
||||
__set_bit(RXRPC_CALL_EXPOSED, &call->flags);
|
||||
rxrpc_set_call_state(call, RXRPC_CALL_SERVER_SECURING);
|
||||
|
||||
spin_lock(&conn->state_lock);
|
||||
|
||||
switch (conn->state) {
|
||||
case RXRPC_CONN_SERVICE_UNSECURED:
|
||||
case RXRPC_CONN_SERVICE_CHALLENGING:
|
||||
call->state = RXRPC_CALL_SERVER_SECURING;
|
||||
rxrpc_set_call_state(call, RXRPC_CALL_SERVER_SECURING);
|
||||
break;
|
||||
case RXRPC_CONN_SERVICE:
|
||||
call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
|
||||
rxrpc_set_call_state(call, RXRPC_CALL_SERVER_RECV_REQUEST);
|
||||
break;
|
||||
|
||||
case RXRPC_CONN_REMOTELY_ABORTED:
|
||||
__rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
|
||||
conn->abort_code, conn->error);
|
||||
break;
|
||||
case RXRPC_CONN_LOCALLY_ABORTED:
|
||||
__rxrpc_abort_call("CON", call, 1,
|
||||
conn->abort_code, conn->error);
|
||||
case RXRPC_CONN_ABORTED:
|
||||
rxrpc_set_call_completion(call, conn->completion,
|
||||
conn->abort_code, conn->error);
|
||||
break;
|
||||
default:
|
||||
BUG();
|
||||
}
|
||||
|
||||
rxrpc_get_call(call, rxrpc_call_get_io_thread);
|
||||
|
||||
/* Set the channel for this call. We don't get channel_lock as we're
|
||||
* only defending against the data_ready handler (which we're called
|
||||
* from) and the RESPONSE packet parser (which is only really
|
||||
@ -462,7 +487,7 @@ void rxrpc_incoming_call(struct rxrpc_sock *rx,
|
||||
chan = sp->hdr.cid & RXRPC_CHANNELMASK;
|
||||
conn->channels[chan].call_counter = call->call_id;
|
||||
conn->channels[chan].call_id = call->call_id;
|
||||
rcu_assign_pointer(conn->channels[chan].call, call);
|
||||
conn->channels[chan].call = call;
|
||||
spin_unlock(&conn->state_lock);
|
||||
|
||||
spin_lock(&conn->peer->lock);
|
||||
@ -522,20 +547,17 @@ static void rxrpc_cleanup_ring(struct rxrpc_call *call)
|
||||
void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
|
||||
{
|
||||
struct rxrpc_connection *conn = call->conn;
|
||||
bool put = false;
|
||||
bool put = false, putu = false;
|
||||
|
||||
_enter("{%d,%d}", call->debug_id, refcount_read(&call->ref));
|
||||
|
||||
trace_rxrpc_call(call->debug_id, refcount_read(&call->ref),
|
||||
call->flags, rxrpc_call_see_release);
|
||||
|
||||
ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
|
||||
|
||||
if (test_and_set_bit(RXRPC_CALL_RELEASED, &call->flags))
|
||||
BUG();
|
||||
|
||||
rxrpc_put_call_slot(call);
|
||||
del_timer_sync(&call->timer);
|
||||
|
||||
/* Make sure we don't get any more notifications */
|
||||
write_lock(&rx->recvmsg_lock);
|
||||
@ -560,7 +582,7 @@ void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
|
||||
if (test_and_clear_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
|
||||
rb_erase(&call->sock_node, &rx->calls);
|
||||
memset(&call->sock_node, 0xdd, sizeof(call->sock_node));
|
||||
rxrpc_put_call(call, rxrpc_call_put_userid_exists);
|
||||
putu = true;
|
||||
}
|
||||
|
||||
list_del(&call->sock_link);
|
||||
@ -568,10 +590,9 @@ void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
|
||||
|
||||
_debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, conn);
|
||||
|
||||
if (conn && !test_bit(RXRPC_CALL_DISCONNECTED, &call->flags))
|
||||
rxrpc_disconnect_call(call);
|
||||
if (call->security)
|
||||
call->security->free_call_crypto(call);
|
||||
if (putu)
|
||||
rxrpc_put_call(call, rxrpc_call_put_userid);
|
||||
|
||||
_leave("");
|
||||
}
|
||||
|
||||
@ -588,7 +609,8 @@ void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx)
|
||||
call = list_entry(rx->to_be_accepted.next,
|
||||
struct rxrpc_call, accept_link);
|
||||
list_del(&call->accept_link);
|
||||
rxrpc_abort_call("SKR", call, 0, RX_CALL_DEAD, -ECONNRESET);
|
||||
rxrpc_propose_abort(call, RX_CALL_DEAD, -ECONNRESET,
|
||||
rxrpc_abort_call_sock_release_tba);
|
||||
rxrpc_put_call(call, rxrpc_call_put_release_sock_tba);
|
||||
}
|
||||
|
||||
@ -596,8 +618,8 @@ void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx)
|
||||
call = list_entry(rx->sock_calls.next,
|
||||
struct rxrpc_call, sock_link);
|
||||
rxrpc_get_call(call, rxrpc_call_get_release_sock);
|
||||
rxrpc_abort_call("SKT", call, 0, RX_CALL_DEAD, -ECONNRESET);
|
||||
rxrpc_send_abort_packet(call);
|
||||
rxrpc_propose_abort(call, RX_CALL_DEAD, -ECONNRESET,
|
||||
rxrpc_abort_call_sock_release);
|
||||
rxrpc_release_call(rx, call);
|
||||
rxrpc_put_call(call, rxrpc_call_put_release_sock);
|
||||
}
|
||||
@ -620,7 +642,7 @@ void rxrpc_put_call(struct rxrpc_call *call, enum rxrpc_call_trace why)
|
||||
dead = __refcount_dec_and_test(&call->ref, &r);
|
||||
trace_rxrpc_call(debug_id, r - 1, 0, why);
|
||||
if (dead) {
|
||||
ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
|
||||
ASSERTCMP(__rxrpc_call_state(call), ==, RXRPC_CALL_COMPLETE);
|
||||
|
||||
if (!list_empty(&call->link)) {
|
||||
spin_lock(&rxnet->call_lock);
|
||||
@ -669,6 +691,8 @@ static void rxrpc_destroy_call(struct work_struct *work)
|
||||
|
||||
rxrpc_put_txbuf(call->tx_pending, rxrpc_txbuf_put_cleaned);
|
||||
rxrpc_put_connection(call->conn, rxrpc_conn_put_call);
|
||||
rxrpc_deactivate_bundle(call->bundle);
|
||||
rxrpc_put_bundle(call->bundle, rxrpc_bundle_put_call);
|
||||
rxrpc_put_peer(call->peer, rxrpc_peer_put_call);
|
||||
rxrpc_put_local(call->local, rxrpc_local_put_call);
|
||||
call_rcu(&call->rcu, rxrpc_rcu_free_call);
|
||||
@ -681,7 +705,7 @@ void rxrpc_cleanup_call(struct rxrpc_call *call)
|
||||
{
|
||||
memset(&call->sock_node, 0xcd, sizeof(call->sock_node));
|
||||
|
||||
ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
|
||||
ASSERTCMP(__rxrpc_call_state(call), ==, RXRPC_CALL_COMPLETE);
|
||||
ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags));
|
||||
|
||||
del_timer(&call->timer);
|
||||
@ -719,7 +743,7 @@ void rxrpc_destroy_all_calls(struct rxrpc_net *rxnet)
|
||||
|
||||
pr_err("Call %p still in use (%d,%s,%lx,%lx)!\n",
|
||||
call, refcount_read(&call->ref),
|
||||
rxrpc_call_states[call->state],
|
||||
rxrpc_call_states[__rxrpc_call_state(call)],
|
||||
call->flags, call->events);
|
||||
|
||||
spin_unlock(&rxnet->call_lock);
|
||||
|
net/rxrpc/call_state.c (new file, 69 lines)
@@ -0,0 +1,69 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-or-later
|
||||
/* Call state changing functions.
|
||||
*
|
||||
* Copyright (C) 2022 Red Hat, Inc. All Rights Reserved.
|
||||
* Written by David Howells (dhowells@redhat.com)
|
||||
*/
|
||||
|
||||
#include "ar-internal.h"
|
||||
|
||||
/*
|
||||
* Transition a call to the complete state.
|
||||
*/
|
||||
bool rxrpc_set_call_completion(struct rxrpc_call *call,
|
||||
enum rxrpc_call_completion compl,
|
||||
u32 abort_code,
|
||||
int error)
|
||||
{
|
||||
if (__rxrpc_call_state(call) == RXRPC_CALL_COMPLETE)
|
||||
return false;
|
||||
|
||||
call->abort_code = abort_code;
|
||||
call->error = error;
|
||||
call->completion = compl;
|
||||
/* Allow reader of completion state to operate locklessly */
|
||||
rxrpc_set_call_state(call, RXRPC_CALL_COMPLETE);
|
||||
trace_rxrpc_call_complete(call);
|
||||
wake_up(&call->waitq);
|
||||
rxrpc_notify_socket(call);
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
* Record that a call successfully completed.
|
||||
*/
|
||||
bool rxrpc_call_completed(struct rxrpc_call *call)
|
||||
{
|
||||
return rxrpc_set_call_completion(call, RXRPC_CALL_SUCCEEDED, 0, 0);
|
||||
}
|
||||
|
||||
/*
|
||||
* Record that a call is locally aborted.
|
||||
*/
|
||||
bool rxrpc_abort_call(struct rxrpc_call *call, rxrpc_seq_t seq,
|
||||
u32 abort_code, int error, enum rxrpc_abort_reason why)
|
||||
{
|
||||
trace_rxrpc_abort(call->debug_id, why, call->cid, call->call_id, seq,
|
||||
abort_code, error);
|
||||
if (!rxrpc_set_call_completion(call, RXRPC_CALL_LOCALLY_ABORTED,
|
||||
abort_code, error))
|
||||
return false;
|
||||
if (test_bit(RXRPC_CALL_EXPOSED, &call->flags))
|
||||
rxrpc_send_abort_packet(call);
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
* Record that a call errored out before even getting off the ground, thereby
|
||||
* setting the state to allow it to be destroyed.
|
||||
*/
|
||||
void rxrpc_prefail_call(struct rxrpc_call *call, enum rxrpc_call_completion compl,
|
||||
int error)
|
||||
{
|
||||
call->abort_code = RX_CALL_DEAD;
|
||||
call->error = error;
|
||||
call->completion = compl;
|
||||
call->_state = RXRPC_CALL_COMPLETE;
|
||||
trace_rxrpc_call_complete(call);
|
||||
WARN_ON_ONCE(__test_and_set_bit(RXRPC_CALL_RELEASED, &call->flags));
|
||||
}
|
File diff suppressed because it is too large
@@ -16,12 +16,66 @@
#include <net/ip.h>
#include "ar-internal.h"

/*
* Set the completion state on an aborted connection.
*/
static bool rxrpc_set_conn_aborted(struct rxrpc_connection *conn, struct sk_buff *skb,
s32 abort_code, int err,
enum rxrpc_call_completion compl)
{
bool aborted = false;

if (conn->state != RXRPC_CONN_ABORTED) {
spin_lock(&conn->state_lock);
if (conn->state != RXRPC_CONN_ABORTED) {
conn->abort_code = abort_code;
conn->error = err;
conn->completion = compl;
/* Order the abort info before the state change. */
smp_store_release(&conn->state, RXRPC_CONN_ABORTED);
set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
set_bit(RXRPC_CONN_EV_ABORT_CALLS, &conn->events);
aborted = true;
}
spin_unlock(&conn->state_lock);
}

return aborted;
}

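Only the first aborter gets to record the connection's completion info: the state is checked locklessly, re-checked under state_lock, the info is written, and then RXRPC_CONN_ABORTED is published with smp_store_release() so that rxrpc_is_conn_aborted()'s smp_load_acquire() readers see the code and error once they see the state. A compact userspace sketch of this check/lock/re-check/publish pattern, with a pthread mutex standing in for the spinlock and illustrative values throughout.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum { CONN_ACTIVE, CONN_ABORTED };

/* Toy stand-in for struct rxrpc_connection; not the kernel layout. */
struct conn {
	pthread_mutex_t state_lock;
	_Atomic int state;
	int abort_code;
	int error;
};

/* First caller wins; later callers find the state already published. */
static bool conn_set_aborted(struct conn *conn, int abort_code, int err)
{
	bool aborted = false;

	if (atomic_load_explicit(&conn->state, memory_order_relaxed) != CONN_ABORTED) {
		pthread_mutex_lock(&conn->state_lock);
		if (atomic_load_explicit(&conn->state, memory_order_relaxed) != CONN_ABORTED) {
			conn->abort_code = abort_code;
			conn->error = err;
			/* Order the abort info before the state change. */
			atomic_store_explicit(&conn->state, CONN_ABORTED,
					      memory_order_release);
			aborted = true;
		}
		pthread_mutex_unlock(&conn->state_lock);
	}
	return aborted;
}

int main(void)
{
	struct conn conn = { .state_lock = PTHREAD_MUTEX_INITIALIZER,
			     .state = CONN_ACTIVE };

	printf("%d %d\n", conn_set_aborted(&conn, 6, -32),
	       conn_set_aborted(&conn, 7, -5));
	return 0;
}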
/*
|
||||
* Mark a socket buffer to indicate that the connection it's on should be aborted.
|
||||
*/
|
||||
int rxrpc_abort_conn(struct rxrpc_connection *conn, struct sk_buff *skb,
|
||||
s32 abort_code, int err, enum rxrpc_abort_reason why)
|
||||
{
|
||||
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
|
||||
|
||||
if (rxrpc_set_conn_aborted(conn, skb, abort_code, err,
|
||||
RXRPC_CALL_LOCALLY_ABORTED)) {
|
||||
trace_rxrpc_abort(0, why, sp->hdr.cid, sp->hdr.callNumber,
|
||||
sp->hdr.seq, abort_code, err);
|
||||
rxrpc_poke_conn(conn, rxrpc_conn_get_poke_abort);
|
||||
}
|
||||
return -EPROTO;
|
||||
}
|
||||
|
||||
/*
|
||||
* Mark a connection as being remotely aborted.
|
||||
*/
|
||||
static bool rxrpc_input_conn_abort(struct rxrpc_connection *conn,
|
||||
struct sk_buff *skb)
|
||||
{
|
||||
return rxrpc_set_conn_aborted(conn, skb, skb->priority, -ECONNABORTED,
|
||||
RXRPC_CALL_REMOTELY_ABORTED);
|
||||
}
|
||||
|
||||
/*
|
||||
* Retransmit terminal ACK or ABORT of the previous call.
|
||||
*/
|
||||
static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
|
||||
struct sk_buff *skb,
|
||||
unsigned int channel)
|
||||
void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
|
||||
struct sk_buff *skb,
|
||||
unsigned int channel)
|
||||
{
|
||||
struct rxrpc_skb_priv *sp = skb ? rxrpc_skb(skb) : NULL;
|
||||
struct rxrpc_channel *chan;
|
||||
@ -46,9 +100,7 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
|
||||
/* If the last call got moved on whilst we were waiting to run, just
|
||||
* ignore this packet.
|
||||
*/
|
||||
call_id = READ_ONCE(chan->last_call);
|
||||
/* Sync with __rxrpc_disconnect_call() */
|
||||
smp_rmb();
|
||||
call_id = chan->last_call;
|
||||
if (skb && call_id != sp->hdr.callNumber)
|
||||
return;
|
||||
|
||||
@ -65,9 +117,12 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
|
||||
iov[2].iov_base = &ack_info;
|
||||
iov[2].iov_len = sizeof(ack_info);
|
||||
|
||||
serial = atomic_inc_return(&conn->serial);
|
||||
|
||||
pkt.whdr.epoch = htonl(conn->proto.epoch);
|
||||
pkt.whdr.cid = htonl(conn->proto.cid | channel);
|
||||
pkt.whdr.callNumber = htonl(call_id);
|
||||
pkt.whdr.serial = htonl(serial);
|
||||
pkt.whdr.seq = 0;
|
||||
pkt.whdr.type = chan->last_type;
|
||||
pkt.whdr.flags = conn->out_clientflag;
|
||||
@ -104,31 +159,15 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
|
||||
iov[0].iov_len += sizeof(pkt.ack);
|
||||
len += sizeof(pkt.ack) + 3 + sizeof(ack_info);
|
||||
ioc = 3;
|
||||
break;
|
||||
|
||||
default:
|
||||
return;
|
||||
}
|
||||
|
||||
/* Resync with __rxrpc_disconnect_call() and check that the last call
|
||||
* didn't get advanced whilst we were filling out the packets.
|
||||
*/
|
||||
smp_rmb();
|
||||
if (READ_ONCE(chan->last_call) != call_id)
|
||||
return;
|
||||
|
||||
serial = atomic_inc_return(&conn->serial);
|
||||
pkt.whdr.serial = htonl(serial);
|
||||
|
||||
switch (chan->last_type) {
|
||||
case RXRPC_PACKET_TYPE_ABORT:
|
||||
break;
|
||||
case RXRPC_PACKET_TYPE_ACK:
|
||||
trace_rxrpc_tx_ack(chan->call_debug_id, serial,
|
||||
ntohl(pkt.ack.firstPacket),
|
||||
ntohl(pkt.ack.serial),
|
||||
pkt.ack.reason, 0);
|
||||
break;
|
||||
|
||||
default:
|
||||
return;
|
||||
}
|
||||
|
||||
ret = kernel_sendmsg(conn->local->socket, &msg, iov, ioc, len);
|
||||
@ -146,131 +185,34 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
|
||||
/*
|
||||
* pass a connection-level abort onto all calls on that connection
|
||||
*/
|
||||
static void rxrpc_abort_calls(struct rxrpc_connection *conn,
|
||||
enum rxrpc_call_completion compl,
|
||||
rxrpc_serial_t serial)
|
||||
static void rxrpc_abort_calls(struct rxrpc_connection *conn)
|
||||
{
|
||||
struct rxrpc_call *call;
|
||||
int i;
|
||||
|
||||
_enter("{%d},%x", conn->debug_id, conn->abort_code);
|
||||
|
||||
spin_lock(&conn->bundle->channel_lock);
|
||||
|
||||
for (i = 0; i < RXRPC_MAXCALLS; i++) {
|
||||
call = rcu_dereference_protected(
|
||||
conn->channels[i].call,
|
||||
lockdep_is_held(&conn->bundle->channel_lock));
|
||||
if (call) {
|
||||
if (compl == RXRPC_CALL_LOCALLY_ABORTED)
|
||||
trace_rxrpc_abort(call->debug_id,
|
||||
"CON", call->cid,
|
||||
call->call_id, 0,
|
||||
call = conn->channels[i].call;
|
||||
if (call)
|
||||
rxrpc_set_call_completion(call,
|
||||
conn->completion,
|
||||
conn->abort_code,
|
||||
conn->error);
|
||||
else
|
||||
trace_rxrpc_rx_abort(call, serial,
|
||||
conn->abort_code);
|
||||
rxrpc_set_call_completion(call, compl,
|
||||
conn->abort_code,
|
||||
conn->error);
|
||||
}
|
||||
}
|
||||
|
||||
spin_unlock(&conn->bundle->channel_lock);
|
||||
_leave("");
|
||||
}
|
||||
|
||||
/*
|
||||
* generate a connection-level abort
|
||||
*/
|
||||
static int rxrpc_abort_connection(struct rxrpc_connection *conn,
|
||||
int error, u32 abort_code)
|
||||
{
|
||||
struct rxrpc_wire_header whdr;
|
||||
struct msghdr msg;
|
||||
struct kvec iov[2];
|
||||
__be32 word;
|
||||
size_t len;
|
||||
u32 serial;
|
||||
int ret;
|
||||
|
||||
_enter("%d,,%u,%u", conn->debug_id, error, abort_code);
|
||||
|
||||
/* generate a connection-level abort */
|
||||
spin_lock(&conn->state_lock);
|
||||
if (conn->state >= RXRPC_CONN_REMOTELY_ABORTED) {
|
||||
spin_unlock(&conn->state_lock);
|
||||
_leave(" = 0 [already dead]");
|
||||
return 0;
|
||||
}
|
||||
|
||||
conn->error = error;
|
||||
conn->abort_code = abort_code;
|
||||
conn->state = RXRPC_CONN_LOCALLY_ABORTED;
|
||||
set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
|
||||
spin_unlock(&conn->state_lock);
|
||||
|
||||
msg.msg_name = &conn->peer->srx.transport;
|
||||
msg.msg_namelen = conn->peer->srx.transport_len;
|
||||
msg.msg_control = NULL;
|
||||
msg.msg_controllen = 0;
|
||||
msg.msg_flags = 0;
|
||||
|
||||
whdr.epoch = htonl(conn->proto.epoch);
|
||||
whdr.cid = htonl(conn->proto.cid);
|
||||
whdr.callNumber = 0;
|
||||
whdr.seq = 0;
|
||||
whdr.type = RXRPC_PACKET_TYPE_ABORT;
|
||||
whdr.flags = conn->out_clientflag;
|
||||
whdr.userStatus = 0;
|
||||
whdr.securityIndex = conn->security_ix;
|
||||
whdr._rsvd = 0;
|
||||
whdr.serviceId = htons(conn->service_id);
|
||||
|
||||
word = htonl(conn->abort_code);
|
||||
|
||||
iov[0].iov_base = &whdr;
|
||||
iov[0].iov_len = sizeof(whdr);
|
||||
iov[1].iov_base = &word;
|
||||
iov[1].iov_len = sizeof(word);
|
||||
|
||||
len = iov[0].iov_len + iov[1].iov_len;
|
||||
|
||||
serial = atomic_inc_return(&conn->serial);
|
||||
rxrpc_abort_calls(conn, RXRPC_CALL_LOCALLY_ABORTED, serial);
|
||||
whdr.serial = htonl(serial);
|
||||
|
||||
ret = kernel_sendmsg(conn->local->socket, &msg, iov, 2, len);
|
||||
if (ret < 0) {
|
||||
trace_rxrpc_tx_fail(conn->debug_id, serial, ret,
|
||||
rxrpc_tx_point_conn_abort);
|
||||
_debug("sendmsg failed: %d", ret);
|
||||
return -EAGAIN;
|
||||
}
|
||||
|
||||
trace_rxrpc_tx_packet(conn->debug_id, &whdr, rxrpc_tx_point_conn_abort);
|
||||
|
||||
conn->peer->last_tx_at = ktime_get_seconds();
|
||||
|
||||
_leave(" = 0");
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* mark a call as being on a now-secured channel
|
||||
* - must be called with BH's disabled.
|
||||
*/
|
||||
static void rxrpc_call_is_secure(struct rxrpc_call *call)
|
||||
{
|
||||
_enter("%p", call);
|
||||
if (call) {
|
||||
write_lock(&call->state_lock);
|
||||
if (call->state == RXRPC_CALL_SERVER_SECURING) {
|
||||
call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
|
||||
rxrpc_notify_socket(call);
|
||||
}
|
||||
write_unlock(&call->state_lock);
|
||||
if (call && __rxrpc_call_state(call) == RXRPC_CALL_SERVER_SECURING) {
|
||||
rxrpc_set_call_state(call, RXRPC_CALL_SERVER_RECV_REQUEST);
|
||||
rxrpc_notify_socket(call);
|
||||
}
|
||||
}
|
||||
|
||||
@ -278,44 +220,22 @@ static void rxrpc_call_is_secure(struct rxrpc_call *call)
|
||||
* connection-level Rx packet processor
|
||||
*/
|
||||
static int rxrpc_process_event(struct rxrpc_connection *conn,
|
||||
struct sk_buff *skb,
|
||||
u32 *_abort_code)
|
||||
struct sk_buff *skb)
|
||||
{
|
||||
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
|
||||
int loop, ret;
|
||||
int ret;
|
||||
|
||||
if (conn->state >= RXRPC_CONN_REMOTELY_ABORTED) {
|
||||
_leave(" = -ECONNABORTED [%u]", conn->state);
|
||||
if (conn->state == RXRPC_CONN_ABORTED)
|
||||
return -ECONNABORTED;
|
||||
}
|
||||
|
||||
_enter("{%d},{%u,%%%u},", conn->debug_id, sp->hdr.type, sp->hdr.serial);
|
||||
|
||||
switch (sp->hdr.type) {
|
||||
case RXRPC_PACKET_TYPE_DATA:
|
||||
case RXRPC_PACKET_TYPE_ACK:
|
||||
rxrpc_conn_retransmit_call(conn, skb,
|
||||
sp->hdr.cid & RXRPC_CHANNELMASK);
|
||||
return 0;
|
||||
|
||||
case RXRPC_PACKET_TYPE_BUSY:
|
||||
/* Just ignore BUSY packets for now. */
|
||||
return 0;
|
||||
|
||||
case RXRPC_PACKET_TYPE_ABORT:
|
||||
conn->error = -ECONNABORTED;
|
||||
conn->abort_code = skb->priority;
|
||||
conn->state = RXRPC_CONN_REMOTELY_ABORTED;
|
||||
set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
|
||||
rxrpc_abort_calls(conn, RXRPC_CALL_REMOTELY_ABORTED, sp->hdr.serial);
|
||||
return -ECONNABORTED;
|
||||
|
||||
case RXRPC_PACKET_TYPE_CHALLENGE:
|
||||
return conn->security->respond_to_challenge(conn, skb,
|
||||
_abort_code);
|
||||
return conn->security->respond_to_challenge(conn, skb);
|
||||
|
||||
case RXRPC_PACKET_TYPE_RESPONSE:
|
||||
ret = conn->security->verify_response(conn, skb, _abort_code);
|
||||
ret = conn->security->verify_response(conn, skb);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
@ -324,27 +244,25 @@ static int rxrpc_process_event(struct rxrpc_connection *conn,
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
spin_lock(&conn->bundle->channel_lock);
|
||||
spin_lock(&conn->state_lock);
|
||||
|
||||
if (conn->state == RXRPC_CONN_SERVICE_CHALLENGING) {
|
||||
if (conn->state == RXRPC_CONN_SERVICE_CHALLENGING)
|
||||
conn->state = RXRPC_CONN_SERVICE;
|
||||
spin_unlock(&conn->state_lock);
|
||||
for (loop = 0; loop < RXRPC_MAXCALLS; loop++)
|
||||
rxrpc_call_is_secure(
|
||||
rcu_dereference_protected(
|
||||
conn->channels[loop].call,
|
||||
lockdep_is_held(&conn->bundle->channel_lock)));
|
||||
} else {
|
||||
spin_unlock(&conn->state_lock);
|
||||
}
|
||||
spin_unlock(&conn->state_lock);
|
||||
|
||||
spin_unlock(&conn->bundle->channel_lock);
|
||||
if (conn->state == RXRPC_CONN_SERVICE) {
|
||||
/* Offload call state flipping to the I/O thread. As
|
||||
* we've already received the packet, put it on the
|
||||
* front of the queue.
|
||||
*/
|
||||
skb->mark = RXRPC_SKB_MARK_SERVICE_CONN_SECURED;
|
||||
rxrpc_get_skb(skb, rxrpc_skb_get_conn_secured);
|
||||
skb_queue_head(&conn->local->rx_queue, skb);
|
||||
rxrpc_wake_up_io_thread(conn->local);
|
||||
}
|
||||
return 0;
|
||||
|
||||
default:
|
||||
trace_rxrpc_rx_eproto(NULL, sp->hdr.serial,
|
||||
tracepoint_string("bad_conn_pkt"));
|
||||
WARN_ON_ONCE(1);
|
||||
return -EPROTO;
|
||||
}
|
||||
}
|
||||
@ -354,26 +272,9 @@ static int rxrpc_process_event(struct rxrpc_connection *conn,
|
||||
*/
|
||||
static void rxrpc_secure_connection(struct rxrpc_connection *conn)
|
||||
{
|
||||
u32 abort_code;
|
||||
int ret;
|
||||
|
||||
_enter("{%d}", conn->debug_id);
|
||||
|
||||
ASSERT(conn->security_ix != 0);
|
||||
|
||||
if (conn->security->issue_challenge(conn) < 0) {
|
||||
abort_code = RX_CALL_DEAD;
|
||||
ret = -ENOMEM;
|
||||
goto abort;
|
||||
}
|
||||
|
||||
_leave("");
|
||||
return;
|
||||
|
||||
abort:
|
||||
_debug("abort %d, %d", ret, abort_code);
|
||||
rxrpc_abort_connection(conn, ret, abort_code);
|
||||
_leave(" [aborted]");
|
||||
if (conn->security->issue_challenge(conn) < 0)
|
||||
rxrpc_abort_conn(conn, NULL, RX_CALL_DEAD, -ENOMEM,
|
||||
rxrpc_abort_nomem);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -395,9 +296,7 @@ again:
|
||||
if (!test_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags))
|
||||
continue;
|
||||
|
||||
smp_rmb(); /* vs rxrpc_disconnect_client_call */
|
||||
ack_at = READ_ONCE(chan->final_ack_at);
|
||||
|
||||
ack_at = chan->final_ack_at;
|
||||
if (time_before(j, ack_at) && !force) {
|
||||
if (time_before(ack_at, next_j)) {
|
||||
next_j = ack_at;
|
||||
@ -424,47 +323,27 @@ again:
|
||||
static void rxrpc_do_process_connection(struct rxrpc_connection *conn)
|
||||
{
|
||||
struct sk_buff *skb;
|
||||
u32 abort_code = RX_PROTOCOL_ERROR;
|
||||
int ret;
|
||||
|
||||
if (test_and_clear_bit(RXRPC_CONN_EV_CHALLENGE, &conn->events))
|
||||
rxrpc_secure_connection(conn);
|
||||
|
||||
/* Process delayed ACKs whose time has come. */
|
||||
if (conn->flags & RXRPC_CONN_FINAL_ACK_MASK)
|
||||
rxrpc_process_delayed_final_acks(conn, false);
|
||||
|
||||
/* go through the conn-level event packets, releasing the ref on this
|
||||
* connection that each one has when we've finished with it */
|
||||
while ((skb = skb_dequeue(&conn->rx_queue))) {
|
||||
rxrpc_see_skb(skb, rxrpc_skb_see_conn_work);
|
||||
ret = rxrpc_process_event(conn, skb, &abort_code);
|
||||
ret = rxrpc_process_event(conn, skb);
|
||||
switch (ret) {
|
||||
case -EPROTO:
|
||||
case -EKEYEXPIRED:
|
||||
case -EKEYREJECTED:
|
||||
goto protocol_error;
|
||||
case -ENOMEM:
|
||||
case -EAGAIN:
|
||||
goto requeue_and_leave;
|
||||
case -ECONNABORTED:
|
||||
skb_queue_head(&conn->rx_queue, skb);
|
||||
rxrpc_queue_conn(conn, rxrpc_conn_queue_retry_work);
|
||||
break;
|
||||
default:
|
||||
rxrpc_free_skb(skb, rxrpc_skb_put_conn_work);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return;
|
||||
|
||||
requeue_and_leave:
|
||||
skb_queue_head(&conn->rx_queue, skb);
|
||||
return;
|
||||
|
||||
protocol_error:
|
||||
if (rxrpc_abort_connection(conn, ret, abort_code) < 0)
|
||||
goto requeue_and_leave;
|
||||
rxrpc_free_skb(skb, rxrpc_skb_put_conn_work);
|
||||
return;
|
||||
}
|
||||
|
||||
void rxrpc_process_connection(struct work_struct *work)
|
||||
@ -498,44 +377,59 @@ static void rxrpc_post_packet_to_conn(struct rxrpc_connection *conn,
|
||||
/*
|
||||
* Input a connection-level packet.
|
||||
*/
|
||||
int rxrpc_input_conn_packet(struct rxrpc_connection *conn, struct sk_buff *skb)
|
||||
bool rxrpc_input_conn_packet(struct rxrpc_connection *conn, struct sk_buff *skb)
|
||||
{
|
||||
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
|
||||
|
||||
if (conn->state >= RXRPC_CONN_REMOTELY_ABORTED) {
|
||||
_leave(" = -ECONNABORTED [%u]", conn->state);
|
||||
return -ECONNABORTED;
|
||||
}
|
||||
|
||||
_enter("{%d},{%u,%%%u},", conn->debug_id, sp->hdr.type, sp->hdr.serial);
|
||||
|
||||
switch (sp->hdr.type) {
|
||||
case RXRPC_PACKET_TYPE_DATA:
|
||||
case RXRPC_PACKET_TYPE_ACK:
|
||||
rxrpc_conn_retransmit_call(conn, skb,
|
||||
sp->hdr.cid & RXRPC_CHANNELMASK);
|
||||
return 0;
|
||||
|
||||
case RXRPC_PACKET_TYPE_BUSY:
|
||||
/* Just ignore BUSY packets for now. */
|
||||
return 0;
|
||||
return true;
|
||||
|
||||
case RXRPC_PACKET_TYPE_ABORT:
|
||||
conn->error = -ECONNABORTED;
|
||||
conn->abort_code = skb->priority;
|
||||
conn->state = RXRPC_CONN_REMOTELY_ABORTED;
|
||||
set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
|
||||
rxrpc_abort_calls(conn, RXRPC_CALL_REMOTELY_ABORTED, sp->hdr.serial);
|
||||
return -ECONNABORTED;
|
||||
if (rxrpc_is_conn_aborted(conn))
|
||||
return true;
|
||||
rxrpc_input_conn_abort(conn, skb);
|
||||
rxrpc_abort_calls(conn);
|
||||
return true;
|
||||
|
||||
case RXRPC_PACKET_TYPE_CHALLENGE:
|
||||
case RXRPC_PACKET_TYPE_RESPONSE:
|
||||
if (rxrpc_is_conn_aborted(conn)) {
|
||||
if (conn->completion == RXRPC_CALL_LOCALLY_ABORTED)
|
||||
rxrpc_send_conn_abort(conn);
|
||||
return true;
|
||||
}
|
||||
rxrpc_post_packet_to_conn(conn, skb);
|
||||
return 0;
|
||||
return true;
|
||||
|
||||
default:
|
||||
trace_rxrpc_rx_eproto(NULL, sp->hdr.serial,
|
||||
tracepoint_string("bad_conn_pkt"));
|
||||
return -EPROTO;
|
||||
WARN_ON_ONCE(1);
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Input a connection event.
|
||||
*/
|
||||
void rxrpc_input_conn_event(struct rxrpc_connection *conn, struct sk_buff *skb)
|
||||
{
|
||||
unsigned int loop;
|
||||
|
||||
if (test_and_clear_bit(RXRPC_CONN_EV_ABORT_CALLS, &conn->events))
|
||||
rxrpc_abort_calls(conn);
|
||||
|
||||
switch (skb->mark) {
|
||||
case RXRPC_SKB_MARK_SERVICE_CONN_SECURED:
|
||||
if (conn->state != RXRPC_CONN_SERVICE)
|
||||
break;
|
||||
|
||||
for (loop = 0; loop < RXRPC_MAXCALLS; loop++)
|
||||
rxrpc_call_is_secure(conn->channels[loop].call);
|
||||
break;
|
||||
}
|
||||
|
||||
/* Process delayed ACKs whose time has come. */
|
||||
if (conn->flags & RXRPC_CONN_FINAL_ACK_MASK)
|
||||
rxrpc_process_delayed_final_acks(conn, false);
|
||||
}
|
||||
|
@ -23,12 +23,30 @@ static void rxrpc_clean_up_connection(struct work_struct *work);
|
||||
static void rxrpc_set_service_reap_timer(struct rxrpc_net *rxnet,
|
||||
unsigned long reap_at);
|
||||
|
||||
void rxrpc_poke_conn(struct rxrpc_connection *conn, enum rxrpc_conn_trace why)
|
||||
{
|
||||
struct rxrpc_local *local = conn->local;
|
||||
bool busy;
|
||||
|
||||
if (WARN_ON_ONCE(!local))
|
||||
return;
|
||||
|
||||
spin_lock_bh(&local->lock);
|
||||
busy = !list_empty(&conn->attend_link);
|
||||
if (!busy) {
|
||||
rxrpc_get_connection(conn, why);
|
||||
list_add_tail(&conn->attend_link, &local->conn_attend_q);
|
||||
}
|
||||
spin_unlock_bh(&local->lock);
|
||||
rxrpc_wake_up_io_thread(local);
|
||||
}
|
||||
|
||||
static void rxrpc_connection_timer(struct timer_list *timer)
|
||||
{
|
||||
struct rxrpc_connection *conn =
|
||||
container_of(timer, struct rxrpc_connection, timer);
|
||||
|
||||
rxrpc_queue_conn(conn, rxrpc_conn_queue_timer);
|
||||
rxrpc_poke_conn(conn, rxrpc_conn_get_poke_timer);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -49,6 +67,7 @@ struct rxrpc_connection *rxrpc_alloc_connection(struct rxrpc_net *rxnet,
|
||||
INIT_WORK(&conn->destructor, rxrpc_clean_up_connection);
|
||||
INIT_LIST_HEAD(&conn->proc_link);
|
||||
INIT_LIST_HEAD(&conn->link);
|
||||
mutex_init(&conn->security_lock);
|
||||
skb_queue_head_init(&conn->rx_queue);
|
||||
conn->rxnet = rxnet;
|
||||
conn->security = &rxrpc_no_security;
|
||||
@ -82,10 +101,10 @@ struct rxrpc_connection *rxrpc_find_client_connection_rcu(struct rxrpc_local *lo
|
||||
|
||||
_enter(",%x", sp->hdr.cid & RXRPC_CIDMASK);
|
||||
|
||||
/* Look up client connections by connection ID alone as their IDs are
|
||||
* unique for this machine.
|
||||
/* Look up client connections by connection ID alone as their
|
||||
* IDs are unique for this machine.
|
||||
*/
|
||||
conn = idr_find(&rxrpc_client_conn_ids, sp->hdr.cid >> RXRPC_CIDSHIFT);
|
||||
conn = idr_find(&local->conn_ids, sp->hdr.cid >> RXRPC_CIDSHIFT);
|
||||
if (!conn || refcount_read(&conn->ref) == 0) {
|
||||
_debug("no conn");
|
||||
goto not_found;
|
||||
@ -139,7 +158,7 @@ void __rxrpc_disconnect_call(struct rxrpc_connection *conn,
|
||||
|
||||
_enter("%d,%x", conn->debug_id, call->cid);
|
||||
|
||||
if (rcu_access_pointer(chan->call) == call) {
|
||||
if (chan->call == call) {
|
||||
/* Save the result of the call so that we can repeat it if necessary
|
||||
* through the channel, whilst disposing of the actual call record.
|
||||
*/
|
||||
@ -159,12 +178,9 @@ void __rxrpc_disconnect_call(struct rxrpc_connection *conn,
|
||||
break;
|
||||
}
|
||||
|
||||
/* Sync with rxrpc_conn_retransmit(). */
|
||||
smp_wmb();
|
||||
chan->last_call = chan->call_id;
|
||||
chan->call_id = chan->call_counter;
|
||||
|
||||
rcu_assign_pointer(chan->call, NULL);
|
||||
chan->call = NULL;
|
||||
}
|
||||
|
||||
_leave("");
|
||||
@ -178,6 +194,9 @@ void rxrpc_disconnect_call(struct rxrpc_call *call)
|
||||
{
|
||||
struct rxrpc_connection *conn = call->conn;
|
||||
|
||||
set_bit(RXRPC_CALL_DISCONNECTED, &call->flags);
|
||||
rxrpc_see_call(call, rxrpc_call_see_disconnected);
|
||||
|
||||
call->peer->cong_ssthresh = call->cong_ssthresh;
|
||||
|
||||
if (!hlist_unhashed(&call->error_link)) {
|
||||
@ -186,18 +205,17 @@ void rxrpc_disconnect_call(struct rxrpc_call *call)
|
||||
spin_unlock(&call->peer->lock);
|
||||
}
|
||||
|
||||
if (rxrpc_is_client_call(call))
|
||||
return rxrpc_disconnect_client_call(conn->bundle, call);
|
||||
if (rxrpc_is_client_call(call)) {
|
||||
rxrpc_disconnect_client_call(call->bundle, call);
|
||||
} else {
|
||||
__rxrpc_disconnect_call(conn, call);
|
||||
conn->idle_timestamp = jiffies;
|
||||
if (atomic_dec_and_test(&conn->active))
|
||||
rxrpc_set_service_reap_timer(conn->rxnet,
|
||||
jiffies + rxrpc_connection_expiry);
|
||||
}
|
||||
|
||||
spin_lock(&conn->bundle->channel_lock);
|
||||
__rxrpc_disconnect_call(conn, call);
|
||||
spin_unlock(&conn->bundle->channel_lock);
|
||||
|
||||
set_bit(RXRPC_CALL_DISCONNECTED, &call->flags);
|
||||
conn->idle_timestamp = jiffies;
|
||||
if (atomic_dec_and_test(&conn->active))
|
||||
rxrpc_set_service_reap_timer(conn->rxnet,
|
||||
jiffies + rxrpc_connection_expiry);
|
||||
rxrpc_put_call(call, rxrpc_call_put_io_thread);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -293,10 +311,10 @@ static void rxrpc_clean_up_connection(struct work_struct *work)
|
||||
container_of(work, struct rxrpc_connection, destructor);
|
||||
struct rxrpc_net *rxnet = conn->rxnet;
|
||||
|
||||
ASSERT(!rcu_access_pointer(conn->channels[0].call) &&
|
||||
!rcu_access_pointer(conn->channels[1].call) &&
|
||||
!rcu_access_pointer(conn->channels[2].call) &&
|
||||
!rcu_access_pointer(conn->channels[3].call));
|
||||
ASSERT(!conn->channels[0].call &&
|
||||
!conn->channels[1].call &&
|
||||
!conn->channels[2].call &&
|
||||
!conn->channels[3].call);
|
||||
ASSERT(list_empty(&conn->cache_link));
|
||||
|
||||
del_timer_sync(&conn->timer);
|
||||
@ -447,7 +465,6 @@ void rxrpc_destroy_all_connections(struct rxrpc_net *rxnet)
|
||||
_enter("");
|
||||
|
||||
atomic_dec(&rxnet->nr_conns);
|
||||
rxrpc_destroy_all_client_connections(rxnet);
|
||||
|
||||
del_timer_sync(&rxnet->service_conn_reap_timer);
|
||||
rxrpc_queue_work(&rxnet->service_conn_reaper);
|
||||
|
@ -11,7 +11,6 @@
|
||||
static struct rxrpc_bundle rxrpc_service_dummy_bundle = {
|
||||
.ref = REFCOUNT_INIT(1),
|
||||
.debug_id = UINT_MAX,
|
||||
.channel_lock = __SPIN_LOCK_UNLOCKED(&rxrpc_service_dummy_bundle.channel_lock),
|
||||
};
|
||||
|
||||
/*
|
||||
|
@ -9,11 +9,10 @@
|
||||
|
||||
#include "ar-internal.h"
|
||||
|
||||
static void rxrpc_proto_abort(const char *why,
|
||||
struct rxrpc_call *call, rxrpc_seq_t seq)
|
||||
static void rxrpc_proto_abort(struct rxrpc_call *call, rxrpc_seq_t seq,
|
||||
enum rxrpc_abort_reason why)
|
||||
{
|
||||
if (rxrpc_abort_call(why, call, seq, RX_PROTOCOL_ERROR, -EBADMSG))
|
||||
rxrpc_send_abort_packet(call);
|
||||
rxrpc_abort_call(call, seq, RX_PROTOCOL_ERROR, -EBADMSG, why);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -185,7 +184,7 @@ void rxrpc_congestion_degrade(struct rxrpc_call *call)
|
||||
if (call->cong_mode != RXRPC_CALL_SLOW_START &&
|
||||
call->cong_mode != RXRPC_CALL_CONGEST_AVOIDANCE)
|
||||
return;
|
||||
if (call->state == RXRPC_CALL_CLIENT_AWAIT_REPLY)
|
||||
if (__rxrpc_call_state(call) == RXRPC_CALL_CLIENT_AWAIT_REPLY)
|
||||
return;
|
||||
|
||||
rtt = ns_to_ktime(call->peer->srtt_us * (1000 / 8));
|
||||
@ -250,47 +249,34 @@ static bool rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
|
||||
* This occurs when we get an ACKALL packet, the first DATA packet of a reply,
|
||||
* or a final ACK packet.
|
||||
*/
|
||||
static bool rxrpc_end_tx_phase(struct rxrpc_call *call, bool reply_begun,
|
||||
const char *abort_why)
|
||||
static void rxrpc_end_tx_phase(struct rxrpc_call *call, bool reply_begun,
|
||||
enum rxrpc_abort_reason abort_why)
|
||||
{
|
||||
unsigned int state;
|
||||
|
||||
ASSERT(test_bit(RXRPC_CALL_TX_LAST, &call->flags));
|
||||
|
||||
write_lock(&call->state_lock);
|
||||
|
||||
state = call->state;
|
||||
switch (state) {
|
||||
switch (__rxrpc_call_state(call)) {
|
||||
case RXRPC_CALL_CLIENT_SEND_REQUEST:
|
||||
case RXRPC_CALL_CLIENT_AWAIT_REPLY:
|
||||
if (reply_begun)
|
||||
call->state = state = RXRPC_CALL_CLIENT_RECV_REPLY;
|
||||
else
|
||||
call->state = state = RXRPC_CALL_CLIENT_AWAIT_REPLY;
|
||||
if (reply_begun) {
|
||||
rxrpc_set_call_state(call, RXRPC_CALL_CLIENT_RECV_REPLY);
|
||||
trace_rxrpc_txqueue(call, rxrpc_txqueue_end);
|
||||
break;
|
||||
}
|
||||
|
||||
rxrpc_set_call_state(call, RXRPC_CALL_CLIENT_AWAIT_REPLY);
|
||||
trace_rxrpc_txqueue(call, rxrpc_txqueue_await_reply);
|
||||
break;
|
||||
|
||||
case RXRPC_CALL_SERVER_AWAIT_ACK:
|
||||
__rxrpc_call_completed(call);
|
||||
state = call->state;
|
||||
rxrpc_call_completed(call);
|
||||
trace_rxrpc_txqueue(call, rxrpc_txqueue_end);
|
||||
break;
|
||||
|
||||
default:
|
||||
goto bad_state;
|
||||
kdebug("end_tx %s", rxrpc_call_states[__rxrpc_call_state(call)]);
|
||||
rxrpc_proto_abort(call, call->tx_top, abort_why);
|
||||
break;
|
||||
}
|
||||
|
||||
write_unlock(&call->state_lock);
|
||||
if (state == RXRPC_CALL_CLIENT_AWAIT_REPLY)
|
||||
trace_rxrpc_txqueue(call, rxrpc_txqueue_await_reply);
|
||||
else
|
||||
trace_rxrpc_txqueue(call, rxrpc_txqueue_end);
|
||||
_leave(" = ok");
|
||||
return true;
|
||||
|
||||
bad_state:
|
||||
write_unlock(&call->state_lock);
|
||||
kdebug("end_tx %s", rxrpc_call_states[call->state]);
|
||||
rxrpc_proto_abort(abort_why, call, call->tx_top);
|
||||
return false;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -305,18 +291,48 @@ static bool rxrpc_receiving_reply(struct rxrpc_call *call)
|
||||
if (call->ackr_reason) {
|
||||
now = jiffies;
|
||||
timo = now + MAX_JIFFY_OFFSET;
|
||||
WRITE_ONCE(call->resend_at, timo);
|
||||
|
||||
WRITE_ONCE(call->delay_ack_at, timo);
|
||||
trace_rxrpc_timer(call, rxrpc_timer_init_for_reply, now);
|
||||
}
|
||||
|
||||
if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags)) {
|
||||
if (!rxrpc_rotate_tx_window(call, top, &summary)) {
|
||||
rxrpc_proto_abort("TXL", call, top);
|
||||
rxrpc_proto_abort(call, top, rxrpc_eproto_early_reply);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return rxrpc_end_tx_phase(call, true, "ETD");
|
||||
|
||||
rxrpc_end_tx_phase(call, true, rxrpc_eproto_unexpected_reply);
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
* End the packet reception phase.
|
||||
*/
|
||||
static void rxrpc_end_rx_phase(struct rxrpc_call *call, rxrpc_serial_t serial)
|
||||
{
|
||||
rxrpc_seq_t whigh = READ_ONCE(call->rx_highest_seq);
|
||||
|
||||
_enter("%d,%s", call->debug_id, rxrpc_call_states[__rxrpc_call_state(call)]);
|
||||
|
||||
trace_rxrpc_receive(call, rxrpc_receive_end, 0, whigh);
|
||||
|
||||
switch (__rxrpc_call_state(call)) {
|
||||
case RXRPC_CALL_CLIENT_RECV_REPLY:
|
||||
rxrpc_propose_delay_ACK(call, serial, rxrpc_propose_ack_terminal_ack);
|
||||
rxrpc_call_completed(call);
|
||||
break;
|
||||
|
||||
case RXRPC_CALL_SERVER_RECV_REQUEST:
|
||||
rxrpc_set_call_state(call, RXRPC_CALL_SERVER_ACK_REQUEST);
|
||||
call->expect_req_by = jiffies + MAX_JIFFY_OFFSET;
|
||||
rxrpc_propose_delay_ACK(call, serial, rxrpc_propose_ack_processing_op);
|
||||
break;
|
||||
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
static void rxrpc_input_update_ack_window(struct rxrpc_call *call,
|
||||
@ -337,8 +353,9 @@ static void rxrpc_input_queue_data(struct rxrpc_call *call, struct sk_buff *skb,
|
||||
|
||||
__skb_queue_tail(&call->recvmsg_queue, skb);
|
||||
rxrpc_input_update_ack_window(call, window, wtop);
|
||||
|
||||
trace_rxrpc_receive(call, last ? why + 1 : why, sp->hdr.serial, sp->hdr.seq);
|
||||
if (last)
|
||||
rxrpc_end_rx_phase(call, sp->hdr.serial);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -366,17 +383,14 @@ static void rxrpc_input_data_one(struct rxrpc_call *call, struct sk_buff *skb,
|
||||
|
||||
if (last) {
|
||||
if (test_and_set_bit(RXRPC_CALL_RX_LAST, &call->flags) &&
|
||||
seq + 1 != wtop) {
|
||||
rxrpc_proto_abort("LSN", call, seq);
|
||||
return;
|
||||
}
|
||||
seq + 1 != wtop)
|
||||
return rxrpc_proto_abort(call, seq, rxrpc_eproto_different_last);
|
||||
} else {
|
||||
if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) &&
|
||||
after_eq(seq, wtop)) {
|
||||
pr_warn("Packet beyond last: c=%x q=%x window=%x-%x wlimit=%x\n",
|
||||
call->debug_id, seq, window, wtop, wlimit);
|
||||
rxrpc_proto_abort("LSA", call, seq);
|
||||
return;
|
||||
return rxrpc_proto_abort(call, seq, rxrpc_eproto_data_after_last);
|
||||
}
|
||||
}
|
||||
|
||||
@ -550,7 +564,6 @@ protocol_error:
|
||||
static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb)
|
||||
{
|
||||
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
|
||||
enum rxrpc_call_state state;
|
||||
rxrpc_serial_t serial = sp->hdr.serial;
|
||||
rxrpc_seq_t seq0 = sp->hdr.seq;
|
||||
|
||||
@ -558,11 +571,20 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb)
|
||||
atomic64_read(&call->ackr_window), call->rx_highest_seq,
|
||||
skb->len, seq0);
|
||||
|
||||
state = READ_ONCE(call->state);
|
||||
if (state >= RXRPC_CALL_COMPLETE)
|
||||
if (__rxrpc_call_is_complete(call))
|
||||
return;
|
||||
|
||||
if (state == RXRPC_CALL_SERVER_RECV_REQUEST) {
|
||||
switch (__rxrpc_call_state(call)) {
|
||||
case RXRPC_CALL_CLIENT_SEND_REQUEST:
|
||||
case RXRPC_CALL_CLIENT_AWAIT_REPLY:
|
||||
/* Received data implicitly ACKs all of the request
|
||||
* packets we sent when we're acting as a client.
|
||||
*/
|
||||
if (!rxrpc_receiving_reply(call))
|
||||
goto out_notify;
|
||||
break;
|
||||
|
||||
case RXRPC_CALL_SERVER_RECV_REQUEST: {
|
||||
unsigned long timo = READ_ONCE(call->next_req_timo);
|
||||
unsigned long now, expect_req_by;
|
||||
|
||||
@ -573,18 +595,15 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb)
|
||||
rxrpc_reduce_call_timer(call, expect_req_by, now,
|
||||
rxrpc_timer_set_for_idle);
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
/* Received data implicitly ACKs all of the request packets we sent
|
||||
* when we're acting as a client.
|
||||
*/
|
||||
if ((state == RXRPC_CALL_CLIENT_SEND_REQUEST ||
|
||||
state == RXRPC_CALL_CLIENT_AWAIT_REPLY) &&
|
||||
!rxrpc_receiving_reply(call))
|
||||
goto out_notify;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
if (!rxrpc_input_split_jumbo(call, skb)) {
|
||||
rxrpc_proto_abort("VLD", call, sp->hdr.seq);
|
||||
rxrpc_proto_abort(call, sp->hdr.seq, rxrpc_badmsg_bad_jumbo);
|
||||
goto out_notify;
|
||||
}
|
||||
skb = NULL;
|
||||
@ -765,7 +784,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
|
||||
|
||||
offset = sizeof(struct rxrpc_wire_header);
|
||||
if (skb_copy_bits(skb, offset, &ack, sizeof(ack)) < 0)
|
||||
return rxrpc_proto_abort("XAK", call, 0);
|
||||
return rxrpc_proto_abort(call, 0, rxrpc_badmsg_short_ack);
|
||||
offset += sizeof(ack);
|
||||
|
||||
ack_serial = sp->hdr.serial;
|
||||
@ -845,7 +864,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
|
||||
ioffset = offset + nr_acks + 3;
|
||||
if (skb->len >= ioffset + sizeof(info) &&
|
||||
skb_copy_bits(skb, ioffset, &info, sizeof(info)) < 0)
|
||||
return rxrpc_proto_abort("XAI", call, 0);
|
||||
return rxrpc_proto_abort(call, 0, rxrpc_badmsg_short_ack_info);
|
||||
|
||||
if (nr_acks > 0)
|
||||
skb_condense(skb);
|
||||
@ -868,10 +887,10 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
|
||||
rxrpc_input_ackinfo(call, skb, &info);
|
||||
|
||||
if (first_soft_ack == 0)
|
||||
return rxrpc_proto_abort("AK0", call, 0);
|
||||
return rxrpc_proto_abort(call, 0, rxrpc_eproto_ackr_zero);
|
||||
|
||||
/* Ignore ACKs unless we are or have just been transmitting. */
|
||||
switch (READ_ONCE(call->state)) {
|
||||
switch (__rxrpc_call_state(call)) {
|
||||
case RXRPC_CALL_CLIENT_SEND_REQUEST:
|
||||
case RXRPC_CALL_CLIENT_AWAIT_REPLY:
|
||||
case RXRPC_CALL_SERVER_SEND_REPLY:
|
||||
@ -883,20 +902,20 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
|
||||
|
||||
if (before(hard_ack, call->acks_hard_ack) ||
|
||||
after(hard_ack, call->tx_top))
|
||||
return rxrpc_proto_abort("AKW", call, 0);
|
||||
return rxrpc_proto_abort(call, 0, rxrpc_eproto_ackr_outside_window);
|
||||
if (nr_acks > call->tx_top - hard_ack)
|
||||
return rxrpc_proto_abort("AKN", call, 0);
|
||||
return rxrpc_proto_abort(call, 0, rxrpc_eproto_ackr_sack_overflow);
|
||||
|
||||
if (after(hard_ack, call->acks_hard_ack)) {
|
||||
if (rxrpc_rotate_tx_window(call, hard_ack, &summary)) {
|
||||
rxrpc_end_tx_phase(call, false, "ETA");
|
||||
rxrpc_end_tx_phase(call, false, rxrpc_eproto_unexpected_ack);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
if (nr_acks > 0) {
|
||||
if (offset > (int)skb->len - nr_acks)
|
||||
return rxrpc_proto_abort("XSA", call, 0);
|
||||
return rxrpc_proto_abort(call, 0, rxrpc_eproto_ackr_short_sack);
|
||||
rxrpc_input_soft_acks(call, skb->data + offset, first_soft_ack,
|
||||
nr_acks, &summary);
|
||||
}
|
||||
@ -918,7 +937,7 @@ static void rxrpc_input_ackall(struct rxrpc_call *call, struct sk_buff *skb)
|
||||
struct rxrpc_ack_summary summary = { 0 };
|
||||
|
||||
if (rxrpc_rotate_tx_window(call, call->tx_top, &summary))
|
||||
rxrpc_end_tx_phase(call, false, "ETL");
|
||||
rxrpc_end_tx_phase(call, false, rxrpc_eproto_unexpected_ackall);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -963,27 +982,23 @@ void rxrpc_input_call_packet(struct rxrpc_call *call, struct sk_buff *skb)
|
||||
|
||||
switch (sp->hdr.type) {
|
||||
case RXRPC_PACKET_TYPE_DATA:
|
||||
rxrpc_input_data(call, skb);
|
||||
break;
|
||||
return rxrpc_input_data(call, skb);
|
||||
|
||||
case RXRPC_PACKET_TYPE_ACK:
|
||||
rxrpc_input_ack(call, skb);
|
||||
break;
|
||||
return rxrpc_input_ack(call, skb);
|
||||
|
||||
case RXRPC_PACKET_TYPE_BUSY:
|
||||
/* Just ignore BUSY packets from the server; the retry and
|
||||
* lifespan timers will take care of business. BUSY packets
|
||||
* from the client don't make sense.
|
||||
*/
|
||||
break;
|
||||
return;
|
||||
|
||||
case RXRPC_PACKET_TYPE_ABORT:
|
||||
rxrpc_input_abort(call, skb);
|
||||
break;
|
||||
return rxrpc_input_abort(call, skb);
|
||||
|
||||
case RXRPC_PACKET_TYPE_ACKALL:
|
||||
rxrpc_input_ackall(call, skb);
|
||||
break;
|
||||
return rxrpc_input_ackall(call, skb);
|
||||
|
||||
default:
|
||||
break;
|
||||
@ -998,24 +1013,18 @@ void rxrpc_input_call_packet(struct rxrpc_call *call, struct sk_buff *skb)
|
||||
*/
|
||||
void rxrpc_implicit_end_call(struct rxrpc_call *call, struct sk_buff *skb)
|
||||
{
|
||||
struct rxrpc_connection *conn = call->conn;
|
||||
|
||||
switch (READ_ONCE(call->state)) {
|
||||
switch (__rxrpc_call_state(call)) {
|
||||
case RXRPC_CALL_SERVER_AWAIT_ACK:
|
||||
rxrpc_call_completed(call);
|
||||
fallthrough;
|
||||
case RXRPC_CALL_COMPLETE:
|
||||
break;
|
||||
default:
|
||||
if (rxrpc_abort_call("IMP", call, 0, RX_CALL_DEAD, -ESHUTDOWN))
|
||||
rxrpc_send_abort_packet(call);
|
||||
rxrpc_abort_call(call, 0, RX_CALL_DEAD, -ESHUTDOWN,
|
||||
rxrpc_eproto_improper_term);
|
||||
trace_rxrpc_improper_term(call);
|
||||
break;
|
||||
}
|
||||
|
||||
rxrpc_input_call_event(call, skb);
|
||||
|
||||
spin_lock(&conn->bundle->channel_lock);
|
||||
__rxrpc_disconnect_call(conn, call);
|
||||
spin_unlock(&conn->bundle->channel_lock);
|
||||
}
|
||||
|
@ -43,25 +43,17 @@ static void none_free_call_crypto(struct rxrpc_call *call)
|
||||
}
|
||||
|
||||
static int none_respond_to_challenge(struct rxrpc_connection *conn,
|
||||
struct sk_buff *skb,
|
||||
u32 *_abort_code)
|
||||
struct sk_buff *skb)
|
||||
{
|
||||
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
|
||||
|
||||
trace_rxrpc_rx_eproto(NULL, sp->hdr.serial,
|
||||
tracepoint_string("chall_none"));
|
||||
return -EPROTO;
|
||||
return rxrpc_abort_conn(conn, skb, RX_PROTOCOL_ERROR, -EPROTO,
|
||||
rxrpc_eproto_rxnull_challenge);
|
||||
}
|
||||
|
||||
static int none_verify_response(struct rxrpc_connection *conn,
|
||||
struct sk_buff *skb,
|
||||
u32 *_abort_code)
|
||||
struct sk_buff *skb)
|
||||
{
|
||||
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
|
||||
|
||||
trace_rxrpc_rx_eproto(NULL, sp->hdr.serial,
|
||||
tracepoint_string("resp_none"));
|
||||
return -EPROTO;
|
||||
return rxrpc_abort_conn(conn, skb, RX_PROTOCOL_ERROR, -EPROTO,
|
||||
rxrpc_eproto_rxnull_response);
|
||||
}
|
||||
|
||||
static void none_clear(struct rxrpc_connection *conn)
|
||||
|
@ -66,10 +66,32 @@ void rxrpc_error_report(struct sock *sk)
|
||||
rcu_read_unlock();
|
||||
}
|
||||
|
||||
/*
|
||||
* Directly produce an abort from a packet.
|
||||
*/
|
||||
bool rxrpc_direct_abort(struct sk_buff *skb, enum rxrpc_abort_reason why,
|
||||
s32 abort_code, int err)
|
||||
{
|
||||
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
|
||||
|
||||
trace_rxrpc_abort(0, why, sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
|
||||
abort_code, err);
|
||||
skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
|
||||
skb->priority = abort_code;
|
||||
return false;
|
||||
}
|
||||
|
||||
static bool rxrpc_bad_message(struct sk_buff *skb, enum rxrpc_abort_reason why)
|
||||
{
|
||||
return rxrpc_direct_abort(skb, why, RX_PROTOCOL_ERROR, -EBADMSG);
|
||||
}
|
||||
|
||||
#define just_discard true
|
||||
|
||||
/*
|
||||
* Process event packets targeted at a local endpoint.
|
||||
*/
|
||||
static void rxrpc_input_version(struct rxrpc_local *local, struct sk_buff *skb)
|
||||
static bool rxrpc_input_version(struct rxrpc_local *local, struct sk_buff *skb)
|
||||
{
|
||||
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
|
||||
char v;
|
||||
@ -81,22 +103,21 @@ static void rxrpc_input_version(struct rxrpc_local *local, struct sk_buff *skb)
|
||||
if (v == 0)
|
||||
rxrpc_send_version_request(local, &sp->hdr, skb);
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
* Extract the wire header from a packet and translate the byte order.
|
||||
*/
|
||||
static noinline
|
||||
int rxrpc_extract_header(struct rxrpc_skb_priv *sp, struct sk_buff *skb)
|
||||
static bool rxrpc_extract_header(struct rxrpc_skb_priv *sp,
|
||||
struct sk_buff *skb)
|
||||
{
|
||||
struct rxrpc_wire_header whdr;
|
||||
|
||||
/* dig out the RxRPC connection details */
|
||||
if (skb_copy_bits(skb, 0, &whdr, sizeof(whdr)) < 0) {
|
||||
trace_rxrpc_rx_eproto(NULL, sp->hdr.serial,
|
||||
tracepoint_string("bad_hdr"));
|
||||
return -EBADMSG;
|
||||
}
|
||||
if (skb_copy_bits(skb, 0, &whdr, sizeof(whdr)) < 0)
|
||||
return rxrpc_bad_message(skb, rxrpc_badmsg_short_hdr);
|
||||
|
||||
memset(sp, 0, sizeof(*sp));
|
||||
sp->hdr.epoch = ntohl(whdr.epoch);
|
||||
@ -110,7 +131,7 @@ int rxrpc_extract_header(struct rxrpc_skb_priv *sp, struct sk_buff *skb)
|
||||
sp->hdr.securityIndex = whdr.securityIndex;
|
||||
sp->hdr._rsvd = ntohs(whdr._rsvd);
|
||||
sp->hdr.serviceId = ntohs(whdr.serviceId);
|
||||
return 0;
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -130,28 +151,28 @@ static bool rxrpc_extract_abort(struct sk_buff *skb)
|
||||
/*
|
||||
* Process packets received on the local endpoint
|
||||
*/
|
||||
static int rxrpc_input_packet(struct rxrpc_local *local, struct sk_buff **_skb)
|
||||
static bool rxrpc_input_packet(struct rxrpc_local *local, struct sk_buff **_skb)
|
||||
{
|
||||
struct rxrpc_connection *conn;
|
||||
struct sockaddr_rxrpc peer_srx;
|
||||
struct rxrpc_skb_priv *sp;
|
||||
struct rxrpc_peer *peer = NULL;
|
||||
struct sk_buff *skb = *_skb;
|
||||
int ret = 0;
|
||||
bool ret = false;
|
||||
|
||||
skb_pull(skb, sizeof(struct udphdr));
|
||||
|
||||
sp = rxrpc_skb(skb);
|
||||
|
||||
/* dig out the RxRPC connection details */
|
||||
if (rxrpc_extract_header(sp, skb) < 0)
|
||||
goto bad_message;
|
||||
if (!rxrpc_extract_header(sp, skb))
|
||||
return just_discard;
|
||||
|
||||
if (IS_ENABLED(CONFIG_AF_RXRPC_INJECT_LOSS)) {
|
||||
static int lose;
|
||||
if ((lose++ & 7) == 7) {
|
||||
trace_rxrpc_rx_lose(sp);
|
||||
return 0;
|
||||
return just_discard;
|
||||
}
|
||||
}
|
||||
|
||||
@ -160,28 +181,28 @@ static int rxrpc_input_packet(struct rxrpc_local *local, struct sk_buff **_skb)
|
||||
switch (sp->hdr.type) {
|
||||
case RXRPC_PACKET_TYPE_VERSION:
|
||||
if (rxrpc_to_client(sp))
|
||||
return 0;
|
||||
rxrpc_input_version(local, skb);
|
||||
return 0;
|
||||
return just_discard;
|
||||
return rxrpc_input_version(local, skb);
|
||||
|
||||
case RXRPC_PACKET_TYPE_BUSY:
|
||||
if (rxrpc_to_server(sp))
|
||||
return 0;
|
||||
return just_discard;
|
||||
fallthrough;
|
||||
case RXRPC_PACKET_TYPE_ACK:
|
||||
case RXRPC_PACKET_TYPE_ACKALL:
|
||||
if (sp->hdr.callNumber == 0)
|
||||
goto bad_message;
|
||||
return rxrpc_bad_message(skb, rxrpc_badmsg_zero_call);
|
||||
break;
|
||||
case RXRPC_PACKET_TYPE_ABORT:
|
||||
if (!rxrpc_extract_abort(skb))
|
||||
return 0; /* Just discard if malformed */
|
||||
return just_discard; /* Just discard if malformed */
|
||||
break;
|
||||
|
||||
case RXRPC_PACKET_TYPE_DATA:
|
||||
if (sp->hdr.callNumber == 0 ||
|
||||
sp->hdr.seq == 0)
|
||||
goto bad_message;
|
||||
if (sp->hdr.callNumber == 0)
|
||||
return rxrpc_bad_message(skb, rxrpc_badmsg_zero_call);
|
||||
if (sp->hdr.seq == 0)
|
||||
return rxrpc_bad_message(skb, rxrpc_badmsg_zero_seq);
|
||||
|
||||
/* Unshare the packet so that it can be modified for in-place
|
||||
* decryption.
|
||||
@ -191,7 +212,7 @@ static int rxrpc_input_packet(struct rxrpc_local *local, struct sk_buff **_skb)
|
||||
if (!skb) {
|
||||
rxrpc_eaten_skb(*_skb, rxrpc_skb_eaten_by_unshare_nomem);
|
||||
*_skb = NULL;
|
||||
return 0;
|
||||
return just_discard;
|
||||
}
|
||||
|
||||
if (skb != *_skb) {
|
||||
@ -205,28 +226,28 @@ static int rxrpc_input_packet(struct rxrpc_local *local, struct sk_buff **_skb)
|
||||
|
||||
case RXRPC_PACKET_TYPE_CHALLENGE:
|
||||
if (rxrpc_to_server(sp))
|
||||
return 0;
|
||||
return just_discard;
|
||||
break;
|
||||
case RXRPC_PACKET_TYPE_RESPONSE:
|
||||
if (rxrpc_to_client(sp))
|
||||
return 0;
|
||||
return just_discard;
|
||||
break;
|
||||
|
||||
/* Packet types 9-11 should just be ignored. */
|
||||
case RXRPC_PACKET_TYPE_PARAMS:
|
||||
case RXRPC_PACKET_TYPE_10:
|
||||
case RXRPC_PACKET_TYPE_11:
|
||||
return 0;
|
||||
return just_discard;
|
||||
|
||||
default:
|
||||
goto bad_message;
|
||||
return rxrpc_bad_message(skb, rxrpc_badmsg_unsupported_packet);
|
||||
}
|
||||
|
||||
if (sp->hdr.serviceId == 0)
|
||||
goto bad_message;
|
||||
return rxrpc_bad_message(skb, rxrpc_badmsg_zero_service);
|
||||
|
||||
if (WARN_ON_ONCE(rxrpc_extract_addr_from_skb(&peer_srx, skb) < 0))
|
||||
return true; /* Unsupported address type - discard. */
|
||||
return just_discard; /* Unsupported address type. */
|
||||
|
||||
if (peer_srx.transport.family != local->srx.transport.family &&
|
||||
(peer_srx.transport.family == AF_INET &&
|
||||
@ -234,7 +255,7 @@ static int rxrpc_input_packet(struct rxrpc_local *local, struct sk_buff **_skb)
|
||||
pr_warn_ratelimited("AF_RXRPC: Protocol mismatch %u not %u\n",
|
||||
peer_srx.transport.family,
|
||||
local->srx.transport.family);
|
||||
return true; /* Wrong address type - discard. */
|
||||
return just_discard; /* Wrong address type. */
|
||||
}
|
||||
|
||||
if (rxrpc_to_client(sp)) {
|
||||
@ -242,12 +263,8 @@ static int rxrpc_input_packet(struct rxrpc_local *local, struct sk_buff **_skb)
|
||||
conn = rxrpc_find_client_connection_rcu(local, &peer_srx, skb);
|
||||
conn = rxrpc_get_connection_maybe(conn, rxrpc_conn_get_call_input);
|
||||
rcu_read_unlock();
|
||||
if (!conn) {
|
||||
trace_rxrpc_abort(0, "NCC", sp->hdr.cid,
|
||||
sp->hdr.callNumber, sp->hdr.seq,
|
||||
RXKADINCONSISTENCY, EBADMSG);
|
||||
goto protocol_error;
|
||||
}
|
||||
if (!conn)
|
||||
return rxrpc_protocol_error(skb, rxrpc_eproto_no_client_conn);
|
||||
|
||||
ret = rxrpc_input_packet_on_conn(conn, &peer_srx, skb);
|
||||
rxrpc_put_connection(conn, rxrpc_conn_put_call_input);
|
||||
@ -280,19 +297,7 @@ static int rxrpc_input_packet(struct rxrpc_local *local, struct sk_buff **_skb)
|
||||
|
||||
ret = rxrpc_new_incoming_call(local, peer, NULL, &peer_srx, skb);
|
||||
rxrpc_put_peer(peer, rxrpc_peer_put_input);
|
||||
if (ret < 0)
|
||||
goto reject_packet;
|
||||
return 0;
|
||||
|
||||
bad_message:
|
||||
trace_rxrpc_abort(0, "BAD", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
|
||||
RX_PROTOCOL_ERROR, EBADMSG);
|
||||
protocol_error:
|
||||
skb->priority = RX_PROTOCOL_ERROR;
|
||||
skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
|
||||
reject_packet:
|
||||
rxrpc_reject_packet(local, skb);
|
||||
return 0;
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -306,21 +311,23 @@ static int rxrpc_input_packet_on_conn(struct rxrpc_connection *conn,
|
||||
struct rxrpc_channel *chan;
|
||||
struct rxrpc_call *call = NULL;
|
||||
unsigned int channel;
|
||||
bool ret;
|
||||
|
||||
if (sp->hdr.securityIndex != conn->security_ix)
|
||||
goto wrong_security;
|
||||
return rxrpc_direct_abort(skb, rxrpc_eproto_wrong_security,
|
||||
RXKADINCONSISTENCY, -EBADMSG);
|
||||
|
||||
if (sp->hdr.serviceId != conn->service_id) {
|
||||
int old_id;
|
||||
|
||||
if (!test_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags))
|
||||
goto reupgrade;
|
||||
return rxrpc_protocol_error(skb, rxrpc_eproto_reupgrade);
|
||||
|
||||
old_id = cmpxchg(&conn->service_id, conn->orig_service_id,
|
||||
sp->hdr.serviceId);
|
||||
|
||||
if (old_id != conn->orig_service_id &&
|
||||
old_id != sp->hdr.serviceId)
|
||||
goto reupgrade;
|
||||
return rxrpc_protocol_error(skb, rxrpc_eproto_bad_upgrade);
|
||||
}
|
||||
|
||||
if (after(sp->hdr.serial, conn->hi_serial))
|
||||
@ -336,19 +343,19 @@ static int rxrpc_input_packet_on_conn(struct rxrpc_connection *conn,
|
||||
|
||||
/* Ignore really old calls */
|
||||
if (sp->hdr.callNumber < chan->last_call)
|
||||
return 0;
|
||||
return just_discard;
|
||||
|
||||
if (sp->hdr.callNumber == chan->last_call) {
|
||||
if (chan->call ||
|
||||
sp->hdr.type == RXRPC_PACKET_TYPE_ABORT)
|
||||
return 0;
|
||||
return just_discard;
|
||||
|
||||
/* For the previous service call, if completed successfully, we
|
||||
* discard all further packets.
|
||||
*/
|
||||
if (rxrpc_conn_is_service(conn) &&
|
||||
chan->last_type == RXRPC_PACKET_TYPE_ACK)
|
||||
return 0;
|
||||
return just_discard;
|
||||
|
||||
/* But otherwise we need to retransmit the final packet from
|
||||
* data cached in the connection record.
|
||||
@ -358,19 +365,17 @@ static int rxrpc_input_packet_on_conn(struct rxrpc_connection *conn,
|
||||
sp->hdr.seq,
|
||||
sp->hdr.serial,
|
||||
sp->hdr.flags);
|
||||
rxrpc_input_conn_packet(conn, skb);
|
||||
return 0;
|
||||
rxrpc_conn_retransmit_call(conn, skb, channel);
|
||||
return just_discard;
|
||||
}
|
||||
|
||||
rcu_read_lock();
|
||||
call = rxrpc_try_get_call(rcu_dereference(chan->call),
|
||||
rxrpc_call_get_input);
|
||||
rcu_read_unlock();
|
||||
call = rxrpc_try_get_call(chan->call, rxrpc_call_get_input);
|
||||
|
||||
if (sp->hdr.callNumber > chan->call_id) {
|
||||
if (rxrpc_to_client(sp)) {
|
||||
rxrpc_put_call(call, rxrpc_call_put_input);
|
||||
goto reject_packet;
|
||||
return rxrpc_protocol_error(skb,
|
||||
rxrpc_eproto_unexpected_implicit_end);
|
||||
}
|
||||
|
||||
if (call) {
|
||||
@ -382,38 +387,14 @@ static int rxrpc_input_packet_on_conn(struct rxrpc_connection *conn,
|
||||
|
||||
if (!call) {
|
||||
if (rxrpc_to_client(sp))
|
||||
goto bad_message;
|
||||
if (rxrpc_new_incoming_call(conn->local, conn->peer, conn,
|
||||
peer_srx, skb) == 0)
|
||||
return 0;
|
||||
goto reject_packet;
|
||||
return rxrpc_protocol_error(skb, rxrpc_eproto_no_client_call);
|
||||
return rxrpc_new_incoming_call(conn->local, conn->peer, conn,
|
||||
peer_srx, skb);
|
||||
}
|
||||
|
||||
rxrpc_input_call_event(call, skb);
|
||||
ret = rxrpc_input_call_event(call, skb);
|
||||
rxrpc_put_call(call, rxrpc_call_put_input);
|
||||
return 0;
|
||||
|
||||
wrong_security:
|
||||
trace_rxrpc_abort(0, "SEC", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
|
||||
RXKADINCONSISTENCY, EBADMSG);
|
||||
skb->priority = RXKADINCONSISTENCY;
|
||||
goto post_abort;
|
||||
|
||||
reupgrade:
|
||||
trace_rxrpc_abort(0, "UPG", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
|
||||
RX_PROTOCOL_ERROR, EBADMSG);
|
||||
goto protocol_error;
|
||||
|
||||
bad_message:
|
||||
trace_rxrpc_abort(0, "BAD", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
|
||||
RX_PROTOCOL_ERROR, EBADMSG);
|
||||
protocol_error:
|
||||
skb->priority = RX_PROTOCOL_ERROR;
|
||||
post_abort:
|
||||
skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
|
||||
reject_packet:
|
||||
rxrpc_reject_packet(conn->local, skb);
|
||||
return 0;
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -421,6 +402,7 @@ reject_packet:
|
||||
*/
|
||||
int rxrpc_io_thread(void *data)
|
||||
{
|
||||
struct rxrpc_connection *conn;
|
||||
struct sk_buff_head rx_queue;
|
||||
struct rxrpc_local *local = data;
|
||||
struct rxrpc_call *call;
|
||||
@ -436,6 +418,24 @@ int rxrpc_io_thread(void *data)
|
||||
for (;;) {
|
||||
rxrpc_inc_stat(local->rxnet, stat_io_loop);
|
||||
|
||||
/* Deal with connections that want immediate attention. */
|
||||
conn = list_first_entry_or_null(&local->conn_attend_q,
|
||||
struct rxrpc_connection,
|
||||
attend_link);
|
||||
if (conn) {
|
||||
spin_lock_bh(&local->lock);
|
||||
list_del_init(&conn->attend_link);
|
||||
spin_unlock_bh(&local->lock);
|
||||
|
||||
rxrpc_input_conn_event(conn, NULL);
|
||||
rxrpc_put_connection(conn, rxrpc_conn_put_poke);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (test_and_clear_bit(RXRPC_CLIENT_CONN_REAP_TIMER,
|
||||
&local->client_conn_flags))
|
||||
rxrpc_discard_expired_client_conns(local);
|
||||
|
||||
/* Deal with calls that want immediate attention. */
|
||||
if ((call = list_first_entry_or_null(&local->call_attend_q,
|
||||
struct rxrpc_call,
|
||||
@ -450,12 +450,17 @@ int rxrpc_io_thread(void *data)
|
||||
continue;
|
||||
}
|
||||
|
||||
if (!list_empty(&local->new_client_calls))
|
||||
rxrpc_connect_client_calls(local);
|
||||
|
||||
/* Process received packets and errors. */
|
||||
if ((skb = __skb_dequeue(&rx_queue))) {
|
||||
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
|
||||
switch (skb->mark) {
|
||||
case RXRPC_SKB_MARK_PACKET:
|
||||
skb->priority = 0;
|
||||
rxrpc_input_packet(local, &skb);
|
||||
if (!rxrpc_input_packet(local, &skb))
|
||||
rxrpc_reject_packet(local, skb);
|
||||
trace_rxrpc_rx_done(skb->mark, skb->priority);
|
||||
rxrpc_free_skb(skb, rxrpc_skb_put_input);
|
||||
break;
|
||||
@ -463,6 +468,11 @@ int rxrpc_io_thread(void *data)
|
||||
rxrpc_input_error(local, skb);
|
||||
rxrpc_free_skb(skb, rxrpc_skb_put_error_report);
|
||||
break;
|
||||
case RXRPC_SKB_MARK_SERVICE_CONN_SECURED:
|
||||
rxrpc_input_conn_event(sp->conn, skb);
|
||||
rxrpc_put_connection(sp->conn, rxrpc_conn_put_poke);
|
||||
rxrpc_free_skb(skb, rxrpc_skb_put_conn_secured);
|
||||
break;
|
||||
default:
|
||||
WARN_ON_ONCE(1);
|
||||
rxrpc_free_skb(skb, rxrpc_skb_put_unknown);
|
||||
@ -481,7 +491,11 @@ int rxrpc_io_thread(void *data)
|
||||
set_current_state(TASK_INTERRUPTIBLE);
|
||||
should_stop = kthread_should_stop();
|
||||
if (!skb_queue_empty(&local->rx_queue) ||
|
||||
!list_empty(&local->call_attend_q)) {
|
||||
!list_empty(&local->call_attend_q) ||
|
||||
!list_empty(&local->conn_attend_q) ||
|
||||
!list_empty(&local->new_client_calls) ||
|
||||
test_bit(RXRPC_CLIENT_CONN_REAP_TIMER,
|
||||
&local->client_conn_flags)) {
|
||||
__set_current_state(TASK_RUNNING);
|
||||
continue;
|
||||
}
|
||||
|
@ -82,31 +82,59 @@ static long rxrpc_local_cmp_key(const struct rxrpc_local *local,
|
||||
}
|
||||
}
|
||||
|
||||
static void rxrpc_client_conn_reap_timeout(struct timer_list *timer)
|
||||
{
|
||||
struct rxrpc_local *local =
|
||||
container_of(timer, struct rxrpc_local, client_conn_reap_timer);
|
||||
|
||||
if (local->kill_all_client_conns &&
|
||||
test_and_set_bit(RXRPC_CLIENT_CONN_REAP_TIMER, &local->client_conn_flags))
|
||||
rxrpc_wake_up_io_thread(local);
|
||||
}
|
||||
|
||||
/*
|
||||
* Allocate a new local endpoint.
|
||||
*/
|
||||
static struct rxrpc_local *rxrpc_alloc_local(struct rxrpc_net *rxnet,
|
||||
static struct rxrpc_local *rxrpc_alloc_local(struct net *net,
|
||||
const struct sockaddr_rxrpc *srx)
|
||||
{
|
||||
struct rxrpc_local *local;
|
||||
u32 tmp;
|
||||
|
||||
local = kzalloc(sizeof(struct rxrpc_local), GFP_KERNEL);
|
||||
if (local) {
|
||||
refcount_set(&local->ref, 1);
|
||||
atomic_set(&local->active_users, 1);
|
||||
local->rxnet = rxnet;
|
||||
local->net = net;
|
||||
local->rxnet = rxrpc_net(net);
|
||||
INIT_HLIST_NODE(&local->link);
|
||||
init_rwsem(&local->defrag_sem);
|
||||
init_completion(&local->io_thread_ready);
|
||||
skb_queue_head_init(&local->rx_queue);
|
||||
INIT_LIST_HEAD(&local->conn_attend_q);
|
||||
INIT_LIST_HEAD(&local->call_attend_q);
|
||||
|
||||
local->client_bundles = RB_ROOT;
|
||||
spin_lock_init(&local->client_bundles_lock);
|
||||
local->kill_all_client_conns = false;
|
||||
INIT_LIST_HEAD(&local->idle_client_conns);
|
||||
timer_setup(&local->client_conn_reap_timer,
|
||||
rxrpc_client_conn_reap_timeout, 0);
|
||||
|
||||
spin_lock_init(&local->lock);
|
||||
rwlock_init(&local->services_lock);
|
||||
local->debug_id = atomic_inc_return(&rxrpc_debug_id);
|
||||
memcpy(&local->srx, srx, sizeof(*srx));
|
||||
local->srx.srx_service = 0;
|
||||
idr_init(&local->conn_ids);
|
||||
get_random_bytes(&tmp, sizeof(tmp));
|
||||
tmp &= 0x3fffffff;
|
||||
if (tmp == 0)
|
||||
tmp = 1;
|
||||
idr_set_cursor(&local->conn_ids, tmp);
|
||||
INIT_LIST_HEAD(&local->new_client_calls);
|
||||
spin_lock_init(&local->client_call_lock);
|
||||
|
||||
trace_rxrpc_local(local->debug_id, rxrpc_local_new, 1, 1);
|
||||
}
|
||||
|
||||
@ -248,7 +276,7 @@ struct rxrpc_local *rxrpc_lookup_local(struct net *net,
|
||||
goto found;
|
||||
}
|
||||
|
||||
local = rxrpc_alloc_local(rxnet, srx);
|
||||
local = rxrpc_alloc_local(net, srx);
|
||||
if (!local)
|
||||
goto nomem;
|
||||
|
||||
@ -407,6 +435,7 @@ void rxrpc_destroy_local(struct rxrpc_local *local)
|
||||
* local endpoint.
|
||||
*/
|
||||
rxrpc_purge_queue(&local->rx_queue);
|
||||
rxrpc_purge_client_connections(local);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -10,15 +10,6 @@
|
||||
|
||||
unsigned int rxrpc_net_id;
|
||||
|
||||
static void rxrpc_client_conn_reap_timeout(struct timer_list *timer)
|
||||
{
|
||||
struct rxrpc_net *rxnet =
|
||||
container_of(timer, struct rxrpc_net, client_conn_reap_timer);
|
||||
|
||||
if (rxnet->live)
|
||||
rxrpc_queue_work(&rxnet->client_conn_reaper);
|
||||
}
|
||||
|
||||
static void rxrpc_service_conn_reap_timeout(struct timer_list *timer)
|
||||
{
|
||||
struct rxrpc_net *rxnet =
|
||||
@ -63,14 +54,6 @@ static __net_init int rxrpc_init_net(struct net *net)
|
||||
rxrpc_service_conn_reap_timeout, 0);
|
||||
|
||||
atomic_set(&rxnet->nr_client_conns, 0);
|
||||
rxnet->kill_all_client_conns = false;
|
||||
spin_lock_init(&rxnet->client_conn_cache_lock);
|
||||
mutex_init(&rxnet->client_conn_discard_lock);
|
||||
INIT_LIST_HEAD(&rxnet->idle_client_conns);
|
||||
INIT_WORK(&rxnet->client_conn_reaper,
|
||||
rxrpc_discard_expired_client_conns);
|
||||
timer_setup(&rxnet->client_conn_reap_timer,
|
||||
rxrpc_client_conn_reap_timeout, 0);
|
||||
|
||||
INIT_HLIST_HEAD(&rxnet->local_endpoints);
|
||||
mutex_init(&rxnet->local_mutex);
|
||||
|
@ -261,7 +261,7 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
|
||||
rxrpc_tx_point_call_ack);
|
||||
rxrpc_tx_backoff(call, ret);
|
||||
|
||||
if (call->state < RXRPC_CALL_COMPLETE) {
|
||||
if (!__rxrpc_call_is_complete(call)) {
|
||||
if (ret < 0)
|
||||
rxrpc_cancel_rtt_probe(call, serial, rtt_slot);
|
||||
rxrpc_set_keepalive(call);
|
||||
@ -544,6 +544,62 @@ send_fragmentable:
|
||||
goto done;
|
||||
}
|
||||
|
||||
/*
|
||||
* Transmit a connection-level abort.
|
||||
*/
|
||||
void rxrpc_send_conn_abort(struct rxrpc_connection *conn)
|
||||
{
|
||||
struct rxrpc_wire_header whdr;
|
||||
struct msghdr msg;
|
||||
struct kvec iov[2];
|
||||
__be32 word;
|
||||
size_t len;
|
||||
u32 serial;
|
||||
int ret;
|
||||
|
||||
msg.msg_name = &conn->peer->srx.transport;
|
||||
msg.msg_namelen = conn->peer->srx.transport_len;
|
||||
msg.msg_control = NULL;
|
||||
msg.msg_controllen = 0;
|
||||
msg.msg_flags = 0;
|
||||
|
||||
whdr.epoch = htonl(conn->proto.epoch);
|
||||
whdr.cid = htonl(conn->proto.cid);
|
||||
whdr.callNumber = 0;
|
||||
whdr.seq = 0;
|
||||
whdr.type = RXRPC_PACKET_TYPE_ABORT;
|
||||
whdr.flags = conn->out_clientflag;
|
||||
whdr.userStatus = 0;
|
||||
whdr.securityIndex = conn->security_ix;
|
||||
whdr._rsvd = 0;
|
||||
whdr.serviceId = htons(conn->service_id);
|
||||
|
||||
word = htonl(conn->abort_code);
|
||||
|
||||
iov[0].iov_base = &whdr;
|
||||
iov[0].iov_len = sizeof(whdr);
|
||||
iov[1].iov_base = &word;
|
||||
iov[1].iov_len = sizeof(word);
|
||||
|
||||
len = iov[0].iov_len + iov[1].iov_len;
|
||||
|
||||
serial = atomic_inc_return(&conn->serial);
|
||||
whdr.serial = htonl(serial);
|
||||
|
||||
iov_iter_kvec(&msg.msg_iter, WRITE, iov, 2, len);
|
||||
ret = do_udp_sendmsg(conn->local->socket, &msg, len);
|
||||
if (ret < 0) {
|
||||
trace_rxrpc_tx_fail(conn->debug_id, serial, ret,
|
||||
rxrpc_tx_point_conn_abort);
|
||||
_debug("sendmsg failed: %d", ret);
|
||||
return;
|
||||
}
|
||||
|
||||
trace_rxrpc_tx_packet(conn->debug_id, &whdr, rxrpc_tx_point_conn_abort);
|
||||
|
||||
conn->peer->last_tx_at = ktime_get_seconds();
|
||||
}
|
||||
|
||||
/*
|
||||
* Reject a packet through the local endpoint.
|
||||
*/
|
||||
@ -667,7 +723,7 @@ void rxrpc_send_keepalive(struct rxrpc_peer *peer)
|
||||
static inline void rxrpc_instant_resend(struct rxrpc_call *call,
|
||||
struct rxrpc_txbuf *txb)
|
||||
{
|
||||
if (call->state < RXRPC_CALL_COMPLETE)
|
||||
if (!__rxrpc_call_is_complete(call))
|
||||
kdebug("resend");
|
||||
}
|
||||
|
||||
|
@@ -147,10 +147,10 @@ struct rxrpc_peer *rxrpc_lookup_peer_rcu(struct rxrpc_local *local,
* assess the MTU size for the network interface through which this peer is
* reached
*/
static void rxrpc_assess_MTU_size(struct rxrpc_sock *rx,
static void rxrpc_assess_MTU_size(struct rxrpc_local *local,
struct rxrpc_peer *peer)
{
struct net *net = sock_net(&rx->sk);
struct net *net = local->net;
struct dst_entry *dst;
struct rtable *rt;
struct flowi fl;
@@ -236,11 +236,11 @@ struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *local, gfp_t gfp,
/*
* Initialise peer record.
*/
static void rxrpc_init_peer(struct rxrpc_sock *rx, struct rxrpc_peer *peer,
static void rxrpc_init_peer(struct rxrpc_local *local, struct rxrpc_peer *peer,
unsigned long hash_key)
{
peer->hash_key = hash_key;
rxrpc_assess_MTU_size(rx, peer);
rxrpc_assess_MTU_size(local, peer);
peer->mtu = peer->if_mtu;
peer->rtt_last_req = ktime_get_real();

@@ -272,8 +272,7 @@ static void rxrpc_init_peer(struct rxrpc_sock *rx, struct rxrpc_peer *peer,
/*
* Set up a new peer.
*/
static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_sock *rx,
struct rxrpc_local *local,
static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_local *local,
struct sockaddr_rxrpc *srx,
unsigned long hash_key,
gfp_t gfp)
@@ -285,7 +284,7 @@ static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_sock *rx,
peer = rxrpc_alloc_peer(local, gfp, rxrpc_peer_new_client);
if (peer) {
memcpy(&peer->srx, srx, sizeof(*srx));
rxrpc_init_peer(rx, peer, hash_key);
rxrpc_init_peer(local, peer, hash_key);
}

_leave(" = %p", peer);
@@ -304,14 +303,13 @@ static void rxrpc_free_peer(struct rxrpc_peer *peer)
* since we've already done a search in the list from the non-reentrant context
* (the data_ready handler) that is the only place we can add new peers.
*/
void rxrpc_new_incoming_peer(struct rxrpc_sock *rx, struct rxrpc_local *local,
struct rxrpc_peer *peer)
void rxrpc_new_incoming_peer(struct rxrpc_local *local, struct rxrpc_peer *peer)
{
struct rxrpc_net *rxnet = local->rxnet;
unsigned long hash_key;

hash_key = rxrpc_peer_hash_key(local, &peer->srx);
rxrpc_init_peer(rx, peer, hash_key);
rxrpc_init_peer(local, peer, hash_key);

spin_lock(&rxnet->peer_hash_lock);
hash_add_rcu(rxnet->peer_hash, &peer->hash_link, hash_key);
@@ -322,8 +320,7 @@ void rxrpc_new_incoming_peer(struct rxrpc_sock *rx, struct rxrpc_local *local,
/*
* obtain a remote transport endpoint for the specified address
*/
struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_sock *rx,
struct rxrpc_local *local,
struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *local,
struct sockaddr_rxrpc *srx, gfp_t gfp)
{
struct rxrpc_peer *peer, *candidate;
@@ -343,7 +340,7 @@ struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_sock *rx,
/* The peer is not yet present in hash - create a candidate
* for a new record and then redo the search.
*/
candidate = rxrpc_create_peer(rx, local, srx, hash_key, gfp);
candidate = rxrpc_create_peer(local, srx, hash_key, gfp);
if (!candidate) {
_leave(" = NULL [nomem]");
return NULL;

@@ -12,13 +12,13 @@

static const char *const rxrpc_conn_states[RXRPC_CONN__NR_STATES] = {
[RXRPC_CONN_UNUSED] = "Unused ",
[RXRPC_CONN_CLIENT_UNSECURED] = "ClUnsec ",
[RXRPC_CONN_CLIENT] = "Client ",
[RXRPC_CONN_SERVICE_PREALLOC] = "SvPrealc",
[RXRPC_CONN_SERVICE_UNSECURED] = "SvUnsec ",
[RXRPC_CONN_SERVICE_CHALLENGING] = "SvChall ",
[RXRPC_CONN_SERVICE] = "SvSecure",
[RXRPC_CONN_REMOTELY_ABORTED] = "RmtAbort",
[RXRPC_CONN_LOCALLY_ABORTED] = "LocAbort",
[RXRPC_CONN_ABORTED] = "Aborted ",
};

/*
@@ -51,6 +51,7 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v)
struct rxrpc_local *local;
struct rxrpc_call *call;
struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
enum rxrpc_call_state state;
unsigned long timeout = 0;
rxrpc_seq_t acks_hard_ack;
char lbuff[50], rbuff[50];
@@ -75,7 +76,8 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v)

sprintf(rbuff, "%pISpc", &call->dest_srx.transport);

if (call->state != RXRPC_CALL_SERVER_PREALLOC) {
state = rxrpc_call_state(call);
if (state != RXRPC_CALL_SERVER_PREALLOC) {
timeout = READ_ONCE(call->expect_rx_by);
timeout -= jiffies;
}
@@ -92,7 +94,7 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v)
call->call_id,
rxrpc_is_service_call(call) ? "Svc" : "Clt",
refcount_read(&call->ref),
rxrpc_call_states[call->state],
rxrpc_call_states[state],
call->abort_code,
call->debug_id,
acks_hard_ack, READ_ONCE(call->tx_top) - acks_hard_ack,
@@ -143,6 +145,7 @@ static int rxrpc_connection_seq_show(struct seq_file *seq, void *v)
{
struct rxrpc_connection *conn;
struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
const char *state;
char lbuff[50], rbuff[50];

if (v == &rxnet->conn_proc_list) {
@@ -163,9 +166,11 @@ static int rxrpc_connection_seq_show(struct seq_file *seq, void *v)
}

sprintf(lbuff, "%pISpc", &conn->local->srx.transport);

sprintf(rbuff, "%pISpc", &conn->peer->srx.transport);
print:
state = rxrpc_is_conn_aborted(conn) ?
rxrpc_call_completions[conn->completion] :
rxrpc_conn_states[conn->state];
seq_printf(seq,
"UDP %-47.47s %-47.47s %4x %08x %s %3u %3d"
" %s %08x %08x %08x %08x %08x %08x %08x\n",
@@ -176,7 +181,7 @@ print:
rxrpc_conn_is_service(conn) ? "Svc" : "Clt",
refcount_read(&conn->ref),
atomic_read(&conn->active),
rxrpc_conn_states[conn->state],
state,
key_serial(conn->key),
atomic_read(&conn->serial),
conn->hi_serial,

@@ -58,85 +58,6 @@ void rxrpc_notify_socket(struct rxrpc_call *call)
_leave("");
}

/*
* Transition a call to the complete state.
*/
bool __rxrpc_set_call_completion(struct rxrpc_call *call,
enum rxrpc_call_completion compl,
u32 abort_code,
int error)
{
if (call->state < RXRPC_CALL_COMPLETE) {
call->abort_code = abort_code;
call->error = error;
call->completion = compl;
call->state = RXRPC_CALL_COMPLETE;
trace_rxrpc_call_complete(call);
wake_up(&call->waitq);
rxrpc_notify_socket(call);
return true;
}
return false;
}

bool rxrpc_set_call_completion(struct rxrpc_call *call,
enum rxrpc_call_completion compl,
u32 abort_code,
int error)
{
bool ret = false;

if (call->state < RXRPC_CALL_COMPLETE) {
write_lock(&call->state_lock);
ret = __rxrpc_set_call_completion(call, compl, abort_code, error);
write_unlock(&call->state_lock);
}
return ret;
}

/*
* Record that a call successfully completed.
*/
bool __rxrpc_call_completed(struct rxrpc_call *call)
{
return __rxrpc_set_call_completion(call, RXRPC_CALL_SUCCEEDED, 0, 0);
}

bool rxrpc_call_completed(struct rxrpc_call *call)
{
bool ret = false;

if (call->state < RXRPC_CALL_COMPLETE) {
write_lock(&call->state_lock);
ret = __rxrpc_call_completed(call);
write_unlock(&call->state_lock);
}
return ret;
}

/*
* Record that a call is locally aborted.
*/
bool __rxrpc_abort_call(const char *why, struct rxrpc_call *call,
rxrpc_seq_t seq, u32 abort_code, int error)
{
trace_rxrpc_abort(call->debug_id, why, call->cid, call->call_id, seq,
abort_code, error);
return __rxrpc_set_call_completion(call, RXRPC_CALL_LOCALLY_ABORTED,
abort_code, error);
}

bool rxrpc_abort_call(const char *why, struct rxrpc_call *call,
rxrpc_seq_t seq, u32 abort_code, int error)
{
bool ret;

write_lock(&call->state_lock);
ret = __rxrpc_abort_call(why, call, seq, abort_code, error);
write_unlock(&call->state_lock);
return ret;
}

/*
* Pass a call terminating message to userspace.
*/
@@ -168,7 +89,7 @@ static int rxrpc_recvmsg_term(struct rxrpc_call *call, struct msghdr *msg)
ret = put_cmsg(msg, SOL_RXRPC, RXRPC_LOCAL_ERROR, 4, &tmp);
break;
default:
pr_err("Invalid terminal call state %u\n", call->state);
pr_err("Invalid terminal call state %u\n", call->completion);
BUG();
break;
}
@@ -179,41 +100,6 @@ static int rxrpc_recvmsg_term(struct rxrpc_call *call, struct msghdr *msg)
return ret;
}

/*
* End the packet reception phase.
*/
static void rxrpc_end_rx_phase(struct rxrpc_call *call, rxrpc_serial_t serial)
{
rxrpc_seq_t whigh = READ_ONCE(call->rx_highest_seq);

_enter("%d,%s", call->debug_id, rxrpc_call_states[call->state]);

trace_rxrpc_receive(call, rxrpc_receive_end, 0, whigh);

if (call->state == RXRPC_CALL_CLIENT_RECV_REPLY)
rxrpc_propose_delay_ACK(call, serial, rxrpc_propose_ack_terminal_ack);

write_lock(&call->state_lock);

switch (call->state) {
case RXRPC_CALL_CLIENT_RECV_REPLY:
__rxrpc_call_completed(call);
write_unlock(&call->state_lock);
break;

case RXRPC_CALL_SERVER_RECV_REQUEST:
call->state = RXRPC_CALL_SERVER_ACK_REQUEST;
call->expect_req_by = jiffies + MAX_JIFFY_OFFSET;
write_unlock(&call->state_lock);
rxrpc_propose_delay_ACK(call, serial,
rxrpc_propose_ack_processing_op);
break;
default:
write_unlock(&call->state_lock);
break;
}
}

/*
* Discard a packet we've used up and advance the Rx window by one.
*/
@@ -244,10 +130,9 @@ static void rxrpc_rotate_rx_window(struct rxrpc_call *call)

trace_rxrpc_receive(call, last ? rxrpc_receive_rotate_last : rxrpc_receive_rotate,
serial, call->rx_consumed);
if (last) {
rxrpc_end_rx_phase(call, serial);
return;
}

if (last)
set_bit(RXRPC_CALL_RECVMSG_READ_ALL, &call->flags);

/* Check to see if there's an ACK that needs sending. */
acked = atomic_add_return(call->rx_consumed - old_consumed,
@@ -272,7 +157,8 @@ static int rxrpc_verify_data(struct rxrpc_call *call, struct sk_buff *skb)
/*
* Deliver messages to a call. This keeps processing packets until the buffer
* is filled and we find either more DATA (returns 0) or the end of the DATA
* (returns 1). If more packets are required, it returns -EAGAIN.
* (returns 1). If more packets are required, it returns -EAGAIN and if the
* call has failed it returns -EIO.
*/
static int rxrpc_recvmsg_data(struct socket *sock, struct rxrpc_call *call,
struct msghdr *msg, struct iov_iter *iter,
@@ -288,7 +174,13 @@ static int rxrpc_recvmsg_data(struct socket *sock, struct rxrpc_call *call,
rx_pkt_offset = call->rx_pkt_offset;
rx_pkt_len = call->rx_pkt_len;

if (call->state >= RXRPC_CALL_SERVER_ACK_REQUEST) {
if (rxrpc_call_has_failed(call)) {
seq = lower_32_bits(atomic64_read(&call->ackr_window)) - 1;
ret = -EIO;
goto done;
}

if (test_bit(RXRPC_CALL_RECVMSG_READ_ALL, &call->flags)) {
seq = lower_32_bits(atomic64_read(&call->ackr_window)) - 1;
ret = 1;
goto done;
@@ -312,14 +204,15 @@ static int rxrpc_recvmsg_data(struct socket *sock, struct rxrpc_call *call,

if (rx_pkt_offset == 0) {
ret2 = rxrpc_verify_data(call, skb);
rx_pkt_offset = sp->offset;
rx_pkt_len = sp->len;
trace_rxrpc_recvdata(call, rxrpc_recvmsg_next, seq,
rx_pkt_offset, rx_pkt_len, ret2);
sp->offset, sp->len, ret2);
if (ret2 < 0) {
kdebug("verify = %d", ret2);
ret = ret2;
goto out;
}
rx_pkt_offset = sp->offset;
rx_pkt_len = sp->len;
} else {
trace_rxrpc_recvdata(call, rxrpc_recvmsg_cont, seq,
rx_pkt_offset, rx_pkt_len, 0);
@@ -494,36 +387,36 @@ try_again:
msg->msg_namelen = len;
}

switch (READ_ONCE(call->state)) {
case RXRPC_CALL_CLIENT_RECV_REPLY:
case RXRPC_CALL_SERVER_RECV_REQUEST:
case RXRPC_CALL_SERVER_ACK_REQUEST:
ret = rxrpc_recvmsg_data(sock, call, msg, &msg->msg_iter, len,
flags, &copied);
if (ret == -EAGAIN)
ret = 0;

if (!skb_queue_empty(&call->recvmsg_queue))
rxrpc_notify_socket(call);
break;
default:
ret = rxrpc_recvmsg_data(sock, call, msg, &msg->msg_iter, len,
flags, &copied);
if (ret == -EAGAIN)
ret = 0;
break;
}

if (ret == -EIO)
goto call_failed;
if (ret < 0)
goto error_unlock_call;

if (call->state == RXRPC_CALL_COMPLETE) {
ret = rxrpc_recvmsg_term(call, msg);
if (ret < 0)
goto error_unlock_call;
if (!(flags & MSG_PEEK))
rxrpc_release_call(rx, call);
msg->msg_flags |= MSG_EOR;
ret = 1;
}
if (rxrpc_call_is_complete(call) &&
skb_queue_empty(&call->recvmsg_queue))
goto call_complete;
if (rxrpc_call_has_failed(call))
goto call_failed;

rxrpc_notify_socket(call);
goto not_yet_complete;

call_failed:
rxrpc_purge_queue(&call->recvmsg_queue);
call_complete:
ret = rxrpc_recvmsg_term(call, msg);
if (ret < 0)
goto error_unlock_call;
if (!(flags & MSG_PEEK))
rxrpc_release_call(rx, call);
msg->msg_flags |= MSG_EOR;
ret = 1;

not_yet_complete:
if (ret == 0)
msg->msg_flags |= MSG_MORE;
else
@@ -586,49 +479,34 @@ int rxrpc_kernel_recv_data(struct socket *sock, struct rxrpc_call *call,
size_t offset = 0;
int ret;

_enter("{%d,%s},%zu,%d",
call->debug_id, rxrpc_call_states[call->state],
*_len, want_more);

ASSERTCMP(call->state, !=, RXRPC_CALL_SERVER_SECURING);
_enter("{%d},%zu,%d", call->debug_id, *_len, want_more);

mutex_lock(&call->user_mutex);

switch (READ_ONCE(call->state)) {
case RXRPC_CALL_CLIENT_RECV_REPLY:
case RXRPC_CALL_SERVER_RECV_REQUEST:
case RXRPC_CALL_SERVER_ACK_REQUEST:
ret = rxrpc_recvmsg_data(sock, call, NULL, iter,
*_len, 0, &offset);
*_len -= offset;
if (ret < 0)
goto out;

/* We can only reach here with a partially full buffer if we
* have reached the end of the data. We must otherwise have a
* full buffer or have been given -EAGAIN.
*/
if (ret == 1) {
if (iov_iter_count(iter) > 0)
goto short_data;
if (!want_more)
goto read_phase_complete;
ret = 0;
goto out;
}

if (!want_more)
goto excess_data;
ret = rxrpc_recvmsg_data(sock, call, NULL, iter, *_len, 0, &offset);
*_len -= offset;
if (ret == -EIO)
goto call_failed;
if (ret < 0)
goto out;

case RXRPC_CALL_COMPLETE:
goto call_complete;

default:
ret = -EINPROGRESS;
/* We can only reach here with a partially full buffer if we have
* reached the end of the data. We must otherwise have a full buffer
* or have been given -EAGAIN.
*/
if (ret == 1) {
if (iov_iter_count(iter) > 0)
goto short_data;
if (!want_more)
goto read_phase_complete;
ret = 0;
goto out;
}

if (!want_more)
goto excess_data;
goto out;

read_phase_complete:
ret = 1;
out:
@@ -639,14 +517,18 @@ out:
return ret;

short_data:
trace_rxrpc_rx_eproto(call, 0, tracepoint_string("short_data"));
trace_rxrpc_abort(call->debug_id, rxrpc_recvmsg_short_data,
call->cid, call->call_id, call->rx_consumed,
0, -EBADMSG);
ret = -EBADMSG;
goto out;
excess_data:
trace_rxrpc_rx_eproto(call, 0, tracepoint_string("excess_data"));
trace_rxrpc_abort(call->debug_id, rxrpc_recvmsg_excess_data,
call->cid, call->call_id, call->rx_consumed,
0, -EMSGSIZE);
ret = -EMSGSIZE;
goto out;
call_complete:
call_failed:
*_abort = call->abort_code;
ret = call->error;
if (call->completion == RXRPC_CALL_SUCCEEDED) {

@ -411,18 +411,15 @@ static int rxkad_verify_packet_1(struct rxrpc_call *call, struct sk_buff *skb,
|
||||
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
|
||||
struct rxrpc_crypt iv;
|
||||
struct scatterlist sg[16];
|
||||
bool aborted;
|
||||
u32 data_size, buf;
|
||||
u16 check;
|
||||
int ret;
|
||||
|
||||
_enter("");
|
||||
|
||||
if (sp->len < 8) {
|
||||
aborted = rxrpc_abort_eproto(call, skb, "rxkad_1_hdr", "V1H",
|
||||
RXKADSEALEDINCON);
|
||||
goto protocol_error;
|
||||
}
|
||||
if (sp->len < 8)
|
||||
return rxrpc_abort_eproto(call, skb, RXKADSEALEDINCON,
|
||||
rxkad_abort_1_short_header);
|
||||
|
||||
/* Decrypt the skbuff in-place. TODO: We really want to decrypt
|
||||
* directly into the target buffer.
|
||||
@ -442,11 +439,9 @@ static int rxkad_verify_packet_1(struct rxrpc_call *call, struct sk_buff *skb,
|
||||
skcipher_request_zero(req);
|
||||
|
||||
/* Extract the decrypted packet length */
|
||||
if (skb_copy_bits(skb, sp->offset, &sechdr, sizeof(sechdr)) < 0) {
|
||||
aborted = rxrpc_abort_eproto(call, skb, "rxkad_1_len", "XV1",
|
||||
RXKADDATALEN);
|
||||
goto protocol_error;
|
||||
}
|
||||
if (skb_copy_bits(skb, sp->offset, &sechdr, sizeof(sechdr)) < 0)
|
||||
return rxrpc_abort_eproto(call, skb, RXKADDATALEN,
|
||||
rxkad_abort_1_short_encdata);
|
||||
sp->offset += sizeof(sechdr);
|
||||
sp->len -= sizeof(sechdr);
|
||||
|
||||
@ -456,26 +451,16 @@ static int rxkad_verify_packet_1(struct rxrpc_call *call, struct sk_buff *skb,
|
||||
check = buf >> 16;
|
||||
check ^= seq ^ call->call_id;
|
||||
check &= 0xffff;
|
||||
if (check != 0) {
|
||||
aborted = rxrpc_abort_eproto(call, skb, "rxkad_1_check", "V1C",
|
||||
RXKADSEALEDINCON);
|
||||
goto protocol_error;
|
||||
}
|
||||
|
||||
if (data_size > sp->len) {
|
||||
aborted = rxrpc_abort_eproto(call, skb, "rxkad_1_datalen", "V1L",
|
||||
RXKADDATALEN);
|
||||
goto protocol_error;
|
||||
}
|
||||
if (check != 0)
|
||||
return rxrpc_abort_eproto(call, skb, RXKADSEALEDINCON,
|
||||
rxkad_abort_1_short_check);
|
||||
if (data_size > sp->len)
|
||||
return rxrpc_abort_eproto(call, skb, RXKADDATALEN,
|
||||
rxkad_abort_1_short_data);
|
||||
sp->len = data_size;
|
||||
|
||||
_leave(" = 0 [dlen=%x]", data_size);
|
||||
return 0;
|
||||
|
||||
protocol_error:
|
||||
if (aborted)
|
||||
rxrpc_send_abort_packet(call);
|
||||
return -EPROTO;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -490,18 +475,15 @@ static int rxkad_verify_packet_2(struct rxrpc_call *call, struct sk_buff *skb,
|
||||
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
|
||||
struct rxrpc_crypt iv;
|
||||
struct scatterlist _sg[4], *sg;
|
||||
bool aborted;
|
||||
u32 data_size, buf;
|
||||
u16 check;
|
||||
int nsg, ret;
|
||||
|
||||
_enter(",{%d}", sp->len);
|
||||
|
||||
if (sp->len < 8) {
|
||||
aborted = rxrpc_abort_eproto(call, skb, "rxkad_2_hdr", "V2H",
|
||||
RXKADSEALEDINCON);
|
||||
goto protocol_error;
|
||||
}
|
||||
if (sp->len < 8)
|
||||
return rxrpc_abort_eproto(call, skb, RXKADSEALEDINCON,
|
||||
rxkad_abort_2_short_header);
|
||||
|
||||
/* Decrypt the skbuff in-place. TODO: We really want to decrypt
|
||||
* directly into the target buffer.
|
||||
@ -513,7 +495,7 @@ static int rxkad_verify_packet_2(struct rxrpc_call *call, struct sk_buff *skb,
|
||||
} else {
|
||||
sg = kmalloc_array(nsg, sizeof(*sg), GFP_NOIO);
|
||||
if (!sg)
|
||||
goto nomem;
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
sg_init_table(sg, nsg);
|
||||
@ -537,11 +519,9 @@ static int rxkad_verify_packet_2(struct rxrpc_call *call, struct sk_buff *skb,
|
||||
kfree(sg);
|
||||
|
||||
/* Extract the decrypted packet length */
|
||||
if (skb_copy_bits(skb, sp->offset, &sechdr, sizeof(sechdr)) < 0) {
|
||||
aborted = rxrpc_abort_eproto(call, skb, "rxkad_2_len", "XV2",
|
||||
RXKADDATALEN);
|
||||
goto protocol_error;
|
||||
}
|
||||
if (skb_copy_bits(skb, sp->offset, &sechdr, sizeof(sechdr)) < 0)
|
||||
return rxrpc_abort_eproto(call, skb, RXKADDATALEN,
|
||||
rxkad_abort_2_short_len);
|
||||
sp->offset += sizeof(sechdr);
|
||||
sp->len -= sizeof(sechdr);
|
||||
|
||||
@ -551,30 +531,17 @@ static int rxkad_verify_packet_2(struct rxrpc_call *call, struct sk_buff *skb,
|
||||
check = buf >> 16;
|
||||
check ^= seq ^ call->call_id;
|
||||
check &= 0xffff;
|
||||
if (check != 0) {
|
||||
aborted = rxrpc_abort_eproto(call, skb, "rxkad_2_check", "V2C",
|
||||
RXKADSEALEDINCON);
|
||||
goto protocol_error;
|
||||
}
|
||||
if (check != 0)
|
||||
return rxrpc_abort_eproto(call, skb, RXKADSEALEDINCON,
|
||||
rxkad_abort_2_short_check);
|
||||
|
||||
if (data_size > sp->len) {
|
||||
aborted = rxrpc_abort_eproto(call, skb, "rxkad_2_datalen", "V2L",
|
||||
RXKADDATALEN);
|
||||
goto protocol_error;
|
||||
}
|
||||
if (data_size > sp->len)
|
||||
return rxrpc_abort_eproto(call, skb, RXKADDATALEN,
|
||||
rxkad_abort_2_short_data);
|
||||
|
||||
sp->len = data_size;
|
||||
_leave(" = 0 [dlen=%x]", data_size);
|
||||
return 0;
|
||||
|
||||
protocol_error:
|
||||
if (aborted)
|
||||
rxrpc_send_abort_packet(call);
|
||||
return -EPROTO;
|
||||
|
||||
nomem:
|
||||
_leave(" = -ENOMEM");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -590,7 +557,6 @@ static int rxkad_verify_packet(struct rxrpc_call *call, struct sk_buff *skb)
|
||||
__be32 buf[2];
|
||||
} crypto __aligned(8);
|
||||
rxrpc_seq_t seq = sp->hdr.seq;
|
||||
bool aborted;
|
||||
int ret;
|
||||
u16 cksum;
|
||||
u32 x, y;
|
||||
@ -627,9 +593,9 @@ static int rxkad_verify_packet(struct rxrpc_call *call, struct sk_buff *skb)
|
||||
cksum = 1; /* zero checksums are not permitted */
|
||||
|
||||
if (cksum != sp->hdr.cksum) {
|
||||
aborted = rxrpc_abort_eproto(call, skb, "rxkad_csum", "VCK",
|
||||
RXKADSEALEDINCON);
|
||||
goto protocol_error;
|
||||
ret = rxrpc_abort_eproto(call, skb, RXKADSEALEDINCON,
|
||||
rxkad_abort_bad_checksum);
|
||||
goto out;
|
||||
}
|
||||
|
||||
switch (call->conn->security_level) {
|
||||
@ -647,13 +613,9 @@ static int rxkad_verify_packet(struct rxrpc_call *call, struct sk_buff *skb)
|
||||
break;
|
||||
}
|
||||
|
||||
out:
|
||||
skcipher_request_free(req);
|
||||
return ret;
|
||||
|
||||
protocol_error:
|
||||
if (aborted)
|
||||
rxrpc_send_abort_packet(call);
|
||||
return -EPROTO;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -821,34 +783,30 @@ static int rxkad_encrypt_response(struct rxrpc_connection *conn,
|
||||
* respond to a challenge packet
|
||||
*/
|
||||
static int rxkad_respond_to_challenge(struct rxrpc_connection *conn,
|
||||
struct sk_buff *skb,
|
||||
u32 *_abort_code)
|
||||
struct sk_buff *skb)
|
||||
{
|
||||
const struct rxrpc_key_token *token;
|
||||
struct rxkad_challenge challenge;
|
||||
struct rxkad_response *resp;
|
||||
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
|
||||
const char *eproto;
|
||||
u32 version, nonce, min_level, abort_code;
|
||||
int ret;
|
||||
u32 version, nonce, min_level;
|
||||
int ret = -EPROTO;
|
||||
|
||||
_enter("{%d,%x}", conn->debug_id, key_serial(conn->key));
|
||||
|
||||
eproto = tracepoint_string("chall_no_key");
|
||||
abort_code = RX_PROTOCOL_ERROR;
|
||||
if (!conn->key)
|
||||
goto protocol_error;
|
||||
return rxrpc_abort_conn(conn, skb, RX_PROTOCOL_ERROR, -EPROTO,
|
||||
rxkad_abort_chall_no_key);
|
||||
|
||||
abort_code = RXKADEXPIRED;
|
||||
ret = key_validate(conn->key);
|
||||
if (ret < 0)
|
||||
goto other_error;
|
||||
return rxrpc_abort_conn(conn, skb, RXKADEXPIRED, ret,
|
||||
rxkad_abort_chall_key_expired);
|
||||
|
||||
eproto = tracepoint_string("chall_short");
|
||||
abort_code = RXKADPACKETSHORT;
|
||||
if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header),
|
||||
&challenge, sizeof(challenge)) < 0)
|
||||
goto protocol_error;
|
||||
return rxrpc_abort_conn(conn, skb, RXKADPACKETSHORT, -EPROTO,
|
||||
rxkad_abort_chall_short);
|
||||
|
||||
version = ntohl(challenge.version);
|
||||
nonce = ntohl(challenge.nonce);
|
||||
@ -856,15 +814,13 @@ static int rxkad_respond_to_challenge(struct rxrpc_connection *conn,
|
||||
|
||||
trace_rxrpc_rx_challenge(conn, sp->hdr.serial, version, nonce, min_level);
|
||||
|
||||
eproto = tracepoint_string("chall_ver");
|
||||
abort_code = RXKADINCONSISTENCY;
|
||||
if (version != RXKAD_VERSION)
|
||||
goto protocol_error;
|
||||
return rxrpc_abort_conn(conn, skb, RXKADINCONSISTENCY, -EPROTO,
|
||||
rxkad_abort_chall_version);
|
||||
|
||||
abort_code = RXKADLEVELFAIL;
|
||||
ret = -EACCES;
|
||||
if (conn->security_level < min_level)
|
||||
goto other_error;
|
||||
return rxrpc_abort_conn(conn, skb, RXKADLEVELFAIL, -EACCES,
|
||||
rxkad_abort_chall_level);
|
||||
|
||||
token = conn->key->payload.data[0];
|
||||
|
||||
@ -893,13 +849,6 @@ static int rxkad_respond_to_challenge(struct rxrpc_connection *conn,
|
||||
ret = rxkad_send_response(conn, &sp->hdr, resp, token->kad);
|
||||
kfree(resp);
|
||||
return ret;
|
||||
|
||||
protocol_error:
|
||||
trace_rxrpc_rx_eproto(NULL, sp->hdr.serial, eproto);
|
||||
ret = -EPROTO;
|
||||
other_error:
|
||||
*_abort_code = abort_code;
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -910,20 +859,15 @@ static int rxkad_decrypt_ticket(struct rxrpc_connection *conn,
|
||||
struct sk_buff *skb,
|
||||
void *ticket, size_t ticket_len,
|
||||
struct rxrpc_crypt *_session_key,
|
||||
time64_t *_expiry,
|
||||
u32 *_abort_code)
|
||||
time64_t *_expiry)
|
||||
{
|
||||
struct skcipher_request *req;
|
||||
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
|
||||
struct rxrpc_crypt iv, key;
|
||||
struct scatterlist sg[1];
|
||||
struct in_addr addr;
|
||||
unsigned int life;
|
||||
const char *eproto;
|
||||
time64_t issue, now;
|
||||
bool little_endian;
|
||||
int ret;
|
||||
u32 abort_code;
|
||||
u8 *p, *q, *name, *end;
|
||||
|
||||
_enter("{%d},{%x}", conn->debug_id, key_serial(server_key));
|
||||
@ -935,10 +879,9 @@ static int rxkad_decrypt_ticket(struct rxrpc_connection *conn,
|
||||
|
||||
memcpy(&iv, &server_key->payload.data[2], sizeof(iv));
|
||||
|
||||
ret = -ENOMEM;
|
||||
req = skcipher_request_alloc(server_key->payload.data[0], GFP_NOFS);
|
||||
if (!req)
|
||||
goto temporary_error;
|
||||
return -ENOMEM;
|
||||
|
||||
sg_init_one(&sg[0], ticket, ticket_len);
|
||||
skcipher_request_set_callback(req, 0, NULL, NULL);
|
||||
@ -949,18 +892,21 @@ static int rxkad_decrypt_ticket(struct rxrpc_connection *conn,
|
||||
p = ticket;
|
||||
end = p + ticket_len;
|
||||
|
||||
#define Z(field) \
|
||||
({ \
|
||||
u8 *__str = p; \
|
||||
eproto = tracepoint_string("rxkad_bad_"#field); \
|
||||
q = memchr(p, 0, end - p); \
|
||||
if (!q || q - p > (field##_SZ)) \
|
||||
goto bad_ticket; \
|
||||
for (; p < q; p++) \
|
||||
if (!isprint(*p)) \
|
||||
goto bad_ticket; \
|
||||
p++; \
|
||||
__str; \
|
||||
#define Z(field, fieldl) \
|
||||
({ \
|
||||
u8 *__str = p; \
|
||||
q = memchr(p, 0, end - p); \
|
||||
if (!q || q - p > field##_SZ) \
|
||||
return rxrpc_abort_conn( \
|
||||
conn, skb, RXKADBADTICKET, -EPROTO, \
|
||||
rxkad_abort_resp_tkt_##fieldl); \
|
||||
for (; p < q; p++) \
|
||||
if (!isprint(*p)) \
|
||||
return rxrpc_abort_conn( \
|
||||
conn, skb, RXKADBADTICKET, -EPROTO, \
|
||||
rxkad_abort_resp_tkt_##fieldl); \
|
||||
p++; \
|
||||
__str; \
|
||||
})
|
||||
|
||||
/* extract the ticket flags */
|
||||
@ -969,20 +915,20 @@ static int rxkad_decrypt_ticket(struct rxrpc_connection *conn,
|
||||
p++;
|
||||
|
||||
/* extract the authentication name */
|
||||
name = Z(ANAME);
|
||||
name = Z(ANAME, aname);
|
||||
_debug("KIV ANAME: %s", name);
|
||||
|
||||
/* extract the principal's instance */
|
||||
name = Z(INST);
|
||||
name = Z(INST, inst);
|
||||
_debug("KIV INST : %s", name);
|
||||
|
||||
/* extract the principal's authentication domain */
|
||||
name = Z(REALM);
|
||||
name = Z(REALM, realm);
|
||||
_debug("KIV REALM: %s", name);
|
||||
|
||||
eproto = tracepoint_string("rxkad_bad_len");
|
||||
if (end - p < 4 + 8 + 4 + 2)
|
||||
goto bad_ticket;
|
||||
return rxrpc_abort_conn(conn, skb, RXKADBADTICKET, -EPROTO,
|
||||
rxkad_abort_resp_tkt_short);
|
||||
|
||||
/* get the IPv4 address of the entity that requested the ticket */
|
||||
memcpy(&addr, p, sizeof(addr));
|
||||
@ -1014,38 +960,23 @@ static int rxkad_decrypt_ticket(struct rxrpc_connection *conn,
|
||||
_debug("KIV ISSUE: %llx [%llx]", issue, now);
|
||||
|
||||
/* check the ticket is in date */
|
||||
if (issue > now) {
|
||||
abort_code = RXKADNOAUTH;
|
||||
ret = -EKEYREJECTED;
|
||||
goto other_error;
|
||||
}
|
||||
|
||||
if (issue < now - life) {
|
||||
abort_code = RXKADEXPIRED;
|
||||
ret = -EKEYEXPIRED;
|
||||
goto other_error;
|
||||
}
|
||||
if (issue > now)
|
||||
return rxrpc_abort_conn(conn, skb, RXKADNOAUTH, -EKEYREJECTED,
|
||||
rxkad_abort_resp_tkt_future);
|
||||
if (issue < now - life)
|
||||
return rxrpc_abort_conn(conn, skb, RXKADEXPIRED, -EKEYEXPIRED,
|
||||
rxkad_abort_resp_tkt_expired);
|
||||
|
||||
*_expiry = issue + life;
|
||||
|
||||
/* get the service name */
|
||||
name = Z(SNAME);
|
||||
name = Z(SNAME, sname);
|
||||
_debug("KIV SNAME: %s", name);
|
||||
|
||||
/* get the service instance name */
|
||||
name = Z(INST);
|
||||
name = Z(INST, sinst);
|
||||
_debug("KIV SINST: %s", name);
|
||||
return 0;
|
||||
|
||||
bad_ticket:
|
||||
trace_rxrpc_rx_eproto(NULL, sp->hdr.serial, eproto);
|
||||
abort_code = RXKADBADTICKET;
|
||||
ret = -EPROTO;
|
||||
other_error:
|
||||
*_abort_code = abort_code;
|
||||
return ret;
|
||||
temporary_error:
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -1086,17 +1017,15 @@ static void rxkad_decrypt_response(struct rxrpc_connection *conn,
|
||||
* verify a response
|
||||
*/
|
||||
static int rxkad_verify_response(struct rxrpc_connection *conn,
|
||||
struct sk_buff *skb,
|
||||
u32 *_abort_code)
|
||||
struct sk_buff *skb)
|
||||
{
|
||||
struct rxkad_response *response;
|
||||
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
|
||||
struct rxrpc_crypt session_key;
|
||||
struct key *server_key;
|
||||
const char *eproto;
|
||||
time64_t expiry;
|
||||
void *ticket;
|
||||
u32 abort_code, version, kvno, ticket_len, level;
|
||||
u32 version, kvno, ticket_len, level;
|
||||
__be32 csum;
|
||||
int ret, i;
|
||||
|
||||
@ -1104,22 +1033,18 @@ static int rxkad_verify_response(struct rxrpc_connection *conn,
|
||||
|
||||
server_key = rxrpc_look_up_server_security(conn, skb, 0, 0);
|
||||
if (IS_ERR(server_key)) {
|
||||
switch (PTR_ERR(server_key)) {
|
||||
ret = PTR_ERR(server_key);
|
||||
switch (ret) {
|
||||
case -ENOKEY:
|
||||
abort_code = RXKADUNKNOWNKEY;
|
||||
break;
|
||||
return rxrpc_abort_conn(conn, skb, RXKADUNKNOWNKEY, ret,
|
||||
rxkad_abort_resp_nokey);
|
||||
case -EKEYEXPIRED:
|
||||
abort_code = RXKADEXPIRED;
|
||||
break;
|
||||
return rxrpc_abort_conn(conn, skb, RXKADEXPIRED, ret,
|
||||
rxkad_abort_resp_key_expired);
|
||||
default:
|
||||
abort_code = RXKADNOAUTH;
|
||||
break;
|
||||
return rxrpc_abort_conn(conn, skb, RXKADNOAUTH, ret,
|
||||
rxkad_abort_resp_key_rejected);
|
||||
}
|
||||
trace_rxrpc_abort(0, "SVK",
|
||||
sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
|
||||
abort_code, PTR_ERR(server_key));
|
||||
*_abort_code = abort_code;
|
||||
return -EPROTO;
|
||||
}
|
||||
|
||||
ret = -ENOMEM;
|
||||
@ -1127,11 +1052,12 @@ static int rxkad_verify_response(struct rxrpc_connection *conn,
|
||||
if (!response)
|
||||
goto temporary_error;
|
||||
|
||||
eproto = tracepoint_string("rxkad_rsp_short");
|
||||
abort_code = RXKADPACKETSHORT;
|
||||
if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header),
|
||||
response, sizeof(*response)) < 0)
|
||||
response, sizeof(*response)) < 0) {
|
||||
rxrpc_abort_conn(conn, skb, RXKADPACKETSHORT, -EPROTO,
|
||||
rxkad_abort_resp_short);
|
||||
goto protocol_error;
|
||||
}
|
||||
|
||||
version = ntohl(response->version);
|
||||
ticket_len = ntohl(response->ticket_len);
|
||||
@ -1139,20 +1065,23 @@ static int rxkad_verify_response(struct rxrpc_connection *conn,
|
||||
|
||||
trace_rxrpc_rx_response(conn, sp->hdr.serial, version, kvno, ticket_len);
|
||||
|
||||
eproto = tracepoint_string("rxkad_rsp_ver");
|
||||
abort_code = RXKADINCONSISTENCY;
|
||||
if (version != RXKAD_VERSION)
|
||||
if (version != RXKAD_VERSION) {
|
||||
rxrpc_abort_conn(conn, skb, RXKADINCONSISTENCY, -EPROTO,
|
||||
rxkad_abort_resp_version);
|
||||
goto protocol_error;
|
||||
}
|
||||
|
||||
eproto = tracepoint_string("rxkad_rsp_tktlen");
|
||||
abort_code = RXKADTICKETLEN;
|
||||
if (ticket_len < 4 || ticket_len > MAXKRB5TICKETLEN)
|
||||
if (ticket_len < 4 || ticket_len > MAXKRB5TICKETLEN) {
|
||||
rxrpc_abort_conn(conn, skb, RXKADTICKETLEN, -EPROTO,
|
||||
rxkad_abort_resp_tkt_len);
|
||||
goto protocol_error;
|
||||
}
|
||||
|
||||
eproto = tracepoint_string("rxkad_rsp_unkkey");
|
||||
abort_code = RXKADUNKNOWNKEY;
|
||||
if (kvno >= RXKAD_TKT_TYPE_KERBEROS_V5)
|
||||
if (kvno >= RXKAD_TKT_TYPE_KERBEROS_V5) {
|
||||
rxrpc_abort_conn(conn, skb, RXKADUNKNOWNKEY, -EPROTO,
|
||||
rxkad_abort_resp_unknown_tkt);
|
||||
goto protocol_error;
|
||||
}
|
||||
|
||||
/* extract the kerberos ticket and decrypt and decode it */
|
||||
ret = -ENOMEM;
|
||||
@ -1160,15 +1089,15 @@ static int rxkad_verify_response(struct rxrpc_connection *conn,
|
||||
if (!ticket)
|
||||
goto temporary_error_free_resp;
|
||||
|
||||
eproto = tracepoint_string("rxkad_tkt_short");
|
||||
abort_code = RXKADPACKETSHORT;
|
||||
ret = skb_copy_bits(skb, sizeof(struct rxrpc_wire_header) + sizeof(*response),
|
||||
ticket, ticket_len);
|
||||
if (ret < 0)
|
||||
goto temporary_error_free_ticket;
|
||||
if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header) + sizeof(*response),
|
||||
ticket, ticket_len) < 0) {
|
||||
rxrpc_abort_conn(conn, skb, RXKADPACKETSHORT, -EPROTO,
|
||||
rxkad_abort_resp_short_tkt);
|
||||
goto protocol_error;
|
||||
}
|
||||
|
||||
ret = rxkad_decrypt_ticket(conn, server_key, skb, ticket, ticket_len,
|
||||
&session_key, &expiry, _abort_code);
|
||||
&session_key, &expiry);
|
||||
if (ret < 0)
|
||||
goto temporary_error_free_ticket;
|
||||
|
||||
@ -1176,56 +1105,61 @@ static int rxkad_verify_response(struct rxrpc_connection *conn,
|
||||
* response */
|
||||
rxkad_decrypt_response(conn, response, &session_key);
|
||||
|
||||
eproto = tracepoint_string("rxkad_rsp_param");
|
||||
abort_code = RXKADSEALEDINCON;
|
||||
if (ntohl(response->encrypted.epoch) != conn->proto.epoch)
|
||||
goto protocol_error_free;
|
||||
if (ntohl(response->encrypted.cid) != conn->proto.cid)
|
||||
goto protocol_error_free;
|
||||
if (ntohl(response->encrypted.securityIndex) != conn->security_ix)
|
||||
if (ntohl(response->encrypted.epoch) != conn->proto.epoch ||
|
||||
ntohl(response->encrypted.cid) != conn->proto.cid ||
|
||||
ntohl(response->encrypted.securityIndex) != conn->security_ix) {
|
||||
rxrpc_abort_conn(conn, skb, RXKADSEALEDINCON, -EPROTO,
|
||||
rxkad_abort_resp_bad_param);
|
||||
goto protocol_error_free;
|
||||
}
|
||||
|
||||
csum = response->encrypted.checksum;
|
||||
response->encrypted.checksum = 0;
|
||||
rxkad_calc_response_checksum(response);
|
||||
eproto = tracepoint_string("rxkad_rsp_csum");
|
||||
if (response->encrypted.checksum != csum)
|
||||
if (response->encrypted.checksum != csum) {
|
||||
rxrpc_abort_conn(conn, skb, RXKADSEALEDINCON, -EPROTO,
|
||||
rxkad_abort_resp_bad_checksum);
|
||||
goto protocol_error_free;
|
||||
}
|
||||
|
||||
spin_lock(&conn->bundle->channel_lock);
|
||||
for (i = 0; i < RXRPC_MAXCALLS; i++) {
|
||||
struct rxrpc_call *call;
|
||||
u32 call_id = ntohl(response->encrypted.call_id[i]);
|
||||
u32 counter = READ_ONCE(conn->channels[i].call_counter);
|
||||
|
||||
eproto = tracepoint_string("rxkad_rsp_callid");
|
||||
if (call_id > INT_MAX)
|
||||
goto protocol_error_unlock;
|
||||
if (call_id > INT_MAX) {
|
||||
rxrpc_abort_conn(conn, skb, RXKADSEALEDINCON, -EPROTO,
|
||||
rxkad_abort_resp_bad_callid);
|
||||
goto protocol_error_free;
|
||||
}
|
||||
|
||||
eproto = tracepoint_string("rxkad_rsp_callctr");
|
||||
if (call_id < conn->channels[i].call_counter)
|
||||
goto protocol_error_unlock;
|
||||
if (call_id < counter) {
|
||||
rxrpc_abort_conn(conn, skb, RXKADSEALEDINCON, -EPROTO,
|
||||
rxkad_abort_resp_call_ctr);
|
||||
goto protocol_error_free;
|
||||
}
|
||||
|
||||
eproto = tracepoint_string("rxkad_rsp_callst");
|
||||
if (call_id > conn->channels[i].call_counter) {
|
||||
call = rcu_dereference_protected(
|
||||
conn->channels[i].call,
|
||||
lockdep_is_held(&conn->bundle->channel_lock));
|
||||
if (call && call->state < RXRPC_CALL_COMPLETE)
|
||||
goto protocol_error_unlock;
|
||||
if (call_id > counter) {
|
||||
if (conn->channels[i].call) {
|
||||
rxrpc_abort_conn(conn, skb, RXKADSEALEDINCON, -EPROTO,
|
||||
rxkad_abort_resp_call_state);
|
||||
goto protocol_error_free;
|
||||
}
|
||||
conn->channels[i].call_counter = call_id;
|
||||
}
|
||||
}
|
||||
spin_unlock(&conn->bundle->channel_lock);
|
||||
|
||||
eproto = tracepoint_string("rxkad_rsp_seq");
|
||||
abort_code = RXKADOUTOFSEQUENCE;
|
||||
if (ntohl(response->encrypted.inc_nonce) != conn->rxkad.nonce + 1)
|
||||
if (ntohl(response->encrypted.inc_nonce) != conn->rxkad.nonce + 1) {
|
||||
rxrpc_abort_conn(conn, skb, RXKADOUTOFSEQUENCE, -EPROTO,
|
||||
rxkad_abort_resp_ooseq);
|
||||
goto protocol_error_free;
|
||||
}
|
||||
|
||||
eproto = tracepoint_string("rxkad_rsp_level");
|
||||
abort_code = RXKADLEVELFAIL;
|
||||
level = ntohl(response->encrypted.level);
|
||||
if (level > RXRPC_SECURITY_ENCRYPT)
|
||||
if (level > RXRPC_SECURITY_ENCRYPT) {
|
||||
rxrpc_abort_conn(conn, skb, RXKADLEVELFAIL, -EPROTO,
|
||||
rxkad_abort_resp_level);
|
||||
goto protocol_error_free;
|
||||
}
|
||||
conn->security_level = level;
|
||||
|
||||
/* create a key to hold the security data and expiration time - after
|
||||
@ -1240,15 +1174,11 @@ static int rxkad_verify_response(struct rxrpc_connection *conn,
|
||||
_leave(" = 0");
|
||||
return 0;
|
||||
|
||||
protocol_error_unlock:
|
||||
spin_unlock(&conn->bundle->channel_lock);
|
||||
protocol_error_free:
|
||||
kfree(ticket);
|
||||
protocol_error:
|
||||
kfree(response);
|
||||
trace_rxrpc_rx_eproto(NULL, sp->hdr.serial, eproto);
|
||||
key_put(server_key);
|
||||
*_abort_code = abort_code;
|
||||
return -EPROTO;
|
||||
|
||||
temporary_error_free_ticket:
|
||||
|
@ -10,6 +10,8 @@
|
||||
#include <linux/slab.h>
|
||||
#include <net/sock.h>
|
||||
#include <net/af_rxrpc.h>
|
||||
#define RXRPC_TRACE_ONLY_DEFINE_ENUMS
|
||||
#include <trace/events/rxrpc.h>
|
||||
|
||||
MODULE_DESCRIPTION("rxperf test server (afs)");
|
||||
MODULE_AUTHOR("Red Hat, Inc.");
|
||||
@ -307,12 +309,14 @@ static void rxperf_deliver_to_call(struct work_struct *work)
|
||||
case -EOPNOTSUPP:
|
||||
abort_code = RXGEN_OPCODE;
|
||||
rxrpc_kernel_abort_call(rxperf_socket, call->rxcall,
|
||||
abort_code, ret, "GOP");
|
||||
abort_code, ret,
|
||||
rxperf_abort_op_not_supported);
|
||||
goto call_complete;
|
||||
case -ENOTSUPP:
|
||||
abort_code = RX_USER_ABORT;
|
||||
rxrpc_kernel_abort_call(rxperf_socket, call->rxcall,
|
||||
abort_code, ret, "GUA");
|
||||
abort_code, ret,
|
||||
rxperf_abort_op_not_supported);
|
||||
goto call_complete;
|
||||
case -EIO:
|
||||
pr_err("Call %u in bad state %u\n",
|
||||
@ -324,11 +328,13 @@ static void rxperf_deliver_to_call(struct work_struct *work)
|
||||
case -ENOMEM:
|
||||
case -EFAULT:
|
||||
rxrpc_kernel_abort_call(rxperf_socket, call->rxcall,
|
||||
RXGEN_SS_UNMARSHAL, ret, "GUM");
|
||||
RXGEN_SS_UNMARSHAL, ret,
|
||||
rxperf_abort_unmarshal_error);
|
||||
goto call_complete;
|
||||
default:
|
||||
rxrpc_kernel_abort_call(rxperf_socket, call->rxcall,
|
||||
RX_CALL_DEAD, ret, "GER");
|
||||
RX_CALL_DEAD, ret,
|
||||
rxperf_abort_general_error);
|
||||
goto call_complete;
|
||||
}
|
||||
}
|
||||
@ -523,7 +529,8 @@ static int rxperf_process_call(struct rxperf_call *call)
|
||||
|
||||
if (n == -ENOMEM)
|
||||
rxrpc_kernel_abort_call(rxperf_socket, call->rxcall,
|
||||
RXGEN_SS_MARSHAL, -ENOMEM, "GOM");
|
||||
RXGEN_SS_MARSHAL, -ENOMEM,
|
||||
rxperf_abort_oom);
|
||||
return n;
|
||||
}
|
||||
|
||||
|
@ -97,38 +97,31 @@ found:
|
||||
*/
|
||||
int rxrpc_init_client_conn_security(struct rxrpc_connection *conn)
|
||||
{
|
||||
const struct rxrpc_security *sec;
|
||||
struct rxrpc_key_token *token;
|
||||
struct key *key = conn->key;
|
||||
int ret;
|
||||
int ret = 0;
|
||||
|
||||
_enter("{%d},{%x}", conn->debug_id, key_serial(key));
|
||||
|
||||
if (!key)
|
||||
return 0;
|
||||
|
||||
ret = key_validate(key);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
for (token = key->payload.data[0]; token; token = token->next) {
|
||||
sec = rxrpc_security_lookup(token->security_index);
|
||||
if (sec)
|
||||
if (token->security_index == conn->security->security_index)
|
||||
goto found;
|
||||
}
|
||||
return -EKEYREJECTED;
|
||||
|
||||
found:
|
||||
conn->security = sec;
|
||||
|
||||
ret = conn->security->init_connection_security(conn, token);
|
||||
if (ret < 0) {
|
||||
conn->security = &rxrpc_no_security;
|
||||
return ret;
|
||||
mutex_lock(&conn->security_lock);
|
||||
if (conn->state == RXRPC_CONN_CLIENT_UNSECURED) {
|
||||
ret = conn->security->init_connection_security(conn, token);
|
||||
if (ret == 0) {
|
||||
spin_lock(&conn->state_lock);
|
||||
if (conn->state == RXRPC_CONN_CLIENT_UNSECURED)
|
||||
conn->state = RXRPC_CONN_CLIENT;
|
||||
spin_unlock(&conn->state_lock);
|
||||
}
|
||||
}
|
||||
|
||||
_leave(" = 0");
|
||||
return 0;
|
||||
mutex_unlock(&conn->security_lock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -144,21 +137,15 @@ const struct rxrpc_security *rxrpc_get_incoming_security(struct rxrpc_sock *rx,
|
||||
|
||||
sec = rxrpc_security_lookup(sp->hdr.securityIndex);
|
||||
if (!sec) {
|
||||
trace_rxrpc_abort(0, "SVS",
|
||||
sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
|
||||
RX_INVALID_OPERATION, EKEYREJECTED);
|
||||
skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
|
||||
skb->priority = RX_INVALID_OPERATION;
|
||||
rxrpc_direct_abort(skb, rxrpc_abort_unsupported_security,
|
||||
RX_INVALID_OPERATION, -EKEYREJECTED);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (sp->hdr.securityIndex != RXRPC_SECURITY_NONE &&
|
||||
!rx->securities) {
|
||||
trace_rxrpc_abort(0, "SVR",
|
||||
sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
|
||||
RX_INVALID_OPERATION, EKEYREJECTED);
|
||||
skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
|
||||
skb->priority = sec->no_key_abort;
|
||||
rxrpc_direct_abort(skb, rxrpc_abort_no_service_key,
|
||||
sec->no_key_abort, -EKEYREJECTED);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
@ -191,9 +178,9 @@ struct key *rxrpc_look_up_server_security(struct rxrpc_connection *conn,
|
||||
sprintf(kdesc, "%u:%u",
|
||||
sp->hdr.serviceId, sp->hdr.securityIndex);
|
||||
|
||||
rcu_read_lock();
|
||||
read_lock(&conn->local->services_lock);
|
||||
|
||||
rx = rcu_dereference(conn->local->service);
|
||||
rx = conn->local->service;
|
||||
if (!rx)
|
||||
goto out;
|
||||
|
||||
@ -215,6 +202,6 @@ struct key *rxrpc_look_up_server_security(struct rxrpc_connection *conn,
|
||||
}
|
||||
|
||||
out:
|
||||
rcu_read_unlock();
|
||||
read_unlock(&conn->local->services_lock);
|
||||
return key;
|
||||
}
|
||||
|
@ -17,6 +17,81 @@
|
||||
#include <net/af_rxrpc.h>
|
||||
#include "ar-internal.h"
|
||||
|
||||
/*
|
||||
* Propose an abort to be made in the I/O thread.
|
||||
*/
|
||||
bool rxrpc_propose_abort(struct rxrpc_call *call, s32 abort_code, int error,
|
||||
enum rxrpc_abort_reason why)
|
||||
{
|
||||
_enter("{%d},%d,%d,%u", call->debug_id, abort_code, error, why);
|
||||
|
||||
if (!call->send_abort && !rxrpc_call_is_complete(call)) {
|
||||
call->send_abort_why = why;
|
||||
call->send_abort_err = error;
|
||||
call->send_abort_seq = 0;
|
||||
/* Request abort locklessly vs rxrpc_input_call_event(). */
|
||||
smp_store_release(&call->send_abort, abort_code);
|
||||
rxrpc_poke_call(call, rxrpc_call_poke_abort);
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
/*
|
||||
* Wait for a call to become connected. Interruption here doesn't cause the
|
||||
* call to be aborted.
|
||||
*/
|
||||
static int rxrpc_wait_to_be_connected(struct rxrpc_call *call, long *timeo)
|
||||
{
|
||||
DECLARE_WAITQUEUE(myself, current);
|
||||
int ret = 0;
|
||||
|
||||
_enter("%d", call->debug_id);
|
||||
|
||||
if (rxrpc_call_state(call) != RXRPC_CALL_CLIENT_AWAIT_CONN)
|
||||
return call->error;
|
||||
|
||||
add_wait_queue_exclusive(&call->waitq, &myself);
|
||||
|
||||
for (;;) {
|
||||
ret = call->error;
|
||||
if (ret < 0)
|
||||
break;
|
||||
|
||||
switch (call->interruptibility) {
|
||||
case RXRPC_INTERRUPTIBLE:
|
||||
case RXRPC_PREINTERRUPTIBLE:
|
||||
set_current_state(TASK_INTERRUPTIBLE);
|
||||
break;
|
||||
case RXRPC_UNINTERRUPTIBLE:
|
||||
default:
|
||||
set_current_state(TASK_UNINTERRUPTIBLE);
|
||||
break;
|
||||
}
|
||||
if (rxrpc_call_state(call) != RXRPC_CALL_CLIENT_AWAIT_CONN) {
|
||||
ret = call->error;
|
||||
break;
|
||||
}
|
||||
if ((call->interruptibility == RXRPC_INTERRUPTIBLE ||
|
||||
call->interruptibility == RXRPC_PREINTERRUPTIBLE) &&
|
||||
signal_pending(current)) {
|
||||
ret = sock_intr_errno(*timeo);
|
||||
break;
|
||||
}
|
||||
*timeo = schedule_timeout(*timeo);
|
||||
}
|
||||
|
||||
remove_wait_queue(&call->waitq, &myself);
|
||||
__set_current_state(TASK_RUNNING);
|
||||
|
||||
if (ret == 0 && rxrpc_call_is_complete(call))
|
||||
ret = call->error;
|
||||
|
||||
_leave(" = %d", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Return true if there's sufficient Tx queue space.
|
||||
*/
|
||||
@ -39,7 +114,7 @@ static int rxrpc_wait_for_tx_window_intr(struct rxrpc_sock *rx,
|
||||
if (rxrpc_check_tx_space(call, NULL))
|
||||
return 0;
|
||||
|
||||
if (call->state >= RXRPC_CALL_COMPLETE)
|
||||
if (rxrpc_call_is_complete(call))
|
||||
return call->error;
|
||||
|
||||
if (signal_pending(current))
|
||||
@ -74,7 +149,7 @@ static int rxrpc_wait_for_tx_window_waitall(struct rxrpc_sock *rx,
|
||||
if (rxrpc_check_tx_space(call, &tx_win))
|
||||
return 0;
|
||||
|
||||
if (call->state >= RXRPC_CALL_COMPLETE)
|
||||
if (rxrpc_call_is_complete(call))
|
||||
return call->error;
|
||||
|
||||
if (timeout == 0 &&
|
||||
@ -103,7 +178,7 @@ static int rxrpc_wait_for_tx_window_nonintr(struct rxrpc_sock *rx,
|
||||
if (rxrpc_check_tx_space(call, NULL))
|
||||
return 0;
|
||||
|
||||
if (call->state >= RXRPC_CALL_COMPLETE)
|
||||
if (rxrpc_call_is_complete(call))
|
||||
return call->error;
|
||||
|
||||
trace_rxrpc_txqueue(call, rxrpc_txqueue_wait);
|
||||
@ -168,7 +243,6 @@ static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
|
||||
struct rxrpc_txbuf *txb,
|
||||
rxrpc_notify_end_tx_t notify_end_tx)
|
||||
{
|
||||
unsigned long now;
|
||||
rxrpc_seq_t seq = txb->seq;
|
||||
bool last = test_bit(RXRPC_TXBUF_LAST, &txb->flags), poke;
|
||||
|
||||
@ -191,36 +265,10 @@ static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
|
||||
poke = list_empty(&call->tx_sendmsg);
|
||||
list_add_tail(&txb->call_link, &call->tx_sendmsg);
|
||||
call->tx_prepared = seq;
|
||||
if (last)
|
||||
rxrpc_notify_end_tx(rx, call, notify_end_tx);
|
||||
spin_unlock(&call->tx_lock);
|
||||
|
||||
if (last || call->state == RXRPC_CALL_SERVER_ACK_REQUEST) {
|
||||
_debug("________awaiting reply/ACK__________");
|
||||
write_lock(&call->state_lock);
|
||||
switch (call->state) {
|
||||
case RXRPC_CALL_CLIENT_SEND_REQUEST:
|
||||
call->state = RXRPC_CALL_CLIENT_AWAIT_REPLY;
|
||||
rxrpc_notify_end_tx(rx, call, notify_end_tx);
|
||||
break;
|
||||
case RXRPC_CALL_SERVER_ACK_REQUEST:
|
||||
call->state = RXRPC_CALL_SERVER_SEND_REPLY;
|
||||
now = jiffies;
|
||||
WRITE_ONCE(call->delay_ack_at, now + MAX_JIFFY_OFFSET);
|
||||
if (call->ackr_reason == RXRPC_ACK_DELAY)
|
||||
call->ackr_reason = 0;
|
||||
trace_rxrpc_timer(call, rxrpc_timer_init_for_send_reply, now);
|
||||
if (!last)
|
||||
break;
|
||||
fallthrough;
|
||||
case RXRPC_CALL_SERVER_SEND_REPLY:
|
||||
call->state = RXRPC_CALL_SERVER_AWAIT_ACK;
|
||||
rxrpc_notify_end_tx(rx, call, notify_end_tx);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
write_unlock(&call->state_lock);
|
||||
}
|
||||
|
||||
if (poke)
|
||||
rxrpc_poke_call(call, rxrpc_call_poke_start);
|
||||
}
|
||||
@ -245,6 +293,16 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
|
||||
|
||||
timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
|
||||
|
||||
ret = rxrpc_wait_to_be_connected(call, &timeo);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
if (call->conn->state == RXRPC_CONN_CLIENT_UNSECURED) {
|
||||
ret = rxrpc_init_client_conn_security(call->conn);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* this should be in poll */
|
||||
sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
|
||||
|
||||
@ -252,15 +310,20 @@ reload:
|
||||
ret = -EPIPE;
|
||||
if (sk->sk_shutdown & SEND_SHUTDOWN)
|
||||
goto maybe_error;
|
||||
state = READ_ONCE(call->state);
|
||||
state = rxrpc_call_state(call);
|
||||
ret = -ESHUTDOWN;
|
||||
if (state >= RXRPC_CALL_COMPLETE)
|
||||
goto maybe_error;
|
||||
ret = -EPROTO;
|
||||
if (state != RXRPC_CALL_CLIENT_SEND_REQUEST &&
|
||||
state != RXRPC_CALL_SERVER_ACK_REQUEST &&
|
||||
state != RXRPC_CALL_SERVER_SEND_REPLY)
|
||||
state != RXRPC_CALL_SERVER_SEND_REPLY) {
|
||||
/* Request phase complete for this client call */
|
||||
trace_rxrpc_abort(call->debug_id, rxrpc_sendmsg_late_send,
|
||||
call->cid, call->call_id, call->rx_consumed,
|
||||
0, -EPROTO);
|
||||
goto maybe_error;
|
||||
}
|
||||
|
||||
ret = -EMSGSIZE;
|
||||
if (call->tx_total_len != -1) {
|
||||
@ -329,7 +392,7 @@ reload:
|
||||
|
||||
/* check for the far side aborting the call or a network error
|
||||
* occurring */
|
||||
if (call->state == RXRPC_CALL_COMPLETE)
|
||||
if (rxrpc_call_is_complete(call))
|
||||
goto call_terminated;
|
||||
|
||||
/* add the packet to the send queue if it's now full */
|
||||
@ -354,12 +417,9 @@ reload:
|
||||
|
||||
success:
|
||||
ret = copied;
|
||||
if (READ_ONCE(call->state) == RXRPC_CALL_COMPLETE) {
|
||||
read_lock(&call->state_lock);
|
||||
if (call->error < 0)
|
||||
ret = call->error;
|
||||
read_unlock(&call->state_lock);
|
||||
}
|
||||
if (rxrpc_call_is_complete(call) &&
|
||||
call->error < 0)
|
||||
ret = call->error;
|
||||
out:
|
||||
call->tx_pending = txb;
|
||||
_leave(" = %d", ret);
|
||||
@ -543,7 +603,6 @@ rxrpc_new_client_call_for_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg,
|
||||
atomic_inc_return(&rxrpc_debug_id));
|
||||
/* The socket is now unlocked */
|
||||
|
||||
rxrpc_put_peer(cp.peer, rxrpc_peer_put_discard_tmp);
|
||||
_leave(" = %p\n", call);
|
||||
return call;
|
||||
}
|
||||
@ -556,7 +615,6 @@ rxrpc_new_client_call_for_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg,
|
||||
int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
|
||||
__releases(&rx->sk.sk_lock.slock)
|
||||
{
|
||||
enum rxrpc_call_state state;
|
||||
struct rxrpc_call *call;
|
||||
unsigned long now, j;
|
||||
bool dropped_lock = false;
|
||||
@ -598,10 +656,10 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
|
||||
return PTR_ERR(call);
|
||||
/* ... and we have the call lock. */
|
||||
ret = 0;
|
||||
if (READ_ONCE(call->state) == RXRPC_CALL_COMPLETE)
|
||||
if (rxrpc_call_is_complete(call))
|
||||
goto out_put_unlock;
|
||||
} else {
|
||||
switch (READ_ONCE(call->state)) {
|
||||
switch (rxrpc_call_state(call)) {
|
||||
case RXRPC_CALL_UNINITIALISED:
|
||||
case RXRPC_CALL_CLIENT_AWAIT_CONN:
|
||||
case RXRPC_CALL_SERVER_PREALLOC:
|
||||
@ -655,17 +713,13 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
|
||||
break;
|
||||
}
|
||||
|
||||
state = READ_ONCE(call->state);
|
||||
_debug("CALL %d USR %lx ST %d on CONN %p",
|
||||
call->debug_id, call->user_call_ID, state, call->conn);
|
||||
|
||||
if (state >= RXRPC_CALL_COMPLETE) {
|
||||
if (rxrpc_call_is_complete(call)) {
|
||||
/* it's too late for this call */
|
||||
ret = -ESHUTDOWN;
|
||||
} else if (p.command == RXRPC_CMD_SEND_ABORT) {
|
||||
rxrpc_propose_abort(call, p.abort_code, -ECONNABORTED,
|
||||
rxrpc_abort_call_sendmsg);
|
||||
ret = 0;
|
||||
if (rxrpc_abort_call("CMD", call, 0, p.abort_code, -ECONNABORTED))
|
||||
ret = rxrpc_send_abort_packet(call);
|
||||
} else if (p.command != RXRPC_CMD_SEND_DATA) {
|
||||
ret = -EINVAL;
|
||||
} else {
|
||||
@ -705,34 +759,17 @@ int rxrpc_kernel_send_data(struct socket *sock, struct rxrpc_call *call,
|
||||
bool dropped_lock = false;
|
||||
int ret;
|
||||
|
||||
_enter("{%d,%s},", call->debug_id, rxrpc_call_states[call->state]);
|
||||
_enter("{%d},", call->debug_id);
|
||||
|
||||
ASSERTCMP(msg->msg_name, ==, NULL);
|
||||
ASSERTCMP(msg->msg_control, ==, NULL);
|
||||
|
||||
mutex_lock(&call->user_mutex);
|
||||
|
||||
_debug("CALL %d USR %lx ST %d on CONN %p",
|
||||
call->debug_id, call->user_call_ID, call->state, call->conn);
|
||||
|
||||
switch (READ_ONCE(call->state)) {
|
||||
case RXRPC_CALL_CLIENT_SEND_REQUEST:
|
||||
case RXRPC_CALL_SERVER_ACK_REQUEST:
|
||||
case RXRPC_CALL_SERVER_SEND_REPLY:
|
||||
ret = rxrpc_send_data(rxrpc_sk(sock->sk), call, msg, len,
|
||||
notify_end_tx, &dropped_lock);
|
||||
break;
|
||||
case RXRPC_CALL_COMPLETE:
|
||||
read_lock(&call->state_lock);
|
||||
ret = rxrpc_send_data(rxrpc_sk(sock->sk), call, msg, len,
|
||||
notify_end_tx, &dropped_lock);
|
||||
if (ret == -ESHUTDOWN)
|
||||
ret = call->error;
|
||||
read_unlock(&call->state_lock);
|
||||
break;
|
||||
default:
|
||||
/* Request phase complete for this client call */
|
||||
trace_rxrpc_rx_eproto(call, 0, tracepoint_string("late_send"));
|
||||
ret = -EPROTO;
|
||||
break;
|
||||
}
|
||||
|
||||
if (!dropped_lock)
|
||||
mutex_unlock(&call->user_mutex);
|
||||
@ -747,24 +784,20 @@ EXPORT_SYMBOL(rxrpc_kernel_send_data);
|
||||
* @call: The call to be aborted
|
||||
* @abort_code: The abort code to stick into the ABORT packet
|
||||
* @error: Local error value
|
||||
* @why: 3-char string indicating why.
|
||||
* @why: Indication as to why.
|
||||
*
|
||||
* Allow a kernel service to abort a call, if it's still in an abortable state
|
||||
* and return true if the call was aborted, false if it was already complete.
|
||||
*/
|
||||
bool rxrpc_kernel_abort_call(struct socket *sock, struct rxrpc_call *call,
|
||||
u32 abort_code, int error, const char *why)
|
||||
u32 abort_code, int error, enum rxrpc_abort_reason why)
|
||||
{
|
||||
bool aborted;
|
||||
|
||||
_enter("{%d},%d,%d,%s", call->debug_id, abort_code, error, why);
|
||||
_enter("{%d},%d,%d,%u", call->debug_id, abort_code, error, why);
|
||||
|
||||
mutex_lock(&call->user_mutex);
|
||||
|
||||
aborted = rxrpc_abort_call(why, call, 0, abort_code, error);
|
||||
if (aborted)
|
||||
rxrpc_send_abort_packet(call);
|
||||
|
||||
aborted = rxrpc_propose_abort(call, abort_code, error, why);
|
||||
mutex_unlock(&call->user_mutex);
|
||||
return aborted;
|
||||
}
|
||||
|
@@ -134,6 +134,11 @@ static int valid_label(const struct nlattr *attr,
{
const u32 *label = nla_data(attr);

if (nla_len(attr) != sizeof(*label)) {
NL_SET_ERR_MSG_MOD(extack, "Invalid MPLS label length");
return -EINVAL;
}

if (*label & ~MPLS_LABEL_MASK || *label == MPLS_LABEL_IMPLNULL) {
NL_SET_ERR_MSG_MOD(extack, "MPLS label out of range");
return -EINVAL;
@@ -145,7 +150,8 @@ static int valid_label(const struct nlattr *attr,
static const struct nla_policy mpls_policy[TCA_MPLS_MAX + 1] = {
[TCA_MPLS_PARMS] = NLA_POLICY_EXACT_LEN(sizeof(struct tc_mpls)),
[TCA_MPLS_PROTO] = { .type = NLA_U16 },
[TCA_MPLS_LABEL] = NLA_POLICY_VALIDATE_FN(NLA_U32, valid_label),
[TCA_MPLS_LABEL] = NLA_POLICY_VALIDATE_FN(NLA_BINARY,
valid_label),
[TCA_MPLS_TC] = NLA_POLICY_RANGE(NLA_U8, 0, 7),
[TCA_MPLS_TTL] = NLA_POLICY_MIN(NLA_U8, 1),
[TCA_MPLS_BOS] = NLA_POLICY_RANGE(NLA_U8, 0, 1),

@@ -1133,6 +1133,11 @@ skip:
return -ENOENT;
}

if (new && new->ops == &noqueue_qdisc_ops) {
NL_SET_ERR_MSG(extack, "Cannot assign noqueue to a class");
return -EINVAL;
}

err = cops->graft(parent, cl, new, &old, extack);
if (err)
return err;
@@ -1179,8 +1179,9 @@ void tipc_node_check_dest(struct net *net, u32 addr,
bool addr_match = false;
bool sign_match = false;
bool link_up = false;
bool link_is_reset = false;
bool accept_addr = false;
bool reset = true;
bool reset = false;
char *if_name;
unsigned long intv;
u16 session;
@@ -1200,14 +1201,14 @@ void tipc_node_check_dest(struct net *net, u32 addr,
/* Prepare to validate requesting node's signature and media address */
l = le->link;
link_up = l && tipc_link_is_up(l);
link_is_reset = l && tipc_link_is_reset(l);
addr_match = l && !memcmp(&le->maddr, maddr, sizeof(*maddr));
sign_match = (signature == n->signature);

/* These three flags give us eight permutations: */

if (sign_match && addr_match && link_up) {
/* All is fine. Do nothing. */
reset = false;
/* All is fine. Ignore requests. */
/* Peer node is not a container/local namespace */
if (!n->peer_hash_mix)
n->peer_hash_mix = hash_mixes;
@@ -1232,6 +1233,7 @@ void tipc_node_check_dest(struct net *net, u32 addr,
*/
accept_addr = true;
*respond = true;
reset = true;
} else if (!sign_match && addr_match && link_up) {
/* Peer node rebooted. Two possibilities:
* - Delayed re-discovery; this link endpoint has already
@@ -1263,6 +1265,7 @@ void tipc_node_check_dest(struct net *net, u32 addr,
n->signature = signature;
accept_addr = true;
*respond = true;
reset = true;
}

if (!accept_addr)
@@ -1291,6 +1294,7 @@ void tipc_node_check_dest(struct net *net, u32 addr,
tipc_link_fsm_evt(l, LINK_RESET_EVT);
if (n->state == NODE_FAILINGOVER)
tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
link_is_reset = tipc_link_is_reset(l);
le->link = l;
n->link_cnt++;
tipc_node_calculate_timer(n, l);
@@ -1303,7 +1307,7 @@ void tipc_node_check_dest(struct net *net, u32 addr,
memcpy(&le->maddr, maddr, sizeof(*maddr));
exit:
tipc_node_write_unlock(n);
if (reset && l && !tipc_link_is_reset(l))
if (reset && !link_is_reset)
tipc_node_link_down(n, b->identity, false);
tipc_node_put(n);
}

@@ -124,7 +124,7 @@ void producer(struct sockaddr_un *consumer_addr)

wait_for_signal(pipefd[0]);
if (connect(cfd, (struct sockaddr *)consumer_addr,
sizeof(struct sockaddr)) != 0) {
sizeof(*consumer_addr)) != 0) {
perror("Connect failed");
kill(0, SIGTERM);
exit(1);

@@ -12,19 +12,27 @@
 # In addition this script also checks if forcing a specific field in the
 # outer header is working.
 
+# Return 4 by default (Kselftest SKIP code)
+ERR=4
+
 if [ "$(id -u)" != "0" ]; then
 	echo "Please run as root."
-	exit 0
+	exit $ERR
 fi
 if ! which tcpdump > /dev/null 2>&1; then
 	echo "No tcpdump found. Required for this test."
-	exit 0
+	exit $ERR
 fi
 
 expected_tos="0x00"
 expected_ttl="0"
 failed=false
 
+readonly NS0=$(mktemp -u ns0-XXXXXXXX)
+readonly NS1=$(mktemp -u ns1-XXXXXXXX)
+
+RUN_NS0="ip netns exec ${NS0}"
+
 get_random_tos() {
 	# Get a random hex tos value between 0x00 and 0xfc, a multiple of 4
 	echo "0x$(tr -dc '0-9a-f' < /dev/urandom | head -c 1)\
@@ -61,7 +69,6 @@ setup() {
 	local vlan="$5"
 	local test_tos="0x00"
 	local test_ttl="0"
-	local ns="ip netns exec testing"
 
 	# We don't want a test-tos of 0x00,
 	# because this is the value that we get when no tos is set.
@@ -94,14 +101,15 @@ setup() {
 	printf "│%7s │%6s │%6s │%13s │%13s │%6s │" \
 		"$type" "$outer" "$inner" "$tos" "$ttl" "$vlan"
 
-	# Create 'testing' netns, veth pair and connect main ns with testing ns
-	ip netns add testing
-	ip link add type veth
-	ip link set veth1 netns testing
-	ip link set veth0 up
-	$ns ip link set veth1 up
-	ip addr flush dev veth0
-	$ns ip addr flush dev veth1
+	# Create netns NS0 and NS1 and connect them with a veth pair
+	ip netns add "${NS0}"
+	ip netns add "${NS1}"
+	ip link add name veth0 netns "${NS0}" type veth \
+		peer name veth1 netns "${NS1}"
+	ip -netns "${NS0}" link set dev veth0 up
+	ip -netns "${NS1}" link set dev veth1 up
+	ip -netns "${NS0}" address flush dev veth0
+	ip -netns "${NS1}" address flush dev veth1
 
 	local local_addr1=""
 	local local_addr2=""
@@ -127,51 +135,59 @@ setup() {
 		if [ "$type" = "gre" ]; then
 			type="gretap"
 		fi
-		ip addr add 198.18.0.1/24 dev veth0
-		$ns ip addr add 198.18.0.2/24 dev veth1
-		ip link add name tep0 type $type $local_addr1 remote \
-			198.18.0.2 tos $test_tos ttl $test_ttl $vxlan $geneve
-		$ns ip link add name tep1 type $type $local_addr2 remote \
-			198.18.0.1 tos $test_tos ttl $test_ttl $vxlan $geneve
+		ip -netns "${NS0}" address add 198.18.0.1/24 dev veth0
+		ip -netns "${NS1}" address add 198.18.0.2/24 dev veth1
+		ip -netns "${NS0}" link add name tep0 type $type $local_addr1 \
+			remote 198.18.0.2 tos $test_tos ttl $test_ttl \
+			$vxlan $geneve
+		ip -netns "${NS1}" link add name tep1 type $type $local_addr2 \
+			remote 198.18.0.1 tos $test_tos ttl $test_ttl \
+			$vxlan $geneve
 	elif [ "$outer" = "6" ]; then
 		if [ "$type" = "gre" ]; then
 			type="ip6gretap"
 		fi
-		ip addr add fdd1:ced0:5d88:3fce::1/64 dev veth0
-		$ns ip addr add fdd1:ced0:5d88:3fce::2/64 dev veth1
-		ip link add name tep0 type $type $local_addr1 \
-			remote fdd1:ced0:5d88:3fce::2 tos $test_tos ttl $test_ttl \
-			$vxlan $geneve
-		$ns ip link add name tep1 type $type $local_addr2 \
-			remote fdd1:ced0:5d88:3fce::1 tos $test_tos ttl $test_ttl \
-			$vxlan $geneve
+		ip -netns "${NS0}" address add fdd1:ced0:5d88:3fce::1/64 \
+			dev veth0 nodad
+		ip -netns "${NS1}" address add fdd1:ced0:5d88:3fce::2/64 \
+			dev veth1 nodad
+		ip -netns "${NS0}" link add name tep0 type $type $local_addr1 \
+			remote fdd1:ced0:5d88:3fce::2 tos $test_tos \
+			ttl $test_ttl $vxlan $geneve
+		ip -netns "${NS1}" link add name tep1 type $type $local_addr2 \
+			remote fdd1:ced0:5d88:3fce::1 tos $test_tos \
+			ttl $test_ttl $vxlan $geneve
 	fi
 
 	# Bring L2-tunnel link up and create VLAN on top
-	ip link set tep0 up
-	$ns ip link set tep1 up
-	ip addr flush dev tep0
-	$ns ip addr flush dev tep1
+	ip -netns "${NS0}" link set tep0 up
+	ip -netns "${NS1}" link set tep1 up
+	ip -netns "${NS0}" address flush dev tep0
+	ip -netns "${NS1}" address flush dev tep1
 	local parent
 	if $vlan; then
 		parent="vlan99-"
-		ip link add link tep0 name ${parent}0 type vlan id 99
-		$ns ip link add link tep1 name ${parent}1 type vlan id 99
-		ip link set ${parent}0 up
-		$ns ip link set ${parent}1 up
-		ip addr flush dev ${parent}0
-		$ns ip addr flush dev ${parent}1
+		ip -netns "${NS0}" link add link tep0 name ${parent}0 \
+			type vlan id 99
+		ip -netns "${NS1}" link add link tep1 name ${parent}1 \
+			type vlan id 99
+		ip -netns "${NS0}" link set dev ${parent}0 up
+		ip -netns "${NS1}" link set dev ${parent}1 up
+		ip -netns "${NS0}" address flush dev ${parent}0
+		ip -netns "${NS1}" address flush dev ${parent}1
 	else
 		parent="tep"
 	fi
 
 	# Assign inner IPv4/IPv6 addresses
 	if [ "$inner" = "4" ] || [ "$inner" = "other" ]; then
-		ip addr add 198.19.0.1/24 brd + dev ${parent}0
-		$ns ip addr add 198.19.0.2/24 brd + dev ${parent}1
+		ip -netns "${NS0}" address add 198.19.0.1/24 brd + dev ${parent}0
+		ip -netns "${NS1}" address add 198.19.0.2/24 brd + dev ${parent}1
 	elif [ "$inner" = "6" ]; then
-		ip addr add fdd4:96cf:4eae:443b::1/64 dev ${parent}0
-		$ns ip addr add fdd4:96cf:4eae:443b::2/64 dev ${parent}1
+		ip -netns "${NS0}" address add fdd4:96cf:4eae:443b::1/64 \
+			dev ${parent}0 nodad
+		ip -netns "${NS1}" address add fdd4:96cf:4eae:443b::2/64 \
+			dev ${parent}1 nodad
 	fi
 }
 
@@ -192,10 +208,10 @@ verify() {
 		ping_dst="198.19.0.3" # Generates ARPs which are not IPv4/IPv6
 	fi
 	if [ "$tos_ttl" = "inherit" ]; then
-		ping -i 0.1 $ping_dst -Q "$expected_tos" -t "$expected_ttl" \
-			2>/dev/null 1>&2 & ping_pid="$!"
+		${RUN_NS0} ping -i 0.1 $ping_dst -Q "$expected_tos" \
+			-t "$expected_ttl" 2>/dev/null 1>&2 & ping_pid="$!"
 	else
-		ping -i 0.1 $ping_dst 2>/dev/null 1>&2 & ping_pid="$!"
+		${RUN_NS0} ping -i 0.1 $ping_dst 2>/dev/null 1>&2 & ping_pid="$!"
 	fi
 	local tunnel_type_offset tunnel_type_proto req_proto_offset req_offset
 	if [ "$type" = "gre" ]; then
@@ -216,10 +232,12 @@
 				req_proto_offset="$((req_proto_offset + 4))"
 				req_offset="$((req_offset + 4))"
 			fi
-			out="$(tcpdump --immediate-mode -p -c 1 -v -i veth0 -n \
-				ip[$tunnel_type_offset] = $tunnel_type_proto and \
-				ip[$req_proto_offset] = 0x01 and \
-				ip[$req_offset] = 0x08 2>/dev/null | head -n 1)"
+			out="$(${RUN_NS0} tcpdump --immediate-mode -p -c 1 -v \
+				-i veth0 -n \
+				ip[$tunnel_type_offset] = $tunnel_type_proto and \
+				ip[$req_proto_offset] = 0x01 and \
+				ip[$req_offset] = 0x08 2>/dev/null \
+				| head -n 1)"
 		elif [ "$inner" = "6" ]; then
 			req_proto_offset="44"
 			req_offset="78"
@@ -231,10 +249,12 @@
 				req_proto_offset="$((req_proto_offset + 4))"
 				req_offset="$((req_offset + 4))"
 			fi
-			out="$(tcpdump --immediate-mode -p -c 1 -v -i veth0 -n \
-				ip[$tunnel_type_offset] = $tunnel_type_proto and \
-				ip[$req_proto_offset] = 0x3a and \
-				ip[$req_offset] = 0x80 2>/dev/null | head -n 1)"
+			out="$(${RUN_NS0} tcpdump --immediate-mode -p -c 1 -v \
+				-i veth0 -n \
+				ip[$tunnel_type_offset] = $tunnel_type_proto and \
+				ip[$req_proto_offset] = 0x3a and \
+				ip[$req_offset] = 0x80 2>/dev/null \
+				| head -n 1)"
 		elif [ "$inner" = "other" ]; then
 			req_proto_offset="36"
 			req_offset="45"
@@ -250,11 +270,13 @@
 				expected_tos="0x00"
 				expected_ttl="64"
 			fi
-			out="$(tcpdump --immediate-mode -p -c 1 -v -i veth0 -n \
-				ip[$tunnel_type_offset] = $tunnel_type_proto and \
-				ip[$req_proto_offset] = 0x08 and \
-				ip[$((req_proto_offset + 1))] = 0x06 and \
-				ip[$req_offset] = 0x01 2>/dev/null | head -n 1)"
+			out="$(${RUN_NS0} tcpdump --immediate-mode -p -c 1 -v \
+				-i veth0 -n \
+				ip[$tunnel_type_offset] = $tunnel_type_proto and \
+				ip[$req_proto_offset] = 0x08 and \
+				ip[$((req_proto_offset + 1))] = 0x06 and \
+				ip[$req_offset] = 0x01 2>/dev/null \
+				| head -n 1)"
 		fi
 	elif [ "$outer" = "6" ]; then
 		if [ "$type" = "gre" ]; then
@@ -273,10 +295,12 @@
 				req_proto_offset="$((req_proto_offset + 4))"
 				req_offset="$((req_offset + 4))"
 			fi
-			out="$(tcpdump --immediate-mode -p -c 1 -v -i veth0 -n \
-				ip6[$tunnel_type_offset] = $tunnel_type_proto and \
-				ip6[$req_proto_offset] = 0x01 and \
-				ip6[$req_offset] = 0x08 2>/dev/null | head -n 1)"
+			out="$(${RUN_NS0} tcpdump --immediate-mode -p -c 1 -v \
+				-i veth0 -n \
+				ip6[$tunnel_type_offset] = $tunnel_type_proto and \
+				ip6[$req_proto_offset] = 0x01 and \
+				ip6[$req_offset] = 0x08 2>/dev/null \
+				| head -n 1)"
 		elif [ "$inner" = "6" ]; then
 			local req_proto_offset="72"
 			local req_offset="106"
@@ -288,10 +312,12 @@
 				req_proto_offset="$((req_proto_offset + 4))"
 				req_offset="$((req_offset + 4))"
 			fi
-			out="$(tcpdump --immediate-mode -p -c 1 -v -i veth0 -n \
-				ip6[$tunnel_type_offset] = $tunnel_type_proto and \
-				ip6[$req_proto_offset] = 0x3a and \
-				ip6[$req_offset] = 0x80 2>/dev/null | head -n 1)"
+			out="$(${RUN_NS0} tcpdump --immediate-mode -p -c 1 -v \
+				-i veth0 -n \
+				ip6[$tunnel_type_offset] = $tunnel_type_proto and \
+				ip6[$req_proto_offset] = 0x3a and \
+				ip6[$req_offset] = 0x80 2>/dev/null \
+				| head -n 1)"
 		elif [ "$inner" = "other" ]; then
 			local req_proto_offset="64"
 			local req_offset="73"
@@ -307,15 +333,17 @@
 				expected_tos="0x00"
 				expected_ttl="64"
 			fi
-			out="$(tcpdump --immediate-mode -p -c 1 -v -i veth0 -n \
-				ip6[$tunnel_type_offset] = $tunnel_type_proto and \
-				ip6[$req_proto_offset] = 0x08 and \
-				ip6[$((req_proto_offset + 1))] = 0x06 and \
-				ip6[$req_offset] = 0x01 2>/dev/null | head -n 1)"
+			out="$(${RUN_NS0} tcpdump --immediate-mode -p -c 1 -v \
+				-i veth0 -n \
+				ip6[$tunnel_type_offset] = $tunnel_type_proto and \
+				ip6[$req_proto_offset] = 0x08 and \
+				ip6[$((req_proto_offset + 1))] = 0x06 and \
+				ip6[$req_offset] = 0x01 2>/dev/null \
+				| head -n 1)"
 		fi
 	fi
 	kill -9 $ping_pid
-	wait $ping_pid 2>/dev/null
+	wait $ping_pid 2>/dev/null || true
 	result="FAIL"
 	if [ "$outer" = "4" ]; then
 		captured_ttl="$(get_field "ttl" "$out")"
@@ -351,11 +379,35 @@
 }
 
 cleanup() {
-	ip link del veth0 2>/dev/null
-	ip netns del testing 2>/dev/null
-	ip link del tep0 2>/dev/null
+	ip netns del "${NS0}" 2>/dev/null
+	ip netns del "${NS1}" 2>/dev/null
 }
 
+exit_handler() {
+	# Don't exit immediately if one of the intermediate commands fails.
+	# We might be called at the end of the script, when the network
+	# namespaces have already been deleted. So cleanup() may fail, but we
+	# still need to run until 'exit $ERR' or the script won't return the
+	# correct error code.
+	set +e
+
+	cleanup
+
+	exit $ERR
+}
+
+# Restore the default SIGINT handler (just in case) and exit.
+# The exit handler will take care of cleaning everything up.
+interrupted() {
+	trap - INT
+
+	exit $ERR
+}
+
+set -e
+trap exit_handler EXIT
+trap interrupted INT
+
 printf "┌────────┬───────┬───────┬──────────────┬"
 printf "──────────────┬───────┬────────┐\n"
 for type in gre vxlan geneve; do
@@ -385,6 +437,10 @@ done
 printf "└────────┴───────┴───────┴──────────────┴"
 printf "──────────────┴───────┴────────┘\n"
 
+# All tests done.
+# Set ERR appropriately: it will be returned by the exit handler.
 if $failed; then
-	exit 1
+	ERR=1
+else
+	ERR=0
 fi