Searched refs:ops (Results 1 - 25 of 132) sorted by relevance


/deb_dpdk/lib/librte_mempool/
rte_mempool_ops.c
47 /* add a new ops struct in rte_mempool_ops_table, return its index. */
51 struct rte_mempool_ops *ops; local
60 "Maximum number of mempool ops structs exceeded\n");
68 "Missing callback while registering mempool ops\n");
72 if (strlen(h->name) >= sizeof(ops->name) - 1) {
81 ops = &rte_mempool_ops_table.ops[ops_index];
82 snprintf(ops->name, sizeof(ops->name), "%s", h->name);
83 ops
98 struct rte_mempool_ops *ops; local
108 struct rte_mempool_ops *ops; local
120 struct rte_mempool_ops *ops; local
131 struct rte_mempool_ops *ops = NULL; local
[all...]
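
The rte_mempool_ops.c hits come from the registration path that copies a caller-supplied ops struct into rte_mempool_ops_table and returns its index, rejecting tables that exceed the maximum count, omit a required callback ("Missing callback while registering mempool ops"), or carry a name that does not fit. A minimal caller-side sketch, assuming the DPDK 16.x rte_mempool_ops layout and the MEMPOOL_REGISTER_OPS constructor macro; the callback bodies are placeholders, not a real pool backend:

#include <errno.h>
#include <rte_mempool.h>

/* Placeholder callbacks: a real backend would manage its own object store. */
static int my_alloc(struct rte_mempool *mp) { (void)mp; return 0; }
static void my_free(struct rte_mempool *mp) { (void)mp; }
static int my_enqueue(struct rte_mempool *mp, void * const *obj_table,
        unsigned int n) { (void)mp; (void)obj_table; (void)n; return 0; }
static int my_dequeue(struct rte_mempool *mp, void **obj_table,
        unsigned int n) { (void)mp; (void)obj_table; (void)n; return -ENOBUFS; }
static unsigned int my_get_count(const struct rte_mempool *mp)
        { (void)mp; return 0; }

/* The register path copies this into rte_mempool_ops_table; core callbacks
 * must all be set, and the name must fit in ops->name. */
static const struct rte_mempool_ops my_pool_ops = {
    .name      = "my_custom_pool",
    .alloc     = my_alloc,
    .free      = my_free,
    .enqueue   = my_enqueue,
    .dequeue   = my_dequeue,
    .get_count = my_get_count,
};

MEMPOOL_REGISTER_OPS(my_pool_ops);
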
/deb_dpdk/lib/librte_eal/linuxapp/kni/ethtool/igb/
e1000_api.c
41 if (hw->mac.ops.init_params) {
42 ret_val = hw->mac.ops.init_params(hw);
67 if (hw->nvm.ops.init_params) {
68 ret_val = hw->nvm.ops.init_params(hw);
93 if (hw->phy.ops.init_params) {
94 ret_val = hw->phy.ops.init_params(hw);
119 if (hw->mbx.ops.init_params) {
120 ret_val = hw->mbx.ops.init_params(hw);
310 if (hw->mac.ops.get_bus_info)
311 return hw->mac.ops
[all...]
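
Both copies of e1000_api.c (this KNI ethtool one and the PMD base one below) show the same dispatch shape: each hardware block (mac, nvm, phy, mbx) carries an ops table of function pointers, and the shared API layer calls a pointer only after checking that device-specific setup installed one. A standalone analogue of that guard, with hypothetical types and names rather than the driver's own:

#include <stddef.h>

/* Hypothetical, minimal stand-ins for the driver's per-block ops tables. */
struct mac_ops {
    int (*init_params)(void *hw);   /* may legitimately stay NULL */
};

struct hw {
    struct mac_ops mac;
};

/* Same shape as the init_params checks in the hits: dispatch through the
 * pointer only if it was set, otherwise fall through without error. */
static int init_mac_params(struct hw *hw)
{
    int ret_val = 0;

    if (hw->mac.init_params)
        ret_val = hw->mac.init_params(hw);
    return ret_val;
}
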
e1000_mbx.c
76 if (mbx->ops.read)
77 ret_val = mbx->ops.read(hw, msg, size, mbx_id);
101 else if (mbx->ops.write)
102 ret_val = mbx->ops.write(hw, msg, size, mbx_id);
121 if (mbx->ops.check_for_msg)
122 ret_val = mbx->ops.check_for_msg(hw, mbx_id);
141 if (mbx->ops.check_for_ack)
142 ret_val = mbx->ops.check_for_ack(hw, mbx_id);
161 if (mbx->ops.check_for_rst)
162 ret_val = mbx->ops
[all...]
e1000_phy.c
63 phy->ops.init_params = e1000_null_ops_generic;
64 phy->ops.acquire = e1000_null_ops_generic;
65 phy->ops.check_polarity = e1000_null_ops_generic;
66 phy->ops.check_reset_block = e1000_null_ops_generic;
67 phy->ops.commit = e1000_null_ops_generic;
68 phy->ops.force_speed_duplex = e1000_null_ops_generic;
69 phy->ops.get_cfg_done = e1000_null_ops_generic;
70 phy->ops.get_cable_length = e1000_null_ops_generic;
71 phy->ops.get_info = e1000_null_ops_generic;
72 phy->ops
[all...]
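
e1000_phy.c takes the complementary approach: the block of assignments at lines 63-72 pre-fills every phy->ops slot with e1000_null_ops_generic, so later code can call through the table without NULL checks and devices that lack a feature simply hit a harmless stub. A standalone sketch of that null-object default, with illustrative names rather than the driver's:

/* Null-object default: every slot starts as a do-nothing stub that reports
 * success; device-specific init then overrides only what it implements. */
typedef int (*phy_op_t)(void *hw);

struct phy_ops {
    phy_op_t init_params;
    phy_op_t acquire;
    phy_op_t check_polarity;
    phy_op_t commit;
};

static int null_op_generic(void *hw)
{
    (void)hw;
    return 0;               /* "success", nothing to do */
}

static void phy_ops_init_defaults(struct phy_ops *ops)
{
    ops->init_params    = null_op_generic;
    ops->acquire        = null_op_generic;
    ops->check_polarity = null_op_generic;
    ops->commit         = null_op_generic;
}
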
/deb_dpdk/drivers/net/e1000/base/
e1000_api.c
47 if (hw->mac.ops.init_params) {
48 ret_val = hw->mac.ops.init_params(hw);
73 if (hw->nvm.ops.init_params) {
74 ret_val = hw->nvm.ops.init_params(hw);
99 if (hw->phy.ops.init_params) {
100 ret_val = hw->phy.ops.init_params(hw);
125 if (hw->mbx.ops.init_params) {
126 ret_val = hw->mbx.ops.init_params(hw);
512 if (hw->mac.ops.get_bus_info)
513 return hw->mac.ops
[all...]
e1000_82541.c
96 phy->ops.check_polarity = e1000_check_polarity_igp;
97 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
98 phy->ops.get_cable_length = e1000_get_cable_length_igp_82541;
99 phy->ops.get_cfg_done = e1000_get_cfg_done_generic;
100 phy->ops.get_info = e1000_get_phy_info_igp;
101 phy->ops.read_reg = e1000_read_phy_reg_igp;
102 phy->ops.reset = e1000_phy_hw_reset_82541;
103 phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82541;
104 phy->ops.write_reg = e1000_write_phy_reg_igp;
105 phy->ops
[all...]
e1000_82540.c
76 phy->ops.check_polarity = e1000_check_polarity_m88;
77 phy->ops.commit = e1000_phy_sw_reset_generic;
78 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
79 phy->ops.get_cable_length = e1000_get_cable_length_m88;
80 phy->ops.get_cfg_done = e1000_get_cfg_done_generic;
81 phy->ops.read_reg = e1000_read_phy_reg_m88;
82 phy->ops.reset = e1000_phy_hw_reset_generic;
83 phy->ops.write_reg = e1000_write_phy_reg_m88;
84 phy->ops.get_info = e1000_get_phy_info_m88;
85 phy->ops
[all...]
e1000_phy.c
75 phy->ops.init_params = e1000_null_ops_generic;
76 phy->ops.acquire = e1000_null_ops_generic;
77 phy->ops.check_polarity = e1000_null_ops_generic;
78 phy->ops.check_reset_block = e1000_null_ops_generic;
79 phy->ops.commit = e1000_null_ops_generic;
80 phy->ops.force_speed_duplex = e1000_null_ops_generic;
81 phy->ops.get_cfg_done = e1000_null_ops_generic;
82 phy->ops.get_cable_length = e1000_null_ops_generic;
83 phy->ops.get_info = e1000_null_ops_generic;
84 phy->ops
[all...]
e1000_80003es2lan.c
99 phy->ops.power_up = e1000_power_up_phy_copper;
100 phy->ops.power_down = e1000_power_down_phy_copper_80003es2lan;
108 phy->ops.acquire = e1000_acquire_phy_80003es2lan;
109 phy->ops.check_polarity = e1000_check_polarity_m88;
110 phy->ops.check_reset_block = e1000_check_reset_block_generic;
111 phy->ops.commit = e1000_phy_sw_reset_generic;
112 phy->ops.get_cfg_done = e1000_get_cfg_done_80003es2lan;
113 phy->ops.get_info = e1000_get_phy_info_m88;
114 phy->ops.release = e1000_release_phy_80003es2lan;
115 phy->ops
[all...]
e1000_82571.c
109 phy->ops.check_reset_block = e1000_check_reset_block_generic;
110 phy->ops.reset = e1000_phy_hw_reset_generic;
111 phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82571;
112 phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_generic;
113 phy->ops.power_up = e1000_power_up_phy_copper;
114 phy->ops.power_down = e1000_power_down_phy_copper_82571;
120 phy->ops.get_cfg_done = e1000_get_cfg_done_82571;
121 phy->ops.get_info = e1000_get_phy_info_igp;
122 phy->ops.check_polarity = e1000_check_polarity_igp;
123 phy->ops
[all...]
e1000_ich8lan.c
199 ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_reg);
204 ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_reg);
226 hw->phy.ops.release(hw);
230 hw->phy.ops.acquire(hw);
241 hw->phy.ops.read_reg_locked(hw, CV_SMB_CTRL, &phy_reg);
243 hw->phy.ops.write_reg_locked(hw, CV_SMB_CTRL, phy_reg);
325 ret_val = hw->phy.ops.acquire(hw);
364 if (hw->phy.ops.check_reset_block(hw)) {
393 hw->phy.ops.release(hw);
397 if (hw->phy.ops
[all...]
e1000_82575.c
171 phy->ops.read_i2c_byte = e1000_read_i2c_byte_generic;
172 phy->ops.write_i2c_byte = e1000_write_i2c_byte_generic;
179 phy->ops.power_up = e1000_power_up_phy_copper;
180 phy->ops.power_down = e1000_power_down_phy_copper_82575;
185 phy->ops.acquire = e1000_acquire_phy_82575;
186 phy->ops.check_reset_block = e1000_check_reset_block_generic;
187 phy->ops.commit = e1000_phy_sw_reset_generic;
188 phy->ops.get_cfg_done = e1000_get_cfg_done_82575;
189 phy->ops.release = e1000_release_phy_82575;
194 phy->ops
[all...]
e1000_82542.c
86 nvm->ops.read = e1000_read_nvm_microwire;
87 nvm->ops.release = e1000_stop_nvm;
88 nvm->ops.write = e1000_write_nvm_microwire;
89 nvm->ops.update = e1000_update_nvm_checksum_generic;
90 nvm->ops.validate = e1000_validate_nvm_checksum_generic;
116 mac->ops.get_bus_info = e1000_get_bus_info_82542;
118 mac->ops.set_lan_id = e1000_set_lan_id_multi_port_pci;
120 mac->ops.reset_hw = e1000_reset_hw_82542;
122 mac->ops.init_hw = e1000_init_hw_82542;
124 mac->ops
[all...]
/deb_dpdk/drivers/net/ixgbe/base/
ixgbe_api.c
68 if (hw->mac.ops.get_rtrup2tc)
69 hw->mac.ops.get_rtrup2tc(hw, map);
263 return ixgbe_call_func(hw, hw->mac.ops.init_hw, (hw),
276 return ixgbe_call_func(hw, hw->mac.ops.reset_hw, (hw),
292 return ixgbe_call_func(hw, hw->mac.ops.start_hw, (hw),
306 if (hw->mac.ops.enable_relaxed_ordering)
307 hw->mac.ops.enable_relaxed_ordering(hw);
319 return ixgbe_call_func(hw, hw->mac.ops.clear_hw_cntrs, (hw),
331 return ixgbe_call_func(hw, hw->mac.ops.get_media_type, (hw),
347 return ixgbe_call_func(hw, hw->mac.ops
[all...]
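
The ixgbe_api.c hits route nearly every public entry point through ixgbe_call_func(hw, hw->mac.ops.<fn>, (hw), ...), which dispatches through the pointer when it is set and otherwise yields the supplied error value. A sketch of the same idea with a hypothetical macro and error code, not the driver's own definition:

#include <stddef.h>

#define ERR_NOT_IMPLEMENTED (-1)   /* hypothetical error code */

/* Evaluate to the callback's result if it is set, else to `err`. */
#define CALL_FUNC(fn, args, err) ((fn) != NULL ? (fn) args : (err))

struct mac_ops {
    int (*init_hw)(void *hw);
};

struct hw {
    struct mac_ops mac;
};

static int init_hw(struct hw *hw)
{
    /* Mirrors the shape of: return ixgbe_call_func(hw, hw->mac.ops.init_hw,
     * (hw), <error>); in the hits above. */
    return CALL_FUNC(hw->mac.init_hw, (hw), ERR_NOT_IMPLEMENTED);
}
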
ixgbe_x550.c
62 mac->ops.dmac_config = ixgbe_dmac_config_X550;
63 mac->ops.dmac_config_tcs = ixgbe_dmac_config_tcs_X550;
64 mac->ops.dmac_update_tcs = ixgbe_dmac_update_tcs_X550;
65 mac->ops.setup_eee = ixgbe_setup_eee_X550;
66 mac->ops.set_source_address_pruning =
68 mac->ops.set_ethertype_anti_spoofing =
71 mac->ops.get_rtrup2tc = ixgbe_dcb_get_rtrup2tc_generic;
72 eeprom->ops.init_params = ixgbe_init_eeprom_params_X550;
73 eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_X550;
74 eeprom->ops
[all...]
ixgbe_x540.c
72 eeprom->ops.init_params = ixgbe_init_eeprom_params_X540;
73 eeprom->ops.read = ixgbe_read_eerd_X540;
74 eeprom->ops.read_buffer = ixgbe_read_eerd_buffer_X540;
75 eeprom->ops.write = ixgbe_write_eewr_X540;
76 eeprom->ops.write_buffer = ixgbe_write_eewr_buffer_X540;
77 eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_X540;
78 eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_X540;
79 eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_X540;
82 phy->ops.init = ixgbe_init_phy_ops_generic;
83 phy->ops
[all...]
ixgbe_82598.c
130 phy->ops.init = ixgbe_init_phy_ops_82598;
133 mac->ops.start_hw = ixgbe_start_hw_82598;
134 mac->ops.enable_relaxed_ordering = ixgbe_enable_relaxed_ordering_82598;
135 mac->ops.reset_hw = ixgbe_reset_hw_82598;
136 mac->ops.get_media_type = ixgbe_get_media_type_82598;
137 mac->ops.get_supported_physical_layer =
139 mac->ops.read_analog_reg8 = ixgbe_read_analog_reg8_82598;
140 mac->ops.write_analog_reg8 = ixgbe_write_analog_reg8_82598;
141 mac->ops.set_lan_id = ixgbe_set_lan_id_multi_port_pcie_82598;
142 mac->ops
[all...]
ixgbe_phy.c
130 if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
164 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
171 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
207 if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
230 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
236 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
260 phy->ops.identify = ixgbe_identify_phy_generic;
261 phy->ops.reset = ixgbe_reset_phy_generic;
262 phy->ops.read_reg = ixgbe_read_phy_reg_generic;
263 phy->ops
[all...]
/deb_dpdk/lib/librte_eal/linuxapp/kni/ethtool/ixgbe/
ixgbe_api.c
138 return ixgbe_call_func(hw, hw->mac.ops.init_hw, (hw),
151 return ixgbe_call_func(hw, hw->mac.ops.reset_hw, (hw),
167 return ixgbe_call_func(hw, hw->mac.ops.start_hw, (hw),
180 return ixgbe_call_func(hw, hw->mac.ops.clear_hw_cntrs, (hw),
192 return ixgbe_call_func(hw, hw->mac.ops.get_media_type, (hw),
208 return ixgbe_call_func(hw, hw->mac.ops.get_mac_addr,
222 return ixgbe_call_func(hw, hw->mac.ops.get_san_mac_addr,
235 return ixgbe_call_func(hw, hw->mac.ops.set_san_mac_addr,
248 return ixgbe_call_func(hw, hw->mac.ops.get_device_caps,
264 return ixgbe_call_func(hw, hw->mac.ops
[all...]
ixgbe_x540.c
58 eeprom->ops.init_params = &ixgbe_init_eeprom_params_X540;
59 eeprom->ops.read = &ixgbe_read_eerd_X540;
60 eeprom->ops.read_buffer = &ixgbe_read_eerd_buffer_X540;
61 eeprom->ops.write = &ixgbe_write_eewr_X540;
62 eeprom->ops.write_buffer = &ixgbe_write_eewr_buffer_X540;
63 eeprom->ops.update_checksum = &ixgbe_update_eeprom_checksum_X540;
64 eeprom->ops.validate_checksum = &ixgbe_validate_eeprom_checksum_X540;
65 eeprom->ops.calc_checksum = &ixgbe_calc_eeprom_checksum_X540;
68 phy->ops.init = &ixgbe_init_phy_ops_generic;
69 phy->ops
[all...]
/deb_dpdk/drivers/crypto/zuc/
rte_zuc_pmd.c
189 process_zuc_cipher_op(struct rte_crypto_op **ops, argument
202 if (unlikely(ops[i]->sym->cipher.iv.length != ZUC_IV_KEY_LENGTH)) {
203 ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
208 if (((ops[i]->sym->cipher.data.length % BYTE_LEN) != 0)
209 || ((ops[i]->sym->cipher.data.offset
211 ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
216 src[i] = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
217 (ops[i]->sym->cipher.data.offset >> 3);
218 dst[i] = ops[i]->sym->m_dst ?
219 rte_pktmbuf_mtod(ops[
239 process_zuc_hash_op(struct rte_crypto_op **ops, struct zuc_session *session, uint8_t num_ops) argument
304 process_ops(struct rte_crypto_op **ops, struct zuc_session *session, struct zuc_qp *qp, uint8_t num_ops, uint16_t *accumulated_enqueued_ops) argument
358 zuc_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops, uint16_t nb_ops) argument
[all...]
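
The ZUC, KASUMI and SNOW 3G PMD hits share one burst-processing loop: walk the ops[] array, mark ops with a bad IV length or non-byte-aligned data as RTE_CRYPTO_OP_STATUS_INVALID_ARGS, and convert bit offsets to byte pointers with (offset >> 3) before handing the batch to the cipher. A self-contained analogue of that validate-and-gather step, using hypothetical stand-in types instead of the rte_crypto_op layout:

#include <stddef.h>
#include <stdint.h>

/* Hypothetical, simplified stand-ins for the crypto op fields in the hits. */
enum op_status { OP_OK, OP_INVALID_ARGS };

struct sym_op {
    uint32_t iv_len;
    uint32_t data_offset_bits;   /* bit offset into the packet buffer */
    uint32_t data_length_bits;
    uint8_t *buf;
};

struct crypto_op {
    enum op_status status;
    struct sym_op sym;
};

#define IV_LEN   16              /* placeholder for the algorithm's IV size */
#define BYTE_LEN  8

/* Validate each op in the burst, flag bad ones, and turn bit offsets into
 * byte pointers; returns how many ops were accepted for processing. */
static size_t gather_burst(struct crypto_op **ops, uint8_t num_ops,
                           uint8_t **src)
{
    size_t n = 0;

    for (uint8_t i = 0; i < num_ops; i++) {
        if (ops[i]->sym.iv_len != IV_LEN ||
            ops[i]->sym.data_length_bits % BYTE_LEN != 0 ||
            ops[i]->sym.data_offset_bits % BYTE_LEN != 0) {
            ops[i]->status = OP_INVALID_ARGS;
            break;
        }
        src[n++] = ops[i]->sym.buf + (ops[i]->sym.data_offset_bits >> 3);
    }
    return n;
}
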
/deb_dpdk/drivers/net/ixgbe/
ixgbe_bypass.c
88 FUNC_PTR_OR_RET(adapter->bps.ops.bypass_set);
94 adapter->bps.ops.bypass_set(hw, BYPASS_PAGE_CTL1, mask, value);
114 /* Only allow BYPASS ops on the first port */
121 /* set bypass ops. */
122 adapter->bps.ops.bypass_rw = &ixgbe_bypass_rw_generic;
123 adapter->bps.ops.bypass_valid_rd = &ixgbe_bypass_valid_rd_generic;
124 adapter->bps.ops.bypass_set = &ixgbe_bypass_set_generic;
125 adapter->bps.ops.bypass_rd_eep = &ixgbe_bypass_rd_eep_generic;
131 hw->mac.ops.disable_tx_laser = NULL;
132 hw->mac.ops
[all...]
/deb_dpdk/drivers/net/fm10k/base/
fm10k_api.c
131 return fm10k_call_func(hw, hw->mac.ops.reset_hw, (hw),
143 return fm10k_call_func(hw, hw->mac.ops.init_hw, (hw),
155 return fm10k_call_func(hw, hw->mac.ops.stop_hw, (hw),
168 return fm10k_call_func(hw, hw->mac.ops.start_hw, (hw),
180 return fm10k_call_func(hw, hw->mac.ops.get_bus_info, (hw),
194 if (hw->mac.ops.is_slot_appropriate)
195 return hw->mac.ops.is_slot_appropriate(hw);
212 return fm10k_call_func(hw, hw->mac.ops.update_vlan, (hw, vid, idx, set),
225 return fm10k_call_func(hw, hw->mac.ops.read_mac_addr, (hw),
237 if (hw->mac.ops
[all...]
/deb_dpdk/drivers/crypto/kasumi/
rte_kasumi_pmd.c
190 process_kasumi_cipher_op(struct rte_crypto_op **ops, argument
202 if (ops[i]->sym->cipher.iv.length != KASUMI_IV_LENGTH) {
203 ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
208 src[i] = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
209 (ops[i]->sym->cipher.data.offset >> 3);
210 dst[i] = ops[i]->sym->m_dst ?
211 rte_pktmbuf_mtod(ops[i]->sym->m_dst, uint8_t *) +
212 (ops[i]->sym->cipher.data.offset >> 3) :
213 rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
214 (ops[
263 process_kasumi_hash_op(struct rte_crypto_op **ops, struct kasumi_session *session, uint8_t num_ops) argument
337 process_ops(struct rte_crypto_op **ops, struct kasumi_session *session, struct kasumi_qp *qp, uint8_t num_ops, uint16_t *accumulated_enqueued_ops) argument
442 kasumi_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops, uint16_t nb_ops) argument
[all...]
/deb_dpdk/drivers/crypto/snow3g/
rte_snow3g_pmd.c
190 process_snow3g_cipher_op(struct rte_crypto_op **ops, argument
202 if (unlikely(ops[i]->sym->cipher.iv.length != SNOW3G_IV_LENGTH)) {
203 ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
208 src[i] = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
209 (ops[i]->sym->cipher.data.offset >> 3);
210 dst[i] = ops[i]->sym->m_dst ?
211 rte_pktmbuf_mtod(ops[i]->sym->m_dst, uint8_t *) +
212 (ops[i]->sym->cipher.data.offset >> 3) :
213 rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
214 (ops[
262 process_snow3g_hash_op(struct rte_crypto_op **ops, struct snow3g_session *session, uint8_t num_ops) argument
326 process_ops(struct rte_crypto_op **ops, struct snow3g_session *session, struct snow3g_qp *qp, uint8_t num_ops, uint16_t *accumulated_enqueued_ops) argument
431 snow3g_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops, uint16_t nb_ops) argument
[all...]

Completed in 32 milliseconds
