aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorJohannes Berg2013-02-25 09:01:34 -0600
committerJohannes Berg2013-02-26 15:09:12 -0600
commit8a964f44e01ad3bbc208c3e80d931ba91b9ea786 (patch)
treedfcd5d77e86358daf79df6810933d1e9ddc86aba
parentdc4a787c8f52f9d6e46156953f8014a3353bcbc7 (diff)
downloadam43-linux-kernel-8a964f44e01ad3bbc208c3e80d931ba91b9ea786.tar.gz
am43-linux-kernel-8a964f44e01ad3bbc208c3e80d931ba91b9ea786.tar.xz
am43-linux-kernel-8a964f44e01ad3bbc208c3e80d931ba91b9ea786.zip
iwlwifi: always copy first 16 bytes of commands
The FH hardware will always write back to the scratch field in commands, even host commands not just TX commands, which can overwrite parts of the command. This is problematic if the command is re-used (with IWL_HCMD_DFL_NOCOPY) and can cause calibration issues.

Address this problem by always putting at least the first 16 bytes into the buffer we also use for the command header and therefore make the DMA engine write back into this.

For commands that are smaller than 16 bytes also always map enough memory for the DMA engine to write back to.

Cc: stable@vger.kernel.org
Reviewed-by: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace.h10
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/internal.h9
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/tx.c75
3 files changed, 71 insertions, 23 deletions
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
index 9a0f45ec9e0..10f01793d7a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.h
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
@@ -349,25 +349,23 @@ TRACE_EVENT(iwlwifi_dev_rx_data,
349TRACE_EVENT(iwlwifi_dev_hcmd, 349TRACE_EVENT(iwlwifi_dev_hcmd,
350 TP_PROTO(const struct device *dev, 350 TP_PROTO(const struct device *dev,
351 struct iwl_host_cmd *cmd, u16 total_size, 351 struct iwl_host_cmd *cmd, u16 total_size,
352 const void *hdr, size_t hdr_len), 352 struct iwl_cmd_header *hdr),
353 TP_ARGS(dev, cmd, total_size, hdr, hdr_len), 353 TP_ARGS(dev, cmd, total_size, hdr),
354 TP_STRUCT__entry( 354 TP_STRUCT__entry(
355 DEV_ENTRY 355 DEV_ENTRY
356 __dynamic_array(u8, hcmd, total_size) 356 __dynamic_array(u8, hcmd, total_size)
357 __field(u32, flags) 357 __field(u32, flags)
358 ), 358 ),
359 TP_fast_assign( 359 TP_fast_assign(
360 int i, offset = hdr_len; 360 int i, offset = sizeof(*hdr);
361 361
362 DEV_ASSIGN; 362 DEV_ASSIGN;
363 __entry->flags = cmd->flags; 363 __entry->flags = cmd->flags;
364 memcpy(__get_dynamic_array(hcmd), hdr, hdr_len); 364 memcpy(__get_dynamic_array(hcmd), hdr, sizeof(*hdr));
365 365
366 for (i = 0; i < IWL_MAX_CMD_TFDS; i++) { 366 for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
367 if (!cmd->len[i]) 367 if (!cmd->len[i])
368 continue; 368 continue;
369 if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
370 continue;
371 memcpy((u8 *)__get_dynamic_array(hcmd) + offset, 369 memcpy((u8 *)__get_dynamic_array(hcmd) + offset,
372 cmd->data[i], cmd->len[i]); 370 cmd->data[i], cmd->len[i]);
373 offset += cmd->len[i]; 371 offset += cmd->len[i];
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h
index aa2a39a637d..3d62e805535 100644
--- a/drivers/net/wireless/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/iwlwifi/pcie/internal.h
@@ -182,6 +182,15 @@ struct iwl_queue {
182#define TFD_TX_CMD_SLOTS 256 182#define TFD_TX_CMD_SLOTS 256
183#define TFD_CMD_SLOTS 32 183#define TFD_CMD_SLOTS 32
184 184
185/*
186 * The FH will write back to the first TB only, so we need
187 * to copy some data into the buffer regardless of whether
188 * it should be mapped or not. This indicates how much to
189 * copy, even for HCMDs it must be big enough to fit the
190 * DRAM scratch from the TX cmd, at least 16 bytes.
191 */
192#define IWL_HCMD_MIN_COPY_SIZE 16
193
185struct iwl_pcie_txq_entry { 194struct iwl_pcie_txq_entry {
186 struct iwl_device_cmd *cmd; 195 struct iwl_device_cmd *cmd;
187 struct iwl_device_cmd *copy_cmd; 196 struct iwl_device_cmd *copy_cmd;
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index 8e9e3212fe7..8b625a7f568 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -1152,10 +1152,12 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
1152 void *dup_buf = NULL; 1152 void *dup_buf = NULL;
1153 dma_addr_t phys_addr; 1153 dma_addr_t phys_addr;
1154 int idx; 1154 int idx;
1155 u16 copy_size, cmd_size; 1155 u16 copy_size, cmd_size, dma_size;
1156 bool had_nocopy = false; 1156 bool had_nocopy = false;
1157 int i; 1157 int i;
1158 u32 cmd_pos; 1158 u32 cmd_pos;
1159 const u8 *cmddata[IWL_MAX_CMD_TFDS];
1160 u16 cmdlen[IWL_MAX_CMD_TFDS];
1159 1161
1160 copy_size = sizeof(out_cmd->hdr); 1162 copy_size = sizeof(out_cmd->hdr);
1161 cmd_size = sizeof(out_cmd->hdr); 1163 cmd_size = sizeof(out_cmd->hdr);
@@ -1164,8 +1166,23 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
1164 BUILD_BUG_ON(IWL_MAX_CMD_TFDS > IWL_NUM_OF_TBS - 1); 1166 BUILD_BUG_ON(IWL_MAX_CMD_TFDS > IWL_NUM_OF_TBS - 1);
1165 1167
1166 for (i = 0; i < IWL_MAX_CMD_TFDS; i++) { 1168 for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
1169 cmddata[i] = cmd->data[i];
1170 cmdlen[i] = cmd->len[i];
1171
1167 if (!cmd->len[i]) 1172 if (!cmd->len[i])
1168 continue; 1173 continue;
1174
1175 /* need at least IWL_HCMD_MIN_COPY_SIZE copied */
1176 if (copy_size < IWL_HCMD_MIN_COPY_SIZE) {
1177 int copy = IWL_HCMD_MIN_COPY_SIZE - copy_size;
1178
1179 if (copy > cmdlen[i])
1180 copy = cmdlen[i];
1181 cmdlen[i] -= copy;
1182 cmddata[i] += copy;
1183 copy_size += copy;
1184 }
1185
1169 if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) { 1186 if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
1170 had_nocopy = true; 1187 had_nocopy = true;
1171 if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) { 1188 if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {
@@ -1185,7 +1202,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
1185 goto free_dup_buf; 1202 goto free_dup_buf;
1186 } 1203 }
1187 1204
1188 dup_buf = kmemdup(cmd->data[i], cmd->len[i], 1205 dup_buf = kmemdup(cmddata[i], cmdlen[i],
1189 GFP_ATOMIC); 1206 GFP_ATOMIC);
1190 if (!dup_buf) 1207 if (!dup_buf)
1191 return -ENOMEM; 1208 return -ENOMEM;
@@ -1195,7 +1212,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
1195 idx = -EINVAL; 1212 idx = -EINVAL;
1196 goto free_dup_buf; 1213 goto free_dup_buf;
1197 } 1214 }
1198 copy_size += cmd->len[i]; 1215 copy_size += cmdlen[i];
1199 } 1216 }
1200 cmd_size += cmd->len[i]; 1217 cmd_size += cmd->len[i];
1201 } 1218 }
@@ -1242,14 +1259,31 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
1242 1259
1243 /* and copy the data that needs to be copied */ 1260 /* and copy the data that needs to be copied */
1244 cmd_pos = offsetof(struct iwl_device_cmd, payload); 1261 cmd_pos = offsetof(struct iwl_device_cmd, payload);
1262 copy_size = sizeof(out_cmd->hdr);
1245 for (i = 0; i < IWL_MAX_CMD_TFDS; i++) { 1263 for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
1246 if (!cmd->len[i]) 1264 int copy = 0;
1265
1266 if (!cmd->len)
1247 continue; 1267 continue;
1248 if (cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY | 1268
1249 IWL_HCMD_DFL_DUP)) 1269 /* need at least IWL_HCMD_MIN_COPY_SIZE copied */
1250 break; 1270 if (copy_size < IWL_HCMD_MIN_COPY_SIZE) {
1251 memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], cmd->len[i]); 1271 copy = IWL_HCMD_MIN_COPY_SIZE - copy_size;
1252 cmd_pos += cmd->len[i]; 1272
1273 if (copy > cmd->len[i])
1274 copy = cmd->len[i];
1275 }
1276
1277 /* copy everything if not nocopy/dup */
1278 if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
1279 IWL_HCMD_DFL_DUP)))
1280 copy = cmd->len[i];
1281
1282 if (copy) {
1283 memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
1284 cmd_pos += copy;
1285 copy_size += copy;
1286 }
1253 } 1287 }
1254 1288
1255 WARN_ON_ONCE(txq->entries[idx].copy_cmd); 1289 WARN_ON_ONCE(txq->entries[idx].copy_cmd);
@@ -1275,7 +1309,14 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
1275 out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence), 1309 out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
1276 cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue); 1310 cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue);
1277 1311
1278 phys_addr = dma_map_single(trans->dev, &out_cmd->hdr, copy_size, 1312 /*
1313 * If the entire command is smaller than IWL_HCMD_MIN_COPY_SIZE, we must
1314 * still map at least that many bytes for the hardware to write back to.
1315 * We have enough space, so that's not a problem.
1316 */
1317 dma_size = max_t(u16, copy_size, IWL_HCMD_MIN_COPY_SIZE);
1318
1319 phys_addr = dma_map_single(trans->dev, &out_cmd->hdr, dma_size,
1279 DMA_BIDIRECTIONAL); 1320 DMA_BIDIRECTIONAL);
1280 if (unlikely(dma_mapping_error(trans->dev, phys_addr))) { 1321 if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
1281 idx = -ENOMEM; 1322 idx = -ENOMEM;
@@ -1283,14 +1324,15 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
1283 } 1324 }
1284 1325
1285 dma_unmap_addr_set(out_meta, mapping, phys_addr); 1326 dma_unmap_addr_set(out_meta, mapping, phys_addr);
1286 dma_unmap_len_set(out_meta, len, copy_size); 1327 dma_unmap_len_set(out_meta, len, dma_size);
1287 1328
1288 iwl_pcie_txq_build_tfd(trans, txq, phys_addr, copy_size, 1); 1329 iwl_pcie_txq_build_tfd(trans, txq, phys_addr, copy_size, 1);
1289 1330
1331 /* map the remaining (adjusted) nocopy/dup fragments */
1290 for (i = 0; i < IWL_MAX_CMD_TFDS; i++) { 1332 for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
1291 const void *data = cmd->data[i]; 1333 const void *data = cmddata[i];
1292 1334
1293 if (!cmd->len[i]) 1335 if (!cmdlen[i])
1294 continue; 1336 continue;
1295 if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY | 1337 if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
1296 IWL_HCMD_DFL_DUP))) 1338 IWL_HCMD_DFL_DUP)))
@@ -1298,7 +1340,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
1298 if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) 1340 if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
1299 data = dup_buf; 1341 data = dup_buf;
1300 phys_addr = dma_map_single(trans->dev, (void *)data, 1342 phys_addr = dma_map_single(trans->dev, (void *)data,
1301 cmd->len[i], DMA_BIDIRECTIONAL); 1343 cmdlen[i], DMA_BIDIRECTIONAL);
1302 if (dma_mapping_error(trans->dev, phys_addr)) { 1344 if (dma_mapping_error(trans->dev, phys_addr)) {
1303 iwl_pcie_tfd_unmap(trans, out_meta, 1345 iwl_pcie_tfd_unmap(trans, out_meta,
1304 &txq->tfds[q->write_ptr], 1346 &txq->tfds[q->write_ptr],
@@ -1307,7 +1349,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
1307 goto out; 1349 goto out;
1308 } 1350 }
1309 1351
1310 iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmd->len[i], 0); 1352 iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], 0);
1311 } 1353 }
1312 1354
1313 out_meta->flags = cmd->flags; 1355 out_meta->flags = cmd->flags;
@@ -1317,8 +1359,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
1317 1359
1318 txq->need_update = 1; 1360 txq->need_update = 1;
1319 1361
1320 trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, 1362 trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr);
1321 &out_cmd->hdr, copy_size);
1322 1363
1323 /* start timer if queue currently empty */ 1364 /* start timer if queue currently empty */
1324 if (q->read_ptr == q->write_ptr && trans_pcie->wd_timeout) 1365 if (q->read_ptr == q->write_ptr && trans_pcie->wd_timeout)