Rename paddr -> iova and vaddr -> virt to make it clearer how these
addresses are used. This is important for a subsequent patch that makes
a distinction between the physical address (the address of system
memory as seen by the CPU) and the IOVA (I/O virtual address: the
address of system memory as seen by the device, e.g. as translated
through an IOMMU).
Signed-off-by: Thierry Reding <treding@nvidia.com>
static void falcon_copy_firmware_image(struct falcon *falcon,
const struct firmware *firmware)
{
static void falcon_copy_firmware_image(struct falcon *falcon,
const struct firmware *firmware)
{
- u32 *firmware_vaddr = falcon->firmware.vaddr;
+ u32 *virt = falcon->firmware.virt;
size_t i;
/* copy the whole thing taking into account endianness */
for (i = 0; i < firmware->size / sizeof(u32); i++)
size_t i;
/* copy the whole thing taking into account endianness */
for (i = 0; i < firmware->size / sizeof(u32); i++)
- firmware_vaddr[i] = le32_to_cpu(((u32 *)firmware->data)[i]);
+ virt[i] = le32_to_cpu(((u32 *)firmware->data)[i]);
}
static int falcon_parse_firmware_image(struct falcon *falcon)
{
}
static int falcon_parse_firmware_image(struct falcon *falcon)
{
- struct falcon_fw_bin_header_v1 *bin = (void *)falcon->firmware.vaddr;
+ struct falcon_fw_bin_header_v1 *bin = (void *)falcon->firmware.virt;
struct falcon_fw_os_header_v1 *os;
/* endian problems would show up right here */
struct falcon_fw_os_header_v1 *os;
/* endian problems would show up right here */
- os = falcon->firmware.vaddr + bin->os_header_offset;
+ os = falcon->firmware.virt + bin->os_header_offset;
falcon->firmware.bin_data.size = bin->os_size;
falcon->firmware.bin_data.offset = bin->os_data_offset;
falcon->firmware.bin_data.size = bin->os_size;
falcon->firmware.bin_data.offset = bin->os_data_offset;
int falcon_init(struct falcon *falcon)
{
int falcon_init(struct falcon *falcon)
{
- falcon->firmware.vaddr = NULL;
+ falcon->firmware.virt = NULL;
- if (!falcon->firmware.vaddr)
+ if (!falcon->firmware.virt)
return -EINVAL;
err = readl_poll_timeout(falcon->regs + FALCON_DMACTL, value,
return -EINVAL;
err = readl_poll_timeout(falcon->regs + FALCON_DMACTL, value,
falcon_writel(falcon, 0, FALCON_DMACTL);
/* setup the address of the binary data so Falcon can access it later */
falcon_writel(falcon, 0, FALCON_DMACTL);
/* setup the address of the binary data so Falcon can access it later */
- falcon_writel(falcon, (falcon->firmware.paddr +
+ falcon_writel(falcon, (falcon->firmware.iova +
falcon->firmware.bin_data.offset) >> 8,
FALCON_DMATRFBASE);
falcon->firmware.bin_data.offset) >> 8,
FALCON_DMATRFBASE);
const struct firmware *firmware;
/* Raw firmware data */
const struct firmware *firmware;
/* Raw firmware data */
- dma_addr_t paddr;
- void *vaddr;
+ dma_addr_t iova;
+ dma_addr_t phys;
+ void *virt;
size_t size;
/* Parsed firmware information */
size_t size;
/* Parsed firmware information */
- hdr = vic->falcon.firmware.vaddr;
+ hdr = vic->falcon.firmware.virt;
fce_bin_data_offset = *(u32 *)(hdr + VIC_UCODE_FCE_DATA_OFFSET);
fce_bin_data_offset = *(u32 *)(hdr + VIC_UCODE_FCE_DATA_OFFSET);
- hdr = vic->falcon.firmware.vaddr +
+ hdr = vic->falcon.firmware.virt +
*(u32 *)(hdr + VIC_UCODE_FCE_HEADER_OFFSET);
fce_ucode_size = *(u32 *)(hdr + FCE_UCODE_SIZE_OFFSET);
*(u32 *)(hdr + VIC_UCODE_FCE_HEADER_OFFSET);
fce_ucode_size = *(u32 *)(hdr + FCE_UCODE_SIZE_OFFSET);
falcon_execute_method(&vic->falcon, VIC_SET_FCE_UCODE_SIZE,
fce_ucode_size);
falcon_execute_method(&vic->falcon, VIC_SET_FCE_UCODE_OFFSET,
falcon_execute_method(&vic->falcon, VIC_SET_FCE_UCODE_SIZE,
fce_ucode_size);
falcon_execute_method(&vic->falcon, VIC_SET_FCE_UCODE_OFFSET,
- (vic->falcon.firmware.paddr + fce_bin_data_offset)
+ (vic->falcon.firmware.iova + fce_bin_data_offset)
>> 8);
err = falcon_wait_idle(&vic->falcon);
>> 8);
err = falcon_wait_idle(&vic->falcon);
host1x_channel_put(vic->channel);
host1x_client_iommu_detach(client);
host1x_channel_put(vic->channel);
host1x_client_iommu_detach(client);
+ if (client->group) {
+ dma_unmap_single(vic->dev, vic->falcon.firmware.phys,
+ vic->falcon.firmware.size, DMA_TO_DEVICE);
tegra_drm_free(tegra, vic->falcon.firmware.size,
tegra_drm_free(tegra, vic->falcon.firmware.size,
- vic->falcon.firmware.vaddr,
- vic->falcon.firmware.paddr);
- else
+ vic->falcon.firmware.virt,
+ vic->falcon.firmware.iova);
+ } else {
dma_free_coherent(vic->dev, vic->falcon.firmware.size,
dma_free_coherent(vic->dev, vic->falcon.firmware.size,
- vic->falcon.firmware.vaddr,
- vic->falcon.firmware.paddr);
+ vic->falcon.firmware.virt,
+ vic->falcon.firmware.iova);
+ }
{
struct host1x_client *client = &vic->client.base;
struct tegra_drm *tegra = vic->client.drm;
{
struct host1x_client *client = &vic->client.base;
struct tegra_drm *tegra = vic->client.drm;
size_t size;
void *virt;
int err;
size_t size;
void *virt;
int err;
- if (vic->falcon.firmware.vaddr)
+ if (vic->falcon.firmware.virt)
return 0;
err = falcon_read_firmware(&vic->falcon, vic->config->firmware);
return 0;
err = falcon_read_firmware(&vic->falcon, vic->config->firmware);
size = vic->falcon.firmware.size;
if (!client->group) {
size = vic->falcon.firmware.size;
if (!client->group) {
- virt = dma_alloc_coherent(vic->dev, size, &phys, GFP_KERNEL);
+ virt = dma_alloc_coherent(vic->dev, size, &iova, GFP_KERNEL);
- err = dma_mapping_error(vic->dev, phys);
+ err = dma_mapping_error(vic->dev, iova);
if (err < 0)
return err;
} else {
if (err < 0)
return err;
} else {
- virt = tegra_drm_alloc(tegra, size, &phys);
+ virt = tegra_drm_alloc(tegra, size, &iova);
- vic->falcon.firmware.vaddr = virt;
- vic->falcon.firmware.paddr = phys;
+ vic->falcon.firmware.virt = virt;
+ vic->falcon.firmware.iova = iova;
err = falcon_load_firmware(&vic->falcon);
if (err < 0)
err = falcon_load_firmware(&vic->falcon);
if (err < 0)
* knows what memory pages to flush the cache for.
*/
if (client->group) {
* knows what memory pages to flush the cache for.
*/
if (client->group) {
phys = dma_map_single(vic->dev, virt, size, DMA_TO_DEVICE);
err = dma_mapping_error(vic->dev, phys);
if (err < 0)
goto cleanup;
phys = dma_map_single(vic->dev, virt, size, DMA_TO_DEVICE);
err = dma_mapping_error(vic->dev, phys);
if (err < 0)
goto cleanup;
- /*
- * If the DMA API mapped this through a bounce buffer, the
- * dma_sync_single_for_device() call below will not be able
- * to flush the caches for the right memory pages. Output a
- * big warning in that case so that the DMA mask can be set
- * properly and the bounce buffer avoided.
- */
- WARN(phys != vic->falcon.firmware.paddr,
- "check DMA mask setting for %s\n", dev_name(vic->dev));
+ vic->falcon.firmware.phys = phys;
- dma_sync_single_for_device(vic->dev, phys, size, DMA_TO_DEVICE);
-
- if (client->group)
- dma_unmap_single(vic->dev, phys, size, DMA_TO_DEVICE);
-
return 0;
cleanup:
if (!client->group)
return 0;
cleanup:
if (!client->group)
- dma_free_coherent(vic->dev, size, virt, phys);
+ dma_free_coherent(vic->dev, size, virt, iova);
- tegra_drm_free(tegra, size, virt, phys);
+ tegra_drm_free(tegra, size, virt, iova);