
Merge branch 'drm-etnaviv-next' of git://git.pengutronix.de/git/lst/linux into drm-next

Notable changes:

- Cleanups from Fabio to some error paths and proper error propagation.

- Lots of refactoring and new code to support the new MMU version 2,
still relatively unoptimized and doesn't yet provide better process
isolation than MMUv1, but enough to get newer cores up and running.

- New hardware support: GC3000, as found on the NXP i.MX6 QuadPlus SoC.

* 'drm-etnaviv-next' of git://git.pengutronix.de/git/lst/linux: (25 commits)
  drm/etnaviv: mark whole context as lost in recover worker
  drm/etnaviv: record correct cmdbuf IOVA in dump
  drm/etnaviv: space out IOVA layout for cmdbufs on MMUv2
  drm/etnaviv: fix up model and revision for GC2000+
  drm/etnaviv: implement IOMMUv2 translation
  drm/etnaviv: handle MMU exception in IRQ handler
  drm/etnaviv: add flushing logic for MMUv2
  drm/etnaviv: add function to construct MMUv2 init buffer
  drm/etnaviv: map cmdbuf through MMU on version 2
  drm/etnaviv: split out iova search and MMU reaping logic
  drm/etnaviv: split out FE start
  drm/etnaviv: split out wait for gpu idle
  drm/etnaviv: move gpu_va() to etnaviv mmu
  drm/etnaviv: remove unused iommu_v2 header
  drm/etnaviv: move IOMMU domain allocation into etnaviv MMU
  drm/etnaviv: indirect IOMMU restore through etnaviv MMU
  drm/etnaviv: move linear window setup into etnaviv_iommuv1_restore
  drm/etnaviv: rename etnaviv_iommu_domain_restore to etnaviv_iommuv1_restore
  drm/etnaviv: only check if the cmdbuf is inside the linear window on MMUv1
  drm/etnaviv: only try to use the linear window on MMUv1
  ...
Dave Airlie 2016-09-28 11:24:05 +10:00
commit 81c5d6aa39
12 changed files with 588 additions and 227 deletions

View File

@ -21,6 +21,7 @@
#include "common.xml.h"
#include "state.xml.h"
#include "state_hi.xml.h"
#include "state_3d.xml.h"
#include "cmdstream.xml.h"
@ -117,11 +118,6 @@ static void etnaviv_cmd_select_pipe(struct etnaviv_gpu *gpu,
VIVS_GL_PIPE_SELECT_PIPE(pipe));
}
static u32 gpu_va(struct etnaviv_gpu *gpu, struct etnaviv_cmdbuf *buf)
{
return buf->paddr - gpu->memory_base;
}
static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu,
struct etnaviv_cmdbuf *buf, u32 off, u32 len)
{
@ -129,7 +125,7 @@ static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu,
u32 *ptr = buf->vaddr + off;
dev_info(gpu->dev, "virt %p phys 0x%08x free 0x%08x\n",
ptr, gpu_va(gpu, buf) + off, size - len * 4 - off);
ptr, etnaviv_iommu_get_cmdbuf_va(gpu, buf) + off, size - len * 4 - off);
print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4,
ptr, len * 4, 0);
@ -162,7 +158,7 @@ static u32 etnaviv_buffer_reserve(struct etnaviv_gpu *gpu,
if (buffer->user_size + cmd_dwords * sizeof(u64) > buffer->size)
buffer->user_size = 0;
return gpu_va(gpu, buffer) + buffer->user_size;
return etnaviv_iommu_get_cmdbuf_va(gpu, buffer) + buffer->user_size;
}
u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu)
@ -173,7 +169,41 @@ u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu)
buffer->user_size = 0;
CMD_WAIT(buffer);
CMD_LINK(buffer, 2, gpu_va(gpu, buffer) + buffer->user_size - 4);
CMD_LINK(buffer, 2, etnaviv_iommu_get_cmdbuf_va(gpu, buffer) +
buffer->user_size - 4);
return buffer->user_size / 8;
}
u16 etnaviv_buffer_config_mmuv2(struct etnaviv_gpu *gpu, u32 mtlb_addr, u32 safe_addr)
{
struct etnaviv_cmdbuf *buffer = gpu->buffer;
buffer->user_size = 0;
if (gpu->identity.features & chipFeatures_PIPE_3D) {
CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT,
VIVS_GL_PIPE_SELECT_PIPE(ETNA_PIPE_3D));
CMD_LOAD_STATE(buffer, VIVS_MMUv2_CONFIGURATION,
mtlb_addr | VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K);
CMD_LOAD_STATE(buffer, VIVS_MMUv2_SAFE_ADDRESS, safe_addr);
CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
}
if (gpu->identity.features & chipFeatures_PIPE_2D) {
CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT,
VIVS_GL_PIPE_SELECT_PIPE(ETNA_PIPE_2D));
CMD_LOAD_STATE(buffer, VIVS_MMUv2_CONFIGURATION,
mtlb_addr | VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K);
CMD_LOAD_STATE(buffer, VIVS_MMUv2_SAFE_ADDRESS, safe_addr);
CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
}
CMD_END(buffer);
buffer->user_size = ALIGN(buffer->user_size, 8);
return buffer->user_size / 8;
}
@ -231,7 +261,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
if (drm_debug & DRM_UT_DRIVER)
etnaviv_buffer_dump(gpu, buffer, 0, 0x50);
link_target = gpu_va(gpu, cmdbuf);
link_target = etnaviv_iommu_get_cmdbuf_va(gpu, cmdbuf);
link_dwords = cmdbuf->size / 8;
/*
@ -246,8 +276,12 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
extra_dwords = 1;
/* flush command */
if (gpu->mmu->need_flush)
extra_dwords += 1;
if (gpu->mmu->need_flush) {
if (gpu->mmu->version == ETNAVIV_IOMMU_V1)
extra_dwords += 1;
else
extra_dwords += 3;
}
/* pipe switch commands */
if (gpu->switch_context)
@ -257,12 +291,23 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
if (gpu->mmu->need_flush) {
/* Add the MMU flush */
CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_MMU,
VIVS_GL_FLUSH_MMU_FLUSH_FEMMU |
VIVS_GL_FLUSH_MMU_FLUSH_UNK1 |
VIVS_GL_FLUSH_MMU_FLUSH_UNK2 |
VIVS_GL_FLUSH_MMU_FLUSH_PEMMU |
VIVS_GL_FLUSH_MMU_FLUSH_UNK4);
if (gpu->mmu->version == ETNAVIV_IOMMU_V1) {
CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_MMU,
VIVS_GL_FLUSH_MMU_FLUSH_FEMMU |
VIVS_GL_FLUSH_MMU_FLUSH_UNK1 |
VIVS_GL_FLUSH_MMU_FLUSH_UNK2 |
VIVS_GL_FLUSH_MMU_FLUSH_PEMMU |
VIVS_GL_FLUSH_MMU_FLUSH_UNK4);
} else {
CMD_LOAD_STATE(buffer, VIVS_MMUv2_CONFIGURATION,
VIVS_MMUv2_CONFIGURATION_MODE_MASK |
VIVS_MMUv2_CONFIGURATION_ADDRESS_MASK |
VIVS_MMUv2_CONFIGURATION_FLUSH_FLUSH);
CMD_SEM(buffer, SYNC_RECIPIENT_FE,
SYNC_RECIPIENT_PE);
CMD_STALL(buffer, SYNC_RECIPIENT_FE,
SYNC_RECIPIENT_PE);
}
gpu->mmu->need_flush = false;
}
@ -301,7 +346,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
if (drm_debug & DRM_UT_DRIVER)
pr_info("stream link to 0x%08x @ 0x%08x %p\n",
return_target, gpu_va(gpu, cmdbuf), cmdbuf->vaddr);
return_target, etnaviv_iommu_get_cmdbuf_va(gpu, cmdbuf), cmdbuf->vaddr);
if (drm_debug & DRM_UT_DRIVER) {
print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4,

View File

@ -96,6 +96,7 @@ struct drm_gem_object *etnaviv_gem_new(struct drm_device *dev,
int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
uintptr_t ptr, u32 size, u32 flags, u32 *handle);
u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu);
u16 etnaviv_buffer_config_mmuv2(struct etnaviv_gpu *gpu, u32 mtlb_addr, u32 safe_addr);
void etnaviv_buffer_end(struct etnaviv_gpu *gpu);
void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
struct etnaviv_cmdbuf *cmdbuf);

View File

@ -175,11 +175,13 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
etnaviv_core_dump_registers(&iter, gpu);
etnaviv_core_dump_mmu(&iter, gpu, mmu_size);
etnaviv_core_dump_mem(&iter, ETDUMP_BUF_RING, gpu->buffer->vaddr,
gpu->buffer->size, gpu->buffer->paddr);
gpu->buffer->size,
etnaviv_iommu_get_cmdbuf_va(gpu, gpu->buffer));
list_for_each_entry(cmd, &gpu->active_cmd_list, node)
etnaviv_core_dump_mem(&iter, ETDUMP_BUF_CMD, cmd->vaddr,
cmd->size, cmd->paddr);
cmd->size,
etnaviv_iommu_get_cmdbuf_va(gpu, cmd));
/* Reserve space for the bomap */
if (n_bomap_pages) {

View File

@ -22,8 +22,6 @@
#include "etnaviv_gpu.h"
#include "etnaviv_gem.h"
#include "etnaviv_mmu.h"
#include "etnaviv_iommu.h"
#include "etnaviv_iommu_v2.h"
#include "common.xml.h"
#include "state.xml.h"
#include "state_hi.xml.h"
@ -329,6 +327,18 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
gpu->identity.revision = 0x1051;
}
}
/*
* NXP likes to call the GPU on the i.MX6QP GC2000+, but in
* reality it's just a re-branded GC3000. We can identify this
* core by the upper half of the revision register being all 1.
* Fix model/rev here, so all other places can refer to this
* core by its real identity.
*/
if (etnaviv_is_model_rev(gpu, GC2000, 0xffff5450)) {
gpu->identity.model = chipModel_GC3000;
gpu->identity.revision &= 0xffff;
}
}
dev_info(gpu->dev, "model: GC%x, revision: %x\n",
@ -528,6 +538,14 @@ static void etnaviv_gpu_enable_mlcg(struct etnaviv_gpu *gpu)
gpu_write(gpu, VIVS_PM_MODULE_CONTROLS, pmc);
}
void etnaviv_gpu_start_fe(struct etnaviv_gpu *gpu, u32 address, u16 prefetch)
{
gpu_write(gpu, VIVS_FE_COMMAND_ADDRESS, address);
gpu_write(gpu, VIVS_FE_COMMAND_CONTROL,
VIVS_FE_COMMAND_CONTROL_ENABLE |
VIVS_FE_COMMAND_CONTROL_PREFETCH(prefetch));
}
static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
{
u16 prefetch;
@ -568,33 +586,20 @@ static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
gpu_write(gpu, VIVS_MC_BUS_CONFIG, bus_config);
}
/* set base addresses */
gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_RA, gpu->memory_base);
gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_FE, gpu->memory_base);
gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_TX, gpu->memory_base);
gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PEZ, gpu->memory_base);
gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PE, gpu->memory_base);
/* setup the MMU page table pointers */
etnaviv_iommu_domain_restore(gpu, gpu->mmu->domain);
/* setup the MMU */
etnaviv_iommu_restore(gpu);
/* Start command processor */
prefetch = etnaviv_buffer_init(gpu);
gpu_write(gpu, VIVS_HI_INTR_ENBL, ~0U);
gpu_write(gpu, VIVS_FE_COMMAND_ADDRESS,
gpu->buffer->paddr - gpu->memory_base);
gpu_write(gpu, VIVS_FE_COMMAND_CONTROL,
VIVS_FE_COMMAND_CONTROL_ENABLE |
VIVS_FE_COMMAND_CONTROL_PREFETCH(prefetch));
etnaviv_gpu_start_fe(gpu, etnaviv_iommu_get_cmdbuf_va(gpu, gpu->buffer),
prefetch);
}
int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
{
int ret, i;
struct iommu_domain *iommu;
enum etnaviv_iommu_version version;
bool mmuv2;
ret = pm_runtime_get_sync(gpu->dev);
if (ret < 0) {
@ -642,32 +647,10 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
goto fail;
}
/* Setup IOMMU.. eventually we will (I think) do this once per context
* and have separate page tables per context. For now, to keep things
* simple and to get something working, just use a single address space:
*/
mmuv2 = gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION;
dev_dbg(gpu->dev, "mmuv2: %d\n", mmuv2);
if (!mmuv2) {
iommu = etnaviv_iommu_domain_alloc(gpu);
version = ETNAVIV_IOMMU_V1;
} else {
iommu = etnaviv_iommu_v2_domain_alloc(gpu);
version = ETNAVIV_IOMMU_V2;
}
if (!iommu) {
dev_err(gpu->dev, "Failed to allocate GPU IOMMU domain\n");
ret = -ENOMEM;
goto fail;
}
gpu->mmu = etnaviv_iommu_new(gpu, iommu, version);
if (!gpu->mmu) {
gpu->mmu = etnaviv_iommu_new(gpu);
if (IS_ERR(gpu->mmu)) {
dev_err(gpu->dev, "Failed to instantiate GPU IOMMU\n");
iommu_domain_free(iommu);
ret = -ENOMEM;
ret = PTR_ERR(gpu->mmu);
goto fail;
}
@ -678,7 +661,9 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
dev_err(gpu->dev, "could not create command buffer\n");
goto destroy_iommu;
}
if (gpu->buffer->paddr - gpu->memory_base > 0x80000000) {
if (gpu->mmu->version == ETNAVIV_IOMMU_V1 &&
gpu->buffer->paddr - gpu->memory_base > 0x80000000) {
ret = -EINVAL;
dev_err(gpu->dev,
"command buffer outside valid memory window\n");
@ -867,45 +852,6 @@ int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m)
}
#endif
/*
* Power Management:
*/
static int enable_clk(struct etnaviv_gpu *gpu)
{
if (gpu->clk_core)
clk_prepare_enable(gpu->clk_core);
if (gpu->clk_shader)
clk_prepare_enable(gpu->clk_shader);
return 0;
}
static int disable_clk(struct etnaviv_gpu *gpu)
{
if (gpu->clk_core)
clk_disable_unprepare(gpu->clk_core);
if (gpu->clk_shader)
clk_disable_unprepare(gpu->clk_shader);
return 0;
}
static int enable_axi(struct etnaviv_gpu *gpu)
{
if (gpu->clk_bus)
clk_prepare_enable(gpu->clk_bus);
return 0;
}
static int disable_axi(struct etnaviv_gpu *gpu)
{
if (gpu->clk_bus)
clk_disable_unprepare(gpu->clk_bus);
return 0;
}
/*
* Hangcheck detection for locked gpu:
*/
@ -945,7 +891,7 @@ static void recover_worker(struct work_struct *work)
gpu->completed_fence = gpu->active_fence;
etnaviv_gpu_hw_init(gpu);
gpu->switch_context = true;
gpu->lastctx = NULL;
gpu->exec_state = -1;
mutex_unlock(&gpu->lock);
@ -1178,6 +1124,9 @@ struct etnaviv_cmdbuf *etnaviv_gpu_cmdbuf_new(struct etnaviv_gpu *gpu, u32 size,
if (!cmdbuf)
return NULL;
if (gpu->mmu->version == ETNAVIV_IOMMU_V2)
size = ALIGN(size, SZ_4K);
cmdbuf->vaddr = dma_alloc_wc(gpu->dev, size, &cmdbuf->paddr,
GFP_KERNEL);
if (!cmdbuf->vaddr) {
@ -1193,6 +1142,7 @@ struct etnaviv_cmdbuf *etnaviv_gpu_cmdbuf_new(struct etnaviv_gpu *gpu, u32 size,
void etnaviv_gpu_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf)
{
etnaviv_iommu_put_cmdbuf_va(cmdbuf->gpu, cmdbuf);
dma_free_wc(cmdbuf->gpu->dev, cmdbuf->size, cmdbuf->vaddr,
cmdbuf->paddr);
kfree(cmdbuf);
@ -1333,8 +1283,6 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
if (ret < 0)
return ret;
mutex_lock(&gpu->lock);
/*
* TODO
*
@ -1348,16 +1296,18 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
if (unlikely(event == ~0U)) {
DRM_ERROR("no free event\n");
ret = -EBUSY;
goto out_unlock;
goto out_pm_put;
}
fence = etnaviv_gpu_fence_alloc(gpu);
if (!fence) {
event_free(gpu, event);
ret = -ENOMEM;
goto out_unlock;
goto out_pm_put;
}
mutex_lock(&gpu->lock);
gpu->event[event].fence = fence;
submit->fence = fence->seqno;
gpu->active_fence = submit->fence;
@ -1395,9 +1345,9 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
hangcheck_timer_reset(gpu);
ret = 0;
out_unlock:
mutex_unlock(&gpu->lock);
out_pm_put:
etnaviv_gpu_pm_put(gpu);
return ret;
@ -1425,6 +1375,21 @@ static irqreturn_t irq_handler(int irq, void *data)
intr &= ~VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR;
}
if (intr & VIVS_HI_INTR_ACKNOWLEDGE_MMU_EXCEPTION) {
int i;
dev_err_ratelimited(gpu->dev,
"MMU fault status 0x%08x\n",
gpu_read(gpu, VIVS_MMUv2_STATUS));
for (i = 0; i < 4; i++) {
dev_err_ratelimited(gpu->dev,
"MMU %d fault addr 0x%08x\n",
i, gpu_read(gpu,
VIVS_MMUv2_EXCEPTION_ADDR(i)));
}
intr &= ~VIVS_HI_INTR_ACKNOWLEDGE_MMU_EXCEPTION;
}
while ((event = ffs(intr)) != 0) {
struct fence *fence;
@ -1466,39 +1431,72 @@ static int etnaviv_gpu_clk_enable(struct etnaviv_gpu *gpu)
{
int ret;
ret = enable_clk(gpu);
if (ret)
return ret;
if (gpu->clk_bus) {
ret = clk_prepare_enable(gpu->clk_bus);
if (ret)
return ret;
}
ret = enable_axi(gpu);
if (ret) {
disable_clk(gpu);
return ret;
if (gpu->clk_core) {
ret = clk_prepare_enable(gpu->clk_core);
if (ret)
goto disable_clk_bus;
}
if (gpu->clk_shader) {
ret = clk_prepare_enable(gpu->clk_shader);
if (ret)
goto disable_clk_core;
}
return 0;
disable_clk_core:
if (gpu->clk_core)
clk_disable_unprepare(gpu->clk_core);
disable_clk_bus:
if (gpu->clk_bus)
clk_disable_unprepare(gpu->clk_bus);
return ret;
}
static int etnaviv_gpu_clk_disable(struct etnaviv_gpu *gpu)
{
int ret;
ret = disable_axi(gpu);
if (ret)
return ret;
ret = disable_clk(gpu);
if (ret)
return ret;
if (gpu->clk_shader)
clk_disable_unprepare(gpu->clk_shader);
if (gpu->clk_core)
clk_disable_unprepare(gpu->clk_core);
if (gpu->clk_bus)
clk_disable_unprepare(gpu->clk_bus);
return 0;
}
int etnaviv_gpu_wait_idle(struct etnaviv_gpu *gpu, unsigned int timeout_ms)
{
unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
do {
u32 idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
if ((idle & gpu->idle_mask) == gpu->idle_mask)
return 0;
if (time_is_before_jiffies(timeout)) {
dev_warn(gpu->dev,
"timed out waiting for idle: idle=0x%x\n",
idle);
return -ETIMEDOUT;
}
udelay(5);
} while (1);
}
static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
{
if (gpu->buffer) {
unsigned long timeout;
/* Replace the last WAIT with END */
etnaviv_buffer_end(gpu);
@ -1507,22 +1505,7 @@ static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
* happen quickly (as the WAIT is only 200 cycles). If
* we fail, just warn and continue.
*/
timeout = jiffies + msecs_to_jiffies(100);
do {
u32 idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
if ((idle & gpu->idle_mask) == gpu->idle_mask)
break;
if (time_is_before_jiffies(timeout)) {
dev_warn(gpu->dev,
"timed out waiting for idle: idle=0x%x\n",
idle);
break;
}
udelay(5);
} while (1);
etnaviv_gpu_wait_idle(gpu, 100);
}
return etnaviv_gpu_clk_disable(gpu);
@ -1634,7 +1617,7 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct etnaviv_gpu *gpu;
int err = 0;
int err;
gpu = devm_kzalloc(dev, sizeof(*gpu), GFP_KERNEL);
if (!gpu)
@ -1651,16 +1634,15 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
/* Get Interrupt: */
gpu->irq = platform_get_irq(pdev, 0);
if (gpu->irq < 0) {
err = gpu->irq;
dev_err(dev, "failed to get irq: %d\n", err);
goto fail;
dev_err(dev, "failed to get irq: %d\n", gpu->irq);
return gpu->irq;
}
err = devm_request_irq(&pdev->dev, gpu->irq, irq_handler, 0,
dev_name(gpu->dev), gpu);
if (err) {
dev_err(dev, "failed to request IRQ%u: %d\n", gpu->irq, err);
goto fail;
return err;
}
/* Get Clocks: */
@ -1694,13 +1676,10 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
err = component_add(&pdev->dev, &gpu_ops);
if (err < 0) {
dev_err(&pdev->dev, "failed to register component: %d\n", err);
goto fail;
return err;
}
return 0;
fail:
return err;
}
static int etnaviv_gpu_platform_remove(struct platform_device *pdev)
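
Editor's note, not part of the patch: the cmdbuf changes in this file tie into the MMU rework below. On MMUv2 the command buffer is mapped through the IOMMU in whole 4K pages (the v2 domain advertises pgsize_bitmap = SZ_4K), so etnaviv_gpu_cmdbuf_new() rounds the allocation up front and etnaviv_gpu_cmdbuf_free() drops the mapping again via etnaviv_iommu_put_cmdbuf_va(). A small worked example with a made-up size:

    size = 0x2300;             /* hypothetical request                  */
    size = ALIGN(size, SZ_4K); /* -> 0x3000, i.e. three pages to map    */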

View File

@ -160,6 +160,8 @@ struct etnaviv_cmdbuf {
dma_addr_t paddr;
u32 size;
u32 user_size;
/* vram node used if the cmdbuf is mapped through the MMUv2 */
struct drm_mm_node vram_node;
/* fence after which this buffer is to be disposed */
struct fence *fence;
/* target exec state */
@ -214,6 +216,8 @@ struct etnaviv_cmdbuf *etnaviv_gpu_cmdbuf_new(struct etnaviv_gpu *gpu,
void etnaviv_gpu_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf);
int etnaviv_gpu_pm_get_sync(struct etnaviv_gpu *gpu);
void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu);
int etnaviv_gpu_wait_idle(struct etnaviv_gpu *gpu, unsigned int timeout_ms);
void etnaviv_gpu_start_fe(struct etnaviv_gpu *gpu, u32 address, u16 prefetch);
extern struct platform_driver etnaviv_gpu_driver;

View File

@ -196,12 +196,19 @@ static struct etnaviv_iommu_ops etnaviv_iommu_ops = {
.dump = etnaviv_iommuv1_dump,
};
void etnaviv_iommu_domain_restore(struct etnaviv_gpu *gpu,
struct iommu_domain *domain)
void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu)
{
struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);
struct etnaviv_iommu_domain *etnaviv_domain =
to_etnaviv_domain(gpu->mmu->domain);
u32 pgtable;
/* set base addresses */
gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_RA, gpu->memory_base);
gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_FE, gpu->memory_base);
gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_TX, gpu->memory_base);
gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PEZ, gpu->memory_base);
gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PE, gpu->memory_base);
/* set page table address in MC */
pgtable = (u32)etnaviv_domain->pgtable.paddr;
@ -212,7 +219,7 @@ void etnaviv_iommu_domain_restore(struct etnaviv_gpu *gpu,
gpu_write(gpu, VIVS_MC_MMU_RA_PAGE_TABLE, pgtable);
}
struct iommu_domain *etnaviv_iommu_domain_alloc(struct etnaviv_gpu *gpu)
struct iommu_domain *etnaviv_iommuv1_domain_alloc(struct etnaviv_gpu *gpu)
{
struct etnaviv_iommu_domain *etnaviv_domain;
int ret;

View File

@ -17,12 +17,12 @@
#ifndef __ETNAVIV_IOMMU_H__
#define __ETNAVIV_IOMMU_H__
#include <linux/iommu.h>
struct etnaviv_gpu;
struct iommu_domain *etnaviv_iommu_domain_alloc(struct etnaviv_gpu *gpu);
void etnaviv_iommu_domain_restore(struct etnaviv_gpu *gpu,
struct iommu_domain *domain);
struct iommu_domain *etnaviv_iommu_v2_domain_alloc(struct etnaviv_gpu *gpu);
struct iommu_domain *etnaviv_iommuv1_domain_alloc(struct etnaviv_gpu *gpu);
void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu);
struct iommu_domain *etnaviv_iommuv2_domain_alloc(struct etnaviv_gpu *gpu);
void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu);
#endif /* __ETNAVIV_IOMMU_H__ */

View File

@ -1,5 +1,5 @@
/*
* Copyright (C) 2014 Christian Gmeiner <christian.gmeiner@gmail.com>
* Copyright (C) 2016 Etnaviv Project
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
@ -22,12 +22,267 @@
#include <linux/bitops.h>
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"
#include "etnaviv_iommu.h"
#include "state.xml.h"
#include "state_hi.xml.h"
#define MMUv2_PTE_PRESENT BIT(0)
#define MMUv2_PTE_EXCEPTION BIT(1)
#define MMUv2_PTE_WRITEABLE BIT(2)
struct iommu_domain *etnaviv_iommu_v2_domain_alloc(struct etnaviv_gpu *gpu)
#define MMUv2_MTLB_MASK 0xffc00000
#define MMUv2_MTLB_SHIFT 22
#define MMUv2_STLB_MASK 0x003ff000
#define MMUv2_STLB_SHIFT 12
#define MMUv2_MAX_STLB_ENTRIES 1024
struct etnaviv_iommuv2_domain {
struct iommu_domain domain;
struct device *dev;
void *bad_page_cpu;
dma_addr_t bad_page_dma;
/* M(aster) TLB aka first level pagetable */
u32 *mtlb_cpu;
dma_addr_t mtlb_dma;
/* S(lave) TLB aka second level pagetable */
u32 *stlb_cpu[1024];
dma_addr_t stlb_dma[1024];
};
static struct etnaviv_iommuv2_domain *to_etnaviv_domain(struct iommu_domain *domain)
{
/* TODO */
return container_of(domain, struct etnaviv_iommuv2_domain, domain);
}
static int etnaviv_iommuv2_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot)
{
struct etnaviv_iommuv2_domain *etnaviv_domain =
to_etnaviv_domain(domain);
int mtlb_entry, stlb_entry;
u32 entry = (u32)paddr | MMUv2_PTE_PRESENT;
if (size != SZ_4K)
return -EINVAL;
if (prot & IOMMU_WRITE)
entry |= MMUv2_PTE_WRITEABLE;
mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT;
stlb_entry = (iova & MMUv2_STLB_MASK) >> MMUv2_STLB_SHIFT;
etnaviv_domain->stlb_cpu[mtlb_entry][stlb_entry] = entry;
return 0;
}
static size_t etnaviv_iommuv2_unmap(struct iommu_domain *domain,
unsigned long iova, size_t size)
{
struct etnaviv_iommuv2_domain *etnaviv_domain =
to_etnaviv_domain(domain);
int mtlb_entry, stlb_entry;
if (size != SZ_4K)
return -EINVAL;
mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT;
stlb_entry = (iova & MMUv2_STLB_MASK) >> MMUv2_STLB_SHIFT;
etnaviv_domain->stlb_cpu[mtlb_entry][stlb_entry] = MMUv2_PTE_EXCEPTION;
return SZ_4K;
}
static phys_addr_t etnaviv_iommuv2_iova_to_phys(struct iommu_domain *domain,
dma_addr_t iova)
{
struct etnaviv_iommuv2_domain *etnaviv_domain =
to_etnaviv_domain(domain);
int mtlb_entry, stlb_entry;
mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT;
stlb_entry = (iova & MMUv2_STLB_MASK) >> MMUv2_STLB_SHIFT;
return etnaviv_domain->stlb_cpu[mtlb_entry][stlb_entry] & ~(SZ_4K - 1);
}
static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain)
{
u32 *p;
int ret, i, j;
/* allocate scratch page */
etnaviv_domain->bad_page_cpu = dma_alloc_coherent(etnaviv_domain->dev,
SZ_4K,
&etnaviv_domain->bad_page_dma,
GFP_KERNEL);
if (!etnaviv_domain->bad_page_cpu) {
ret = -ENOMEM;
goto fail_mem;
}
p = etnaviv_domain->bad_page_cpu;
for (i = 0; i < SZ_4K / 4; i++)
*p++ = 0xdead55aa;
etnaviv_domain->mtlb_cpu = dma_alloc_coherent(etnaviv_domain->dev,
SZ_4K,
&etnaviv_domain->mtlb_dma,
GFP_KERNEL);
if (!etnaviv_domain->mtlb_cpu) {
ret = -ENOMEM;
goto fail_mem;
}
/* pre-populate STLB pages (may want to switch to on-demand later) */
for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
etnaviv_domain->stlb_cpu[i] =
dma_alloc_coherent(etnaviv_domain->dev,
SZ_4K,
&etnaviv_domain->stlb_dma[i],
GFP_KERNEL);
if (!etnaviv_domain->stlb_cpu[i]) {
ret = -ENOMEM;
goto fail_mem;
}
p = etnaviv_domain->stlb_cpu[i];
for (j = 0; j < SZ_4K / 4; j++)
*p++ = MMUv2_PTE_EXCEPTION;
etnaviv_domain->mtlb_cpu[i] = etnaviv_domain->stlb_dma[i] |
MMUv2_PTE_PRESENT;
}
return 0;
fail_mem:
if (etnaviv_domain->bad_page_cpu)
dma_free_coherent(etnaviv_domain->dev, SZ_4K,
etnaviv_domain->bad_page_cpu,
etnaviv_domain->bad_page_dma);
if (etnaviv_domain->mtlb_cpu)
dma_free_coherent(etnaviv_domain->dev, SZ_4K,
etnaviv_domain->mtlb_cpu,
etnaviv_domain->mtlb_dma);
for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
if (etnaviv_domain->stlb_cpu[i])
dma_free_coherent(etnaviv_domain->dev, SZ_4K,
etnaviv_domain->stlb_cpu[i],
etnaviv_domain->stlb_dma[i]);
}
return ret;
}
static void etnaviv_iommuv2_domain_free(struct iommu_domain *domain)
{
struct etnaviv_iommuv2_domain *etnaviv_domain =
to_etnaviv_domain(domain);
int i;
dma_free_coherent(etnaviv_domain->dev, SZ_4K,
etnaviv_domain->bad_page_cpu,
etnaviv_domain->bad_page_dma);
dma_free_coherent(etnaviv_domain->dev, SZ_4K,
etnaviv_domain->mtlb_cpu,
etnaviv_domain->mtlb_dma);
for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
if (etnaviv_domain->stlb_cpu[i])
dma_free_coherent(etnaviv_domain->dev, SZ_4K,
etnaviv_domain->stlb_cpu[i],
etnaviv_domain->stlb_dma[i]);
}
vfree(etnaviv_domain);
}
static size_t etnaviv_iommuv2_dump_size(struct iommu_domain *domain)
{
struct etnaviv_iommuv2_domain *etnaviv_domain =
to_etnaviv_domain(domain);
size_t dump_size = SZ_4K;
int i;
for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++)
if (etnaviv_domain->mtlb_cpu[i] & MMUv2_PTE_PRESENT)
dump_size += SZ_4K;
return dump_size;
}
static void etnaviv_iommuv2_dump(struct iommu_domain *domain, void *buf)
{
struct etnaviv_iommuv2_domain *etnaviv_domain =
to_etnaviv_domain(domain);
int i;
memcpy(buf, etnaviv_domain->mtlb_cpu, SZ_4K);
buf += SZ_4K;
for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++, buf += SZ_4K)
if (etnaviv_domain->mtlb_cpu[i] & MMUv2_PTE_PRESENT)
memcpy(buf, etnaviv_domain->stlb_cpu[i], SZ_4K);
}
static struct etnaviv_iommu_ops etnaviv_iommu_ops = {
.ops = {
.domain_free = etnaviv_iommuv2_domain_free,
.map = etnaviv_iommuv2_map,
.unmap = etnaviv_iommuv2_unmap,
.iova_to_phys = etnaviv_iommuv2_iova_to_phys,
.pgsize_bitmap = SZ_4K,
},
.dump_size = etnaviv_iommuv2_dump_size,
.dump = etnaviv_iommuv2_dump,
};
void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu)
{
struct etnaviv_iommuv2_domain *etnaviv_domain =
to_etnaviv_domain(gpu->mmu->domain);
u16 prefetch;
/* If the MMU is already enabled the state is still there. */
if (gpu_read(gpu, VIVS_MMUv2_CONTROL) & VIVS_MMUv2_CONTROL_ENABLE)
return;
prefetch = etnaviv_buffer_config_mmuv2(gpu,
(u32)etnaviv_domain->mtlb_dma,
(u32)etnaviv_domain->bad_page_dma);
etnaviv_gpu_start_fe(gpu, gpu->buffer->paddr, prefetch);
etnaviv_gpu_wait_idle(gpu, 100);
gpu_write(gpu, VIVS_MMUv2_CONTROL, VIVS_MMUv2_CONTROL_ENABLE);
}
struct iommu_domain *etnaviv_iommuv2_domain_alloc(struct etnaviv_gpu *gpu)
{
struct etnaviv_iommuv2_domain *etnaviv_domain;
int ret;
etnaviv_domain = vzalloc(sizeof(*etnaviv_domain));
if (!etnaviv_domain)
return NULL;
etnaviv_domain->dev = gpu->dev;
etnaviv_domain->domain.type = __IOMMU_DOMAIN_PAGING;
etnaviv_domain->domain.ops = &etnaviv_iommu_ops.ops;
etnaviv_domain->domain.pgsize_bitmap = SZ_4K;
etnaviv_domain->domain.geometry.aperture_start = 0;
etnaviv_domain->domain.geometry.aperture_end = ~0UL & ~(SZ_4K - 1);
ret = etnaviv_iommuv2_init(etnaviv_domain);
if (ret)
goto out_free;
return &etnaviv_domain->domain;
out_free:
vfree(etnaviv_domain);
return NULL;
}
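
Editor's illustration, not part of the patch: the two-level translation implemented above splits an IOVA with the MMUv2_* masks from this file — the top 10 bits index the master TLB, the next 10 bits the slave TLB, and the low 12 bits are the offset within the 4K page. Worked through for an arbitrary example address:

    /* iova = 0x12345000, paddr = 0x87654000, prot = IOMMU_WRITE */
    mtlb_entry = (0x12345000 & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT;   /* 72          */
    stlb_entry = (0x12345000 & MMUv2_STLB_MASK) >> MMUv2_STLB_SHIFT;   /* 0x345       */
    pte        = 0x87654000 | MMUv2_PTE_PRESENT | MMUv2_PTE_WRITEABLE; /* 0x87654005  */

Note also the bring-up ordering in etnaviv_iommuv2_restore(): the configuration stream built by etnaviv_buffer_config_mmuv2() is handed to the FE by its physical address (gpu->buffer->paddr), presumably because the MMU is not yet enabled at that point, and VIVS_MMUv2_CONTROL_ENABLE is only written once the GPU has gone idle.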

View File

@ -1,25 +0,0 @@
/*
* Copyright (C) 2014 Christian Gmeiner <christian.gmeiner@gmail.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __ETNAVIV_IOMMU_V2_H__
#define __ETNAVIV_IOMMU_V2_H__
#include <linux/iommu.h>
struct etnaviv_gpu;
struct iommu_domain *etnaviv_iommu_v2_domain_alloc(struct etnaviv_gpu *gpu);
#endif /* __ETNAVIV_IOMMU_V2_H__ */

View File

@ -14,9 +14,11 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "common.xml.h"
#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_iommu.h"
#include "etnaviv_mmu.h"
static int etnaviv_fault_handler(struct iommu_domain *iommu, struct device *dev,
@ -101,40 +103,21 @@ static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu *mmu,
drm_mm_remove_node(&mapping->vram_node);
}
int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
struct etnaviv_vram_mapping *mapping)
static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
struct drm_mm_node *node, size_t size)
{
struct etnaviv_vram_mapping *free = NULL;
struct sg_table *sgt = etnaviv_obj->sgt;
struct drm_mm_node *node;
int ret;
lockdep_assert_held(&etnaviv_obj->lock);
lockdep_assert_held(&mmu->lock);
mutex_lock(&mmu->lock);
/* v1 MMU can optimize single entry (contiguous) scatterlists */
if (sgt->nents == 1 && !(etnaviv_obj->flags & ETNA_BO_FORCE_MMU)) {
u32 iova;
iova = sg_dma_address(sgt->sgl) - memory_base;
if (iova < 0x80000000 - sg_dma_len(sgt->sgl)) {
mapping->iova = iova;
list_add_tail(&mapping->mmu_node, &mmu->mappings);
mutex_unlock(&mmu->lock);
return 0;
}
}
node = &mapping->vram_node;
while (1) {
struct etnaviv_vram_mapping *m, *n;
struct list_head list;
bool found;
ret = drm_mm_insert_node_in_range(&mmu->mm, node,
etnaviv_obj->base.size, 0, mmu->last_iova, ~0UL,
size, 0, mmu->last_iova, ~0UL,
DRM_MM_SEARCH_DEFAULT);
if (ret != -ENOSPC)
@ -151,7 +134,7 @@ int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
}
/* Try to retire some entries */
drm_mm_init_scan(&mmu->mm, etnaviv_obj->base.size, 0, 0);
drm_mm_init_scan(&mmu->mm, size, 0, 0);
found = 0;
INIT_LIST_HEAD(&list);
@ -212,6 +195,38 @@ int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
mmu->need_flush = true;
}
return ret;
}
int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
struct etnaviv_vram_mapping *mapping)
{
struct sg_table *sgt = etnaviv_obj->sgt;
struct drm_mm_node *node;
int ret;
lockdep_assert_held(&etnaviv_obj->lock);
mutex_lock(&mmu->lock);
/* v1 MMU can optimize single entry (contiguous) scatterlists */
if (mmu->version == ETNAVIV_IOMMU_V1 &&
sgt->nents == 1 && !(etnaviv_obj->flags & ETNA_BO_FORCE_MMU)) {
u32 iova;
iova = sg_dma_address(sgt->sgl) - memory_base;
if (iova < 0x80000000 - sg_dma_len(sgt->sgl)) {
mapping->iova = iova;
list_add_tail(&mapping->mmu_node, &mmu->mappings);
mutex_unlock(&mmu->lock);
return 0;
}
}
node = &mapping->vram_node;
ret = etnaviv_iommu_find_iova(mmu, node, etnaviv_obj->base.size);
if (ret < 0) {
mutex_unlock(&mmu->lock);
return ret;
@ -256,30 +271,102 @@ void etnaviv_iommu_destroy(struct etnaviv_iommu *mmu)
kfree(mmu);
}
struct etnaviv_iommu *etnaviv_iommu_new(struct etnaviv_gpu *gpu,
struct iommu_domain *domain, enum etnaviv_iommu_version version)
struct etnaviv_iommu *etnaviv_iommu_new(struct etnaviv_gpu *gpu)
{
enum etnaviv_iommu_version version;
struct etnaviv_iommu *mmu;
mmu = kzalloc(sizeof(*mmu), GFP_KERNEL);
if (!mmu)
return ERR_PTR(-ENOMEM);
mmu->domain = domain;
if (!(gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION)) {
mmu->domain = etnaviv_iommuv1_domain_alloc(gpu);
version = ETNAVIV_IOMMU_V1;
} else {
mmu->domain = etnaviv_iommuv2_domain_alloc(gpu);
version = ETNAVIV_IOMMU_V2;
}
if (!mmu->domain) {
dev_err(gpu->dev, "Failed to allocate GPU IOMMU domain\n");
kfree(mmu);
return ERR_PTR(-ENOMEM);
}
mmu->gpu = gpu;
mmu->version = version;
mutex_init(&mmu->lock);
INIT_LIST_HEAD(&mmu->mappings);
drm_mm_init(&mmu->mm, domain->geometry.aperture_start,
domain->geometry.aperture_end -
domain->geometry.aperture_start + 1);
drm_mm_init(&mmu->mm, mmu->domain->geometry.aperture_start,
mmu->domain->geometry.aperture_end -
mmu->domain->geometry.aperture_start + 1);
iommu_set_fault_handler(domain, etnaviv_fault_handler, gpu->dev);
iommu_set_fault_handler(mmu->domain, etnaviv_fault_handler, gpu->dev);
return mmu;
}
void etnaviv_iommu_restore(struct etnaviv_gpu *gpu)
{
if (gpu->mmu->version == ETNAVIV_IOMMU_V1)
etnaviv_iommuv1_restore(gpu);
else
etnaviv_iommuv2_restore(gpu);
}
u32 etnaviv_iommu_get_cmdbuf_va(struct etnaviv_gpu *gpu,
struct etnaviv_cmdbuf *buf)
{
struct etnaviv_iommu *mmu = gpu->mmu;
if (mmu->version == ETNAVIV_IOMMU_V1) {
return buf->paddr - gpu->memory_base;
} else {
int ret;
if (buf->vram_node.allocated)
return (u32)buf->vram_node.start;
mutex_lock(&mmu->lock);
ret = etnaviv_iommu_find_iova(mmu, &buf->vram_node, buf->size);
if (ret < 0) {
mutex_unlock(&mmu->lock);
return 0;
}
ret = iommu_map(mmu->domain, buf->vram_node.start, buf->paddr,
buf->size, IOMMU_READ);
if (ret < 0) {
drm_mm_remove_node(&buf->vram_node);
mutex_unlock(&mmu->lock);
return 0;
}
/*
* At least on GC3000 the FE MMU doesn't properly flush old TLB
* entries. Make sure to space the command buffers out in a way
* that the FE MMU prefetch won't load invalid entries.
*/
mmu->last_iova = buf->vram_node.start + buf->size + SZ_64K;
gpu->mmu->need_flush = true;
mutex_unlock(&mmu->lock);
return (u32)buf->vram_node.start;
}
}
void etnaviv_iommu_put_cmdbuf_va(struct etnaviv_gpu *gpu,
struct etnaviv_cmdbuf *buf)
{
struct etnaviv_iommu *mmu = gpu->mmu;
if (mmu->version == ETNAVIV_IOMMU_V2 && buf->vram_node.allocated) {
mutex_lock(&mmu->lock);
iommu_unmap(mmu->domain, buf->vram_node.start, buf->size);
drm_mm_remove_node(&buf->vram_node);
mutex_unlock(&mmu->lock);
}
}
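
Editor's illustration, not part of the patch, of etnaviv_iommu_get_cmdbuf_va() above, with made-up numbers:

    /* MMUv1: plain offset into the linear window                          */
    va = 0x12340000 /* paddr */ - 0x10000000 /* memory_base */;  /* 0x02340000 */

    /* MMUv2: drm_mm node in the GPU address space, spaced out by 64K      */
    va        = 0x00100000;                    /* node start for a 0x8000-byte cmdbuf */
    last_iova = 0x00100000 + 0x8000 + SZ_64K;  /* 0x00118000                          */

The 64K gap keeps the FE prefetch, which can run past the end of a command buffer, away from addresses for which the GC3000 FE MMU may still hold stale, unflushed TLB entries — the problem described in the in-code comment above.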
size_t etnaviv_iommu_dump_size(struct etnaviv_iommu *iommu)
{
struct etnaviv_iommu_ops *ops;

View File

@ -62,10 +62,15 @@ void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu,
struct etnaviv_vram_mapping *mapping);
void etnaviv_iommu_destroy(struct etnaviv_iommu *iommu);
u32 etnaviv_iommu_get_cmdbuf_va(struct etnaviv_gpu *gpu,
struct etnaviv_cmdbuf *buf);
void etnaviv_iommu_put_cmdbuf_va(struct etnaviv_gpu *gpu,
struct etnaviv_cmdbuf *buf);
size_t etnaviv_iommu_dump_size(struct etnaviv_iommu *iommu);
void etnaviv_iommu_dump(struct etnaviv_iommu *iommu, void *buf);
struct etnaviv_iommu *etnaviv_iommu_new(struct etnaviv_gpu *gpu,
struct iommu_domain *domain, enum etnaviv_iommu_version version);
struct etnaviv_iommu *etnaviv_iommu_new(struct etnaviv_gpu *gpu);
void etnaviv_iommu_restore(struct etnaviv_gpu *gpu);
#endif /* __ETNAVIV_MMU_H__ */

View File

@ -8,10 +8,10 @@ http://0x04.net/cgit/index.cgi/rules-ng-ng
git clone git://0x04.net/rules-ng-ng
The rules-ng-ng source files this header was generated from are:
- state_hi.xml ( 24309 bytes, from 2015-12-12 09:02:53)
- common.xml ( 18437 bytes, from 2015-12-12 09:02:53)
- state_hi.xml ( 25620 bytes, from 2016-08-19 22:07:37)
- common.xml ( 20583 bytes, from 2016-06-07 05:22:38)
Copyright (C) 2015
Copyright (C) 2016
*/
@ -78,9 +78,10 @@ Copyright (C) 2015
#define VIVS_HI_AXI_STATUS_DET_RD_ERR 0x00000200
#define VIVS_HI_INTR_ACKNOWLEDGE 0x00000010
#define VIVS_HI_INTR_ACKNOWLEDGE_INTR_VEC__MASK 0x7fffffff
#define VIVS_HI_INTR_ACKNOWLEDGE_INTR_VEC__MASK 0x3fffffff
#define VIVS_HI_INTR_ACKNOWLEDGE_INTR_VEC__SHIFT 0
#define VIVS_HI_INTR_ACKNOWLEDGE_INTR_VEC(x) (((x) << VIVS_HI_INTR_ACKNOWLEDGE_INTR_VEC__SHIFT) & VIVS_HI_INTR_ACKNOWLEDGE_INTR_VEC__MASK)
#define VIVS_HI_INTR_ACKNOWLEDGE_MMU_EXCEPTION 0x40000000
#define VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR 0x80000000
#define VIVS_HI_INTR_ENBL 0x00000014