This patch fixes 64k page support by using PAGE_MASK when rounding addresses and the appropriate page-size defines (PAGE_SIZE for control blocks, EHEA_PAGESIZE for adapter pages) in several places.
Signed-off-by: Thomas Klein <tklein@de.ibm.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
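
For reference, the patch replaces the hard-coded H_CB_ALIGNMENT allocation sizes with PAGE_SIZE and rounds I/O addresses to the kernel page boundary. Below is a minimal userspace sketch (not part of the patch) of the rounding idiom applied in hcp_epas_ctor(); PAGE_SHIFT is assumed to be 16 here purely to illustrate the 64k case:

/*
 * Sketch only: round an address down to the page boundary for the
 * mapping call and re-add the in-page offset afterwards, mirroring
 * what hcp_epas_ctor() does with ioremap().
 */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 16                    /* 64k pages, illustrative */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
	uint64_t paddr = 0x123456789aULL;

	uint64_t base   = paddr & PAGE_MASK;   /* round down to page boundary */
	uint64_t offset = paddr & ~PAGE_MASK;  /* byte offset within the page */

	/* base is what would be handed to ioremap(); offset is re-added */
	printf("addr %#llx -> base %#llx + offset %#llx\n",
	       (unsigned long long)paddr,
	       (unsigned long long)base,
	       (unsigned long long)offset);
	return 0;
}
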
data[i++] = port->port_res[0].swqe_refill_th;
data[i++] = port->resets;
- cb6 = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+ cb6 = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!cb6) {
ehea_error("no mem for cb6");
return;
memset(stats, 0, sizeof(*stats));
- cb2 = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+ cb2 = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!cb2) {
ehea_error("no mem for cb2");
goto out;
u64 hret;
struct hcp_ehea_port_cb0 *cb0;
- cb0 = kzalloc(H_CB_ALIGNMENT, GFP_ATOMIC); /* May be called via */
- if (!cb0) { /* ehea_neq_tasklet() */
+ cb0 = kzalloc(PAGE_SIZE, GFP_ATOMIC); /* May be called via */
+ if (!cb0) { /* ehea_neq_tasklet() */
ehea_error("no mem for cb0");
ret = -ENOMEM;
goto out;
ehea_error("no mem for cb0");
ret = -ENOMEM;
goto out;
- cb4 = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+ cb4 = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!cb4) {
ehea_error("no mem for cb4");
ret = -ENOMEM;
struct hcp_ehea_port_cb0 *cb0;
ret = -ENOMEM;
- cb0 = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+ cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL);
- cb0 = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+ cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!cb0) {
ehea_error("no mem for cb0");
ret = -ENOMEM;
if ((enable && port->promisc) || (!enable && !port->promisc))
return;
- cb7 = kzalloc(H_CB_ALIGNMENT, GFP_ATOMIC);
+ cb7 = kzalloc(PAGE_SIZE, GFP_ATOMIC);
if (!cb7) {
ehea_error("no mem for cb7");
goto out;
- cb1 = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+ cb1 = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!cb1) {
ehea_error("no mem for cb1");
goto out;
- cb1 = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+ cb1 = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!cb1) {
ehea_error("no mem for cb1");
goto out;
if (port->vgrp)
port->vgrp->vlan_devices[vid] = NULL;
- cb1 = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+ cb1 = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!cb1) {
ehea_error("no mem for cb1");
goto out;
u64 dummy64 = 0;
struct hcp_modify_qp_cb0* cb0;
- cb0 = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+ cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!cb0) {
ret = -ENOMEM;
goto out;
- cb = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+ cb = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!cb) {
ret = -ENOMEM;
goto out;
goto out;
/* Enable Jumbo frames */
- cb4 = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+ cb4 = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!cb4) {
ehea_error("no mem for cb4");
} else {
const u8 pagesize, const u8 queue_type,
const u64 log_pageaddr, const u64 count)
{
- if ((count > 1) && (log_pageaddr & 0xfff)) {
+ if ((count > 1) && (log_pageaddr & ~PAGE_MASK)) {
ehea_error("not on pageboundary");
return H_PARAMETER;
}
ehea_error("not on pageboundary");
return H_PARAMETER;
}
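
An illustrative check (not part of the patch) of why the hard-coded 0xfff mask breaks on 64k pages: with PAGE_SHIFT of 16, ~PAGE_MASK is 0xffff, so an address that is 4k-aligned but not 64k-aligned slipped past the old test and is rejected by the new one:

/* Sketch only; PAGE_SHIFT of 16 assumed to model the 64k case. */
#include <assert.h>

#define PAGE_SHIFT 16
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long addr = 0x1000;       /* 4k-aligned, not 64k-aligned */

	assert((addr & 0xfff) == 0);       /* old check: looks page-aligned */
	assert((addr & ~PAGE_MASK) != 0);  /* new check: correctly rejected */
	return 0;
}
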
static inline void hcp_epas_ctor(struct h_epas *epas, u64 paddr_kernel,
u64 paddr_user)
{
- epas->kernel.addr = ioremap(paddr_kernel, PAGE_SIZE);
+ /* To support 64k pages we must round to 64k page boundary */
+ epas->kernel.addr = ioremap((paddr_kernel & PAGE_MASK), PAGE_SIZE) +
+ (paddr_kernel & ~PAGE_MASK);
epas->user.addr = paddr_user;
}
static inline void hcp_epas_dtor(struct h_epas *epas)
{
if (epas->kernel.addr)
- iounmap(epas->kernel.addr);
+ iounmap((void __iomem*)((u64)epas->kernel.addr & PAGE_MASK));
epas->user.addr = 0;
epas->kernel.addr = 0;
start = KERNELBASE;
end = (u64)high_memory;
- nr_pages = (end - start) / PAGE_SIZE;
+ nr_pages = (end - start) / EHEA_PAGESIZE;
pt = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!pt) {
if (nr_pages > 1) {
u64 num_pages = min(nr_pages, (u64)512);
for (i = 0; i < num_pages; i++)
- pt[i] = virt_to_abs((void*)(((u64)start)
- + ((k++) *
- PAGE_SIZE)));
+ pt[i] = virt_to_abs((void*)(((u64)start) +
+ ((k++) *
+ EHEA_PAGESIZE)));
hret = ehea_h_register_rpage_mr(adapter->handle,
adapter->mr.handle, 0,
num_pages);
nr_pages -= num_pages;
} else {
- u64 abs_adr = virt_to_abs((void*)(((u64)start)
- + (k * PAGE_SIZE)));
+ u64 abs_adr = virt_to_abs((void*)(((u64)start) +
+ (k * EHEA_PAGESIZE)));
+
hret = ehea_h_register_rpage_mr(adapter->handle,
adapter->mr.handle, 0,
0, abs_adr,1);
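
A sketch of the page-count arithmetic the last hunks change, assuming EHEA_PAGESIZE is the adapter's 4k page size as defined by the driver: the memory region is registered in adapter-sized pages, so on a 64k-page kernel each kernel page contributes 16 of them, and dividing by PAGE_SIZE would under-register the region:

/* Sketch only; sizes are illustrative, not taken from hardware. */
#include <stdio.h>
#include <stdint.h>

#define EHEA_PAGESIZE    4096UL           /* adapter page size (4k) */
#define KERNEL_PAGE_SIZE (64 * 1024UL)    /* 64k kernel pages, illustrative */

int main(void)
{
	uint64_t start = 0, end = 16 * KERNEL_PAGE_SIZE;   /* 1 MiB region */

	uint64_t old_count = (end - start) / KERNEL_PAGE_SIZE; /* 16 pages  */
	uint64_t new_count = (end - start) / EHEA_PAGESIZE;    /* 256 pages */

	printf("PAGE_SIZE count: %llu, EHEA_PAGESIZE count: %llu\n",
	       (unsigned long long)old_count,
	       (unsigned long long)new_count);
	return 0;
}
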