
Linux Kernel 3.10.1 compiles and runs (.rej cleaned up)
Applied:
/LineageOS/android/kernel/sony/msm8994$ patch -p1 -R < /home/dj/Downloads/linuxkernelpatches/patch-3.10.1-2

Change-Id: Ib3b345f638d6220208eb03707a3ee86533dfcd33
djStolen committed Jan 15, 2024
1 parent 2471dc6 commit b510ff9
Showing 84 changed files with 235 additions and 675 deletions.
8 changes: 0 additions & 8 deletions Documentation/parisc/registers
@@ -77,14 +77,6 @@ PSW default E value 0
 Shadow Registers used by interruption handler code
 TOC enable bit 1
 
-=========================================================================
-
-The PA-RISC architecture defines 7 registers as "shadow registers".
-Those are used in RETURN FROM INTERRUPTION AND RESTORE instruction to reduce
-the state save and restore time by eliminating the need for general register
-(GR) saves and restores in interruption handlers.
-Shadow registers are the GRs 1, 8, 9, 16, 17, 24, and 25.
-
 =========================================================================
 Register usage notes, originally from John Marvin, with some additional
 notes from Randolph Chung.
2 changes: 1 addition & 1 deletion Makefile
@@ -1,6 +1,6 @@
 VERSION = 3
 PATCHLEVEL = 10
-SUBLEVEL = 2
+SUBLEVEL = 1
 EXTRAVERSION =
 NAME = Unicycling Gorilla
 
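For orientation, these three Makefile values are what the version-code machinery encodes; a small host-side C check (the macro mirrors KERNEL_VERSION() from include/linux/version.h) shows what the tree identifies as after this revert:

#include <stdio.h>

/* Same packing as KERNEL_VERSION() in include/linux/version.h. */
#define KERNEL_VERSION(a, b, c)	(((a) << 16) + ((b) << 8) + (c))

int main(void)
{
	/* VERSION.PATCHLEVEL.SUBLEVEL from the hunk above: 3.10.1 */
	printf("LINUX_VERSION_CODE = %d (0x%06x)\n",
	       KERNEL_VERSION(3, 10, 1), KERNEL_VERSION(3, 10, 1));
	return 0;
}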
8 changes: 2 additions & 6 deletions arch/arm/boot/dts/imx23.dtsi
@@ -23,12 +23,8 @@
 	};
 
 	cpus {
-		#address-cells = <0>;
-		#size-cells = <0>;
-
-		cpu {
-			compatible = "arm,arm926ej-s";
-			device_type = "cpu";
+		cpu@0 {
+			compatible = "arm,arm926ejs";
 		};
 	};
 
8 changes: 2 additions & 6 deletions arch/arm/boot/dts/imx28.dtsi
@@ -32,12 +32,8 @@
 	};
 
 	cpus {
-		#address-cells = <0>;
-		#size-cells = <0>;
-
-		cpu {
-			compatible = "arm,arm926ej-s";
-			device_type = "cpu";
+		cpu@0 {
+			compatible = "arm,arm926ejs";
 		};
 	};
 
2 changes: 0 additions & 2 deletions arch/arm/boot/dts/imx6dl.dtsi
@@ -18,14 +18,12 @@
 
 		cpu@0 {
 			compatible = "arm,cortex-a9";
-			device_type = "cpu";
 			reg = <0>;
 			next-level-cache = <&L2>;
 		};
 
 		cpu@1 {
 			compatible = "arm,cortex-a9";
-			device_type = "cpu";
 			reg = <1>;
 			next-level-cache = <&L2>;
 		};
4 changes: 0 additions & 4 deletions arch/arm/boot/dts/imx6q.dtsi
@@ -18,7 +18,6 @@
 
 		cpu@0 {
 			compatible = "arm,cortex-a9";
-			device_type = "cpu";
 			reg = <0>;
 			next-level-cache = <&L2>;
 			operating-points = <
@@ -40,21 +39,18 @@
 
 		cpu@1 {
 			compatible = "arm,cortex-a9";
-			device_type = "cpu";
 			reg = <1>;
 			next-level-cache = <&L2>;
 		};
 
 		cpu@2 {
 			compatible = "arm,cortex-a9";
-			device_type = "cpu";
 			reg = <2>;
 			next-level-cache = <&L2>;
 		};
 
 		cpu@3 {
 			compatible = "arm,cortex-a9";
-			device_type = "cpu";
 			reg = <3>;
 			next-level-cache = <&L2>;
 		};
10 changes: 1 addition & 9 deletions arch/arm/include/asm/mmu_context.h
@@ -27,15 +27,7 @@ void __check_vmalloc_seq(struct mm_struct *mm);
 void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
 #define init_new_context(tsk,mm)	({ atomic64_set(&mm->context.id, 0); 0; })
 
-#ifdef CONFIG_ARM_ERRATA_798181
-void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
-			     cpumask_t *mask);
-#else  /* !CONFIG_ARM_ERRATA_798181 */
-static inline void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
-					   cpumask_t *mask)
-{
-}
-#endif /* CONFIG_ARM_ERRATA_798181 */
+DECLARE_PER_CPU(atomic64_t, active_asids);
 
 #else	/* !CONFIG_CPU_HAS_ASID */
 
1 change: 0 additions & 1 deletion arch/arm/kernel/perf_event.c
@@ -658,7 +658,6 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
 		return;
 	}
 
-	perf_callchain_store(entry, regs->ARM_pc);
 	tail = (struct frame_tail __user *)regs->ARM_fp - 1;
 
 	while ((entry->nr < PERF_MAX_STACK_DEPTH) &&
18 changes: 16 additions & 2 deletions arch/arm/kernel/smp_tlb.c
@@ -103,15 +103,29 @@ static void broadcast_tlb_a15_erratum(void)
 
 static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm)
 {
-	int this_cpu;
+	int cpu, this_cpu;
 	cpumask_t mask = { CPU_BITS_NONE };
 
 	if (!erratum_a15_798181())
 		return;
 
 	dummy_flush_tlb_a15_erratum();
 	this_cpu = get_cpu();
-	a15_erratum_get_cpumask(this_cpu, mm, &mask);
+	for_each_online_cpu(cpu) {
+		if (cpu == this_cpu)
+			continue;
+		/*
+		 * We only need to send an IPI if the other CPUs are running
+		 * the same ASID as the one being invalidated. There is no
+		 * need for locking around the active_asids check since the
+		 * switch_mm() function has at least one dmb() (as required by
+		 * this workaround) in case a context switch happens on
+		 * another CPU after the condition below.
+		 */
+		if (atomic64_read(&mm->context.id) ==
+		    atomic64_read(&per_cpu(active_asids, cpu)))
+			cpumask_set_cpu(cpu, &mask);
+	}
 	smp_call_function_many(&mask, ipi_flush_tlb_a15_erratum, NULL, 1);
 	put_cpu();
 }
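The restored loop open-codes the CPU filtering that 3.10.2 had moved into a15_erratum_get_cpumask(): only CPUs whose active ASID matches the context being invalidated receive the IPI. A minimal userspace sketch of that filtering logic; the CPU count and ASID values are invented for illustration:

#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 4

/* Stand-in for the per-CPU active_asids counters. */
static uint64_t active_asids[NR_CPUS] = { 0x101, 0x102, 0x101, 0x103 };

int main(void)
{
	uint64_t target = 0x101;	/* mm->context.id being invalidated */
	int this_cpu = 0;		/* CPU doing the flush; skipped below */
	unsigned long mask = 0;		/* stand-in for cpumask_t */

	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		if (cpu == this_cpu)
			continue;
		if (active_asids[cpu] == target)
			mask |= 1UL << cpu;	/* cpumask_set_cpu() */
	}
	printf("IPI mask: 0x%lx\n", mask);	/* expect 0x4: cpu 2 only */
	return 0;
}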
2 changes: 1 addition & 1 deletion arch/arm/kernel/smp_twd.c
@@ -120,7 +120,7 @@ static int twd_rate_change(struct notifier_block *nb,
 	 * changing cpu.
 	 */
 	if (flags == POST_RATE_CHANGE)
-		on_each_cpu(twd_update_frequency,
+		smp_call_function(twd_update_frequency,
 			(void *)&cnd->new_rate, 1);
 
 	return NOTIFY_OK;
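Worth noting when reading this hunk: on_each_cpu() runs the callback on the calling CPU as well, while smp_call_function() (the 3.10.1 form restored here) skips the caller, so the local TWD is left unrescaled. A toy stand-in showing the difference; the CPU loop is a made-up model, not kernel code:

#include <stdio.h>

#define NR_CPUS 4

static void twd_update_frequency(int cpu, void *rate)
{
	printf("cpu%d: twd rate now %lu\n", cpu, *(unsigned long *)rate);
}

int main(void)
{
	unsigned long new_rate = 400000000UL;
	int this_cpu = 0;

	/* smp_call_function() semantics: every online CPU but the caller. */
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu != this_cpu)
			twd_update_frequency(cpu, &new_rate);

	/* on_each_cpu() would additionally run it locally: */
	twd_update_frequency(this_cpu, &new_rate);
	return 0;
}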
8 changes: 4 additions & 4 deletions arch/arm/mach-shmobile/setup-emev2.c
@@ -287,14 +287,14 @@ static struct gpio_em_config gio3_config = {
 static struct resource gio3_resources[] = {
 	[0] = {
 		.name	= "GIO_096",
-		.start	= 0xe0050180,
-		.end	= 0xe00501ab,
+		.start	= 0xe0050100,
+		.end	= 0xe005012b,
 		.flags	= IORESOURCE_MEM,
 	},
 	[1] = {
 		.name	= "GIO_096",
-		.start	= 0xe00501c0,
-		.end	= 0xe00501df,
+		.start	= 0xe0050140,
+		.end	= 0xe005015f,
 		.flags	= IORESOURCE_MEM,
 	},
 	[2] = {
2 changes: 1 addition & 1 deletion arch/arm/mach-shmobile/setup-r8a73a4.c
@@ -62,7 +62,7 @@ enum { SCIFA0, SCIFA1, SCIFB0, SCIFB1, SCIFB2, SCIFB3 };
 static const struct plat_sci_port scif[] = {
 	SCIFA_DATA(SCIFA0, 0xe6c40000, gic_spi(144)), /* SCIFA0 */
 	SCIFA_DATA(SCIFA1, 0xe6c50000, gic_spi(145)), /* SCIFA1 */
-	SCIFB_DATA(SCIFB0, 0xe6c20000, gic_spi(148)), /* SCIFB0 */
+	SCIFB_DATA(SCIFB0, 0xe6c50000, gic_spi(145)), /* SCIFB0 */
 	SCIFB_DATA(SCIFB1, 0xe6c30000, gic_spi(149)), /* SCIFB1 */
 	SCIFB_DATA(SCIFB2, 0xe6ce0000, gic_spi(150)), /* SCIFB2 */
 	SCIFB_DATA(SCIFB3, 0xe6cf0000, gic_spi(151)), /* SCIFB3 */
55 changes: 10 additions & 45 deletions arch/arm/mm/context.c
@@ -40,43 +40,19 @@
  * non 64-bit operations.
  */
 #define ASID_FIRST_VERSION	(1ULL << ASID_BITS)
-#define NUM_USER_ASIDS		ASID_FIRST_VERSION
+#define NUM_USER_ASIDS		(ASID_FIRST_VERSION - 1)
+
+#define ASID_TO_IDX(asid)	((asid & ~ASID_MASK) - 1)
+#define IDX_TO_ASID(idx)	((idx + 1) & ~ASID_MASK)
 
 static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
 static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
 static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
 
-static DEFINE_PER_CPU(atomic64_t, active_asids);
+DEFINE_PER_CPU(atomic64_t, active_asids);
 static DEFINE_PER_CPU(u64, reserved_asids);
 static cpumask_t tlb_flush_pending;
 
-#ifdef CONFIG_ARM_ERRATA_798181
-void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
-			     cpumask_t *mask)
-{
-	int cpu;
-	unsigned long flags;
-	u64 context_id, asid;
-
-	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
-	context_id = mm->context.id.counter;
-	for_each_online_cpu(cpu) {
-		if (cpu == this_cpu)
-			continue;
-		/*
-		 * We only need to send an IPI if the other CPUs are
-		 * running the same ASID as the one being invalidated.
-		 */
-		asid = per_cpu(active_asids, cpu).counter;
-		if (asid == 0)
-			asid = per_cpu(reserved_asids, cpu);
-		if (context_id == asid)
-			cpumask_set_cpu(cpu, mask);
-	}
-	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
-}
-#endif
-
 #ifdef CONFIG_ARM_LPAE
 static void cpu_set_reserved_ttbr0(void)
 {
@@ -154,16 +130,7 @@ static void flush_context(unsigned int cpu)
 			asid = 0;
 		} else {
 			asid = atomic64_xchg(&per_cpu(active_asids, i), 0);
-			/*
-			 * If this CPU has already been through a
-			 * rollover, but hasn't run another task in
-			 * the meantime, we must preserve its reserved
-			 * ASID, as this is the only trace we have of
-			 * the process it is still running.
-			 */
-			if (asid == 0)
-				asid = per_cpu(reserved_asids, i);
-			__set_bit(asid & ~ASID_MASK, asid_map);
+			__set_bit(ASID_TO_IDX(asid), asid_map);
 		}
 		per_cpu(reserved_asids, i) = asid;
 	}
@@ -202,19 +169,17 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
 		/*
 		 * Allocate a free ASID. If we can't find one, take a
 		 * note of the currently active ASIDs and mark the TLBs
-		 * as requiring flushes. We always count from ASID #1,
-		 * as we reserve ASID #0 to switch via TTBR0 and indicate
-		 * rollover events.
+		 * as requiring flushes.
 		 */
-		asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
+		asid = find_first_zero_bit(asid_map, NUM_USER_ASIDS);
 		if (asid == NUM_USER_ASIDS) {
 			generation = atomic64_add_return(ASID_FIRST_VERSION,
 							 &asid_generation);
 			flush_context(cpu);
-			asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
+			asid = find_first_zero_bit(asid_map, NUM_USER_ASIDS);
 		}
 		__set_bit(asid, asid_map);
-		asid |= generation;
+		asid = generation | IDX_TO_ASID(asid);
 		cpumask_clear(mm_cpumask(mm));
 	}
 
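The restored ASID_TO_IDX()/IDX_TO_ASID() pair keeps ASID #0 out of the allocatable range by offsetting bitmap indices by one, where the 3.10.2 code instead skips bit 0 in find_next_zero_bit(). A standalone sketch of the round trip, assuming the non-LPAE ASID_BITS = 8 and the ASID_MASK definition from this file (verify against your tree):

#include <inttypes.h>
#include <stdio.h>

/* Mirrors the non-LPAE definitions in arch/arm/mm/context.c. */
#define ASID_BITS		8
#define ASID_MASK		((~0ULL) << ASID_BITS)
#define ASID_TO_IDX(asid)	((asid & ~ASID_MASK) - 1)
#define IDX_TO_ASID(idx)	((idx + 1) & ~ASID_MASK)

int main(void)
{
	/* Bitmap index 0 maps to ASID 1: ASID 0 is never handed out. */
	for (uint64_t idx = 0; idx < 3; idx++) {
		uint64_t asid = IDX_TO_ASID(idx);
		printf("idx %" PRIu64 " -> asid %" PRIu64 " -> idx %" PRIu64 "\n",
		       idx, asid, ASID_TO_IDX(asid));
	}
	return 0;
}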
2 changes: 1 addition & 1 deletion arch/arm/mm/init.c
@@ -735,7 +735,7 @@ void __init mem_init(void)
 
 #ifdef CONFIG_SA1111
 	/* now that our DMA memory is actually so designated, we can free it */
-	free_reserved_area(__va(PHYS_OFFSET), swapper_pg_dir, 0, NULL);
+	free_reserved_area(__va(PHYS_PFN_OFFSET), swapper_pg_dir, 0, NULL);
 #endif
 
 	free_highpages();
1 change: 0 additions & 1 deletion arch/c6x/mm/init.c
@@ -18,7 +18,6 @@
 #include <linux/initrd.h>
 
 #include <asm/sections.h>
-#include <asm/uaccess.h>
 
 /*
  * ZERO_PAGE is a special page that is used for zero-initialized
9 changes: 3 additions & 6 deletions arch/parisc/include/asm/special_insns.h
@@ -32,12 +32,9 @@ static inline void set_eiem(unsigned long val)
 	cr;	\
 })
 
-#define mtsp(val, cr) \
-	{ if (__builtin_constant_p(val) && ((val) == 0)) \
-	 __asm__ __volatile__("mtsp %%r0,%0" : : "i" (cr) : "memory"); \
-	else \
-	 __asm__ __volatile__("mtsp %0,%1" \
+#define mtsp(gr, cr) \
+	__asm__ __volatile__("mtsp %0,%1" \
 		: /* no outputs */ \
-		: "r" (val), "i" (cr) : "memory"); }
+		: "r" (gr), "i" (cr) : "memory")
 
 #endif /* __PARISC_SPECIAL_INSNS_H */
5 changes: 2 additions & 3 deletions arch/parisc/include/asm/tlbflush.h
@@ -63,14 +63,13 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
 static inline void flush_tlb_page(struct vm_area_struct *vma,
 	unsigned long addr)
 {
-	unsigned long flags, sid;
+	unsigned long flags;
 
 	/* For one page, it's not worth testing the split_tlb variable */
 
 	mb();
-	sid = vma->vm_mm->context;
+	mtsp(vma->vm_mm->context,1);
 	purge_tlb_start(flags);
-	mtsp(sid, 1);
 	pdtlb(addr);
 	pitlb(addr);
 	purge_tlb_end(flags);
2 changes: 1 addition & 1 deletion arch/parisc/kernel/cache.c
@@ -440,8 +440,8 @@ void __flush_tlb_range(unsigned long sid, unsigned long start,
 	else {
 		unsigned long flags;
 
-		purge_tlb_start(flags);
 		mtsp(sid, 1);
+		purge_tlb_start(flags);
 		if (split_tlb) {
 			while (npages--) {
 				pdtlb(start);
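Both parisc hunks above move the mtsp() space-register write back out from under purge_tlb_start(); the 3.10.2 code performed it inside the locked region. A userspace analogue of the two orderings, with all names invented: the mutex stands in for purge_tlb_start()/purge_tlb_end() and current_space for the %sr1 space register.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t purge_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long current_space;

/* 3.10.2 ordering: the space register is written under the lock. */
static void purge_locked(unsigned long sid)
{
	pthread_mutex_lock(&purge_lock);	/* purge_tlb_start() */
	current_space = sid;			/* mtsp(sid, 1) */
	/* ... pdtlb()/pitlb() would run here ... */
	pthread_mutex_unlock(&purge_lock);	/* purge_tlb_end() */
}

/* Reverted 3.10.1 ordering: the write happens before the lock is
 * taken, leaving a window in which another flush on the same CPU
 * could change current_space before the purge uses it. */
static void purge_unlocked_set(unsigned long sid)
{
	current_space = sid;			/* mtsp(sid, 1) */
	pthread_mutex_lock(&purge_lock);	/* purge_tlb_start() */
	/* ... pdtlb()/pitlb() would run here ... */
	pthread_mutex_unlock(&purge_lock);	/* purge_tlb_end() */
}

int main(void)
{
	purge_locked(1);
	purge_unlocked_set(2);
	printf("space register stand-in: %lu\n", current_space);
	return 0;
}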