Initial fastmem support

* PCSX2 fastmem depression

* Move away from PCSX2 fastmem

* Add enum_flag_ops.hpp

* Finally building on Windows

* Almost got a PoC

* Fix arm64 builds

* This somehow works

* This also works...

* Properly fix fastmem

* Add free region manager

* Update boost

* Add ScopeExit

* Comment out asserts on Linux/Mac/Android

* Comment out ASSERT_MSG asserts too

* Fix derp

* Attempt to fix Android

* Disable fastmem on Android

* Fix Android again maybe pt 2

* android pls

* AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA

* AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA

* Update host_memory.cpp

* Properly reset memory arena on reset

* Proper ashmem code for Android

* more

* Add temporary Android buildjet script for faster prototype builds

* Fix fastmem (again)

* Clean up shared memory
wheremyfoodat 2024-12-03 22:51:46 +02:00 committed by GitHub
parent 28461a1d44
commit c088911168
18 changed files with 2139 additions and 13 deletions


@@ -0,0 +1,55 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <mutex>
#include <boost/icl/interval_set.hpp>
namespace Common {
class FreeRegionManager {
public:
explicit FreeRegionManager() = default;
~FreeRegionManager() = default;
void SetAddressSpace(void* start, size_t size) {
this->FreeBlock(start, size);
}
std::pair<void*, size_t> FreeBlock(void* block_ptr, size_t size) {
std::scoped_lock lk(m_mutex);
// Check to see if we are adjacent to any regions.
auto start_address = reinterpret_cast<uintptr_t>(block_ptr);
auto end_address = start_address + size;
auto it = m_free_regions.find({start_address - 1, end_address + 1});
// If we are, join with them, ensuring we stay in bounds.
if (it != m_free_regions.end()) {
start_address = std::min(start_address, it->lower());
end_address = std::max(end_address, it->upper());
}
// Free the relevant region.
m_free_regions.insert({start_address, end_address});
// Return the adjusted pointers.
block_ptr = reinterpret_cast<void*>(start_address);
size = end_address - start_address;
return {block_ptr, size};
}
void AllocateBlock(void* block_ptr, size_t size) {
std::scoped_lock lk(m_mutex);
auto address = reinterpret_cast<uintptr_t>(block_ptr);
m_free_regions.subtract({address, address + size});
}
private:
std::mutex m_mutex;
boost::icl::interval_set<uintptr_t> m_free_regions;
};
} // namespace Common
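
A minimal usage sketch of the manager for context (the function name, arena pointer, and sizes below are illustrative and not part of this commit; it assumes the header above is included, whose path is not shown in this diff): the whole placeholder arena is registered as free, mappings carve pieces out of it with AllocateBlock, and FreeBlock hands back the coalesced free region when they are released.
void TrackArena(void* arena_base) {
    Common::FreeRegionManager regions;
    regions.SetAddressSpace(arena_base, 64 * 1024 * 1024); // the whole 64 MiB arena starts out free
    // Claim 4 KiB at the start of the arena for a mapping.
    regions.AllocateBlock(arena_base, 0x1000);
    // Releasing it merges the block with any adjacent free region; the returned
    // pair is the coalesced free block (pointer and size).
    auto [ptr, size] = regions.FreeBlock(arena_base, 0x1000);
    (void)ptr;
    (void)size;
}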


@@ -0,0 +1,75 @@
// SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <memory>
#include "enum_flag_ops.hpp"
#include "helpers.hpp"
#include "host_memory/virtual_buffer.h"
namespace Common {
enum class MemoryPermission : u32 {
Read = 1 << 0,
Write = 1 << 1,
ReadWrite = Read | Write,
Execute = 1 << 2,
};
DECLARE_ENUM_FLAG_OPERATORS(MemoryPermission)
/**
* A low level linear memory buffer, which supports multiple mappings
* Its purpose is to rebuild a given sparse memory layout, including mirrors.
*/
class HostMemory {
public:
explicit HostMemory(size_t backing_size_, size_t virtual_size_, bool useFastmem);
~HostMemory();
/**
 * Copy constructor and assignment are deleted for now. If implemented, they should
 * return a copy of the buffer without the mappings.
 * TODO: Implement them with COW if needed.
 */
HostMemory(const HostMemory& other) = delete;
HostMemory& operator=(const HostMemory& other) = delete;
/**
* Move constructors. They will move the buffer and the mappings to the new object.
*/
HostMemory(HostMemory&& other) noexcept;
HostMemory& operator=(HostMemory&& other) noexcept;
void Map(size_t virtual_offset, size_t host_offset, size_t length, MemoryPermission perms, bool separate_heap);
void Unmap(size_t virtual_offset, size_t length, bool separate_heap);
void Protect(size_t virtual_offset, size_t length, MemoryPermission perms);
void EnableDirectMappedAddress();
void ClearBackingRegion(size_t physical_offset, size_t length, u32 fill_value);
[[nodiscard]] u8* BackingBasePointer() noexcept { return backing_base; }
[[nodiscard]] const u8* BackingBasePointer() const noexcept { return backing_base; }
[[nodiscard]] u8* VirtualBasePointer() noexcept { return virtual_base; }
[[nodiscard]] const u8* VirtualBasePointer() const noexcept { return virtual_base; }
bool IsInVirtualRange(void* address) const noexcept { return address >= virtual_base && address < virtual_base + virtual_size; }
private:
size_t backing_size{};
size_t virtual_size{};
// Low level handler for the platform dependent memory routines
class Impl;
std::unique_ptr<Impl> impl;
u8* backing_base{};
u8* virtual_base{};
size_t virtual_base_offset{};
// Fallback if fastmem is not supported on this platform
std::unique_ptr<Common::VirtualBuffer<u8>> fallback_buffer;
};
} // namespace Common
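
A hedged sketch of how such an arena is typically driven (the constants and call sites are illustrative, not taken from this commit): the same backing offset can be mapped at more than one virtual offset to build mirrors, after which guest accesses resolve to VirtualBasePointer() plus the guest address.
void FastmemSketch() {
    constexpr size_t backingSize = 256 * 1024 * 1024;          // backing store size (illustrative)
    constexpr size_t virtualSize = 4ull * 1024 * 1024 * 1024;  // placeholder arena size (illustrative)
    Common::HostMemory mem(backingSize, virtualSize, /*useFastmem=*/true);
    using MP = Common::MemoryPermission;
    // Map the first backing page at two different virtual offsets to create a mirror.
    mem.Map(0x00000000, 0, 0x1000, MP::ReadWrite, false);
    mem.Map(0x10000000, 0, 0x1000, MP::ReadWrite, false);
    // Guest reads/writes can now be serviced directly through the fastmem base pointer.
    u8* fastmem_base = mem.VirtualBasePointer();
    (void)fastmem_base;
}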


@@ -0,0 +1,78 @@
// SPDX-FileCopyrightText: 2014 Citra Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <utility>
namespace detail {
template <class F>
class ScopeGuard {
ScopeGuard(const ScopeGuard&) = delete;
ScopeGuard& operator=(const ScopeGuard&) = delete;
private:
F f;
bool active;
public:
constexpr ScopeGuard(F f_) : f(std::move(f_)), active(true) {}
constexpr ~ScopeGuard() {
if (active) {
f();
}
}
constexpr void Cancel() { active = false; }
constexpr ScopeGuard(ScopeGuard&& rhs) : f(std::move(rhs.f)), active(rhs.active) { rhs.Cancel(); }
ScopeGuard& operator=(ScopeGuard&& rhs) = delete;
};
template <class F>
constexpr ScopeGuard<F> MakeScopeGuard(F f) {
return ScopeGuard<F>(std::move(f));
}
enum class ScopeGuardOnExit {};
template <typename F>
constexpr ScopeGuard<F> operator+(ScopeGuardOnExit, F&& f) {
return ScopeGuard<F>(std::forward<F>(f));
}
} // namespace detail
#define CONCATENATE_IMPL(s1, s2) s1##s2
#define CONCATENATE(s1, s2) CONCATENATE_IMPL(s1, s2)
#ifdef __COUNTER__
#define ANONYMOUS_VARIABLE(pref) CONCATENATE(pref, __COUNTER__)
#else
#define ANONYMOUS_VARIABLE(pref) CONCATENATE(pref, __LINE__)
#endif
/**
* This macro is similar to SCOPE_EXIT, except the object is caller managed. This is intended to be
* used when the caller might want to cancel the ScopeExit.
*/
#define SCOPE_GUARD detail::ScopeGuardOnExit() + [&]()
/**
* This macro allows you to conveniently specify a block of code that will run on scope exit. Handy
* for doing ad-hoc clean-up tasks in a function with multiple returns.
*
* Example usage:
* \code
* const int saved_val = g_foo;
* g_foo = 55;
* SCOPE_EXIT{ g_foo = saved_val; };
*
* if (Bar()) {
* return 0;
* } else {
* return 20;
* }
* \endcode
*/
#define SCOPE_EXIT auto ANONYMOUS_VARIABLE(SCOPE_EXIT_STATE_) = SCOPE_GUARD
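
The cancellable SCOPE_GUARD variant has no inline example, so here is a small sketch (DoInit and ReleaseResources are hypothetical placeholders, not functions from this commit): the guard rolls the work back on every early return unless Cancel() is called once the operation succeeds.
bool DoInit();          // placeholder
void ReleaseResources(); // placeholder
bool TryInit() {
    auto cleanup = SCOPE_GUARD { ReleaseResources(); }; // runs on every exit path...
    if (!DoInit()) {
        return false; // ...including this early return
    }
    cleanup.Cancel(); // success: keep the resources, skip the rollback
    return true;
}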


@@ -0,0 +1,68 @@
// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include "helpers.hpp"
#include <utility>
namespace Common {
void* AllocateMemoryPages(std::size_t size) noexcept;
void FreeMemoryPages(void* base, std::size_t size) noexcept;
template <typename T>
class VirtualBuffer final {
public:
// TODO: Uncomment this and change Common::PageTable::PageInfo to be trivially constructible
// using std::atomic_ref once libc++ has support for it
// static_assert(
// std::is_trivially_constructible_v<T>,
// "T must be trivially constructible, as non-trivial constructors will not be executed "
// "with the current allocator");
constexpr VirtualBuffer() = default;
explicit VirtualBuffer(std::size_t count) : alloc_size{count * sizeof(T)} {
base_ptr = reinterpret_cast<T*>(AllocateMemoryPages(alloc_size));
}
~VirtualBuffer() noexcept { FreeMemoryPages(base_ptr, alloc_size); }
VirtualBuffer(const VirtualBuffer&) = delete;
VirtualBuffer& operator=(const VirtualBuffer&) = delete;
VirtualBuffer(VirtualBuffer&& other) noexcept
: alloc_size{std::exchange(other.alloc_size, 0)}, base_ptr{std::exchange(other.base_ptr, nullptr)} {}
VirtualBuffer& operator=(VirtualBuffer&& other) noexcept {
alloc_size = std::exchange(other.alloc_size, 0);
base_ptr = std::exchange(other.base_ptr, nullptr);
return *this;
}
void resize(std::size_t count) {
const auto new_size = count * sizeof(T);
if (new_size == alloc_size) {
return;
}
FreeMemoryPages(base_ptr, alloc_size);
alloc_size = new_size;
base_ptr = reinterpret_cast<T*>(AllocateMemoryPages(alloc_size));
}
[[nodiscard]] constexpr const T& operator[](std::size_t index) const { return base_ptr[index]; }
[[nodiscard]] constexpr T& operator[](std::size_t index) { return base_ptr[index]; }
[[nodiscard]] constexpr T* data() { return base_ptr; }
[[nodiscard]] constexpr const T* data() const { return base_ptr; }
[[nodiscard]] constexpr std::size_t size() const { return alloc_size / sizeof(T); }
private:
std::size_t alloc_size{};
T* base_ptr{};
};
} // namespace Common
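
For reference, a short sketch of how the fallback buffer behaves (the function name and the 16/32 MiB figures are illustrative): it acts like a page-backed array, and resize() frees the old pages and maps new ones rather than copying the contents.
void FallbackSketch() {
    Common::VirtualBuffer<u8> fallback(16 * 1024 * 1024); // 16 MiB of freshly mapped pages
    fallback[0] = 0xFF;                 // element access like a plain array
    u8* raw = fallback.data();          // raw pointer into the page allocation
    (void)raw;
    fallback.resize(32 * 1024 * 1024);  // discards the old contents and allocates fresh pages
}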